text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` """Mainly Edited for private usage by: Ioannis Agriomallos Ioanna Mitsioni License: BSD 3 clause ============= CURRENT CODE USAGE ============= Current code trains MLP Classifiers, to classify force input samples as stable (0) or slip (1) ---- Input -> Input samples originate from optoforce sensors and are 3D (fx,fy,fz) and come from 2 different datasets, one training, containing several surfaces as well as slip-stable occurrences, and one validation, containing 1 surface with slip-stable occurrences on a completely unseen task-setup. ---- Input transformation -> Several pre-features can be taken from these inputs, but here |f| is kept. -> Several time and frequency domain features are extracted from pre-feature windows. (implemented in 'featext.py') These windows have size w and are shifted by s on each sample -> Then a feature selection-ranking is performed using MutualVariableInformation -> Finally PCA is performed to keep a reduced set among the best selected features ---- Training of ML Classifiers -> Several MLP Classifiers are trained for all combinations of selected featuresets-datasets ---- Results -> Stats of classification results are kept inside each .npz along with the respective trained model """ print(__doc__) import time start_time = time.time() import numpy as np from ml_training import * import matplotlib.pyplot as plt %matplotlib inline # %matplotlib qt # inline (suitable for ipython only, shown inside browser!) or qt (suitable in general, shown in external window!) 
from matplotlib.colors import ListedColormap import matplotlib.image as mpimg from mpl_toolkits.mplot3d import Axes3D ``` ## Prepare Config struct class to pass to the ml class, responsible for all function initializations ``` class struct: def __init__(self): ####### TRAINING DEFAULTS self.cv = KFold(n_splits=5,random_state=42) self.scaler = StandardScaler() ; self.decomp = PCA(n_components=20) self.names = ["NearNb", "RBFSVM1", "MLP1", "RandFor"] self.classifiers = [KNeighborsClassifier(5), SVC(gamma='auto', C=1), MLPClassifier(solver='lbfgs',alpha=1e-4,hidden_layer_sizes=(10,10),random_state=1,verbose=True), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)] self.download = 1 # Download pre-computed (1) data or compute them all anew (0) self.delete_big_features = 0 # Delete (1) or keep (0) computed big-in-size features, # helping mainly to avoid several computations when recomputing features ############ INITIALISATION PARAMETERS ############ self.window, self.shift = 1024, 20 self.samplesperdataset = 10000 self.havelabel = 1 self.returntime = 0 self.featlabel = 0 # 0: all features, 1: temporal, 2: frequency, 3: FFT only self.magnFFT = 0 # 0: FFT in magnitude format, 1: FFT in real and imag format, self.featall = 0 # 0: all, 1: feat1 (phinyomark's), 2: feat2 (golz's) self.CV = 5 # cross validation checks self.numfeat = 10 # number of features to show self.nfeat = 1000 # number of features to keep ###### Initialize necessary names and paths self.datapath = 'data/' self.datafile = self.datapath+'dataset.npz' self.validfile = self.datapath+'validation.mat' self.featpath = self.datapath+'features/'+str(self.window)+'_'+str(self.shift)+'/' self.allfeatpath = self.featpath+'AllFeatures/' self.prefeatname = 'prefeatures'+'_'+str(self.window)+'_'+str(self.shift)+'_'+str(self.samplesperdataset) self.prefeatfile = self.featpath+self.prefeatname+'.npz' self.featname = 'features'+'_'+str(self.window)+'_'+str(self.shift)+'_'+str(self.samplesperdataset) 
self.featfile = self.featpath+self.featname+'.npz' self.validfeatname = 'valid'+self.featname self.validfeatfile = self.featpath+self.validfeatname+'.npz' self.surffile = self.featpath+self.featname+'_2fing_6surf.npz' self.XYfile = self.featpath+self.featname+'_XY.npz' self.XYsplitfile = self.featpath+self.featname+'_XYsplit.npz' self.validsurffile = self.featpath+self.validfeatname+'_2fing_6surf.npz' self.validXYfile = self.featpath+self.validfeatname+'_XY.npz' self.validXYsplitfile = self.featpath+self.validfeatname+'_XYsplit.npz' self.respath = self.datapath+'results' self.toolfile = self.datapath+'bargraph.zip' self.toolpath = self.datapath+'bargraph-rel_4_8/' self.tool = './'+self.toolpath+'bargraph.pl' ######### INITIALIZE OBJECT-STRUCT WITH PARAMETERS AND PASS THEM TO ML MODULE ######## c = struct() m = ml(c) ``` ## DOWNLOAD NECESSARY FILES ``` download_required_files() ``` ## TRAINING PROCEDURE ``` # necessary steps before training f,l,fd,member,m1,m2 = data_prep(c.datafile) # read input force and labels prefeat = compute_prefeat(f) # compute corresponding prefeatures features, labels = feature_extraction(prefeat,member,c.featfile) # feature extraction from prefeatures avg_feat_comp_time(prefeat) # average feature extraction time new_labels = label_cleaning(prefeat,labels,member) # trim labels, around change points X,Y,Yn,Xsp,Ysp = computeXY(features,labels,new_labels,m1,m2, c.XYfile,c.XYsplitfile) # compute data and labels, trimmed and untrimmed surf, surfla = computeXY_persurf(Xsp,Ysp,c.surffile) # compute per surface data and labels # training and offline testing train_1_surface(surf,surfla) # training of all combinations per 1 surface train_2_surface(surf,surfla) # training of all combinations per 2 surfaces train_3_surface(surf,surfla) # training of all combinations per 3 surfaces train_4_surface(surf,surfla) # training of all combinations per 4 surfaces train_5_surface(surf,surfla) # training of all combinations per 5 surfaces ``` ## RESULT REPORTING 
``` # generate files with stats bargraph_perf_gen1(6) bargraph_perf_gen2(6) bargraph_perf_gen3(6) bargraph_perf_gen4(6) bargraph_perf_gen5(6) # use the bargraph tool to plot graphs from generated files # -left column cross-accuracy (trained on one, tested on all the others), # -right column self-accuracy (trained and tested on the same) # -each row i represents training only with i surfaces. # -each stack represents a training group, each bar represents a subfeatureset(AFFT,FREQ,TIME,BOTH) # -blue,green,yellow,red : TP,TN,FN,FP plt.figure(figsize=(20,40)) for i in range(5): make_bargraphs_from_perf(i) ``` ## ONLINE TESTING PROCEDURE ``` # same necessary steps as in training for data preparation f,l,fd,member,m1,m2 = data_prep(c.validfile) prefeat = compute_prefeat(f) features, labels = feature_extraction(prefeat, member, c.validfeatfile, 'validfeat_') new_labels = label_cleaning(prefeat,labels,member) X,Y,Yn,Xsp,Ysp = computeXY(features,labels,new_labels,m1,m2,c.validXYfile,c.validXYsplitfile) surf, surfla = computeXY_persurf(Xsp,Ysp,c.validsurffile) ``` ## VISUALIZING ONLINE TESTING PROCEDURE ``` window=c.window subfeats = ['AFFT','FREQ','TIME','BOTH'] feats = ['fnorm','ftfn','fnormftfn'] matplotlib.rcParams['text.usetex'] = True fileid = filename1(0,3,0,5) fileidb = filename1(0,0,0,5) fileid5 = filename5(0,3,0,1,2,3,4,5) fileid5b = filename5(0,0,0,1,2,3,4,5) model = np.load(fileid)['model'][0] modelb = np.load(fileidb)['model'][0] model5 = np.load(fileid5)['model'][0] model5b = np.load(fileid5b)['model'][0] Yout = model.predict(X[0]) Youtb = modelb.predict(Xsp[0][:,-window-2:-window/2-1]) Yout5 = model5.predict(Xsp[0]) Yout5b = model5b.predict(Xsp[0][:,-window-2:-window/2-1]) print Yout.shape, Yout5.shape, Yout5b.shape plt.rc('text', usetex=True) plt.rc('axes', linewidth=2) plt.rc('font', weight='bold') plt.rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath'] offset = 2000-window endset = 2650 skipf = 20 skipy = 15 ax = plt.figure(figsize=(20,10)) 
tf = np.linalg.norm(f[0][offset+window::skipf,:3][:endset],axis=1) p1, = plt.plot(tf/max(tf),linewidth=5) ty = Yout[offset/skipf:][:endset]+0.02 print tf.shape, ty.shape p = plt.scatter(range(len(tf))[::skipy],ty[::skipy],color='red',s=30) plt.hold plt.text(100, 0.15, r'\textbf{Stable}', ha="center", va="center", rotation=0, size=25) plt.text(1000, 0.85, r'\textbf{Slip}', ha="center", va="center", rotation=0, size=25) plt.annotate('', fontsize=10, xy=(100, 0.05), xytext=(100, 0.12), arrowprops=dict(facecolor='black', shrink=0.05)) plt.annotate('', xy=(1000, 0.98), xytext=(1000, 0.9), arrowprops=dict(facecolor='black', shrink=0.05)) plt.text(400, 0.55, r'\textbf{P1}', ha="center", va="center", rotation=0, size=25) plt.axvline(x=810,linestyle='dashed',color='black',linewidth=5) plt.text(1000, 0.55, r'\textbf{P2}', ha="center", va="center", rotation=0, size=25) plt.axvline(x=1200,linestyle='dashed',color='black',linewidth=5) plt.text(1250, 0.55, r'\textbf{P3}', ha="center", va="center", rotation=0, size=25) plt.axvline(x=1335,linestyle='dashed',color='black',linewidth=5) plt.text(1385, 0.25, r'\textbf{P4}', ha="center", va="center", rotation=0, size=25) plt.axvline(x=1445,linestyle='dashed',color='black',linewidth=5) plt.text(1650, 0.55, r'\textbf{P1}', ha="center", va="center", rotation=0, size=25) plt.axvline(x=1830,linestyle='dashed',color='black',linewidth=5) plt.text(2000, 0.55, r'\textbf{P2}', ha="center", va="center", rotation=0, size=25) plt.axvline(x=2200,linestyle='dashed',color='black',linewidth=5) plt.text(2250, 0.55, r'\textbf{P3}', ha="center", va="center", rotation=0, size=25) plt.axvline(x=2330,linestyle='dashed',color='black',linewidth=5) plt.text(2385, 0.25, r'\textbf{P4}', ha="center", va="center", rotation=0, size=25) plt.axvline(x=2440,linestyle='dashed',color='black',linewidth=5) plt.text(2540, 0.55, r'\textbf{P1}', ha="center", va="center", rotation=0, size=25) plt.xlabel(r't ($1e^{-2} sec$)',fontsize=35) # plt.yticks([]) 
plt.legend([p1,p],[r'$|\textbf{f}|$',r'\textbf{out1}'],loc=2, prop={'size': 35}) plt.tick_params(labelsize=20) plt.tight_layout() savefig(c.datapath+'validation.pdf', bbox_inches='tight') ``` ## Testing ATI Datasets (No scaling) ``` tsd = ['ati_new_fd1.0N_kp3.5_152Hz_validation', 'ati_new_fd1.5N_kp3_152Hz_validation', 'ati_new_fd1.0N_kp3.5_326Hz_validation', 'ati_new_fd1.5N_kp3_326Hz_validation', 'ati_new_fd1.0N_kp3.5_836Hz_validation', 'ati_new_fd1.5N_kp3.5_836Hz_validation', 'ati_new_fd1N_kp3_nofilt_validation', 'ati_new_fd1.5N_kp3_nofilt_validation'] for sc in [1.0]: print "-------- SCALING = ",sc,"--------" for i in range(len(tsd)): ####### NEWER TESTING DATA FROM ATI F/T SENSOR TRANSLATIONAL CASE prediction(tsd[i]+'.mat') ####### NEWER TESTING DATA FROM ATI F/T SENSOR ROTATIONAL CASE prediction(tsd[i]+'_rot.mat') ``` ## Check the training forces and compare them with testing ones (2.86 1.35 2.12 1.68) ``` printit = False f,_,_,_,_,_ = data_prep(c.datafile,printit=printit) # read training input force pf = compute_prefeat(f,printit=printit) # compute corresponding prefeatures fv,_,_,_,_,_ = data_prep(c.validfile,printit=printit) # read validation input force pfv = compute_prefeat(fv,printit=printit) # compute corresponding prefeatures atifiles = ['ati_new_fd1.0N_kp3.5_152Hz_validation.mat', 'ati_new_fd1.0N_kp3.5_152Hz_validation_rot.mat', 'ati_new_fd1.5N_kp3_152Hz_validation.mat', 'ati_new_fd1.5N_kp3_152Hz_validation_rot.mat', 'ati_new_fd1.0N_kp3.5_326Hz_validation.mat', 'ati_new_fd1.0N_kp3.5_326Hz_validation_rot.mat', 'ati_new_fd1.5N_kp3_326Hz_validation.mat', 'ati_new_fd1.5N_kp3_326Hz_validation_rot.mat', 'ati_new_fd1.0N_kp3.5_836Hz_validation.mat', 'ati_new_fd1.0N_kp3.5_836Hz_validation_rot.mat', 'ati_new_fd1.5N_kp3.5_836Hz_validation.mat', 'ati_new_fd1.5N_kp3.5_836Hz_validation_rot.mat', 'ati_new_fd1N_kp3_nofilt_validation.mat', 'ati_new_fd1N_kp3_nofilt_validation_rot.mat', 'ati_new_fd1.5N_kp3_nofilt_validation.mat', 
'ati_new_fd1.5N_kp3_nofilt_validation_rot.mat'] atiftr = [] atifrt = [] for filen in atifiles: tf,_,_,_,_,_ = data_prep(c.datapath+filen,k=1,printit=printit) ptf = compute_prefeat(tf,printit=printit) if filen[-7:-4] == 'rot': atifrt.append(ptf) else: atiftr.append(ptf) atiftr = np.array(atiftr).flatten() atifrt = np.array(atifrt).flatten() plist = [pf, pfv, atiftr, atifrt] pname = ['train', 'valid','atitran','atirot'] print pf.shape, pfv.shape, atiftr.shape, atifrt.shape mf, mfst, mfsl = np.zeros((4,4)), np.zeros((4,4)), np.zeros((4,4)) print 'datasetname: [all/stable/slip:[0:mean, 1:max, 2:min, 3:std]]' for ind in range(len(plist)): pt = plist[ind] # 0:mean, 1:max, 2:min, 3:std for p in range(len(pt)): mf[ind,0] += np.mean(pt[p][:,0]) mf[ind,1] += np.max(pt[p][:,0]) mf[ind,2] += np.min(pt[p][:,0]) mf[ind,3] += np.std(pt[p][:,0]) stind = pt[p][:,1]==0 slind = pt[p][:,1]==1 mfst[ind,0] += np.mean(pt[p][stind,0]) mfst[ind,1] += np.max(pt[p][stind,0]) mfst[ind,2] += np.min(pt[p][stind,0]) mfst[ind,3] += np.std(pt[p][stind,0]) mfsl[ind,0] += np.mean(pt[p][slind,0]) mfsl[ind,1] += np.max(pt[p][slind,0]) mfsl[ind,2] += np.min(pt[p][slind,0]) mfsl[ind,3] += np.std(pt[p][slind,0]) mf[ind,0] /= len(plist[ind]) mf[ind,1] /= len(plist[ind]) mf[ind,2] /= len(plist[ind]) mf[ind,3] /= len(plist[ind]) mfst[ind,0] /= len(plist[ind]) mfst[ind,1] /= len(plist[ind]) mfst[ind,2] /= len(plist[ind]) mfst[ind,3] /= len(plist[ind]) mfsl[ind,0] /= len(plist[ind]) mfsl[ind,1] /= len(plist[ind]) mfsl[ind,2] /= len(plist[ind]) mfsl[ind,3] /= len(plist[ind]) print pname[ind], mf[ind], mfst[ind], mfsl[ind] ```
github_jupyter
``` import ltfatpy import scipy as sc import numpy as np import librosa L = 2**12 signal, fs = ltfatpy.signals.gspi.gspi() signal, fs = ltfatpy.signals.greasy.greasy() print(signal.shape) signal = signal[:L] import IPython.display from IPython.display import Audio print(fs) display(Audio(signal, rate=fs)) import matplotlib import matplotlib.pyplot as plt font = {'family' : 'DejaVu Sans', 'weight' : 'normal', 'size' : 16} matplotlib.rc('font', **font) matplotlib.rcParams['image.cmap'] = 'inferno' from matplotlib.gridspec import GridSpec M = 1024 a = 8 dgt_lambda_2 = ltfatpy.dgtreal(signal, {'name': 'gauss', 'tfr': a*M/4/L}, int(a/2), int(M/2))[0] dgt_lambda_2 = dgt_lambda_2/np.abs(dgt_lambda_2).max() dgt_lambda_8 = ltfatpy.dgtreal(signal, {'name': 'gauss', 'tfr': a*M/L}, a, M)[0] dgt_lambda_8 = dgt_lambda_8/np.abs(dgt_lambda_8).max() dgt_lambda_32 = ltfatpy.dgtreal(signal, {'name': 'gauss', 'tfr': a*M*4/L}, a*2, M*2)[0] dgt_lambda_32 = dgt_lambda_32/np.abs(dgt_lambda_32).max() dgt_lambda_128 = ltfatpy.dgtreal(signal, {'name': 'gauss', 'tfr': a*M*16/L}, a*4, M*4)[0] dgt_lambda_128 = dgt_lambda_128/np.abs(dgt_lambda_128).max() extent = [0, 4200, 0, 1] plt.figure(figsize=(9,9)); gs = GridSpec(2, 2, width_ratios=[1, 1.22]) gs.update(wspace=0.08, hspace=0.08) ax = plt.subplot(gs[0]); ltfatpy.plotdgtreal(dgt_lambda_2, a/2, M/2, dynrange=60, colorbar=False); plt.tick_params( axis='x', which='both', bottom=False, labelbottom=False) plt.xlabel('') plt.text(0.5, 0.9, '$\lambda = 2$', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.8)) coef = 20. 
* np.log10(np.abs(dgt_lambda_2) + np.finfo(np.float64).tiny) maxclim = np.nanmax(coef) clim = (maxclim - 60, maxclim) np.clip(coef, clim[0], clim[1], out=coef) axins = ax.inset_axes([0.5, 0.4, 0.47, .4]) axins.imshow(coef, extent=extent, origin="lower", clim=clim) # sub region of the original image x1, x2, y1, y2 = 1000, 1500, .1, .2 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') axins.set_aspect('auto') ax.indicate_inset_zoom(axins, alpha=1) ax = plt.subplot(gs[1]); ltfatpy.plotdgtreal(dgt_lambda_8, a, M, dynrange=60); plt.tick_params( axis='both', which='both', left=False, bottom=False, labelleft=False, labelbottom=False) plt.ylabel('') plt.xlabel('') plt.text(0.5, 0.9, '$\lambda = 8$', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.8)) coef = 20. * np.log10(np.abs(dgt_lambda_8) + np.finfo(np.float64).tiny) maxclim = np.nanmax(coef) clim = (maxclim - 60, maxclim) np.clip(coef, clim[0], clim[1], out=coef) axins = ax.inset_axes([0.5, 0.4, 0.47, .4]) axins.imshow(coef, extent=extent, origin="lower", clim=clim) # sub region of the original image x1, x2, y1, y2 = 1000, 1500, .1, .2 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') axins.set_aspect('auto') ax.indicate_inset_zoom(axins, alpha=1) ax = plt.subplot(gs[2]); ltfatpy.plotdgtreal(dgt_lambda_32, a*2, M*2, dynrange=60, colorbar=False); plt.text(0.5, 0.9, '$\lambda = 32$', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.8)) coef = 20. 
* np.log10(np.abs(dgt_lambda_32) + np.finfo(np.float64).tiny) maxclim = np.nanmax(coef) clim = (maxclim - 60, maxclim) np.clip(coef, clim[0], clim[1], out=coef) axins = ax.inset_axes([0.5, 0.4, 0.47, .4]) axins.imshow(coef, extent=extent, origin="lower", clim=clim) # sub region of the original image x1, x2, y1, y2 = 1000, 1500, .1, .2 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') axins.set_aspect('auto') ax.indicate_inset_zoom(axins, alpha=1) ax = plt.subplot(gs[3]); ltfatpy.plotdgtreal(dgt_lambda_128, a*4, M*2, dynrange=60); plt.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected left=False, # ticks along the bottom edge are off right=False, # ticks along the top edge are off labelleft=False) # labels along the bottom edge are off plt.ylabel('') plt.text(0.5, 0.9, '$\lambda = 128$', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.8)) coef = 20. 
* np.log10(np.abs(dgt_lambda_128) + np.finfo(np.float64).tiny) maxclim = np.nanmax(coef) clim = (maxclim - 60, maxclim) np.clip(coef, clim[0], clim[1], out=coef) axins = ax.inset_axes([0.5, 0.4, 0.47, .4]) axins.imshow(coef, extent=extent, origin="lower", clim=clim) # sub region of the original image x1, x2, y1, y2 = 1000, 1500, .1, .2 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') axins.set_aspect('auto') ax.indicate_inset_zoom(axins, alpha=1) plt.savefig("spectrograms_lambda.pdf", bbox_inches='tight') M = 256 a = 32 g = {'name': 'gauss', 'tfr': a*M/L} dgt_red2 = ltfatpy.dgtreal(signal, g, a*2, int(M/2))[0] dgt_red2 = dgt_red2/np.abs(dgt_red2).max() dgt_red8 = ltfatpy.dgtreal(signal, g, a, M)[0] dgt_red8 = dgt_red8/np.abs(dgt_red8).max() dgt_red32 = ltfatpy.dgtreal(signal, g, int(a/2), M*2)[0] dgt_red32 = dgt_red32/np.abs(dgt_red32).max() dgt_red128 = ltfatpy.dgtreal(signal, g, int(a/4), M*4)[0] dgt_red128 = dgt_red128/np.abs(dgt_red128).max() extent = [0, 4200, 0, 1] plt.figure(figsize=(9,9)); gs = GridSpec(2, 2, width_ratios=[1, 1.22]) gs.update(wspace=0.08, hspace=0.08) ax = plt.subplot(gs[0]); ltfatpy.plotdgtreal(dgt_red2, a*2, M/2, dynrange=60, colorbar=False); plt.tick_params( axis='x', which='both', bottom=False, labelbottom=False) plt.xlabel('') plt.text(0.5, 0.9, '$D = 2$', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.8)) coef = 20. 
* np.log10(np.abs(dgt_red2) + np.finfo(np.float64).tiny) maxclim = np.nanmax(coef) clim = (maxclim - 60, maxclim) np.clip(coef, clim[0], clim[1], out=coef) axins = ax.inset_axes([0.5, 0.4, 0.47, .4]) axins.imshow(coef, extent=extent, origin="lower", clim=clim) # sub region of the original image x1, x2, y1, y2 = 1000, 1500, .1, .2 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') axins.set_aspect('auto') ax.indicate_inset_zoom(axins, alpha=1) ax = plt.subplot(gs[1]); image = ltfatpy.plotdgtreal(dgt_red8, a, M, dynrange=60); plt.tick_params( axis='both', which='both', left=False, bottom=False, labelleft=False, labelbottom=False) plt.ylabel('') plt.xlabel('') plt.text(0.5, 0.9, '$D = 8$', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.8)) coef = 20. * np.log10(np.abs(dgt_red8) + np.finfo(np.float64).tiny) maxclim = np.nanmax(coef) clim = (maxclim - 60, maxclim) np.clip(coef, clim[0], clim[1], out=coef) axins = ax.inset_axes([0.5, 0.4, 0.47, .4]) axins.imshow(coef, extent=extent, origin="lower", clim=clim) # sub region of the original image x1, x2, y1, y2 = 1000, 1500, .1, .2 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') axins.set_aspect('auto') ax.indicate_inset_zoom(axins, alpha=1) ax = plt.subplot(gs[2]); ltfatpy.plotdgtreal(dgt_red32, a/2, M*2, dynrange=60, colorbar=False); plt.text(0.5, 0.9, '$D = 32$', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.8)) coef = 20. 
* np.log10(np.abs(dgt_red32) + np.finfo(np.float64).tiny) maxclim = np.nanmax(coef) clim = (maxclim - 60, maxclim) np.clip(coef, clim[0], clim[1], out=coef) axins = ax.inset_axes([0.5, 0.4, 0.47, .4]) axins.imshow(coef, extent=extent, origin="lower", clim=clim) # sub region of the original image x1, x2, y1, y2 = 1000, 1500, .1, .2 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') axins.set_aspect('auto') ax.indicate_inset_zoom(axins, alpha=1) ax = plt.subplot(gs[3]); ltfatpy.plotdgtreal(dgt_red128, a/4, M*4, dynrange=60); plt.tick_params( axis='y', # changes apply to the x-axis which='both', # both major and minor ticks are affected left=False, # ticks along the bottom edge are off right=False, # ticks along the top edge are off labelleft=False) # labels along the bottom edge are off plt.ylabel('') plt.text(0.5, 0.9, '$D = 128$', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', alpha=0.8)) axins = ax.inset_axes([0.5, 0.4, 0.47, .4]) coef = 20. * np.log10(np.abs(dgt_red128) + np.finfo(np.float64).tiny) maxclim = np.nanmax(coef) clim = (maxclim - 60, maxclim) np.clip(coef, clim[0], clim[1], out=coef) axins.imshow(coef, extent=extent, origin="lower", clim=clim) # sub region of the original image x1, x2, y1, y2 = 1000, 1500, .1, .2 axins.set_xlim(x1, x2) axins.set_ylim(y1, y2) axins.set_xticklabels('') axins.set_yticklabels('') axins.set_aspect('auto') ax.indicate_inset_zoom(axins, alpha=1); plt.savefig("spectrograms_red.pdf", bbox_inches='tight') ```
github_jupyter
# tfdeepsurv for simulated data ## Introduction Let's use `tfdeepsurv` package to build a neural network for predicting hazard ratio. This notebook will show you how to build and train a neural network. ## Preparation For all things going well, you would be better to get acquainted with **Survival Analysis**. Otherwise, I suggest you read the [reference](https://lifelines.readthedocs.io/en/latest/Survival%20Analysis%20intro.html). ## Package installation Please follow the instructions on [README](../README.md) to install `tfdeepsurv` package. ## Get it started ### Obtain datasets ``` from tfdeepsurv.datasets import load_simulated_data ### generate simulated data (Pandas.DataFrame) # data configuration: # hazard ratio = 2000 # number of features = 10 # number of valid features = 2 # No. of training data = 2000 train_data = load_simulated_data(2000, N=2000, num_var=2, num_features=10, seed=1) # No. of training data = 800 test_data = load_simulated_data(2000, N=800, num_var=2, num_features=10, seed=1) train_data.head() ``` ### Dataset statistics ``` from tfdeepsurv.datasets import survival_stats survival_stats(train_data, t_col="t", e_col="e", plot=True) survival_stats(test_data, t_col="t", e_col="e", plot=True) ``` ### Survival data transfrom The transformed survival data contains an new label. Negtive values are considered as right censored, and positive values are considered as event occurrence. **NOTE**: In version 2.0, survival data must be transformed via `tfdeepsurv.datasets.survival_df`. ``` from tfdeepsurv.datasets import survival_df surv_train = survival_df(train_data, t_col="t", e_col="e", label_col="Y") surv_test = survival_df(test_data, t_col="t", e_col="e", label_col="Y") # columns 't' and 'e' are packed into an new column 'Y' surv_train.head() ``` ### Model initialization **NOTE:** You can freely change all hyper-parameters during model initialization or training as you want. 
All hyper-parameters is as follows: - `nn_config`: model configuration - `hidden_layers_nodes`: hidden layers configuration - `num_steps`: training steps Hyperparameters tuning can refer to README in directory `byopt`. ``` from tfdeepsurv import dsnn input_nodes = 10 hidden_layers_nodes = [6, 3, 1] # the arguments of dsnn can be obtained by Bayesian Hyperparameters Tuning nn_config = { "learning_rate": 0.7, "learning_rate_decay": 1.0, "activation": 'relu', "L1_reg": 3.4e-5, "L2_reg": 8.8e-5, "optimizer": 'sgd', "dropout_keep_prob": 1.0, "seed": 1 } # ESSENTIAL STEP: Pass arguments model = dsnn( input_nodes, hidden_layers_nodes, nn_config ) # ESSENTIAL STEP: Build Computation Graph model.build_graph() ``` ### Model training You can save your trained model by passing `save_model="file_name.ckpt"` or load your trained model by passing `load_model="file_name.ckpt"` ``` Y_col = ["Y"] X_cols = [c for c in surv_train.columns if c not in Y_col] # model saving and loading is also supported! # read comments of `train()` function if necessary. watch_list = model.train( surv_train[X_cols], surv_train[Y_col], num_steps=1900, num_skip_steps=100, plot=True ) ``` ### Model evaluation ``` print("CI on training data:", model.evals(surv_train[X_cols], surv_train[Y_col])) print("CI on test data:", model.evals(surv_test[X_cols], surv_test[Y_col])) ``` ### Model prediction Model prediction includes: - predicting hazard ratio or log hazard ratio - predicting survival function ``` # predict log hazard ratio print(model.predict(surv_test.loc[0:4, X_cols])) # predict hazard ratio print(model.predict(surv_test.loc[0:4, X_cols], output_margin=False)) # predict survival function model.predict_survival_function(surv_test.loc[0:4, X_cols], plot=True) ``` ### tf.session close To release resources, we use `model.close_session()` to close session in tensorflow! ``` model.close_session() ```
github_jupyter
# 2A.ml - Séries temporelles - correction Prédictions sur des séries temporelles. ``` from jyquickhelper import add_notebook_menu add_notebook_menu() %matplotlib inline ``` ## Une série temporelles On récupère le nombre de sessions d'un site web. ``` import pandas data = pandas.read_csv("xavierdupre_sessions.csv", sep="\t") data.set_index("Date", inplace=True) data.head() data.plot(figsize=(12,4)); data[-365:].plot(figsize=(12,4)); ``` ## Trends Fonction [detrend](http://www.statsmodels.org/dev/generated/statsmodels.tsa.tsatools.detrend.html#statsmodels.tsa.tsatools.detrend). ``` from statsmodels.tsa.tsatools import detrend notrend = detrend(data['Sessions']) data["notrend"] = notrend data["trend"] = data['Sessions'] - notrend data.tail() data.plot(y=["Sessions", "notrend", "trend"], figsize=(14,4)); ``` On essaye de calculer une tendance en minimisant : $Y=\alpha + \beta t + \gamma t^2$. ``` notrend2 = detrend(data['Sessions'], order=2) data["notrend2"] = notrend2 data["trend2"] = data["Sessions"] - data["notrend2"] data.plot(y=["Sessions", "notrend2", "trend2"], figsize=(14,4)); ``` On passe au log. ``` import numpy data["logSess"] = data["Sessions"].apply(lambda x: numpy.log(x+1)) lognotrend = detrend(data['logSess']) data["lognotrend"] = lognotrend data["logtrend"] = data["logSess"] - data["lognotrend"] data.plot(y=["logSess", "lognotrend", "logtrend"], figsize=(14,4)); ``` La série est assez particulière. Elle donne l'impression d'avoir un changement de régime. On extrait la composante saisonnière avec [seasonal_decompose](http://www.statsmodels.org/dev/generated/statsmodels.tsa.seasonal.seasonal_decompose.html#statsmodels.tsa.seasonal.seasonal_decompose). 
``` from statsmodels.tsa.seasonal import seasonal_decompose res = seasonal_decompose(data["Sessions"].values.ravel(), freq=7, two_sided=False) data["season"] = res.seasonal data["trendsea"] = res.trend data.plot(y=["Sessions", "season", "trendsea"], figsize=(14,4)); data[-365:].plot(y=["Sessions", "season", "trendsea"], figsize=(14,4)); res = seasonal_decompose(data["Sessions"].values.ravel() + 1, freq=7, two_sided=False, model='multiplicative') data["seasonp"] = res.seasonal data["trendseap"] = res.trend data[-365:].plot(y=["Sessions", "seasonp", "trendseap"], figsize=(14,4)); ``` ## Enlever la saisonnalité sans la connaître Avec [fit_seasons](https://github.com/welch/seasonal/blob/master/seasonal/seasonal.py#L25). ``` from seasonal import fit_seasons cv_seasons, trend = fit_seasons(data["Sessions"]) print(cv_seasons) # data["cs_seasons"] = cv_seasons data["trendcs"] = trend data[-365:].plot(y=["Sessions", "trendcs", "trendsea"], figsize=(14,4)); ``` ## Autocorrélograme On s'inspire de l'exemple : [Autoregressive Moving Average (ARMA): Sunspots data](http://www.statsmodels.org/dev/examples/notebooks/generated/tsa_arma_0.html). ``` import matplotlib.pyplot as plt from statsmodels.graphics.tsaplots import plot_acf, plot_pacf fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = plot_acf(data["Sessions"], lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = plot_pacf(data["Sessions"], lags=40, ax=ax2); ``` On retrouve bien une période de 7. 
## Changements de régime * [Gaussian HMM of stock data](https://hmmlearn.readthedocs.io/en/latest/auto_examples/plot_hmm_stock_analysis.html#sphx-glr-auto-examples-plot-hmm-stock-analysis-py) * [MixedLM](http://www.statsmodels.org/dev/generated/statsmodels.regression.mixed_linear_model.MixedLM.html) * [RLM](http://www.statsmodels.org/dev/examples/notebooks/generated/robust_models_0.html) * [Local Linear Trend](http://www.statsmodels.org/dev/examples/notebooks/generated/statespace_local_linear_trend.html) * [MarkovAutoregression](http://www.statsmodels.org/dev/generated/statsmodels.tsa.regime_switching.markov_autoregression.MarkovAutoregression.html)
github_jupyter
# Detection error analysis: progressively "fix" one detector error type at a
# time -- localization, duplicates, misses and background confusion -- and
# rebuild image-keyed lookup tables (TIDE/Hoiem-style error taxonomy).
import json


def compute_iou(box_a, box_b):
    """Return the IoU of two COCO-format boxes ``[x, y, width, height]``.

    BUG FIX: the original computed ``(x_right-x_left)*(y_bottom-y_top)``
    without clamping, so two disjoint boxes (both differences negative)
    produced a spurious positive "intersection".  Each side is clamped at
    zero here, and a zero union returns 0.0 instead of dividing by zero.
    """
    x_left = max(box_a[0], box_b[0])
    y_top = max(box_a[1], box_b[1])
    x_right = min(box_a[0] + box_a[2], box_b[0] + box_b[2])
    y_bottom = min(box_a[1] + box_a[3], box_b[1] + box_b[3])
    intersect = max(0.0, x_right - x_left) * max(0.0, y_bottom - y_top)
    union = box_a[2] * box_a[3] + box_b[2] * box_b[3] - intersect
    return intersect / float(union) if union > 0 else 0.0


def detections_by_score(dets):
    """Indices of ``dets`` ordered by descending confidence score
    (score ties broken by the higher list index first, as in the original)."""
    return [i for _, i in sorted(((d['score'], i) for i, d in enumerate(dets)),
                                 reverse=True)]


def fix_localization(data, data_valid):
    """Fix localization errors.

    For every detection (highest score first) find the best-overlapping,
    still-unclaimed ground-truth box of the same class:
      * 0.1 < IoU < 0.5 -> badly localized: keep the detection but snap its
        bbox to the ground-truth box, and claim that ground truth;
      * IoU >= 0.5      -> correctly localized: keep the detection as is.
    When every candidate ground truth is already claimed, fall back to the
    best overlap among claimed boxes (no re-claiming in that case).

    ``data`` is a flat list of detections; ``data_valid`` maps
    ``str(image_id) -> list of annotations`` and is mutated in place
    (claimed boxes get a ``'localization'`` flag).  Returns the refined
    detection list.
    """
    refined = []
    for det_index in detections_by_score(data):
        det = data[det_index]
        gts = data_valid[str(det['image_id'])]
        best_iou = best_iou_claimed = 0
        best_gt = best_gt_claimed = None
        for gt_index, annot in enumerate(gts):
            if annot['category_id'] != det['category_id']:
                continue
            iou = compute_iou(annot['bbox'], det['bbox'])
            if iou > best_iou and 'localization' not in gts[gt_index]:
                best_iou, best_gt = iou, gt_index
            elif iou > best_iou_claimed:
                best_iou_claimed, best_gt_claimed = iou, gt_index
        if best_gt is not None:
            if 0.1 < best_iou < 0.5:
                # Badly localized: keep score/class, replace the box by GT.
                refined.append({'image_id': det['image_id'],
                                'score': det['score'],
                                'category_id': det['category_id'],
                                'bbox': gts[best_gt]['bbox']})
                gts[best_gt]['localization'] = 1
            elif best_iou >= 0.5:
                refined.append(det)
                gts[best_gt]['localization'] = 1
        elif best_gt_claimed is not None:
            # All matching GTs already claimed: same rules, but no flagging.
            if 0.1 < best_iou_claimed < 0.5:
                refined.append({'image_id': det['image_id'],
                                'score': det['score'],
                                'category_id': det['category_id'],
                                'bbox': gts[best_gt_claimed]['bbox']})
            elif best_iou_claimed >= 0.5:
                refined.append(det)
    return refined


def fix_background(data, data_valid):
    """Fix background-confusion errors.

    Keeps only detections overlapping (IoU > 0.1) some same-class ground
    truth; each ground truth is claimed at most once via a ``'background'``
    flag.  Mirrors :func:`fix_localization` but never rewrites the bbox.
    (The original's ``(0.1<iou<0.5) or iou>=0.5`` is simply ``iou > 0.1``.)
    """
    refined = []
    for det_index in detections_by_score(data):
        det = data[det_index]
        gts = data_valid[str(det['image_id'])]
        best_iou = best_iou_claimed = 0
        best_gt = None
        for gt_index, annot in enumerate(gts):
            if annot['category_id'] != det['category_id']:
                continue
            iou = compute_iou(annot['bbox'], det['bbox'])
            if iou > best_iou and 'background' not in gts[gt_index]:
                best_iou, best_gt = iou, gt_index
            elif iou > best_iou_claimed:
                best_iou_claimed = iou
        if best_gt is not None:
            if best_iou > 0.1:
                gts[best_gt]['background'] = 1
                refined.append(det)
        elif best_iou_claimed > 0.1:
            refined.append(det)
    return refined


def _greedy_match(data, Img_valid, det_flag, make_record):
    """Greedily match unclaimed ground truths to unclaimed detections.

    Shared core of the duplicate-removal and miss-recovery passes (the
    original contained two near-identical copies of this loop).

    ``data`` maps image key -> detection list and ``Img_valid`` maps the
    same keys -> ground-truth list.  For each unseen GT, detections are
    tried in descending score order; a detection may be "stolen" by a
    different unseen GT of the same class when that one overlaps it with
    IoU >= 0.5 and more strongly than the current GT does.  Both dicts are
    mutated in place (``'seen_flag'`` on GTs, ``det_flag`` on detections).
    Returns one ``make_record(gt, det)`` per accepted match.
    """
    matched = []
    for image in Img_valid:
        for gt in Img_valid[image]:
            if 'seen_flag' in gt:
                continue
            for det_index in detections_by_score(data[image]):
                det = data[image][det_index]
                if gt['category_id'] != det['category_id'] or det_flag in det:
                    continue
                iou_this = compute_iou(gt['bbox'], det['bbox'])
                best_iou, best_other = iou_this, None
                for other_index, other in enumerate(Img_valid[image]):
                    if ('seen_flag' in other or other == gt
                            or other['category_id'] != det['category_id']):
                        continue
                    iou = compute_iou(other['bbox'], det['bbox'])
                    if iou > best_iou and iou >= 0.5:
                        best_iou, best_other = iou, other_index
                if best_other is not None:
                    # A different ground truth claims this detection; keep
                    # scanning further detections for the current GT.
                    Img_valid[image][best_other]['seen_flag'] = 1
                    matched.append(make_record(Img_valid[image][best_other], det))
                    det[det_flag] = 1
                elif iou_this >= 0.5:
                    gt['seen_flag'] = 1
                    matched.append(make_record(gt, det))
                    det[det_flag] = 1
                    break
    return matched


def remove_duplicates(data, Img_valid):
    """Duplicate-error pass: each GT absorbs at most one detection; the
    surviving detections are returned unchanged."""
    return _greedy_match(data, Img_valid, 'flag_dup', lambda gt, det: det)


def recover_misses(data, Img_valid):
    """Missed-detection pass.

    Matched detections are snapped to their ground-truth box (keeping the
    detection's score); afterwards every still-unseen ground truth is
    appended as a perfect, score-1 "detection"."""
    def gt_record(gt, det):
        return {'bbox': gt['bbox'], 'category_id': gt['category_id'],
                'score': det['score'], 'image_id': gt['image_id']}

    nms_data = _greedy_match(data, Img_valid, 'flag_det', gt_record)
    for image in Img_valid:
        for gt in Img_valid[image]:
            if 'seen_flag' not in gt:
                nms_data.append({'bbox': gt['bbox'],
                                 'category_id': gt['category_id'],
                                 'score': 1,
                                 'image_id': gt['image_id']})
    return nms_data


def detections_by_image(data, data_valid):
    """Map COCO image id -> list of its detections (``data_valid`` is a raw
    COCO ground-truth dict with an ``'images'`` list)."""
    return {img['id']: [d for d in data if d['image_id'] == img['id']]
            for img in data_valid['images']}


def annotations_by_image(data_valid):
    """Map COCO image id -> list of its ground-truth annotations."""
    return {img['id']: [a for a in data_valid['annotations']
                        if a['image_id'] == img['id']]
            for img in data_valid['images']}


if __name__ == "__main__":
    with open('/home/mehdi/invariance_data/Vert/Mask_Vert/Mask_LOC_IMG.json', 'r') as f:
        data = json.load(f)
    with open('/home/mehdi/invariance_data/annotations/coco_invariance_vertical_GT.json', 'r') as f:
        Img_valid = json.load(f)
    # NOTE(review): the original referenced `data_valid` without ever loading
    # it (its load statements were commented out) and used `data` both as a
    # flat list and as an image-keyed dict in different cells.  Point the
    # variables below at the intended files/structures before running:
    # with open('/home/mehdi/Evaluation_detection/coco/instances_val2017.json', 'r') as f:
    #     data_valid = json.load(f)
    # refined_data = fix_localization(data, data_valid)        # localization errors
    # nms_data = remove_duplicates(detections_by_image(data, data_valid), Img_valid)
    # nms_data = recover_misses(detections_by_image(data, data_valid), Img_valid)
    # refined_data = fix_background(data, data_valid)          # background confusion
    # Detect_imgkey = detections_by_image(data, data_valid)
    # GT_table = annotations_by_image(data_valid)
github_jupyter
# <center>Chapter 4</center> ## <center>Python - OOP</center> ### Example 4.1 ``` >>> isinstance(int, object) >>> issubclass(bool, int) >>> int.__bases__ >>> isinstance(int, object) ``` ### Example 4.2 ``` >>> bool.__mro__ ``` ### Example 4.3 ``` >>> type(bool) >>> type.__bases__ >>> isinstance(type, object) >>> issubclass(type, object) ``` ### Example 4.4 ``` >>> #function from a module ... >>> import math >>> type(math.sqrt) >>> #built-in function ... >>> type(id) >>> #user defined function ... >>> def hello(): ... print ('Hello World') ... >>> type(hello) >>> #module is also an object ... >>> type(math) ``` ### Example 4.5 ``` >>> #range is also an object ... >>> obj=range(10) >>> type(obj) >>> range.__bases__ ``` ### Example 4.6 ``` >>> #user defined class ... >>> class MyClass: ... pass ... >>> type(MyClass) >>> MyClass.__bases__ >>> issubclass(MyClass, object) ``` ### Example 4.7 ``` >>> class MyClass(object): ... pass ... >>> MyClass.__bases__ ``` ### Example 4.8 ``` >>> obj1=MyClass() >>> obj2=MyClass() >>> obj1.myname='Ashok' >>> setattr(obj1, 'myage',21) ``` ### Example 4.9 ``` >>> def aboutme(obj): ... print ('My name is {} and I am {} years old'.format(obj.myname,obj.myage)) ... 
>>> setattr(MyClass, 'about', aboutme) >>> obj1.about() obj1.myname ``` ### Example 4.11 ``` >>> from myclass1 import MyClass >>> obj1=MyClass() >>> obj1.myname >>> obj1.myage >>> obj1.about() ``` ### Example 4.13 ``` >>> from myclass2 import MyClass >>> obj1=MyClass('Ashok', 21) >>> obj2=MyClass('Asmita',20) >>> obj1.about() >>> obj2.about() ``` ### Example 4.14 ``` >>> setattr(obj1,'marks',50) >>> obj1.marks ``` ### Example 4.16 ``` >>> from myclass3 import MyClass >>> obj1=MyClass('Ashok', 21) >>> obj1.about() >>> setattr(obj1, 'marks', 50) #new attribute not allowed >>> setattr(obj1, 'myage', 25) #value of existing module can be modified >>> obj1.about() ``` ### Example 4.18 ``` >>> from myclass4 import MyClass >>> obj1=MyClass('Ashok',21) >>> obj1.getage() >>> obj1.setname('Amar') >>> obj1.about() ``` ### Example 4.19 ``` >>> obj1.myname >>> getattr(obj1,'myage') >>> obj1.myage=25 >>> setattr(obj1,'myname', 'Ashok') >>> obj1.about() ``` ### Example 4.20 ``` >>> #private variable in class ... >>> class tester: ... def __init__(self): ... 
self.__var=10 >>> t=tester() >>> t.__var ``` ### Example 4.22 ``` >>> from myclass5 import MyClass >>> obj1=MyClass('Ashok', 21) >>> obj1.about() #initial values of object's attributes >>> #change age property >>> obj1.age=30 >>> #access name property >>> obj1.name >>> obj1.about() #object's attributes after property changes ``` ### Example 4.27 ``` >>> from myclass6 import MyClass >>> obj1=MyClass('Ashok', 21) >>> obj1.about() #initial values of object's attributes >>> #change age property >>> obj1.age=30 >>> #access name property >>> obj1.name >>> obj1.about() ``` ### Example 4.29 ``` >>> from classattr import player >>> p1=player('Virat', 60) >>> p2=player('Rahul', 45) ``` ### Example 4.30 ``` >>> from classattr2 import player >>> p1=player('Virat',60) >>> p2=player('Rahul',45) >>> player.printtotal() >>> player.displaytotal() ``` ### Example 4.32 ``` >>> from inheritEllipse import ellipse >>> e1=ellipse(20,30) >>> e1.area() >>> e1.perimeter() >>> e1=ellipse(20,20) >>> e1.area() >>> e1.perimeter() ``` ### Example 4.34 ``` >>> from inheritEllipse import ellipse, circle >>> c1=circle(20) >>> c1.area() >>> c1.perimeter() ``` ### Example 4.36 ``` >>> dir(object) ``` ### Example 4.37 ``` >>> a=123 >>> a.__str__() ``` ### Example 4.38 ``` >>> a=123 >>> str(a) ``` ### Example 4.39 ``` >>> class MyClass: pass >>> obj1=MyClass() >>> setattr(obj1,'myname','Madhav') >>> #using __setattr__() method >>> obj1.__setattr__('myage',21) ``` ### Example 4.40 ``` >>> a.__dir__() ``` ### Example 4.41 ``` >>> a=20 >>> b=10 >>> a+b >>> a.__add__(b) >>> a.__mul__(b) >>> a*b >>> a>b >>> a.__le__(b) ``` ### Example 4.43 ``` >>> from timerclass import timer >>> t1=timer(2,45) >>> t2=timer(3,30) >>> t3=t1+t2 >>> print (t3) >>> t3=t1.__add__(t2) >>> print (t3) ```
github_jupyter
# Segmented Regression
# Fit a line to a moving window of holding-current sweeps and report the
# window with the steepest negative slope.
import numpy as np
import scipy.stats


def getMovingWindowSegments(data, windowSize):
    """Slide a window along *data* and return a list of overlapping
    segments, each of length *windowSize*.

    BUG FIX: the original allocated ``len(data) - windowSize`` segments and
    therefore silently dropped the final full window; a sequence of length
    n contains ``n - windowSize + 1`` full windows.
    """
    segmentCount = len(data) - windowSize + 1
    return [data[i:i + windowSize] for i in range(segmentCount)]


def getSingleSegmentSlope(segment, samplePeriod):
    """Return the slope of a least-squares line fitted to one segment.

    *samplePeriod* must be in minutes; the returned slope is units/min
    (pA/min for current data).
    """
    xs = np.arange(len(segment)) * samplePeriod
    slope, intercept, r, p, stdErr = scipy.stats.linregress(xs, segment)
    return slope


def getAllSegmentSlopes(segments, samplePeriod):
    """Return one slope per segment (see getSingleSegmentSlope)."""
    return [getSingleSegmentSlope(segment, samplePeriod) for segment in segments]


def findPeakNegativeSlope(slopes):
    """Return ``(index, value)`` of the most negative slope."""
    peakIndex = min(range(len(slopes)), key=lambda i: slopes[i])
    return peakIndex, slopes[peakIndex]


def main():
    # Plotting is only needed when run as a script, so matplotlib is
    # imported here rather than at module level (keeps the module importable
    # in headless environments).
    import matplotlib.pyplot as plt

    # Sample holding-current data (pA), one value per sweep.
    sweepCurrents = [-4.63465, -5.51081, -5.66655, -5.79674, -7.6818, -3.8968,
        -6.69579, -5.71098, -6.10336, -6.24999, -6.75877, -6.69165, -4.90577,
        -2.20656, 0.43463, -3.94292, -4.78706, -5.34989, -6.36574, -4.63652,
        -5.31478, -2.10792, -5.61191, -4.36993, -4.36985, -3.11592, -2.12315,
        -4.33662, -4.02691, -2.33449, -6.08645, -8.26997, -5.08048, -3.97689,
        -4.42698, -3.04758, -4.79167, -4.6762, -5.4704, -6.55669, -3.9469,
        -5.35243, -6.06626, -4.8891, -4.84388, -5.19486, -7.22855, -8.71245,
        -8.21538, -8.53336, -13.44261, -13.40911, -13.16543, -13.80675,
        -15.79994, -16.88096, -16.646, -17.51654, -18.83702, -19.76602,
        -20.98734, -19.5933, -17.60265, -19.83353, -17.38733, -20.43717,
        -18.97912, -18.89598, -21.83465, -21.5112, -20.73059, -20.95396,
        -17.39119, -19.00177, -17.78977, -19.49595, -20.53733, -20.35184,
        -20.44803, -18.31261, -20.20521, -17.43426, -19.37423, -18.71905,
        -21.69927, -19.36978, -17.57406, -17.88728, -18.6616, -18.69261,
        -18.60541, -19.35205, -18.19312, -19.84095, -19.69003, -19.24259,
        -19.01911, -21.18759, -12.80981, -20.64312, -20.95864, -18.32492,
        -20.93545, -20.20677, -19.91784, -20.88027, -19.61825, -18.63966,
        -19.68358, -17.61944, -17.39662, -19.35619, -19.50095, -20.77288,
        -18.49318, -15.97496, -15.10942, -17.82308, -18.22963, -17.02892,
        -17.62901, -16.23215, -16.86382, -16.66236, -17.03443, -17.1747,
        -16.72648, -12.3339, -17.41958, -15.19357, -16.1032, -15.99566,
        -14.69315, -15.98535, -16.32571, -14.60911, -16.44876, -15.93673,
        -15.74539, -16.03647, -15.1236, -13.85857, -14.54128, -15.55408,
        -14.56132, -15.37301, -14.16098, -11.41092, -15.53702, -14.24759]

    sweepPeriod = 10 / 60  # minutes per sweep
    sweepTimes = np.arange(len(sweepCurrents)) * sweepPeriod

    # Plot the raw data.
    plt.plot(sweepTimes, sweepCurrents, '.-')
    plt.grid(alpha=.5, ls='--')
    plt.ylabel("Current (pA)")
    plt.xlabel("Time (minutes)")
    plt.show()

    # Break the data into overlapping segments and fit each one.
    windowSize = 12  # in sweeps
    segments = getMovingWindowSegments(sweepCurrents, windowSize)
    slopesBySegment = getAllSegmentSlopes(segments, sweepPeriod)

    # Find the maximum negative slope.
    peakSlopeIndex, peakSlopeValue = findPeakNegativeSlope(slopesBySegment)
    # Each slope is attributed to the centre of its window.
    slopeTimeOffset = windowSize * sweepPeriod / 2
    slopeTimes = np.arange(len(slopesBySegment)) * sweepPeriod + slopeTimeOffset
    peakSlopeTime = slopeTimes[peakSlopeIndex]
    windowSizeMinutes = windowSize * sweepPeriod
    print(f"Peak negative slope ({windowSizeMinutes} minute window) " +
          f"is {round(peakSlopeValue, 3)} pA/min " +
          f"at {round(peakSlopeTime, 3)} min (index {peakSlopeIndex})")

    # Display findings: current & slope vs. time, peak window highlighted.
    ax1 = plt.subplot(211)
    plt.plot(sweepTimes, sweepCurrents, '.-')
    plt.xlabel("Time (minutes)")
    plt.ylabel("Current (pA)")
    plt.axvline(peakSlopeTime, ls='--', color='r', lw=1, alpha=.2)
    halfWindowTime = (windowSize * sweepPeriod) / 2
    plt.axvspan(peakSlopeTime - halfWindowTime, peakSlopeTime + halfWindowTime,
                color='r', alpha=.2)
    ax2 = plt.subplot(212, sharex=ax1)
    plt.plot(slopeTimes, slopesBySegment, '.-')
    plt.plot(peakSlopeTime, peakSlopeValue, 'r.', ms=15, alpha=.5)
    plt.xlabel("Time (minutes)")
    plt.ylabel("Slope (pA / min)")
    plt.axhline(0, ls='--', color='k', lw=1, alpha=.2)
    plt.axvline(peakSlopeTime, ls='--', color='r', lw=1, alpha=.2)
    plt.show()


if __name__ == "__main__":
    main()
github_jupyter
# Scrape all posts from daphnecaruanagalizia.com: walk the paginated index,
# fetch each post's body text, and store URL/title/date/text in a CSV.
#
# Refactor notes: the original ran its network requests at module import
# time and duplicated the post-parsing logic between an exploratory cell and
# the final loop; the parsing now lives in small reusable functions and the
# crawl only runs under the __main__ guard.
from bs4 import BeautifulSoup
import requests
import pandas as pd
import progressbar

BASE_URL = "https://daphnecaruanagalizia.com"
# The anchor title is always "Permanent Link to <post title>"; the original
# stripped it with a magic slice title[18:] in one place and a replace() in
# another -- use one named prefix instead.
TITLE_PREFIX = "Permanent Link to "


def parse_post_summary(post):
    """Extract metadata from one ``<div class="postmaster">`` element of an
    index page.

    Returns ``(url, title, date_y, date_m, date_t, post_id)`` where the
    year/month come from the post URL (``/YYYY/MM/slug``) and ``date_t`` is
    the human-readable timestamp ("n.a." when the element is missing, which
    happens for one post on page 127).
    """
    url = post.a["href"]
    path = url.replace(BASE_URL + "/", "")
    date_y, date_m = path[:4], path[5:7]
    try:
        date_t = post.find(class_="time").get_text()
    except AttributeError:
        date_t = "n.a."
    title = post.a["title"].replace(TITLE_PREFIX, "")
    post_id = post.get('data-postid')
    return url, title, date_y, date_m, date_t, post_id


def fetch_post_text(url):
    """Download a single post and return its body text, newlines flattened
    to spaces so the text fits one CSV cell."""
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    return soup.find('div', {'class': 'entry'}).text.strip().replace('\n', ' ')


def scrape_pages(first=1, last=10):
    """Scrape index pages ``first .. last-1`` and return one dict per post
    with the same columns the original notebook produced."""
    rows = []
    bar = progressbar.ProgressBar()
    for page_number in bar(range(first, last)):
        page = BASE_URL + "/page/" + str(page_number)
        response = requests.get(page)
        soup = BeautifulSoup(response.text, 'html.parser')
        for post in soup.find_all("div", class_="postmaster"):
            url, title, date_y, date_m, date_t, post_id = parse_post_summary(post)
            rows.append({'Link': url,
                         'Title': title,
                         'Txt': fetch_post_text(url),
                         'Date_1': date_y,
                         'Date_2': date_m,
                         'Date_3': date_t,
                         'ID_post': post_id,
                         'ID_page': page_number})
    return rows


if __name__ == "__main__":
    # Showcase for the first 9 pages; change `last` to 1443 to crawl the
    # whole site (as noted in the original notebook).
    rows = scrape_pages(1, 10)
    df = pd.DataFrame(rows)
    # Tab-separated utf-16 keeps spreadsheet tools happy with the
    # non-ASCII characters in the post bodies (as in the original).
    df.to_csv('daphne.csv', sep='\t', encoding='utf-16')
    print(df[0:5])
github_jupyter
### Install dependencies
Here we install Openslide: https://openslide.org/ This library allows us to load the whole-slide images
```
!apt-get install openslide-tools --assume-yes
!apt-get install python-openslide --assume-yes
!pip install --upgrade setuptools==45.3
!pip install openslide-python==1.1.1
```
### Helper Classes
```
import os
import numpy as np
import openslide
from shapely.geometry import Polygon, MultiPolygon
from json import JSONEncoder, loads, dumps


class Slide:
    """Implements common functions to most of the standard slides.
    It is based on openslide.
    """

    def __init__(self, filename):
        self.path = filename                        # path to the slide file
        self.slide = openslide.OpenSlide(filename)  # underlying openslide handle

    @property
    def file_name(self):
        # Base name of the slide file (without directories).
        return os.path.split(self.path)[1]

    @property
    def size(self):
        """Returns the size (width, height) in pixels of the maximum magnification level """
        return self.slide.level_dimensions[0]

    @property
    def level_count(self):
        """Returns the number of zoom levels"""
        return self.slide.level_count

    def get_downsample_factor(self, level):
        """Returns the downsample factor of a level. For example the downsample
        factor of level 0 is always 1, level 1 is usually 2... """
        return self.slide.level_downsamples[level]

    def get_level_size(self, level):
        """Returns the size in pixels (width, height) of a specific level of the slide """
        return self.slide.level_dimensions[level]

    def get_patch(self, x, y, size, level):
        """Returns an RBG array of a rectangular region"""
        # read_region wants the location in level-0 coordinates, hence the
        # multiplication by the downsample factor of the requested level.
        ds = self.get_downsample_factor(level)
        return np.array(self.slide.read_region(location=(int(ds * x), int(ds * y)),
                                               size=size, level=level).convert(
            mode='RGB'))


class Patch:
    """Class representing a rectangular region of a slide"""

    def __init__(self, slide, x, y, width, height, level):
        self.x, self.y = x, y
        self.width, self.height = width, height
        self.level = level
        self.slide = slide
        level_width, level_height = slide.get_level_size(level)
        # Patch footprint in [0, 1]-normalised slide coordinates.
        # NOTE(review): the bottom bound adds self.width instead of
        # self.height -- for non-square patches this looks like a bug; confirm.
        self._polygon = Polygon.from_bounds(
            self.x / level_width,
            self.y / level_height,
            (self.x + self.width) / level_width,
            (self.y + self.width) / level_height)
        rel_width = self.width / level_width
        rel_height = self.height / level_height
        # Central 60% of the patch (20% margin on every side), used to decide
        # how much of the patch centre is covered by annotations.
        self._internal_polygon = Polygon.from_bounds(
            self.x / level_width + 0.2 * rel_width,
            self.y / level_height + 0.2 * rel_height,
            (self.x + self.width) / level_width - 0.2 * rel_width,
            (self.y + self.height) / level_height - 0.2 * rel_height)

    @property
    def polygon(self):
        return self._polygon

    @property
    def internal_polygon(self):
        return self._internal_polygon

    @property
    def size(self):
        return (self.width, self.height)

    def plot(self):
        # NOTE(review): relies on a global `plt`, which is only imported in a
        # later notebook cell.
        points = np.array(self.polygon.exterior.coords)
        plt.plot(points[:,0], points[:,1])

    def get_image(self):
        # NOTE(review): Slide.get_patch multiplies by the downsample factor
        # again, so `ds` appears to be applied twice here -- confirm intended.
        ds = self.slide.get_downsample_factor(self.level)
        return self.slide.get_patch(int(self.x * ds), int(self.y * ds), self.size, self.level)

    def get_intersection(self, annotations):
        """Fraction of the patch area covered by the union of the annotations."""
        area = 0.0  # NOTE(review): unused
        pol = Polygon()
        for annotation in annotations:
            pol = pol.union(annotation.polygon.intersection(self.polygon))
        return pol.area / self.polygon.area

    def get_internal_intersection(self, annotations):
        """Same as get_intersection, but w.r.t. the internal (shrunken) polygon."""
        area = 0.0  # NOTE(review): unused
        pol = Polygon()
        for annotation in annotations:
            pol = pol.union(annotation.polygon.intersection(self.internal_polygon))
        return pol.area / self.internal_polygon.area
```
### Mount google drive
```
from google.colab import drive
drive.mount('/content/drive/')
```
### Patches Dataset + Data augmentation
a) Try some data augmentation
```
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset


class PatchesDataset(Dataset):
    # Torch dataset yielding (transformed patch image, binary label) for the
    # rows of `df`; slides are opened lazily and cached in `self.slides`.

    def __init__(self, df, transforms=None):
        self.df = df.copy()
        self.transforms = transforms
        self.slides = {}  # slide path -> open Slide handle (cache)

    def load_image(self, row):
        if row['slide'] not in self.slides:
            self.slides[row['slide']] = Slide(row['slide'])
        slide = self.slides[row['slide']]
        x, y = row['x'], row['y']
        width, height = row['width'], row['height']
        level = row['level']
        return slide.get_patch(x, y, (width, height), level)

    def __getitem__(self, item):
        row = self.df.iloc[item]
        image = self.load_image(row)
        if self.transforms is not None:
            image = self.transforms(image)
        return image, torch.tensor(data=row['target']).long()

    def __len__(self):
        return len(self.df)


def set_targets(df):
    # A patch counts as tumor when its internal region overlaps tumour
    # annotations by more than a tiny epsilon.
    df['target'] = (df['internal-tumor'] > 1e-3).astype(int)


results_path = "/content/drive/My Drive/Results"
dataset_path = "/content/drive/My Drive/Dataset A3"
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
################## Change these lines ####################################
# TODO: Try some data augmentation
data_transforms = {
    'train': transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]),
    'val': transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
}
################## Finish changing here ##################################
datasets = {}
max_samples = 20000
for phase in ['train', 'val']:
    df = pd.read_csv(os.path.join(dataset_path, f'{phase}.csv'))
    set_targets(df)
    print('Tumor patches: %d' % len(df[df['target'] == 1]))
    print('Normal patches: %d' % len(df[df['target'] == 0]))
    normal_df = df[df['target'] == 0]
    tumor_df = df[df['target'] == 1]
    # Cap the (much larger) normal class; sampled with replacement.
    normal_df = normal_df.sample(min(len(normal_df), max_samples), replace=True)
    df = pd.concat([normal_df, tumor_df])
    datasets[phase] = PatchesDataset(df, transforms=data_transforms[phase])
# NOTE(review): indexes datasets['train'][0] ten times -- presumably meant
# datasets['train'][i] to preview ten different patches.
for i in range(10):
    image, target = datasets['train'][0]
    plt.title(target.item())
    # Undo the normalisation (x*std + mean) before displaying.
    plt.imshow(np.clip(std * (np.array(image)).transpose(1, 2, 0) + mean, 0, 1))
    plt.show()
```
### Data loaders
```
from torch.utils.data import DataLoader


def visualize_batch(images, labels, ncols=8):
    # Show a grid of de-normalised patches with their labels as titles.
    nrows = (len(images) + ncols - 1) // ncols
    plt.figure(figsize=(15, 2 * nrows))
    for i in range(len(images)):
        plt.subplot(nrows, ncols, i+1)
        plt.title(labels[i])
        plt.imshow(np.clip(std * np.array(images[i]).transpose(1, 2, 0) + mean, 0, 1))
        plt.axis('off')
    plt.show()


loaders = {}
batch_size = 64
workers = 8
for part in ['train', 'val']:
    loaders[part] = DataLoader(
        datasets[part],
        batch_size=batch_size,
        shuffle=True,
        num_workers=workers,
        drop_last=True)
images, labels = next(iter(loaders['train']))
visualize_batch(images, labels)
```
### Create the neural network, set the hyperparameters, the loss function and the optimizer
```
################## Change these lines ####################################
# TODO: Create the neural network, set the hyperparameters, the loss function and the optimizer
################## Finish changing here ##################################
```
## Train the network
```
import IPython.display

####### statistics part #########
loss_history = {'train': [], 'val': []}
acc_history = {'train': [], 'val': []}
batch_loss = []
batch_acc = []


def plot_results(epoch, batch, total_batches):
    # Live training dashboard: per-step loss/accuracy plus per-epoch curves.
    IPython.display.clear_output(wait=True)
    plt.figure(figsize=(15, 4))
    plt.subplot(1, 4, 1)
    plt.title('Batch loss (epoch: %d, batch: %d/%d)' % (epoch, batch, total_batches))
    plt.plot(batch_loss)
    plt.yscale('log')
    plt.xlabel('step')
    plt.subplot(1, 4, 2)
    plt.title('Batch accuracy')
    plt.plot(batch_acc)
    plt.xlabel('step')
    plt.subplot(1, 4, 3)
    plt.title('loss')
    for phase in ['train', 'val']:
        plt.plot(loss_history[phase], label=phase)
    plt.yscale('log')
    plt.xlabel('epoch')
    plt.legend()
    plt.subplot(1, 4, 4)
    plt.xlabel('epoch')
    if len(acc_history['train']) > 0:
        plt.title('accuracy ' + ','.join(['%s: %.3f' % (phase, acc_history[phase][-1]) for phase in ['train', 'val']]))
    else:
        plt.title('accuracy')
    for phase in ['train', 'val']:
        plt.plot(acc_history[phase], label=phase)
    plt.legend()
    plt.tight_layout()
    plt.show(block=False)
####### end statistics part #########

################## Change these lines ####################################
# TODO: Train the network
################## Finish changing here ##################################
```
## Confusion Matrix
```
import numpy as np

model.to(device)
model.eval()
# NOTE(review): np.int was removed in NumPy 1.24; plain `int` is equivalent.
matrix = np.zeros(shape=(2, 2), dtype=np.int)
bad_patches = []
bad_labels = []
th = 0.5  # NOTE(review): unused
model.eval()
with torch.set_grad_enabled(False):
    matrix = np.zeros(shape=(2, 2))  # re-initialised; the dtyped one above is discarded
    # iterate on batches
    for samples, targets in loaders['val']:
        samples, targets = samples.to(device), targets.to(device)
        outputs = model(samples)
        pred = outputs.max(dim=1)[1]
        for i in range(targets.shape[0]):
            matrix[targets[i]][pred[i]] += 1
            # NOTE(review): every sample is appended, not only misclassified
            # ones -- "bad_patches" therefore holds all validation patches.
            bad_patches.append(samples[i].cpu())
            bad_labels.append(pred[i].cpu())
print(matrix)
```
## Prediction examples
```
label_names=['normal', 'tumor']
model.eval()
with torch.set_grad_enabled(False):
    samples, targets = next(iter(loaders['val']))
    outputs = model(samples.to(device))
    pred = outputs.max(dim=1)[1]
    # NOTE(review): len(labels) refers to the batch loaded in the earlier
    # "Data loaders" cell; len(targets) is presumably what was meant.
    visualize_batch(samples, ['%s/%s' % (label_names[targets[i]], label_names[pred[i]]) for i in range(len(labels))])
```
## Probabilities of some samples
```
import torch.nn.functional as F
import numpy as np

model.eval()
with torch.set_grad_enabled(False):
    samples, targets = next(iter(loaders['val']))
    samples, targets = samples.to(device), targets.to(device)
    outputs = model(samples)
    for i in range(min(10, targets.shape[0])):
        plt.figure(figsize=(8, 2))
        plt.subplot(1, 2, 1)
        plt.imshow(np.clip(std * np.array(samples[i].cpu()).transpose(1, 2, 0) + mean, 0, 1))
        plt.axis('off')
        plt.subplot(1, 2, 2)
        # Per-class probabilities for one sample, shown as a bar chart.
        p = F.softmax(outputs[i].detach().squeeze()).cpu()
        plt.bar(range(len(p)), p)
        plt.xticks(range(len(p)), label_names, rotation='vertical')
        plt.show()
```
## Visualize wrongly classified patches
```
visualize_batch(bad_patches[:32], bad_labels[:32])
```
## Generate heatmaps
a) In order to make this code work the dataset should also return the index of the patch in the dataframe
```
from PIL import Image


def generate_heatmap(slide, patches_dataset):
    # Paint, for every patch, the model's confidence into the central half of
    # its footprint at a low-resolution level of the slide.
    model.eval()
    with torch.set_grad_enabled(False):
        level = slide.level_count - 2
        heatmap_width, heatmap_height = slide.get_level_size(level)
        heatmap = np.zeros((heatmap_height, heatmap_width))
        print(heatmap.shape)
        loader = DataLoader(patches_dataset, batch_size=64)
        # NOTE(review): unpacking (images, labels, idx) requires the
        # dataset's __getitem__ to also return the dataframe index (see the
        # markdown note above); the PatchesDataset defined earlier does not.
        for images, labels, idx in loader:
            images, labels = images.to(device), labels.to(device)
            outputs = torch.nn.functional.softmax(model(images), 1).detach().cpu()
            probs = outputs.max(dim=1)[0]  # confidence of the predicted class
            for i in range(len(idx)):
                patch = patches_dataset.df.iloc[idx[i].item()]
                patch_level = patch['level']
                level_width, level_height = slide.get_level_size(patch_level)
                # Normalise the patch geometry, then rescale to heatmap pixels.
                x, y = patch['x']/level_width, patch['y']/level_height
                patch_width, patch_height = patch['width']/level_width, patch['height']/level_height
                patch_width = int(patch_width * heatmap_width)
                patch_height = int(patch_height * heatmap_height)
                # Central 50% of the footprint (25% margin per side).
                x_start = int(x * heatmap_width) + int(0.25 * patch_width)
                y_start = int(y * heatmap_height) + int(0.25 * patch_height)
                x_end = int(x * heatmap_width) + int(0.75 * patch_width)
                y_end = int(y * heatmap_height) + int(0.75 * patch_height)
                # Keep the maximum confidence where patches overlap.
                heatmap[y_start: y_end + 1, x_start: x_end + 1] = np.maximum(heatmap[y_start: y_end + 1, x_start: x_end + 1], probs[i])
        return heatmap


for part in ['train', 'val', 'test']:
    df = pd.read_csv(os.path.join(dataset_path, f'{part}.csv'))
    set_targets(df)
    slides = df['slide'].unique()
    for slide_path in slides:
        print(slide_path)
        slide = Slide(slide_path)
        slide_df = df[(df['slide'] == slide_path) & (df['source'] == 'tissue')]
        dataset = PatchesDataset(slide_df, data_transforms[part])
        heatmap = generate_heatmap(slide, dataset)
        plt.figure(figsize=(30, 20))
        plt.imshow(heatmap)
        plt.show()
        # Save the heatmap as an 8-bit grayscale PNG into the results folder.
        image = np.asarray(255 * heatmap).astype(np.uint8)
        heatmap_path = os.path.join(results_path, os.path.split(slide_path)[1].replace('.ndpi', '.png'))
        Image.fromarray(image, mode = 'L').save(heatmap_path)
```
github_jupyter
# Read Matrix Data
Read in the gene expression data sets.
```
# Bioconductor annotation packages: one *.db package per microarray
# platform, used to map probe IDs to gene symbols / Entrez IDs / names.
library(annotate)
library(illuminaHumanv3.db)
library(illuminaHumanv2.db)
library(hgu133plus2.db)
library(illuminaHumanv4.db)
library(plyr)

working.dir <- "~/NLM_Reproducibility_Workshop/tb_and_arthritis/working"
setwd(working.dir)

# For each GEO series matrix: read the expression table, then look up the
# annotation for every probe (rows 2..n; row 1 carries the class labels).
dat.v2 <- read.delim("GSE15573_series_matrix_networkanalyst.txt")
id.v2 <- select(illuminaHumanv2.db, as.character(dat.v2[2:nrow(dat.v2),1]), c("SYMBOL","ENTREZID", "GENENAME"))
dat.v3.1 <- read.delim("GSE19435_series_matrix_networkanalyst.txt")
id.v3.1 <- select(illuminaHumanv3.db, as.character(dat.v3.1[2:nrow(dat.v3.1),1]), c("SYMBOL","ENTREZID", "GENENAME"))
dat.v3.2 <- read.delim("GSE19444_series_matrix_networkanalyst.txt")
id.v3.2 <- select(illuminaHumanv3.db, as.character(dat.v3.2[2:nrow(dat.v3.2),1]), c("SYMBOL","ENTREZID", "GENENAME"))
dat.v4 <- read.delim("GSE65517_series_matrix_networkanalyst.txt")
id.v4 <- select(illuminaHumanv4.db, as.character(dat.v4[2:nrow(dat.v4),1]), c("SYMBOL","ENTREZID", "GENENAME"))
dat.plus2 <- read.delim("GSE4588_series_matrix_networkanalyst.txt")
id.plus2 <- select(hgu133plus2.db, as.character(dat.plus2[2:nrow(dat.plus2),1]), c("SYMBOL","ENTREZID", "GENENAME"))
dat.plus2.2 <- read.delim("GSE54992_series_matrix_networkanalyst.txt")
id.plus2.2 <- select(hgu133plus2.db, as.character(dat.plus2.2[2:nrow(dat.plus2.2),1]), c("SYMBOL","ENTREZID", "GENENAME"))

# Join expression values with their probe annotation on PROBEID
# (the first column is renamed so plyr::join finds the key in both).
colnames(dat.v2)[1]=colnames(id.v2)[1]
dat.v2.all <- join(dat.v2,id.v2,by="PROBEID")
colnames(dat.v3.1)[1]=colnames(id.v3.1)[1]
dat.v3.1.all <- join(dat.v3.1,id.v3.1,by="PROBEID")
colnames(dat.v3.2)[1]=colnames(id.v3.2)[1]
dat.v3.2.all <- join(dat.v3.2,id.v3.2,by="PROBEID")
colnames(dat.v4)[1]=colnames(id.v4)[1]
dat.v4.all <- join(dat.v4,id.v4,by="PROBEID")
colnames(dat.plus2)[1]=colnames(id.plus2)[1]
dat.plus2.all <- join(dat.plus2,id.plus2,by="PROBEID")
colnames(dat.plus2.2)[1]=colnames(id.plus2.2)[1]
dat.plus2.2.all <- join(dat.plus2.2,id.plus2.2,by="PROBEID")
```
# Sample Filtering
The paper used some inclusion criteria to select samples from each study. Samples without class labels are thus removed from further analyses.
```
datasets <- list(dat.v2.all,dat.v3.1.all,dat.v3.2.all,dat.v4.all,dat.plus2.all,dat.plus2.2.all)
for (i in 1:length(datasets)) {
  dataset <- datasets[[i]]
  # Tag the three trailing annotation columns so row 1 is non-NA for them.
  dataset[1,(ncol(dataset)-2):ncol(dataset)] <- "Metadata"
  cat("Removing",sum(is.na(dataset[1,])),"samples.\n")
  # Drop every column (sample) whose class label in row 1 is NA.
  datasets[[i]] <- dataset[,!is.na(dataset[1,])]
}
```
# Convert into gene-based matrices
Remove rows without gene mapping. Merge rows mapping to the same gene using the median values.
```
dataset.class <- data.frame()
for (i in 1:length(datasets)) {
  dataset <- datasets[[i]]
  # Accumulate the class labels (row 1) of every sample across all studies;
  # the probe-id column and the three annotation columns are excluded.
  dataset.class <- rbind(dataset.class,t(dataset[1,-c(1,(ncol(dataset)-2):ncol(dataset)),drop=F]))
  dataset <- dataset[-1,]
  cat("Number of rows without gene symbols:",sum(is.na(dataset$SYMBOL)),"\n")
  dataset <- subset(dataset,!is.na(dataset$SYMBOL))
  dataset.expr <- apply(dataset[,-c(1,(ncol(dataset)-2):ncol(dataset))],2,as.numeric)
  #print(head(dataset.expr))
  # Collapse multiple probes per gene symbol to their median expression.
  dataset <- aggregate(dataset.expr, list(dataset$SYMBOL),median)
  rownames(dataset) <- dataset$Group.1
  cat("From",nrow(datasets[[i]]),"rows to",nrow(dataset),"rows\n")
  datasets[[i]] <- dataset
}
```
# Merge datasets
Merge all studies into one, keeping only the genes that appear in all studies
```
common.genes <- unlist(sapply(datasets,rownames))
common.genes <- table(common.genes)
# Keep only genes present in every one of the studies.
common.genes <- names(common.genes)[common.genes == length(datasets)]
cat("Number of common genes:",length(common.genes),"\n")
merged.dataset <- data.frame()
for (i in 1:length(datasets)) {
  dataset <- datasets[[i]]
  # Heuristic: a maximum above 100 indicates non-logged intensities, so
  # clamp negatives to 0 and log2-transform (epsilon avoids log2(0)).
  if (max(dataset[,-1]) > 100) {
    tmp <- dataset[,-1]
    tmp[tmp < 0] <- 0
    dataset[,-1] <- log2(tmp+0.001)
  }
  cat(min(dataset[,-1]),":",max(dataset[,-1]),"\n")
  # Stack samples (transposed) restricted to the common gene set.
  merged.dataset <- rbind(merged.dataset,t(dataset[common.genes,]))
}
# Prepend the class labels; output is genes x samples with a "#CLASS" row.
merged.dataset <- cbind(dataset.class,merged.dataset[rownames(dataset.class),])
colnames(merged.dataset)[1] <- "#CLASS"
write.table(t(merged.dataset),file = "../data/merged.dataset.txt",sep="\t",quote = F,row.names = T,col.names = T)

head(datasets[[4]])

# NOTE(review): installing a package mid-analysis is a one-off side effect;
# usually done once, outside the script.
install.packages("MetaIntegrator")
```
github_jupyter
``` import bert_model as modeling import tensorflow as tf import os import numpy as np from utils import * from sklearn.cross_validation import train_test_split trainset = sklearn.datasets.load_files( container_path = 'data', encoding = 'UTF-8' ) trainset.data, trainset.target = separate_dataset(trainset, 1.0) print(trainset.target_names) print(len(trainset.data)) print(len(trainset.target)) concat = ' '.join(trainset.data).split() vocabulary_size = len(list(set(concat))) data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size) print('vocab from size: %d'%(vocabulary_size)) print('Most common words', count[4:10]) print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]]) GO = dictionary['GO'] PAD = dictionary['PAD'] EOS = dictionary['EOS'] UNK = dictionary['UNK'] size_layer = 128 num_layers = 2 embedded_size = 128 dimension_output = len(trainset.target_names) learning_rate = 1e-3 maxlen = 50 batch_size = 128 tf.reset_default_graph() sess = tf.InteractiveSession() bert_config = modeling.BertConfig( vocab_size = len(dictionary), hidden_size = size_layer, num_hidden_layers = num_layers, num_attention_heads = size_layer // 4, intermediate_size = size_layer * 2, ) input_ids = tf.placeholder(tf.int32, [None, maxlen]) input_mask = tf.placeholder(tf.int32, [None, maxlen]) segment_ids = tf.placeholder(tf.int32, [None, maxlen]) label_ids = tf.placeholder(tf.int32, [None]) is_training = tf.placeholder(tf.bool) def create_model( bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings, reuse_flag = False, ): model = modeling.BertModel( config = bert_config, is_training = is_training, input_ids = input_ids, input_mask = input_mask, token_type_ids = segment_ids, use_one_hot_embeddings = use_one_hot_embeddings, ) output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value with tf.variable_scope('weights', reuse = reuse_flag): output_weights = tf.get_variable( 
'output_weights', [num_labels, hidden_size], initializer = tf.truncated_normal_initializer(stddev = 0.02), ) output_bias = tf.get_variable( 'output_bias', [num_labels], initializer = tf.zeros_initializer() ) with tf.variable_scope('loss'): def apply_dropout_last_layer(output_layer): output_layer = tf.nn.dropout(output_layer, keep_prob = 0.9) return output_layer def not_apply_dropout(output_layer): return output_layer output_layer = tf.cond( is_training, lambda: apply_dropout_last_layer(output_layer), lambda: not_apply_dropout(output_layer), ) logits = tf.matmul(output_layer, output_weights, transpose_b = True) print( 'output_layer:', output_layer.shape, ', output_weights:', output_weights.shape, ', logits:', logits.shape, ) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits) loss = tf.nn.sparse_softmax_cross_entropy_with_logits( labels = labels, logits = logits ) loss = tf.reduce_mean(loss) correct_pred = tf.equal(tf.argmax(logits, 1, output_type = tf.int32), labels) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) return loss, logits, probabilities, model, accuracy use_one_hot_embeddings = False loss, logits, probabilities, model, accuracy = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, dimension_output, use_one_hot_embeddings, ) global_step = tf.Variable(0, trainable = False, name = 'Global_Step') optimizer = tf.contrib.layers.optimize_loss( loss, global_step = global_step, learning_rate = learning_rate, optimizer = 'Adam', clip_gradients = 3.0, ) sess.run(tf.global_variables_initializer()) vectors = str_idx(trainset.data, dictionary, maxlen) train_X, test_X, train_Y, test_Y = train_test_split( vectors, trainset.target, test_size = 0.2 ) from tqdm import tqdm import time EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 5, 0, 0, 0 while True: lasttime = time.time() if CURRENT_CHECKPOINT == EARLY_STOPPING: print('break epoch:%d\n' % (EPOCH)) break train_acc, train_loss, 
test_acc, test_loss = 0, 0, 0, 0 pbar = tqdm( range(0, len(train_X), batch_size), desc = 'train minibatch loop' ) for i in pbar: batch_x = train_X[i : min(i + batch_size, train_X.shape[0])] batch_y = train_Y[i : min(i + batch_size, train_X.shape[0])] np_mask = np.ones((len(batch_x), maxlen), dtype = np.int32) np_segment = np.ones((len(batch_x), maxlen), dtype = np.int32) acc, cost, _ = sess.run( [accuracy, loss, optimizer], feed_dict = { input_ids: batch_x, label_ids: batch_y, input_mask: np_mask, segment_ids: np_segment, is_training: True }, ) assert not np.isnan(cost) train_loss += cost train_acc += acc pbar.set_postfix(cost = cost, accuracy = acc) pbar = tqdm(range(0, len(test_X), batch_size), desc = 'test minibatch loop') for i in pbar: batch_x = test_X[i : min(i + batch_size, test_X.shape[0])] batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])] np_mask = np.ones((len(batch_x), maxlen), dtype = np.int32) np_segment = np.ones((len(batch_x), maxlen), dtype = np.int32) acc, cost = sess.run( [accuracy, loss], feed_dict = { input_ids: batch_x, label_ids: batch_y, input_mask: np_mask, segment_ids: np_segment, is_training: False }, ) test_loss += cost test_acc += acc pbar.set_postfix(cost = cost, accuracy = acc) train_loss /= len(train_X) / batch_size train_acc /= len(train_X) / batch_size test_loss /= len(test_X) / batch_size test_acc /= len(test_X) / batch_size if test_acc > CURRENT_ACC: print( 'epoch: %d, pass acc: %f, current acc: %f' % (EPOCH, CURRENT_ACC, test_acc) ) CURRENT_ACC = test_acc CURRENT_CHECKPOINT = 0 else: CURRENT_CHECKPOINT += 1 print('time taken:', time.time() - lasttime) print( 'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n' % (EPOCH, train_loss, train_acc, test_loss, test_acc) ) EPOCH += 1 real_Y, predict_Y = [], [] pbar = tqdm( range(0, len(test_X), batch_size), desc = 'validation minibatch loop' ) for i in pbar: batch_x = test_X[i : min(i + batch_size, test_X.shape[0])] batch_y = test_Y[i : min(i + 
batch_size, test_X.shape[0])] np_mask = np.ones((len(batch_x), maxlen), dtype = np.int32) np_segment = np.ones((len(batch_x), maxlen), dtype = np.int32) predict_Y += np.argmax( sess.run( logits, feed_dict = { input_ids: batch_x, label_ids: batch_y, input_mask: np_mask, segment_ids: np_segment, is_training: False, }, ), 1, ).tolist() real_Y += batch_y from sklearn import metrics print(metrics.classification_report(real_Y, predict_Y, target_names = ['negative','positive'])) ```
github_jupyter
# Preprocessing Using Dataflow

**Learning Objectives**
- Creating datasets for Machine Learning using Dataflow

## Introduction

While Pandas is fine for experimenting, for operationalization of your workflow, it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam also allows for streaming.

```
#Ensure that we have Apache Beam version installed.
!pip freeze | grep apache-beam || sudo pip install apache-beam[gcp]==2.12.0

import tensorflow as tf
import apache_beam as beam
import shutil
import os
print(tf.__version__)
```

Next, set the environment variables related to your GCP Project.

```
PROJECT = "cloud-training-demos"    # Replace with your PROJECT
BUCKET = "cloud-training-bucket"    # Replace with your BUCKET
REGION = "us-central1"              # Choose an available region for Cloud MLE
TFVERSION = "1.14"                  # TF version for CMLE to use

import os
# Export the settings so the %%bash cell below can read them.
os.environ["BUCKET"] = BUCKET
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION

%%bash
# Create the bucket only if it does not already exist.
if ! gsutil ls | grep -q gs://${BUCKET}/; then
    gsutil mb -l ${REGION} gs://${BUCKET}
fi
```

## Save the query from earlier

The data is natality data (record of births in the US). My goal is to predict the baby's weight given a number of factors about the pregnancy and the baby's mother. Later, we will want to split the data into training and eval datasets. The hash of the year-month will be used for that.

```
# Create SQL query using natality data after the year 2000
query_string = """
SELECT
    weight_pounds,
    is_male,
    mother_age,
    plurality,
    gestation_weeks,
    ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth
FROM
    publicdata.samples.natality
WHERE
    year > 2000
"""

# Call BigQuery and examine in dataframe
from google.cloud import bigquery
bq = bigquery.Client(project = PROJECT)
# Pull only 100 rows for a quick sanity check of the query.
df = bq.query(query_string + "LIMIT 100").to_dataframe()
df.head()
```

## Create ML dataset using Dataflow

Let's use Cloud Dataflow to read in the BigQuery data, do some preprocessing, and write it out as CSV files.

Instead of using Beam/Dataflow, I had three other options:
* Use Cloud Dataprep to visually author a Dataflow pipeline. Cloud Dataprep also allows me to explore the data, so we could have avoided much of the handcoding of Python/Seaborn calls above as well!
* Read from BigQuery directly using TensorFlow.
* Use the BigQuery console (http://bigquery.cloud.google.com) to run a Query and save the result as a CSV file. For larger datasets, you may have to select the option to "allow large results" and save the result into a CSV file on Google Cloud Storage.

However, in this case, I want to do some preprocessing, modifying data so that we can simulate what is known if no ultrasound has been performed. If I didn't need preprocessing, I could have used the web console. Also, I prefer to script it out rather than run queries on the user interface, so I am using Cloud Dataflow for the preprocessing.

The `preprocess` function below includes an argument `in_test_mode`. When this is set to `True`, running `preprocess` initiates a *local* Beam job. This is helpful for quickly debugging your pipeline and ensuring it works before submitting a job to the Cloud. Setting `in_test_mode` to `False` will launch a processing that is happening on the Cloud.
Go to the GCP webconsole to [the Dataflow section](https://pantheon.corp.google.com/dataflow) and monitor the running job. It took about 20 minutes for me. If you wish to continue without doing this step, you can copy my preprocessed output: <pre> gsutil -m cp -r gs://cloud-training-demos/babyweight/preproc gs://YOUR_BUCKET/ </pre> ``` import apache_beam as beam import datetime, os def to_csv(rowdict): # Pull columns from BQ and create a line import hashlib import copy CSV_COLUMNS = "weight_pounds,is_male,mother_age,plurality,gestation_weeks".split(',') # Create synthetic data where we assume that no ultrasound has been performed # and so we don"t know sex of the baby. Let"s assume that we can tell the difference # between single and multiple, but that the errors rates in determining exact number # is difficult in the absence of an ultrasound. no_ultrasound = copy.deepcopy(rowdict) w_ultrasound = copy.deepcopy(rowdict) no_ultrasound["is_male"] = "Unknown" if rowdict["plurality"] > 1: no_ultrasound["plurality"] = "Multiple(2+)" else: no_ultrasound["plurality"] = "Single(1)" # Change the plurality column to strings w_ultrasound["plurality"] = ["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)"][rowdict["plurality"] - 1] # Write out two rows for each input row, one with ultrasound and one without for result in [no_ultrasound, w_ultrasound]: data = ','.join([str(result[k]) if k in result else "None" for k in CSV_COLUMNS]) yield str("{}".format(data)) def preprocess(in_test_mode): import shutil, os, subprocess job_name = "preprocess-babyweight-features" + "-" + datetime.datetime.now().strftime("%y%m%d-%H%M%S") if in_test_mode: print("Launching local job ... hang on") OUTPUT_DIR = "./preproc" shutil.rmtree(OUTPUT_DIR, ignore_errors=True) os.makedirs(OUTPUT_DIR) else: print("Launching Dataflow job {} ... 
hang on".format(job_name)) OUTPUT_DIR = "gs://{0}/babyweight/preproc/".format(BUCKET) try: subprocess.check_call("gsutil -m rm -r {}".format(OUTPUT_DIR).split()) except: pass options = { "staging_location": os.path.join(OUTPUT_DIR, "tmp", "staging"), "temp_location": os.path.join(OUTPUT_DIR, "tmp"), "job_name": job_name, "project": PROJECT, "teardown_policy": "TEARDOWN_ALWAYS", "no_save_main_session": True } opts = beam.pipeline.PipelineOptions(flags = [], **options) if in_test_mode: RUNNER = "DirectRunner" else: RUNNER = "DataflowRunner" p = beam.Pipeline(RUNNER, options = opts) query = """ SELECT weight_pounds, is_male, mother_age, plurality, gestation_weeks, ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth FROM publicdata.samples.natality WHERE year > 2000 AND weight_pounds > 0 AND mother_age > 0 AND plurality > 0 AND gestation_weeks > 0 AND month > 0 """ if in_test_mode: query = query + " LIMIT 100" for step in ["train", "eval"]: if step == "train": selquery = "SELECT * FROM ({}) WHERE MOD(hashmonth, 100) < 80".format(query) elif step == "eval": selquery = "SELECT * FROM ({}) WHERE MOD(hashmonth, 100) >= 80 AND MOD(hashmonth, 100) < 90".format(query) else: selquery = "SELECT * FROM ({}) WHERE MOD(hashmonth, 100) >= 90".format(query) (p | "{}_read".format(step) >> beam.io.Read(beam.io.BigQuerySource(query = selquery, use_standard_sql = True)) | "{}_csv".format(step) >> beam.FlatMap(to_csv) | "{}_out".format(step) >> beam.io.Write(beam.io.WriteToText(os.path.join(OUTPUT_DIR, "{}.csv".format(step)))) ) job = p.run() if in_test_mode: job.wait_until_finish() print("Done!") preprocess(in_test_mode = False) ``` For a Cloud preprocessing job (i.e. setting `in_test_mode` to `False`), the above step will take 20+ minutes. Go to the GCP web console, navigate to the Dataflow section and <b>wait for the job to finish</b> before you run the follwing step. 
## View results

We can have a look at the elements in our bucket to see the results of our pipeline above.

```
!gsutil ls gs://$BUCKET/babyweight/preproc/*-00000*
```

# Preprocessing with BigQuery

Create SQL query for BigQuery that will union all both the ultrasound and no ultrasound datasets.

```
# Raw rows are computed once in a CTE, then emitted twice: once with the
# ultrasound-style plurality labels, once with sex/plurality obscured.
query = """
WITH CTE_Raw_Data AS (
SELECT
    weight_pounds,
    CAST(is_male AS STRING) AS is_male,
    mother_age,
    plurality,
    gestation_weeks,
    ABS(FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING)))) AS hashmonth
FROM
    publicdata.samples.natality
WHERE
    year > 2000
    AND weight_pounds > 0
    AND mother_age > 0
    AND plurality > 0
    AND gestation_weeks > 0
    AND month > 0)
-- Ultrasound
SELECT
    weight_pounds,
    is_male,
    mother_age,
    CASE
        WHEN plurality = 1 THEN "Single(1)"
        WHEN plurality = 2 THEN "Twins(2)"
        WHEN plurality = 3 THEN "Triplets(3)"
        WHEN plurality = 4 THEN "Quadruplets(4)"
        WHEN plurality = 5 THEN "Quintuplets(5)"
        ELSE "NULL"
    END AS plurality,
    gestation_weeks,
    hashmonth
FROM CTE_Raw_Data
UNION ALL
-- No ultrasound
SELECT
    weight_pounds,
    "Unknown" AS is_male,
    mother_age,
    CASE
        WHEN plurality = 1 THEN "Single(1)"
        WHEN plurality > 1 THEN "Multiple(2+)"
    END AS plurality,
    gestation_weeks,
    hashmonth
FROM CTE_Raw_Data
"""
```

Create temporary BigQuery dataset

```
from google.cloud import bigquery

# Construct a BigQuery client object.
client = bigquery.Client()

# Set dataset_id to the ID of the dataset to create.
dataset_name = "temp_babyweight_dataset"
dataset_id = "{}.{}".format(client.project, dataset_name)

# Construct a full Dataset object to send to the API.
dataset = bigquery.Dataset.from_string(dataset_id)

# Specify the geographic location where the dataset should reside.
dataset.location = "US"

# Send the dataset to the API for creation.
# Raises google.api_core.exceptions.Conflict if the Dataset already
# exists within the project.
try:
    dataset = client.create_dataset(dataset)  # API request
    print("Created dataset {}.{}".format(client.project, dataset.dataset_id))
# NOTE(review): this bare except treats ANY failure (auth, network, quota)
# as "already exists"; it should catch google.api_core.exceptions.Conflict
# specifically -- confirm before tightening.
except:
    print("Dataset {}.{} already exists".format(client.project, dataset.dataset_id))
```

Execute query and write to BigQuery table.

```
job_config = bigquery.QueryJobConfig()
for step in ["train", "eval"]:
    # hashmonth buckets 0-79 -> train, 80-89 -> eval.
    if step == "train":
        selquery = "SELECT * FROM ({}) WHERE MOD(hashmonth, 100) < 80".format(query)
    elif step == "eval":
        selquery = "SELECT * FROM ({}) WHERE MOD(hashmonth, 100) >= 80 AND MOD(hashmonth, 100) < 90".format(query)
    else:
        # Unreachable with the current two-step list (no "test" step).
        selquery = "SELECT * FROM ({}) WHERE MOD(hashmonth, 100) >= 90".format(query)
    # Set the destination table
    table_name = "babyweight_{}".format(step)
    table_ref = client.dataset(dataset_name).table(table_name)
    job_config.destination = table_ref
    job_config.write_disposition = "WRITE_TRUNCATE"

    # Start the query, passing in the extra configuration.
    query_job = client.query(
        query=selquery,
        # Location must match that of the dataset(s) referenced in the query
        # and of the destination table.
        location="US",
        job_config=job_config)  # API request - starts the query

    query_job.result()  # Waits for the query to finish
    print("Query results loaded to table {}".format(table_ref.path))
```

Export BigQuery table to CSV in GCS.

```
dataset_ref = client.dataset(dataset_id=dataset_name, project=PROJECT)

for step in ["train", "eval"]:
    destination_uri = "gs://{}/{}".format(BUCKET, "babyweight/bq_data/{}*.csv".format(step))
    table_name = "babyweight_{}".format(step)
    table_ref = dataset_ref.table(table_name)
    extract_job = client.extract_table(
        table_ref,
        destination_uri,
        # Location must match that of the source table.
        location="US",
    )  # API request
    extract_job.result()  # Waits for job to complete.

    print("Exported {}:{}.{} to {}".format(PROJECT, dataset_name, table_name, destination_uri))
```

## View results

We can have a look at the elements in our bucket to see the results of our pipeline above.

```
!gsutil ls gs://$BUCKET/babyweight/bq_data/*000000000000*
```

Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
github_jupyter
``` from scipy.io import loadmat from pprint import pprint from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.svm import LinearSVC import torch from tqdm import tqdm import pandas as pd import numpy as np from torch.utils.data import DataLoader, Dataset import matplotlib.pyplot as plt import pickle from mpl_toolkits.mplot3d import Axes3D import copy from pathlib import Path import yaml import os import sys # Get the current project path (where you open the notebook) # and go up two levels to get the project path current_dir = Path.cwd() #proj_path = current_dir.parent proj_path = current_dir.parent # make the code in src available to import in this notebook sys.path.append(os.path.join(proj_path, 'src')) # Catalog contains all the paths related to datasets with open(os.path.join(proj_path, 'conf/data_catalog.yml'), "r") as f: catalog = yaml.safe_load(f) # Params contains all of the dataset creation parameters and model parameters with open(os.path.join(proj_path, 'conf/parameters_svm.yml'), "r") as f: params = yaml.safe_load(f) ``` # Import Dataset ``` with open('../data/02_interim/bmodes_steatosis_assessment_IJCARS.pickle', 'rb') as handle: df = pickle.load(handle) M, N= 434, 636 # ultrasound image dimension def create_cumul_var_graph(pca, J= None): ratio = pca.explained_variance_ratio_ cumulative = np.cumsum(ratio) n_components = np.arange(0, len(cumulative)) if J is None: plt.plot(n_components, cumulative,label= "Raw US" ) plt.title("Cumulative Explained Variance by number of components") else: plt.plot(n_components, cumulative,label= f'J:{J}' ) plt.title(f"Cumulative Explained Variance by number of components ") plt.ylabel("Cumulative Ratio") plt.xlabel("Number of components") plt.legend() ``` ## 1. 
Visualization of Raw US images (No transformation) ``` # Reshape the data appropriately data = df['img'].iloc[0].view(1,M*N) for i in tqdm(range(1,len(df['img']))): data = torch.cat([data,df['img'].iloc[i].view(1,M*N)]) ``` ## 1. Visulalization GLOBAL PCA - Scattering Transform ``` plt.figure(figsize=(10,10)) pca = PCA(n_components=200) pca.fit(data) pca_data = pca.transform(data) create_cumul_var_graph(pca) for J in tqdm(range(2,7)): print(J) with open(f'../data/03_features/scattering_features_J_{J}.pickle', 'rb') as handle: scatter_dict = pickle.load(handle) df_scattering = scatter_dict['df'] scattering_params = {'J':scatter_dict['J'], 'M':scatter_dict['M'], 'N':scatter_dict['N']} print("Scattering loaded") #Transforming df_scattering to a numpy array excluding class and id df_scattering.drop("class", inplace = True, axis=1) df_scattering.drop("id", inplace=True, axis=1) print(df_scattering.shape) df_scattering = df_scattering.to_numpy() print("PCA started") pca_s = PCA(n_components=200) pca_data_s = pca_s.fit_transform(df_scattering) create_cumul_var_graph(pca_s, J) ``` # 2. 
def get_scattering_features(catalog, J): ``` def get_scattering_features(catalog, J): with open(os.path.join(catalog['data_root'], catalog[f'03_feature_scatt_{J}']), 'rb') as handle: scatter_dict = pickle.load(handle) df_scattering = scatter_dict['df'] scattering_params = {'J':scatter_dict['J'], 'M':scatter_dict['M'], 'N':scatter_dict['N']} df_scattering = df_scattering.drop(columns=['id', 'class']) return df_scattering, scattering_params with open('../data/02_interim/bmodes_steatosis_assessment_IJCARS.pickle', 'rb') as handle: df = pickle.load(handle) plt.figure(figsize=(10,10)) for J in tqdm(range(2,7)): print(J) data, _ = get_scattering_features(catalog, J) print("Scattering loaded") dataset = pd.concat([df, data], axis=1) subtrain_data_flatten = dataset.drop(['img','fat', 'class', 'id'], axis=1) size = len(subtrain_data_flatten) print(f"Size datase: {subtrain_data_flatten.shape}") if J == 2: subtrain_data_flatten = torch.from_numpy(subtrain_data_flatten.values).view(size,108,159,81) subtrain_data_flatten = subtrain_data_flatten.reshape(-1,81) nm_component = 81 elif J==3: subtrain_data_flatten = torch.from_numpy(subtrain_data_flatten.values).view(size,54,79,217) subtrain_data_flatten = subtrain_data_flatten.reshape(-1,217) nm_component = 100 elif J==4: subtrain_data_flatten = torch.from_numpy(subtrain_data_flatten.values).view(size,27,39,417) subtrain_data_flatten = subtrain_data_flatten.reshape(-1,417) nm_component = 100 elif J==5: subtrain_data_flatten = torch.from_numpy(subtrain_data_flatten.values).view(size,13,19,681) subtrain_data_flatten = subtrain_data_flatten.reshape(-1,681) nm_component = 100 elif J==6: subtrain_data_flatten = torch.from_numpy(subtrain_data_flatten.values).view(size,6,9,1009) subtrain_data_flatten = subtrain_data_flatten.reshape(-1,1009) nm_component = 100 else: raise NotImplemented(f"J {self.J} parameter for scattering not implemented") print("PCA started") pca_s = PCA(n_components=80) pca_data_s = 
pca_s.fit_transform(subtrain_data_flatten) create_cumul_var_graph(pca_s, J) torch.from_numpy(subtrain_data_flatten.values).shape ```
github_jupyter
# Introduction This notebook shows how to evaluate neural cross-lingual summarization (cls) models presented in paper [NCLS: Neural Cross-Lingual Summarization]( https://arxiv.org/abs/1909.00156) with Th2En dataset. Their original codes are available [here](https://github.com/ZNLP/NCLS-Corpora). <br><br> **Neural Cross-Lingual Summarization (NCLS)** is an end-to-end cross-lingual summarization framework. The architecture of the model is based entirely on Transformer with multi-task learning: CLS+MT, and CLS+MS (cross-lingual summarization + monolingual summarization). --- TNCLS refers to Transformer-based neural CLS models. --- In MS+CLS refers to the multi-task NCLS model which accepts an input text and simultaneously performs text generation for both CLS and MS tasks and calculates the total losses. <br> `task1` refers to monolingual summarization task. <br> `task2` refers to <i><u>cross-lingual summarization</u></i> task. --- In MT+CLS jointly trains cross-lingual summarization with machine translation.<br> `task1` refers to machine translation task. <br> `task2` refers to <i><u>cross-lingual summarization</u></i> task. ``` from google.colab import drive drive._mount('/content/drive') #!pip install -q torch==1.5.1 torchvision==0.6.1 !pip install -q rouge !pip install -q bert_score import pandas as pd from tqdm.notebook import tqdm import rouge from bert_score import score !git clone https://github.com/nakhunchumpolsathien/ThaiCrossSum_Corpora %cd /content/ThaiCrossSum_Corpora/src/NCLS ``` # 1. Evaluate TNCLS Model <b>[TNCLS](https://arxiv.org/pdf/1909.00156.pdf)</b>: Transformer-based NCLS models where the input and output are different granularities combinations of unit. 
## 1.1 TH2EN ``` %cd '/content/ThaiCrossSum_Corpora/src/NCLS/code/beaver-base' !CUDA_VISIBLE_DEVICES=0, python translate.py -config /content/ThaiCrossSum_Corpora/src/NCLS/code/beaver-base/run_config/decode-example.json ``` ### 1.1.1 Evaluate CLS results with ROUGE ``` !rouge -f '/content/sample_data/outputs/tncls_th2en_cls.out' '/content/drive/MyDrive/Projects/Model_Checkpoints/cross-lingual-projects/NCLS/beaver-2task+/th2en/trained-on-full-dataset/test.CLS.ref.language2' --avg ``` ### 1.1.2 Evaluate CLS results with BertScore ``` import logging import transformers transformers.tokenization_utils.logger.setLevel(logging.ERROR) transformers.configuration_utils.logger.setLevel(logging.ERROR) transformers.modeling_utils.logger.setLevel(logging.ERROR) %matplotlib inline with open("/content/sample_data/outputs/tncls_th2en_cls.out") as f: cands = [line.strip() for line in f] with open("/content/drive/MyDrive/Projects/Model_Checkpoints/cross-lingual-projects/NCLS/beaver-2task+/th2en/trained-on-full-dataset/test.CLS.ref.language2") as f: refs = [line.strip() for line in f] P, R, F1 = score(cands, refs, lang='en', verbose=False) print(f"System level F1 score: {F1.mean():.3f}") print(f"System level P score: {P.mean():.3f}") print(f"System level R score: {R.mean():.3f}") import matplotlib.pyplot as plt plt.hist(F1, bins=30) plt.xlabel("score") plt.ylabel("counts") plt.show() ``` # 2. NCLS: CLS+MS <b>[CLS+MS](https://arxiv.org/pdf/1909.00156.pdf)</b>: refers to the multi-task NCLS model which accepts an input text and simultaneously performs text generation for both CLS and MS tasks and calculates the total losses. 
## 2.1 TH2EN ``` %cd '/content/ThaiCrossSum_Corpora/src/NCLS/code/beaver-2task' !CUDA_VISIBLE_DEVICES=0, python translate.py -config /content/ThaiCrossSum_Corpora/src/NCLS/code/beaver-2task/run_config/decode-example.json ``` ### 2.1.1 Evaluate CLS results with ROUGE ``` !rouge -f '/content/drive/MyDrive/Final_Datasets/final_crosslingual_sum_datasets/trained_models/beaver-2task/th2en-filtered/output_task2.txt' '/content/drive/MyDrive/Projects/Model_Checkpoints/cross-lingual-projects/NCLS/beaver-2task+/th2en/trained-on-full-dataset/test.CLS.ref.language2' --avg ``` ### 2.1.2 Evaluate CLS results with BertScore ``` with open("/content/drive/MyDrive/Final_Datasets/final_crosslingual_sum_datasets/trained_models/beaver-2task/th2en-filtered/output_task2.txt") as f: cands = [line.strip() for line in f] with open("/content/drive/MyDrive/Projects/Model_Checkpoints/cross-lingual-projects/NCLS/beaver-2task+/th2en/trained-on-full-dataset/test.CLS.ref.language2") as f: refs = [line.strip() for line in f] P, R, F1 = score(cands, refs, lang='en', verbose=True) print(f"System level F1 score: {F1.mean():.3f}") print(f"System level P score: {P.mean():.3f}") print(f"System level R score: {R.mean():.3f}") plt.hist(F1, bins=30) plt.xlabel("score") plt.ylabel("counts") plt.show() ``` ### 2.1.3 Evaluate MS results with ROUGE MS = Monolingual Summarization task ``` !rouge -f '/content/drive/MyDrive/Final_Datasets/final_crosslingual_sum_datasets/trained_models/beaver-2task/th2en-filtered/output_task1.txt' '/content/sample_data/test.MS.target.TH.txt' --avg ``` 😲 &nbsp;&nbsp; CLS+MS model performs monolingual task surprisingly well. It produces too high ROUGE scores. Let me double check. 
``` import codecs def get_text_list(fpath): texts = [] with codecs.open(fpath, encoding='utf-8') as f: for line in f: text = line.replace('!', '').replace('.', '').replace(',', '').replace('\n', '') text = text.replace(',', '').replace('?', '').replace('。', '') texts.append(text) return texts hypos = get_text_list('/content/drive/MyDrive/Final_Datasets/final_crosslingual_sum_datasets/trained_models/beaver-2task/th2en-filtered/output_task1.txt') refs = get_text_list('/content/sample_data/test.MS.target.TH.txt') from rouge import Rouge rouge = Rouge() scores = rouge.get_scores(hypos, refs, avg=True) r1_fs = [] r2_fs = [] rl_fs = [] for i in tqdm(range(len(refs))): hyp = hypos[i] ref = refs[i] scores = rouge.get_scores(hyp, ref, avg=True) r1_fs.append(scores['rouge-1']['f']) r2_fs.append(scores['rouge-2']['f']) rl_fs.append(scores['rouge-l']['f']) ncls2task_ms_output_df = pd.DataFrame(list(zip(refs, hypos, r1_fs, r2_fs, rl_fs)), columns =['ref', 'hyp', 'r1', 'r2', 'rl']) ncls2task_ms_output_df.sample(n=10) print('ROUGE F1') print(f'rouge-l: {ncls2task_ms_output_df["r1"].mean()}') print(f'rouge-2: {ncls2task_ms_output_df["r2"].mean()}') print(f'rouge-3: {ncls2task_ms_output_df["rl"].mean()}') ``` 💡 💡 💡 <br> Initial inspection of these outputs indicated that CLS+MS model tends to select the very first utterances to form monolingual summary. The leading sentences of (Thai) news articles especially contain the most important information of the news. This particular reason explains why CLS+MS model products very high ROUGE scores. However, this model might not perform well in other article domains. --- # 3. NCLS: CLS+MT <b>[CLS+MT](https://arxiv.org/pdf/1909.00156.pdf)</b>: It trains CLS and MT tasks via alternating training strategy. Specifically, they optimize the CLS task in a mini-batch, and they optimize the MT task in the next mini-batch. 
## 3.1 TH2EN

```
# Decode with the trained beaver-2task+ (CLS+MT) model.
%cd '/content/ThaiCrossSum_Corpora/src/NCLS/code/beaver-2task+'
!CUDA_VISIBLE_DEVICES=0, python translate.py -config /content/ThaiCrossSum_Corpora/src/NCLS/code/beaver-2task+/run_config/decode-example.json
```

### 3.1.1 Example of CLS summary outputs

```
# Load cleaned CLS hypotheses and references side by side for inspection.
hypos = get_text_list('/content/sample_data/outputs/2taskpluse_output_task2_CLS_EN.txt')
refs = get_text_list('/content/drive/MyDrive/Projects/Model_Checkpoints/cross-lingual-projects/NCLS/beaver-2task+/th2en/trained-on-full-dataset/test.CLS.ref.language2')
t2e_2tpl_cls_output_df = pd.DataFrame(list(zip(refs, hypos)), columns =['ref', 'hyp'])
t2e_2tpl_cls_output_df.sample(n=10)
```

### 3.1.2 Evaluate CLS results with ROUGE

```
!rouge -f '/content/sample_data/outputs/2taskpluse_output_task2_CLS_EN.txt' '/content/drive/MyDrive/Projects/Model_Checkpoints/cross-lingual-projects/NCLS/beaver-2task+/th2en/trained-on-full-dataset/test.CLS.ref.language2' --avg
```

### 3.1.3 Evaluate CLS results with BertScore

```
with open("/content/sample_data/outputs/2taskpluse_output_task2_CLS_EN.txt") as f:
    cands = [line.strip() for line in f]
with open("/content/drive/MyDrive/Projects/Model_Checkpoints/cross-lingual-projects/NCLS/beaver-2task+/th2en/trained-on-full-dataset/test.CLS.ref.language2") as f:
    refs = [line.strip() for line in f]

P, R, F1 = score(cands, refs, lang='en', verbose=True)
print(f"System level F1 score: {F1.mean():.3f}")
print(f"System level P score: {P.mean():.3f}")
print(f"System level R score: {R.mean():.3f}")

plt.hist(F1, bins=30)
plt.xlabel("score")
plt.ylabel("counts")
plt.show()
```

### 3.1.4 Evaluate MT Results with BLEU score

```
import nltk
# NOTE(review): `hypos` here is still the CLS summary list loaded in 3.1.1
# (no task1/MT output file is loaded for TH2EN), and `refs` was re-bound in
# 3.1.3 with different cleaning -- confirm these are the intended inputs
# for an MT BLEU evaluation (compare 3.2.4, which loads task1 files).
scores = []
for i in tqdm(range(len(refs))):
    hyp = hypos[i].split()
    ref = refs[i].split()
    # Sentence-level BLEU with a single reference per hypothesis.
    scores.append(nltk.translate.bleu_score.sentence_bleu([ref], hyp))

th2en_mt_output_df = pd.DataFrame(list(zip(refs, hypos, scores)), columns =['ref', 'hyp', 'bleu'])
th2en_mt_output_df.sample(n=10)

print(f'Average BLEU scores {round(th2en_mt_output_df["bleu"].mean()*100, 2)}')
```

## 3.2 TH2ZH

```
%cd /content/ThaiCrossSum_Corpora/src/NCLS/code/beaver-2task+
!CUDA_VISIBLE_DEVICES=0, python translate.py -config /content/ThaiCrossSum_Corpora/src/NCLS/code/beaver-2task+/run_config/decode-example.json
```

### 3.2.1 Examples of CLS summary output

```
hypos = get_text_list('/content/sample_data/th2zh_output_task2.txt')
refs = get_text_list('/content/drive/MyDrive/Projects/Model_Checkpoints/XLS-proposedModel/dataset/th2zh-full/test.CLS.ref.language2')
t2z_2tpl_cls_output_df = pd.DataFrame(list(zip(refs, hypos)), columns =['ref', 'hyp'])
t2z_2tpl_cls_output_df.sample(n=10)
```

### 3.2.2 Evaluate CLS results with ROUGE

```
!rouge -f '/content/sample_data/th2zh_output_task2.txt' '/content/drive/MyDrive/Projects/Model_Checkpoints/XLS-proposedModel/dataset/th2zh-full/test.CLS.ref.language2' --avg
```

### 3.2.3 Evaluate CLS results with BertScore

```
with open("/content/sample_data/th2zh_output_task2.txt") as f:
    cands = [line.strip() for line in f]
with open("/content/drive/MyDrive/Projects/Model_Checkpoints/XLS-proposedModel/dataset/th2zh-full/test.CLS.ref.language2") as f:
    refs = [line.strip() for line in f]

# Chinese-target summaries, hence lang='zh'.
P, R, F1 = score(cands, refs, lang='zh', verbose=True)
print(f"System level F1 score: {F1.mean():.3f}")
print(f"System level P score: {P.mean():.3f}")
print(f"System level R score: {R.mean():.3f}")

plt.hist(F1, bins=30)
plt.xlabel("score")
plt.ylabel("counts")
plt.show()
```

### 3.2.4 Evaluate MT Results with BLEU score

```
import nltk
import pandas as pd
# MT (task1) outputs vs. the Chinese translation references.
hypos = get_text_list('/content/sample_data/th2zh_output_task1.txt')
refs = get_text_list('/content/sample_data/test.MT.target.ZH.txt')

scores = []
for i in tqdm(range(len(refs))):
    hyp = hypos[i].split()
    ref = refs[i].split()
    scores.append(nltk.translate.bleu_score.sentence_bleu([ref], hyp))

th2zh_mt_output_df = pd.DataFrame(list(zip(refs, hypos, scores)), columns =['ref', 'hyp', 'bleu'])
th2zh_mt_output_df.sample(n=12)

print(f'Average BLEU scores {round(th2zh_mt_output_df["bleu"].mean()*100, 2)}')
```
github_jupyter
# Automated Machine Learning

#### Forecasting away from training data

## Contents
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Data](#Data)
4. [Prepare remote compute and data.](#prepare_remote)
5. [Create the configuration and train a forecaster](#train)
6. [Forecasting from the trained model](#forecasting)
7. [Forecasting away from training data](#forecasting_away)

## Introduction

This notebook demonstrates the full interface to the `forecast()` function.

The best known and most frequent usage of `forecast` enables forecasting on test sets that immediately follow the training data. However, in many use cases it is necessary to continue using the model for some time before retraining it. This happens especially in **high-frequency forecasting**, when forecasts need to be made more frequently than the model can be retrained. Examples are in the Internet of Things and predictive cloud resource scaling.

Here we show how to use the `forecast()` function when a time gap exists between training data and prediction period.

Terminology:
* forecast origin: the last period when the target value is known
* forecast period(s): the period(s) for which the value of the target is desired.
* forecast horizon: the number of forecast periods
* lookback: how many past periods (before forecast origin) the model function depends on. The larger of number of lags and length of rolling window.
* prediction context: `lookback` periods immediately preceding the forecast origin

![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/automl-forecasting-function.png)

## Setup

Please make sure you have followed the `configuration.ipynb` notebook so that your ML workspace information is saved in the config file.
```
import os
import pandas as pd
import numpy as np
import logging
import warnings

import azureml.core
from azureml.core.dataset import Dataset
from pandas.tseries.frequencies import to_offset
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies

# Squash warning messages for cleaner output in the notebook
warnings.showwarning = lambda *args, **kwargs: None

np.set_printoptions(precision=4, suppress=True, linewidth=120)
```

This sample notebook may use features that are not available in previous versions of the Azure ML SDK.

```
print("This notebook was created using version 1.5.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")

from azureml.core.workspace import Workspace
from azureml.core.experiment import Experiment
from azureml.train.automl import AutoMLConfig

ws = Workspace.from_config()

# choose a name for the run history container in the workspace
experiment_name = 'automl-forecast-function-demo'

experiment = Experiment(ws, experiment_name)

output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['SKU'] = ws.sku
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Run History Name'] = experiment_name
# NOTE(review): -1 is deprecated in newer pandas (use None to mean "no limit")
# -- confirm against the pandas version pinned by this SDK release.
pd.set_option('display.max_colwidth', -1)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
```

## Data

For the demonstration purposes we will generate the data artificially and use them for the forecasting.

```
TIME_COLUMN_NAME = 'date'
GRAIN_COLUMN_NAME = 'grain'
TARGET_COLUMN_NAME = 'y'


def get_timeseries(train_len: int,
                   test_len: int,
                   time_column_name: str,
                   target_column_name: str,
                   grain_column_name: str,
                   grains: int = 1,
                   freq: str = 'H'):
    """
    Return the time series of designed length.

    :param train_len: The length of training data (one series).
    :type train_len: int
    :param test_len: The length of testing data (one series).
    :type test_len: int
    :param time_column_name: The desired name of a time column.
    :type time_column_name: str
    :param target_column_name: The desired name of the target column.
    :type target_column_name: str
    :param grain_column_name: The desired name of the grain column.
    :type grain_column_name: str
    :param grains: The number of grains.
    :type grains: int
    :param freq: The frequency string representing pandas offset.
                 see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
    :type freq: str
    :returns: the tuple of train and test data sets.
    :rtype: tuple
    """
    data_train = []  # type: List[pd.DataFrame]
    data_test = []  # type: List[pd.DataFrame]
    data_length = train_len + test_len
    for i in range(grains):
        # Target is a noisy linear trend offset by i*5 so grains are separable.
        X = pd.DataFrame({
            time_column_name: pd.date_range(start='2000-01-01',
                                            periods=data_length,
                                            freq=freq),
            target_column_name: np.arange(data_length).astype(float) +
                                np.random.rand(data_length) +
                                i*5,
            'ext_predictor': np.asarray(range(42, 42 + data_length)),
            grain_column_name: np.repeat('g{}'.format(i), data_length)
        })
        data_train.append(X[:train_len])
        data_test.append(X[train_len:])
    X_train = pd.concat(data_train)
    y_train = X_train.pop(target_column_name).values
    X_test = pd.concat(data_test)
    y_test = X_test.pop(target_column_name).values
    return X_train, y_train, X_test, y_test


n_test_periods = 6
n_train_periods = 30

X_train, y_train, X_test, y_test = get_timeseries(train_len=n_train_periods,
                                                  test_len=n_test_periods,
                                                  time_column_name=TIME_COLUMN_NAME,
                                                  target_column_name=TARGET_COLUMN_NAME,
                                                  grain_column_name=GRAIN_COLUMN_NAME,
                                                  grains=2)
```

Let's see what the training data looks like.

```
X_train.tail()

# plot the example time series
import matplotlib.pyplot as plt

whole_data = X_train.copy()
target_label = 'y'
whole_data[target_label] = y_train
for g in whole_data.groupby('grain'):
    plt.plot(g[1]['date'].values, g[1]['y'].values, label=g[0])
plt.legend()
plt.show()
```

### Prepare remote compute and data.
<a id="prepare_remote"></a> The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation. ``` # We need to save thw artificial data and then upload them to default workspace datastore. DATA_PATH = "fc_fn_data" DATA_PATH_X = "{}/data_train.csv".format(DATA_PATH) if not os.path.isdir('data'): os.mkdir('data') pd.DataFrame(whole_data).to_csv("data/data_train.csv", index=False) # Upload saved data to the default data store. ds = ws.get_default_datastore() ds.upload(src_dir='./data', target_path=DATA_PATH, overwrite=True, show_progress=True) train_data = Dataset.Tabular.from_delimited_files(path=ds.path(DATA_PATH_X)) ``` You will need to create a [compute target](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource. 
``` from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException # Choose a name for your CPU cluster amlcompute_cluster_name = "fcfn-cluster" # Verify that cluster does not exist already try: compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name) print('Found existing cluster, use it.') except ComputeTargetException: compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2', max_nodes=6) compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config) compute_target.wait_for_completion(show_output=True) ``` ## Create the configuration and train a forecaster <a id="train"></a> First generate the configuration, in which we: * Set metadata columns: target, time column and grain column names. * Validate our data using cross validation with rolling window method. * Set normalized root mean squared error as a metric to select the best model. * Set early termination to True, so the iterations through the models will stop when no improvements in accuracy score will be made. * Set limitations on the length of experiment run to 15 minutes. * Finally, we set the task to be forecasting. * We apply the lag lead operator to the target value i.e. we use the previous values as a predictor for the future ones. ``` lags = [1,2,3] max_horizon = n_test_periods time_series_settings = { 'time_column_name': TIME_COLUMN_NAME, 'grain_column_names': [ GRAIN_COLUMN_NAME ], 'max_horizon': max_horizon, 'target_lags': lags } ``` Run the model selection and training process. 
``` from azureml.core.workspace import Workspace from azureml.core.experiment import Experiment from azureml.train.automl import AutoMLConfig automl_config = AutoMLConfig(task='forecasting', debug_log='automl_forecasting_function.log', primary_metric='normalized_root_mean_squared_error', experiment_timeout_hours=0.25, enable_early_stopping=True, training_data=train_data, compute_target=compute_target, n_cross_validations=3, verbosity = logging.INFO, max_concurrent_iterations=4, max_cores_per_iteration=-1, label_column_name=target_label, **time_series_settings) remote_run = experiment.submit(automl_config, show_output=False) remote_run.wait_for_completion() # Retrieve the best model to use it further. _, fitted_model = remote_run.get_output() ``` ## Forecasting from the trained model <a id="forecasting"></a> In this section we will review the `forecast` interface for two main scenarios: forecasting right after the training data, and the more complex interface for forecasting when there is a gap (in the time sense) between training and testing data. ### X_train is directly followed by the X_test Let's first consider the case when the prediction period immediately follows the training data. This is typical in scenarios where we have the time to retrain the model every time we wish to forecast. Forecasts that are made on daily and slower cadence typically fall into this category. Retraining the model every time benefits the accuracy because the most recent data is often the most informative. ![Forecasting after training](forecast_function_at_train.png) We use `X_test` as a **forecast request** to generate the predictions. 
#### Typical path: X_test is known, forecast all upcoming periods ``` # The data set contains hourly data, the training set ends at 01/02/2000 at 05:00 # These are predictions we are asking the model to make (does not contain thet target column y), # for 6 periods beginning with 2000-01-02 06:00, which immediately follows the training data X_test y_pred_no_gap, xy_nogap = fitted_model.forecast(X_test) # xy_nogap contains the predictions in the _automl_target_col column. # Those same numbers are output in y_pred_no_gap xy_nogap ``` #### Confidence intervals Forecasting model may be used for the prediction of forecasting intervals by running ```forecast_quantiles()```. This method accepts the same parameters as forecast(). ``` quantiles = fitted_model.forecast_quantiles(X_test) quantiles ``` #### Distribution forecasts Often the figure of interest is not just the point prediction, but the prediction at some quantile of the distribution. This arises when the forecast is used to control some kind of inventory, for example of grocery items or virtual machines for a cloud service. In such case, the control point is usually something like "we want the item to be in stock and not run out 99% of the time". This is called a "service level". Here is how you get quantile forecasts. ``` # specify which quantiles you would like fitted_model.quantiles = [0.01, 0.5, 0.95] # use forecast_quantiles function, not the forecast() one y_pred_quantiles = fitted_model.forecast_quantiles(X_test) # quantile forecasts returned in a Dataframe along with the time and grain columns y_pred_quantiles ``` #### Destination-date forecast: "just do something" In some scenarios, the X_test is not known. The forecast is likely to be weak, because it is missing contemporaneous predictors, which we will need to impute. If you still wish to predict forward under the assumption that the last known values will be carried forward, you can forecast out to "destination date". 
The destination date still needs to fit within the maximum horizon from training. ``` # We will take the destination date as a last date in the test set. dest = max(X_test[TIME_COLUMN_NAME]) y_pred_dest, xy_dest = fitted_model.forecast(forecast_destination=dest) # This form also shows how we imputed the predictors which were not given. (Not so well! Use with caution!) xy_dest ``` ## Forecasting away from training data <a id="forecasting_away"></a> Suppose we trained a model, some time passed, and now we want to apply the model without re-training. If the model "looks back" -- uses previous values of the target -- then we somehow need to provide those values to the model. ![Forecasting after training](forecast_function_away_from_train.png) The notion of forecast origin comes into play: the forecast origin is **the last period for which we have seen the target value**. This applies per grain, so each grain can have a different forecast origin. The part of data before the forecast origin is the **prediction context**. To provide the context values the model needs when it looks back, we pass definite values in `y_test` (aligned with corresponding times in `X_test`). ``` # generate the same kind of test data we trained on, # but now make the train set much longer, so that the test set will be in the future X_context, y_context, X_away, y_away = get_timeseries(train_len=42, # train data was 30 steps long test_len=4, time_column_name=TIME_COLUMN_NAME, target_column_name=TARGET_COLUMN_NAME, grain_column_name=GRAIN_COLUMN_NAME, grains=2) # end of the data we trained on print(X_train.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].max()) # start of the data we want to predict on print(X_away.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].min()) ``` There is a gap of 12 hours between end of training and beginning of `X_away`. (It looks like 13 because all timestamps point to the start of the one hour periods.) 
Using only `X_away` will fail without adding context data for the model to consume.

```
try:
    y_pred_away, xy_away = fitted_model.forecast(X_away)
    xy_away
except Exception as e:
    print(e)
```

How should we read that error message?

The forecast origin is at the last time the model saw an actual value of `y` (the target). That was at the end of the training data! The model is attempting to forecast from the end of training data. But the requested forecast periods are past the maximum horizon. We need to provide a definite `y` value to establish the forecast origin.

We will use this helper function to take the required amount of context from the data preceding the testing data. Its definition is intentionally simplified to keep the idea in the clear.

```
def make_forecasting_query(fulldata, time_column_name, target_column_name,
                           forecast_origin, horizon, lookback):
    """
    This function will take the full dataset, and create the query
    to predict all values of the grain from the `forecast_origin`
    forward for the next `horizon` horizons. Context from previous
    `lookback` periods will be included.

    fulldata: pandas.DataFrame           a time series dataset. Needs to contain X and y.
    time_column_name: string             which column (must be in fulldata) is the time axis
    target_column_name: string           which column (must be in fulldata) is to be forecast
    forecast_origin: datetime type       the last time we (pretend to) have target values
    horizon: timedelta                   how far forward, in time units (not periods)
    lookback: timedelta                  how far back does the model look?

    Example:

    ```
    forecast_origin = pd.to_datetime('2012-09-01') + pd.DateOffset(days=5)  # forecast 5 days after end of training
    print(forecast_origin)

    X_query, y_query = make_forecasting_query(data,
                       forecast_origin = forecast_origin,
                       horizon = pd.DateOffset(days=7),  # 7 days into the future
                       lookback = pd.DateOffset(days=1), # model has lag 1 period (day)
                      )
    ```
    """

    # Context rows: strictly inside the lookback window, up to and including the origin.
    X_past = fulldata[(fulldata[time_column_name] > forecast_origin - lookback) &
                      (fulldata[time_column_name] <= forecast_origin)]

    # Rows to be forecast: strictly after the origin, out to the horizon.
    X_future = fulldata[(fulldata[time_column_name] > forecast_origin) &
                        (fulldata[time_column_name] <= forecast_origin + horizon)]

    # np.float was removed in NumPy 1.24; the builtin float is the equivalent dtype.
    y_past = X_past.pop(target_column_name).values.astype(float)
    y_future = X_future.pop(target_column_name).values.astype(float)

    # Now take y_future and turn it into question marks
    y_query = y_future.copy().astype(float)  # because sometimes life hands you an int
    y_query.fill(np.nan)

    print("X_past is " + str(X_past.shape) + " - shaped")
    print("X_future is " + str(X_future.shape) + " - shaped")
    print("y_past is " + str(y_past.shape) + " - shaped")
    print("y_query is " + str(y_query.shape) + " - shaped")

    X_pred = pd.concat([X_past, X_future])
    y_pred = np.concatenate([y_past, y_query])
    return X_pred, y_pred
```

Let's see where the context data ends - it ends, by construction, just before the testing data starts.

```
print(X_context.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))
print(X_away.groupby(GRAIN_COLUMN_NAME)[TIME_COLUMN_NAME].agg(['min','max','count']))
X_context.tail(5)

# Since the length of the lookback is 3,
# we need to add 3 periods from the context to the request
# so that the model has the data it needs

# Put the X and y back together for a while.
# They like each other and it makes them happy.
X_context[TARGET_COLUMN_NAME] = y_context X_away[TARGET_COLUMN_NAME] = y_away fulldata = pd.concat([X_context, X_away]) # forecast origin is the last point of data, which is one 1-hr period before test forecast_origin = X_away[TIME_COLUMN_NAME].min() - pd.DateOffset(hours=1) # it is indeed the last point of the context assert forecast_origin == X_context[TIME_COLUMN_NAME].max() print("Forecast origin: " + str(forecast_origin)) # the model uses lags and rolling windows to look back in time n_lookback_periods = max(lags) lookback = pd.DateOffset(hours=n_lookback_periods) horizon = pd.DateOffset(hours=max_horizon) # now make the forecast query from context (refer to figure) X_pred, y_pred = make_forecasting_query(fulldata, TIME_COLUMN_NAME, TARGET_COLUMN_NAME, forecast_origin, horizon, lookback) # show the forecast request aligned X_show = X_pred.copy() X_show[TARGET_COLUMN_NAME] = y_pred X_show ``` Note that the forecast origin is at 17:00 for both grains, and periods from 18:00 are to be forecast. ``` # Now everything works y_pred_away, xy_away = fitted_model.forecast(X_pred, y_pred) # show the forecast aligned X_show = xy_away.reset_index() # without the generated features X_show[['date', 'grain', 'ext_predictor', '_automl_target_col']] # prediction is in _automl_target_col ```
github_jupyter
# MIRI FQPM 1550 Observations of Fomalhaut --- Here we create the basics for a MIRI simulation to observe the Fomalhaut system with the FQPM 1550. This includes simulating the Fomalhaut stellar source behind the center of the phase mask, some fake off-axis companions, and a debris disk model that crosses the mask's quadrant boundaries. From JWST PID 1193, the pointing file provides some the relevant information. Here's a truncated version of the pointing file for the first roll position. In this example, we skip the target acquisition observations and only simulate the science exposure image. ``` * Fomalhaut-1550C-Rot1 (Obs 6) ** Visit 6:1 Aperture Name Target RA Dec V2 V3 IdlX IdlY Level Type MIRIM_TABLOCK 1 VEGA +279.23474 +38.78369 -407.464 -387.100 +0.000 +0.000 TARGET T_ACQ MIRIM_TA1550_UR 1 VEGA +279.23474 +38.78369 -395.471 -365.842 +0.000 +0.000 TARGET T_ACQ MIRIM_TA1550_CUR 1 VEGA +279.23474 +38.78369 -391.430 -370.519 +0.122 +0.134 TARGET T_ACQ MIRIM_MASK1550 1 VEGA +279.23474 +38.78369 -389.892 -372.181 +0.000 +0.000 TARGET SCIENCE ``` Final outputs will be detector-sampled slope images (counts/sec). ``` # Import the usual libraries import numpy as np import matplotlib import matplotlib.pyplot as plt #import matplotlib.patches as mpatches # Enable inline plotting %matplotlib inline # Progress bar from tqdm.auto import trange, tqdm import webbpsf_ext, pysiaf from astropy.io import fits from webbpsf_ext import image_manip, setup_logging, spectra, coords ``` # MIRI Observation with FQPM1550 ## 1. 
Create PSF structure ``` # Mask information mask_id = '1550' filt = f'F{mask_id}C' mask = f'FQPM{mask_id}' pupil = 'MASKFQPM' # Initiate instrument class with selected filters, pupil mask, and image mask inst = webbpsf_ext.MIRI_ext(filter=filt, pupil_mask=pupil, image_mask=mask) # Set desired PSF size and oversampling inst.fov_pix = 256 inst.oversample = 2 # Calculate PSF coefficients inst.gen_psf_coeff() # Calculate position-dependent PSFs due to FQPM # Equivalent to generating a giant library to interpolate over inst.gen_wfemask_coeff() ``` ## 1.1 Observation setup __Configuring observation settings__ Observations consist of nested visit, mosaic tiles, exposures, and dithers. In this section, we configure a pointing class that houses information for a single observation defined in the APT .pointing file. The primary information includes a pointing reference SIAF aperturne name, RA and Dec of the ref aperture, Base X/Y offset relative to the ref aperture position, and Dith X/Y offsets. From this information, along with the V2/V3 position angle, we can determine the orientation and location of objects on the detector focal plane. **Note**: The reference aperture is not necessarily the same as the observed aperture. For instance, you may observe simultaneously with four of NIRCam's SWA detectors, so the reference aperture would be the entire SWA channel, while the observed apertures are A1, A2, A3, and A4. 
``` # Import class to setup pointing info from webbpsf_ext.coords import jwst_point # Observed and reference apertures ap_obs = inst.aperturename ap_ref = 'MIRIM_MASK1550' # Define the RA/Dec of reference aperture and telescope position angle # Position angle is angle of V3 axis rotated towards East ra_ref, dec_ref = (+279.23474, +38.78369) pos_ang = 0 # Set any baseline pointing offsets (e.g., specified in APT's Special Requirements) base_offset=(0,0) # Define a list of nominal dither offsets dith_offsets = [(0,0)] # Telescope pointing information tel_point = jwst_point(ap_obs, ap_ref, ra_ref, dec_ref, pos_ang=pos_ang, base_offset=base_offset, dith_offsets=dith_offsets, base_std=0, dith_std=0) print(f"Reference aperture: {tel_point.siaf_ap_ref.AperName}") print(f" Nominal RA, Dec = ({tel_point.ra_ref:.6f}, {tel_point.dec_ref:.6f})") print(f"Observed aperture: {tel_point.siaf_ap_obs.AperName}") print(f" Nominal RA, Dec = ({tel_point.ra_obs:.6f}, {tel_point.dec_obs:.6f})") print(f"Relative offsets in 'idl' for each dither position (incl. pointing errors)") for i, offset in enumerate(tel_point.position_offsets_act): print(f" Position {i}: ({offset[0]:.4f}, {offset[1]:.4f}) arcsec") ``` ## 1.2 Add central source Here we define the stellar atmosphere parameters for Fomalhaut, including spectral type, optional values for (Teff, log_g, metallicity), normalization flux and bandpass, as well as RA and Dec. ``` from webbpsf_ext import miri_filter, nircam_filter, bp_2mass from webbpsf_ext.image_manip import pad_or_cut_to_size def make_spec(name=None, sptype=None, flux=None, flux_units=None, bp_ref=None, **kwargs): """ Create pysynphot stellar spectrum from input dictionary properties. 
""" from webbpsf_ext import stellar_spectrum # Renormalization arguments renorm_args = (flux, flux_units, bp_ref) # Create spectrum sp = stellar_spectrum(sptype, *renorm_args, **kwargs) if name is not None: sp.name = name return sp # Information necessary to create pysynphot spectrum of star obj_params = { 'name': 'Vega', 'sptype': 'A0V', 'Teff': 9602, 'log_g': 4.1, 'metallicity': -0.5, 'dist': 7.7, 'flux': 16.09, 'flux_units': 'Jy', 'bp_ref': miri_filter('F1550C'), 'RA_obj' : +279.23474, # RA (decimal deg) of source 'Dec_obj' : +38.78369, # Dec (decimal deg) of source } # Create stellar spectrum and add to dictionary sp_star = make_spec(**obj_params) obj_params['sp'] = sp_star # Get `sci` coord positions coord_obj = (obj_params['RA_obj'], obj_params['Dec_obj']) xsci, ysci = tel_point.radec_to_frame(coord_obj, frame_out='sci') # Get sci position shifts from center in units of detector pixels siaf_ap = tel_point.siaf_ap_obs xsci_cen, ysci_cen = siaf_ap.reference_point('sci') xsci_off, ysci_off = (xsci-xsci_cen, ysci-ysci_cen) # and now oversampled pixel shifts osamp = inst.oversample xsci_off_over = xsci_off * osamp ysci_off_over = ysci_off * osamp print("Image shifts (oversampled pixels):", xsci_off_over, ysci_off_over) # Create PSF # PSFs already includes geometric distortions based on SIAF info sp = obj_params['sp'] hdul = inst.calc_psf_from_coeff(sp=sp, coord_vals=(xsci,ysci), coord_frame='sci') # Expand PSF to full frame and offset to proper position ny_pix, nx_pix = (siaf_ap.YSciSize, siaf_ap.XSciSize) ny_pix_over, nx_pix_over = np.array([ny_pix, nx_pix]) * osamp shape_new = (ny_pix*osamp, nx_pix*osamp) delyx = (ysci_off_over, xsci_off_over) image_full = pad_or_cut_to_size(hdul[0].data, shape_new, offset_vals=delyx) # Make new HDUList of target (just central source so far) hdul_full = fits.HDUList(fits.PrimaryHDU(data=image_full, header=hdul[0].header)) fig, ax = plt.subplots(1,1) extent = 0.5 * np.array([-1,1,-1,1]) * inst.fov_pix * inst.pixelscale 
ax.imshow(hdul_full[0].data, extent=extent, cmap='magma') ax.set_xlabel('Arcsec') ax.set_ylabel('Arcsec') ax.tick_params(axis='both', color='white', which='both') for k in ax.spines.keys(): ax.spines[k].set_color('white') ax.xaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10]) ax.yaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10]) fig.tight_layout() ``` ## 1.3 Convolve extended disk image Properly including extended objects is a little more complicated than for point sources. First, we need properly format the input model to a pixel binning and flux units appropriate for the simulations (ie., pixels should be equal to oversampled PSFs with flux units of counts/sec). Then, the image needs to be rotated relative to the 'idl' coordinate plane and subsequently shifted for any pointing offsets. Once in the appropriate 'idl' system ### 1.3.1 PSF Grid ``` # Create grid locations for array of PSFs to generate apname = inst.psf_coeff_header['APERNAME'] siaf_ap = inst.siaf[apname] field_rot = 0 if inst._rotation is None else inst._rotation xyoff_half = 10**(np.linspace(-2,1,10)) xoff = yoff = np.concatenate([-1*xyoff_half[::-1],[0],xyoff_half]) # Mask Offset grid positions in arcsec xgrid_off, ygrid_off = np.meshgrid(xoff, yoff) xgrid_off, ygrid_off = xgrid_off.flatten(), ygrid_off.flatten() # Science positions in detector pixels xoff_sci_asec, yoff_sci_asec = coords.xy_rot(-1*xgrid_off, -1*ygrid_off, -1*field_rot) xsci = xoff_sci_asec / siaf_ap.XSciScale + siaf_ap.XSciRef ysci = yoff_sci_asec / siaf_ap.YSciScale + siaf_ap.YSciRef %%time # Now, create all PSFs, one for each (xsci, ysci) location # Only need to do this once. Can be used for multiple dither positions. 
hdul_psfs = inst.calc_psf_from_coeff(coord_vals=(xsci, ysci), coord_frame='sci', return_oversample=True) ``` ### 1.3.2 Disk Model Image ``` # Disk model information disk_params = { 'file': "Vega/Vega_F1550Csc.fits", 'pixscale': 0.02775, 'wavelength': 15.5, 'units': 'Jy/pixel', 'dist' : 7.7, 'cen_star' : False, } # Open model and rebin to PSF sampling # Scale to instrument wavelength assuming grey scattering function # Converts to phot/sec/lambda hdul_disk_model = image_manip.make_disk_image(inst, disk_params, sp_star=obj_params['sp']) # Rotation necessary to go from sky coordinates to 'idl' frame rotate_to_idl = -1*(tel_point.siaf_ap_obs.V3IdlYAngle + tel_point.pos_ang) ``` ### 1.3.3 Dither Position ``` # Select the first dither location offset delx, dely = tel_point.position_offsets_act[0] hdul_out = image_manip.rotate_shift_image(hdul_disk_model, PA_offset=rotate_to_idl, delx_asec=delx, dely_asec=dely) # Distort image on 'sci' coordinate grid im_sci, xsci_im, ysci_im = image_manip.distort_image(hdul_out, ext=0, to_frame='sci', return_coords=True) # Distort image onto 'tel' (V2, V3) coordinate grid for plot illustration im_tel, v2_im, v3_im = image_manip.distort_image(hdul_out, ext=0, to_frame='tel', return_coords=True) # Plot locations for PSFs that we will generate fig, ax = plt.subplots(1,1) # Show image in V2/V3 plane extent = [v2_im.min(), v2_im.max(), v3_im.min(), v3_im.max()] ax.imshow(im_tel**0.1, extent=extent, cmap='magma') # Add on SIAF aperture boundaries tel_point.plot_inst_apertures(ax=ax, clear=False, label=True) tel_point.plot_ref_aperture(ax=ax) tel_point.plot_obs_aperture(ax=ax, color='C3') # Add PSF location points v2, v3 = siaf_ap.convert(xsci, ysci, 'sci', 'tel') ax.scatter(v2, v3, marker='.', alpha=0.5, color='C2', edgecolors='none', linewidths=0) ax.set_title('Model disk image and PSF Locations in SIAF FoV') fig.tight_layout() ``` This particular disk image is oversized, so we will need to crop the image after convolving PSFs. 
We may want to consider trimming some of this image prior to convolution, depending on how some of the FoV is blocked before reaching the coronagraphic optics. ``` # If the image is too large, then this process will eat up much of your computer's RAM # So, crop image to more reasonable size (20% oversized) osamp = inst.oversample xysize = int(1.2 * np.max([siaf_ap.XSciSize,siaf_ap.YSciSize]) * osamp) xy_add = osamp - np.mod(xysize, osamp) xysize += xy_add im_sci = pad_or_cut_to_size(im_sci, xysize) hdul_disk_model_sci = fits.HDUList(fits.PrimaryHDU(data=im_sci, header=hdul_out[0].header)) # Convolve image im_conv = image_manip.convolve_image(hdul_disk_model_sci, hdul_psfs) # Add cropped image to final oversampled image im_conv = pad_or_cut_to_size(im_conv, hdul_full[0].data.shape) hdul_full[0].data += im_conv def quick_ref_psf(idl_coord, inst, out_shape, sp=None): """ Create a quick reference PSF for subtraction of the science target. """ # Observed SIAF aperture siaf_ap = tel_point.siaf_ap_obs # Location of observation xidl, yidl = idl_coord # Get offset in SCI pixels xsci_off, ysci_off = np.array(siaf_ap.convert(xidl, yidl, 'idl', 'sci')) - \ np.array(siaf_ap.reference_point('sci')) # Get oversampled pixels offests osamp = inst.oversample xsci_off_over, ysci_off_over = np.array([xsci_off, ysci_off]) * osamp yx_offset = (ysci_off_over, xsci_off_over) # Create PSF prev_log = webbpsf_ext.conf.logging_level setup_logging('WARN', verbose=False) hdul_psf_ref = inst.calc_psf_from_coeff(sp=sp, coord_vals=(xidl, yidl), coord_frame='idl') setup_logging(prev_log, verbose=False) im_psf = pad_or_cut_to_size(hdul_psf_ref[0].data, out_shape, offset_vals=yx_offset) return im_psf # Rebin science data to detector pixels im_sci = image_manip.frebin(hdul_full[0].data, scale=1/osamp) # Subtract a reference PSF from the science data coord_vals = tel_point.position_offsets_act[0] im_psf = quick_ref_psf(coord_vals, inst, hdul_full[0].data.shape, sp=sp_star) im_ref = 
image_manip.frebin(im_psf, scale=1/osamp) imdiff = im_sci - im_ref # De-rotate to sky orientation imrot = image_manip.rotate_offset(imdiff, rotate_to_idl, reshape=False, cval=np.nan) from matplotlib.colors import LogNorm from webbpsf_ext.coords import plotAxes fig, axes = plt.subplots(1,3, figsize=(12,4.5)) ############################ # Plot raw image ax = axes[0] im = im_sci mn = np.median(im) std = np.std(im) vmin = 0 vmax = mn+10*std xsize_asec = siaf_ap.XSciSize * siaf_ap.XSciScale ysize_asec = siaf_ap.YSciSize * siaf_ap.YSciScale extent = [-1*xsize_asec/2, xsize_asec/2, -1*ysize_asec/2, ysize_asec/2] norm = LogNorm(vmin=im.max()/1e5, vmax=im.max()) ax.imshow(im, extent=extent, norm=norm, cmap='magma') ax.set_title("Raw Image (log scale)") ax.set_xlabel('XSci (arcsec)') ax.set_ylabel('YSci (arcsec)') plotAxes(ax, angle=-1*siaf_ap.V3SciYAngle) ############################ # Basic PSF subtraction # Subtract a near-perfect reference PSF ax = axes[1] norm = LogNorm(vmin=imdiff.max()/1e5, vmax=imdiff.max()) ax.imshow(imdiff, extent=extent, norm=norm, cmap='magma') ax.set_title("PSF Subtracted (log scale)") ax.set_xlabel('XSci (arcsec)') ax.set_ylabel('YSci (arcsec)') plotAxes(ax, angle=-1*siaf_ap.V3SciYAngle) ############################ # De-rotate to sky orientation ax = axes[2] ax.imshow(imrot, extent=extent, norm=norm, cmap='magma') ax.set_title("De-Rotated (log scale)") ax.set_xlabel('RA offset (arcsec)') ax.set_ylabel('Dec offset (arcsec)') plotAxes(ax, position=(0.95,0.35), label1='E', label2='N') fig.suptitle(f"Fomalhaut ({siaf_ap.AperName})", fontsize=14) fig.tight_layout() hdul_disk_model_sci[0].header # Save image to FITS file hdu_diff = fits.PrimaryHDU(imdiff) copy_keys = [ 'PIXELSCL', 'DISTANCE', 'INSTRUME', 'FILTER', 'PUPIL', 'CORONMSK', 'APERNAME', 'MODULE', 'CHANNEL', 'DET_NAME', 'DET_X', 'DET_Y', 'DET_V2', 'DET_V3' ] hdr = hdu_diff.header for head_temp in (inst.psf_coeff_header, hdul_out[0].header): for key in copy_keys: try: hdr[key] = 
(head_temp[key], head_temp.comments[key]) except (AttributeError, KeyError): pass hdr['PIXELSCL'] = inst.pixelscale name = obj_params['name'] outfile = f'Vega/{name}_{inst.aperturename}_{inst.filter}.fits' hdu_diff.writeto(outfile, overwrite=True) ```
github_jupyter
## Summary - *hidden_size = 162*. - *num_heads = 9*. - *dropout = 0*. - N=16. - Add node and edge features (node features as 81-dim. embedding in `hidden_size`-dim space). - Edgeconv: embed x and edge to half their size and keep row x only. ---- ## Install dependencies (Google Colab only) ``` try: import google.colab GOOGLE_COLAB = True except ImportError: GOOGLE_COLAB = False if GOOGLE_COLAB: !pip install --upgrade torch-scatter !pip install --upgrade torch-sparse !pip install --upgrade torch-cluster !pip install --upgrade torch-spline-conv !pip install torch-geometric if GOOGLE_COLAB: !pip install git+https://gitlab.com/ostrokach/proteinsolver.git ``` ## Imports ``` import atexit import csv import itertools import tempfile import time import uuid import warnings from collections import deque from pathlib import Path import matplotlib.pyplot as plt import numpy as np import pandas as pd import pyarrow import torch import torch.nn as nn import torch.utils.tensorboard from torch import optim from torch_geometric.data import DataLoader import proteinsolver import proteinsolver.datasets %load_ext autoreload %autoreload 2 assert torch.cuda.is_available() ``` ## Parameters ``` device = torch.device("cuda:0") DATA_ROOT = Path(tempfile.gettempdir()) DATA_ROOT = next(Path("/localscratch/").glob("strokach.*")).joinpath("sudoku") DATA_ROOT.mkdir(exist_ok=True) DATA_ROOT UNIQUE_ID = "0a0fcd1d" CONTINUE_PREVIOUS = True try: NOTEBOOK_PATH UNIQUE_PATH except NameError: NOTEBOOK_PATH = Path("sudoku_train").resolve() NOTEBOOK_PATH.mkdir(exist_ok=True) if UNIQUE_ID is None: UNIQUE_ID = uuid.uuid4().hex[:8] exist_ok = False else: exist_ok = True UNIQUE_PATH = NOTEBOOK_PATH.joinpath(UNIQUE_ID) UNIQUE_PATH.mkdir(exist_ok=exist_ok) NOTEBOOK_PATH, UNIQUE_PATH DATAPKG_DATA_DIR = Path(f"~/datapkg_output_dir").expanduser().resolve() DATAPKG_DATA_DIR proteinsolver.settings.data_url = DATAPKG_DATA_DIR.as_posix() proteinsolver.settings.data_url ``` ## Datasets ``` datasets = {} ``` ### 
`SudokuDataset` ``` for i in range(10): dataset_name = f"sudoku_train_{i}" datasets[dataset_name] = proteinsolver.datasets.SudokuDataset4( root=DATA_ROOT.joinpath(dataset_name), subset=f"train_{i}" ) datasets["sudoku_valid_0"] = proteinsolver.datasets.SudokuDataset4( root=DATA_ROOT.joinpath("sudoku_valid_0"), subset=f"valid_0" ) datasets["sudoku_valid_old"] = proteinsolver.datasets.SudokuDataset2( root=DATA_ROOT.joinpath("sudoku_valid_old"), data_url=DATAPKG_DATA_DIR.joinpath( "deep-protein-gen", "sudoku", "sudoku_valid.csv.gz" ).as_posix(), ) ``` # Models ``` %%file {UNIQUE_PATH}/model.py import copy import tempfile import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.container import ModuleList from torch_geometric.nn.inits import reset from torch_geometric.utils import ( add_self_loops, remove_self_loops, scatter_, to_dense_adj, to_dense_batch, ) class EdgeConvMod(torch.nn.Module): def __init__(self, nn, aggr="max"): super().__init__() self.nn = nn self.aggr = aggr self.reset_parameters() def reset_parameters(self): reset(self.nn) def forward(self, x, edge_index, edge_attr=None): """""" row, col = edge_index x = x.unsqueeze(-1) if x.dim() == 1 else x # TODO: Try -x[col] instead of x[col] - x[row] if edge_attr is None: out = torch.cat([x[row], x[col]], dim=-1) else: out = torch.cat([x[row], x[col], edge_attr], dim=-1) out = self.nn(out) x = scatter_(self.aggr, out, row, dim_size=x.size(0)) return x, out def __repr__(self): return "{}(nn={})".format(self.__class__.__name__, self.nn) class EdgeConvBatch(nn.Module): def __init__(self, gnn, hidden_size, batch_norm=True, dropout=0.2): super().__init__() self.gnn = gnn x_post_modules = [] edge_attr_post_modules = [] if batch_norm is not None: x_post_modules.append(nn.LayerNorm(hidden_size)) edge_attr_post_modules.append(nn.LayerNorm(hidden_size)) if dropout: x_post_modules.append(nn.Dropout(dropout)) edge_attr_post_modules.append(nn.Dropout(dropout)) 
self.x_postprocess = nn.Sequential(*x_post_modules) self.edge_attr_postprocess = nn.Sequential(*edge_attr_post_modules) def forward(self, x, edge_index, edge_attr=None): x, edge_attr = self.gnn(x, edge_index, edge_attr) x = self.x_postprocess(x) edge_attr = self.edge_attr_postprocess(edge_attr) return x, edge_attr def get_graph_conv_layer(input_size, hidden_size, output_size): mlp = nn.Sequential( # nn.Linear(input_size, hidden_size), nn.ReLU(), nn.Linear(hidden_size, output_size), ) gnn = EdgeConvMod(nn=mlp, aggr="add") graph_conv = EdgeConvBatch(gnn, output_size, batch_norm=True, dropout=0.2) return graph_conv class MyEdgeConv(torch.nn.Module): def __init__(self, hidden_size): super().__init__() self.embed_x = nn.Linear(hidden_size, hidden_size // 2) self.embed_edge = nn.Linear(hidden_size, hidden_size // 2) self.nn = nn.Sequential( # nn.Linear(hidden_size, hidden_size * 2), nn.ReLU(), nn.Linear(hidden_size * 2, hidden_size), ) self.reset_parameters() def reset_parameters(self): reset(self.nn) def forward(self, x, edge_index, edge_attr=None): """""" row, col = edge_index x = x.unsqueeze(-1) if x.dim() == 1 else x x_in = self.embed_x(x) edge_attr_in = self.embed_edge(edge_attr) x_edge_attr_in = torch.cat([x_in[row], edge_attr_in], dim=-1) edge_attr_out = self.nn(x_edge_attr_in) # if edge_attr is None: # out = torch.cat([x[row], x[col]], dim=-1) # else: # out = torch.cat([x[row], x[col], edge_attr], dim=-1) # edge_attr_out = self.nn(out) return edge_attr_out def __repr__(self): return "{}(nn={})".format(self.__class__.__name__, self.nn) class MyAttn(torch.nn.Module): def __init__(self, hidden_size): super().__init__() self.attn = nn.MultiheadAttention( embed_dim=hidden_size, num_heads=9, dropout=0, bias=True ) self.reset_parameters() def reset_parameters(self): reset(self.attn) def forward(self, x, edge_index, edge_attr, batch): """""" query = x.unsqueeze(0) key = to_dense_adj(edge_index, batch=batch, edge_attr=edge_attr).squeeze(0) adjacency = 
to_dense_adj(edge_index, batch=batch).squeeze(0) key_padding_mask = adjacency == 0 key_padding_mask[torch.eye(key_padding_mask.size(0)).to(torch.bool)] = 0 # attn_mask = torch.zeros_like(key) # attn_mask[mask] = -float("inf") x_out, _ = self.attn(query, key, key, key_padding_mask=key_padding_mask) # x_out = torch.where(torch.isnan(x_out), torch.zeros_like(x_out), x_out) x_out = x_out.squeeze(0) assert (x_out == x_out).all().item() assert x.shape == x_out.shape, (x.shape, x_out.shape) return x_out def __repr__(self): return "{}(nn={})".format(self.__class__.__name__, self.nn) class Net(nn.Module): def __init__( self, x_input_size, adj_input_size, hidden_size, output_size, batch_size=1 ): super().__init__() x_labels = torch.arange(81, dtype=torch.long) self.register_buffer("x_labels", x_labels) self.register_buffer("batch", torch.zeros(10000, dtype=torch.int64)) self.embed_x = nn.Sequential(nn.Embedding(x_input_size, hidden_size), nn.ReLU()) self.embed_x_labels = nn.Sequential(nn.Embedding(81, hidden_size), nn.ReLU()) self.finalize_x = nn.Sequential( nn.Linear(hidden_size * 2, hidden_size), nn.LayerNorm(hidden_size) ) if adj_input_size: self.embed_adj = nn.Sequential( nn.Linear(adj_input_size, hidden_size), nn.ReLU(), nn.Linear(hidden_size, hidden_size), nn.LayerNorm(hidden_size), # nn.ELU(), ) else: self.embed_adj = None N = 16 self.N = N norm = nn.LayerNorm(hidden_size) self.x_norms_0 = _get_clones(norm, N) self.adj_norms_0 = _get_clones(norm, N) self.x_norms_1 = _get_clones(norm, N) self.adj_norms_1 = _get_clones(norm, N) edge_conv = MyEdgeConv(hidden_size) self.edge_convs = _get_clones(edge_conv, N) attn = MyAttn(hidden_size) self.attns = _get_clones(attn, N) self.dropout = nn.Dropout(0.1) self.linear_out = nn.Linear(hidden_size, output_size) def forward(self, x, edge_index, edge_attr): x = self.embed_x(x) x_labels = self.embed_x_labels(self.x_labels) x_labels = x_labels.repeat(x.size(0) // x_labels.size(0), 1) x = torch.cat([x, x_labels], dim=1) x = 
self.finalize_x(x) edge_attr = self.embed_adj(edge_attr) for i in range(self.N): edge_attr_out = self.edge_convs[i](x, edge_index, edge_attr) edge_attr = edge_attr + self.dropout(edge_attr_out) edge_attr = self.adj_norms_1[i](edge_attr) x_out = self.attns[i]( x, edge_index, self.adj_norms_0[i](edge_attr_out), self.batch[: x.size(0)], ) x = x + self.dropout(x_out) x = self.x_norms_1[i](x) x = self.linear_out(x) return x def _get_clones(module, N): return ModuleList([copy.deepcopy(module) for i in range(N)]) %run {UNIQUE_PATH}/model.py %%file {UNIQUE_PATH}/stats.py import atexit import csv import time import warnings import numpy as np class Stats: epoch: int step: int batch_size: int echo: bool total_loss: float num_correct_preds: int num_preds: int num_correct_preds_missing: int num_preds_missing: int num_correct_preds_missing_valid: int num_preds_missing_valid: int num_correct_preds_missing_valid_old: int num_preds_missing_valid_old: int start_time: float def __init__( self, *, epoch=0, step=0, batch_size=1, filename=None, echo=True, tb_writer=None ): self.epoch = epoch self.step = step self.batch_size = batch_size self.echo = echo self.tb_writer = tb_writer self.prev = {} self.init_parameters() if filename: self.filehandle = open(filename, "wt", newline="") self.writer = csv.DictWriter( self.filehandle, list(self.stats.keys()), dialect="unix" ) atexit.register(self.filehandle.close) else: self.filehandle = None self.writer = None def init_parameters(self): self.num_steps = 0 self.total_loss = 0 self.num_correct_preds = 0 self.num_preds = 0 self.num_correct_preds_missing = 0 self.num_preds_missing = 0 self.num_correct_preds_missing_valid = 0 self.num_preds_missing_valid = 0 self.num_correct_preds_missing_valid_old = 0 self.num_preds_missing_valid_old = 0 self.start_time = time.perf_counter() def reset_parameters(self): self.prev = self.stats self.init_parameters() @property def header(self): return "".join(to_fixed_width(self.stats.keys())) @property def 
row(self): return "".join(to_fixed_width(self.stats.values(), 4)) @property def stats(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") return { "epoch": self.epoch, "step": self.step, "datapoint": self.datapoint, "avg_loss": np.float64(1) * self.total_loss / self.num_steps, "accuracy": np.float64(1) * self.num_correct_preds / self.num_preds, "accuracy_m": np.float64(1) * self.num_correct_preds_missing / self.num_preds_missing, "accuracy_mv": self.accuracy_mv, "accuracy_mv_old": self.accuracy_mv_old, "time_elapsed": time.perf_counter() - self.start_time, } @property def accuracy_mv(self): return ( np.float64(1) * self.num_correct_preds_missing_valid / self.num_preds_missing_valid ) @property def accuracy_mv_old(self): return ( np.float64(1) * self.num_correct_preds_missing_valid_old / self.num_preds_missing_valid_old ) @property def datapoint(self): return self.step * self.batch_size def write_header(self): if self.echo: print(self.header) if self.writer is not None: self.writer.writeheader() def write_row(self): if self.echo: print(self.row, end="\r") if self.writer is not None: self.writer.writerow(self.stats) self.filehandle.flush() if self.tb_writer is not None: stats = self.stats datapoint = stats.pop("datapoint") for key, value in stats.items(): self.tb_writer.add_scalar(key, value, datapoint) self.tb_writer.flush() def to_fixed_width(lst, precision=None): lst = [round(l, precision) if isinstance(l, float) else l for l in lst] return [f"{l: <18}" for l in lst] %run {UNIQUE_PATH}/stats.py def get_stats_on_missing(x, y, output): mask = (x == 9).squeeze() if not mask.any(): return 0.0, 0.0 output_missing = output[mask] _, predicted_missing = torch.max(output_missing.data, 1) return (predicted_missing == y[mask]).sum().item(), len(predicted_missing) from contextlib import contextmanager @contextmanager def eval_net(net: nn.Module): training = net.training try: net.train(False) yield finally: net.train(training) batch_size = 6 info_size = 200 
hidden_size = 162 checkpoint_size = 100_000 batch_size, info_size, hidden_size tensorboard_path = NOTEBOOK_PATH.joinpath("runs", UNIQUE_PATH.name) tensorboard_path.mkdir(exist_ok=True) tensorboard_path last_epoch = None last_step = None last_datapoint = None last_state_file = None if CONTINUE_PREVIOUS: for path in UNIQUE_PATH.glob("*.state"): e, s, d, amv = path.name.split("-") datapoint = int(d.strip("d")) if last_datapoint is None or datapoint >= last_datapoint: last_datapoint = datapoint last_epoch = int(e.strip("e")) last_step = int(s.strip("s")) last_state_file = path last_epoch, last_step, last_datapoint, last_state_file net = Net( x_input_size=13, adj_input_size=3, hidden_size=hidden_size, output_size=9, batch_size=batch_size, ).to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(net.parameters(), lr=0.0001) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, "max", verbose=True) if CONTINUE_PREVIOUS: net.load_state_dict(torch.load(last_state_file)) print("Loaded network state file.") stats = Stats( epoch=last_epoch if CONTINUE_PREVIOUS else 0, step=last_step if CONTINUE_PREVIOUS else 0, batch_size=batch_size, filename=UNIQUE_PATH.joinpath("training.log"), echo=True, tb_writer=torch.utils.tensorboard.writer.SummaryWriter( log_dir=tensorboard_path.with_suffix(f".xxx"), purge_step=(last_datapoint if CONTINUE_PREVIOUS else None), ), ) stats.write_header() datasets[f"sudoku_valid_0"].reset() valid_0_data = list(itertools.islice(datasets[f"sudoku_valid_0"], 300)) valid_old_data = list(itertools.islice(datasets[f"sudoku_valid_old"], 300)) tmp_data = valid_0_data[0].to(device) edge_index = tmp_data.edge_index edge_attr = tmp_data.edge_attr net = net.train() for epoch in range(stats.epoch, 100_000): stats.epoch = epoch train_dataloader = DataLoader( datasets[f"sudoku_train_{epoch}"], shuffle=False, num_workers=1, batch_size=batch_size, drop_last=True, ) for data in train_dataloader: stats.step += 1 if CONTINUE_PREVIOUS and stats.step <= 
last_step: continue optimizer.zero_grad() data = data.to(device) output = net(data.x, data.edge_index, data.edge_attr) loss = criterion(output, data.y) loss.backward() stats.total_loss += loss.detach().item() stats.num_steps += 1 # Accuracy for all _, predicted = torch.max(output.data, 1) stats.num_correct_preds += (predicted == data.y).sum().item() stats.num_preds += len(predicted) # Accuracy for missing only num_correct, num_total = get_stats_on_missing(data.x, data.y, output) stats.num_correct_preds_missing += num_correct stats.num_preds_missing += num_total optimizer.step() if (stats.datapoint % info_size) < batch_size: for j, data in enumerate(valid_0_data): data = data.to(device) with torch.no_grad() and eval_net(net): output = net(data.x, data.edge_index, data.edge_attr) num_correct, num_total = get_stats_on_missing(data.x, data.y, output) stats.num_correct_preds_missing_valid += num_correct stats.num_preds_missing_valid += num_total for j, data in enumerate(valid_old_data): data = data.to(device) with torch.no_grad() and eval_net(net): output = net(data.x, data.edge_index, edge_attr) num_correct, num_total = get_stats_on_missing(data.x, data.y, output) stats.num_correct_preds_missing_valid_old += num_correct stats.num_preds_missing_valid_old += num_total stats.write_row() stats.reset_parameters() if (stats.datapoint % checkpoint_size) < batch_size: output_filename = ( f"e{stats.epoch}-s{stats.step}-d{stats.datapoint}" f"-amv{str(round(stats.prev['accuracy_mv'], 4)).replace('.', '')}.state" ) torch.save(net.state_dict(), UNIQUE_PATH.joinpath(output_filename)) scheduler.step(stats.prev["accuracy_mv"]) output_filename = ( f"e{stats.epoch}-s{stats.step}-d{stats.datapoint}" f"-amv{str(round(stats.prev['accuracy_mv'], 4)).replace('.', '')}.state" ) torch.save(net.state_dict(), UNIQUE_PATH.joinpath(output_filename)) ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 이미지 분할 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/images/segmentation"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> TensorFlow.org에서 보기</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/images/segmentation.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> 구글 코랩(Colab)에서 실행하기</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/images/segmentation.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> 깃허브(GitHub) 소스보기</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/images/segmentation.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />노트북(notebook) 다운받기</a> </td> </table> Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다. 이 번역에 개선할 부분이 있다면 [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 문서 번역이나 리뷰에 참여하려면 [docs-ko@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로 메일을 보내주시기 바랍니다. 
이 튜토리얼은 수정된 <a href="https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/" class="external">U-Net</a>을 이용하여 이미지 분할에 집중합니다. ## 이미지 분할이란? 지금까지 네트워크의 과제가 입력 이미지에 레이블이나 클래스를 할당하는 이미지 분류를 보았습니다. 그러나 이미지에서 개체가 있는 위치, 해당 개체의 모양, 어떤 픽셀이 어떤 객체에 속하는지 등을 알고 싶다고 가정해 보세요. 이 경우 이미지를 분할하고 싶을 것입니다. 즉, 이미지의 각 픽셀에 레이블이 부여됩니다. 따라서 영상 분할의 과제는 영상의 픽셀 단위의 마스크를 출력하도록 신경망를 훈련시키는 것입니다.이것은 훨씬 낮은 레벨, 즉 픽셀 레벨에서 이미지를 이해하는 데 도움이 됩니다. 이미지 분할은 의료 영상, 자율주행차, 위성 영상화 분야에서 많이 응용이 되고 있습니다. 이번 튜토리얼에 사용 될 데이터 세트는 Parkhi *et al*이 만든 [Oxford-IIIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/)입니다. 데이터 세트는 영상, 해당 레이블과 픽셀 단위의 마스크로 구성됩니다. 마스크는 기본적으로 각 픽셀의 레이블입니다. 각 픽셀은 다음 세 가지 범주 중 하나가 주어집니다: * class 1 : 애완동물이 속한 픽셀 * class 2 : 애완동물과 인접한 픽셀 * class 3 : 위에 속하지 않는 경우/주변 픽셀 ``` !pip install git+https://github.com/tensorflow/examples.git !pip install -U tfds-nightly import tensorflow as tf from tensorflow_examples.models.pix2pix import pix2pix import tensorflow_datasets as tfds tfds.disable_progress_bar() from IPython.display import clear_output import matplotlib.pyplot as plt ``` ## Oxford-IIIT Pets 데이터 세트를 다운로드 하기 데이터 세트는 이미 텐서플로 데이터 세트에 포함되어 있으며, 다운로드만 하면 됩니다. 분할 마스크는 버전 3+에 포함되어 있습니다. ``` dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True) ``` 다음 코드는 이미지를 뒤집는 간단한 확장을 수행합니다. 또한, 영상이 [0,1]로 정규화됩니다. 마지막으로, 위에서 언급한 것처럼 분할 마스크의 픽셀에 {1, 2, 3}이라는 레이블이 붙습니다. 편의성을 위해 분할 마스크에서 1을 빼서 레이블이 {0, 1, 2}이 되도록 합시다. 
``` def normalize(input_image, input_mask): input_image = tf.cast(input_image, tf.float32) / 255.0 input_mask -= 1 return input_image, input_mask @tf.function def load_image_train(datapoint): input_image = tf.image.resize(datapoint['image'], (128, 128)) input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128)) if tf.random.uniform(()) > 0.5: input_image = tf.image.flip_left_right(input_image) input_mask = tf.image.flip_left_right(input_mask) input_image, input_mask = normalize(input_image, input_mask) return input_image, input_mask def load_image_test(datapoint): input_image = tf.image.resize(datapoint['image'], (128, 128)) input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128)) input_image, input_mask = normalize(input_image, input_mask) return input_image, input_mask ``` 데이터 세트에는 이미 필요한 몫의 시험과 훈련이 포함되어 있으므로 동일한 분할을 계속 사용합시다. ``` TRAIN_LENGTH = info.splits['train'].num_examples BATCH_SIZE = 64 BUFFER_SIZE = 1000 STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE train = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE) test = dataset['test'].map(load_image_test) train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) test_dataset = test.batch(BATCH_SIZE) ``` 이미지 예제와 데이터 세트에서 대응하는 마스크를 보도록 합시다. ``` def display(display_list): plt.figure(figsize=(15, 15)) title = ['Input Image', 'True Mask', 'Predicted Mask'] for i in range(len(display_list)): plt.subplot(1, len(display_list), i+1) plt.title(title[i]) plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i])) plt.axis('off') plt.show() for image, mask in train.take(1): sample_image, sample_mask = image, mask display([sample_image, sample_mask]) ``` ## 모델 정의하기 여기서 사용하는 모델은 수정된 U-Net입니다. U-Net은 인코더(다운샘플러)와 디코더(업샘플러)를 포함합니다. 강력한 기능을 학습하고 훈련 가능한 매개변수의 수를 줄이기 위해 미리 훈련된 모델을 인코더로 사용할 수 있습니다. 
따라서 이번 과제의 인코더는 미리 훈련된 MobileNetV2 모델이 될 것이며 이 모델의 중간 출력이 사용될 것입니다. 디코더는 [Pix2pix 튜토리얼](https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/pix2pix/pix2pix.py)의 TensorFlow 예제에서 이미 구현된 업샘플 블록이 될 것입니다. 3개의 채널을 출력하는 이유는 픽셀당 3개의 가능한 라벨이 있기 때문입니다. 이것을 각 화소가 세 개의 class로 분류되는 다중 분류라고 생각하세요. ``` OUTPUT_CHANNELS = 3 ``` 언급된 바와 같이 인코더는 미리 훈련된 MobileNetV2 모델이 될 것이며, [tf.keras.applications](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/applications)에서 사용될 준비가 될 것입니다. 인코더는 모델의 중간 층에서 나오는 특정 출력으로 구성됩니다. 인코더는 교육 과정 중에 학습되지 않는다는 점에 유의하세요. ``` base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3], include_top=False) #이 층들의 활성화를 이용합시다 layer_names = [ 'block_1_expand_relu', # 64x64 'block_3_expand_relu', # 32x32 'block_6_expand_relu', # 16x16 'block_13_expand_relu', # 8x8 'block_16_project', # 4x4 ] layers = [base_model.get_layer(name).output for name in layer_names] # 특징추출 모델을 만듭시다 down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers) down_stack.trainable = False ``` 디코더/업샘플러는 TensorFlow 예제에서 구현된 일련의 업샘플 블록입니다. ``` up_stack = [ pix2pix.upsample(512, 3), # 4x4 -> 8x8 pix2pix.upsample(256, 3), # 8x8 -> 16x16 pix2pix.upsample(128, 3), # 16x16 -> 32x32 pix2pix.upsample(64, 3), # 32x32 -> 64x64 ] def unet_model(output_channels): inputs = tf.keras.layers.Input(shape=[128, 128, 3]) x = inputs # 모델을 통해 다운샘플링합시다 skips = down_stack(x) x = skips[-1] skips = reversed(skips[:-1]) # 건너뛰기 연결을 업샘플링하고 설정하세요 for up, skip in zip(up_stack, skips): x = up(x) concat = tf.keras.layers.Concatenate() x = concat([x, skip]) # 이 모델의 마지막 층입니다 last = tf.keras.layers.Conv2DTranspose( output_channels, 3, strides=2, padding='same') #64x64 -> 128x128 x = last(x) return tf.keras.Model(inputs=inputs, outputs=x) ``` ## 모델 훈련하기 이제 모델을 컴파일하고 훈련시키는 일만 남았습니다. 여기서 사용되고 있는 손실 함수는 loss.sparse_categorical_crossentropy입니다. 이 손실 함수를 사용하는 이유는 네트워크가 멀티 클래스 예측과 마찬가지로 픽셀마다 레이블을 할당하려고 하기 때문입니다. 실제 분할 마스크에서 각 픽셀은 {0,1,2}를 가지고 있습니다. 
이곳의 네트워크는 세 개의 채널을 출력하고 있습니다. 기본적으로 각 채널은 클래스를 예측하는 방법을 배우려고 하고 있으며, loss.sparse_categical_crossentropy는 그러한 시나리오에 권장되는 손실입니다. 네트워크의 출력을 사용하여 픽셀에 할당된 레이블은 가장 높은 값을 가진 채널입니다.이것이 create_mask 함수가 하는 일입니다. ``` model = unet_model(OUTPUT_CHANNELS) model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) ``` 만들어진 모델의 구조를 간략히 살펴 보겠습니다. ``` tf.keras.utils.plot_model(model, show_shapes=True) ``` 모델을 시험해보고 훈련 전에 예측한 것이 무엇인지 알아봅시다. ``` def create_mask(pred_mask): pred_mask = tf.argmax(pred_mask, axis=-1) pred_mask = pred_mask[..., tf.newaxis] return pred_mask[0] def show_predictions(dataset=None, num=1): if dataset: for image, mask in dataset.take(num): pred_mask = model.predict(image) display([image[0], mask[0], create_mask(pred_mask)]) else: display([sample_image, sample_mask, create_mask(model.predict(sample_image[tf.newaxis, ...]))]) show_predictions() ``` 모델이 훈련하는 동안 어떻게 향상되는지 관찰해 봅시다. 이 작업을 수행하기 위해 콜백 함수가 아래에 정의되어 있습니다. ``` class DisplayCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): clear_output(wait=True) show_predictions() print ('\n에포크 이후 예측 예시 {}\n'.format(epoch+1)) EPOCHS = 20 VAL_SUBSPLITS = 5 VALIDATION_STEPS = info.splits['test'].num_examples//BATCH_SIZE//VAL_SUBSPLITS model_history = model.fit(train_dataset, epochs=EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, validation_steps=VALIDATION_STEPS, validation_data=test_dataset, callbacks=[DisplayCallback()]) loss = model_history.history['loss'] val_loss = model_history.history['val_loss'] epochs = range(EPOCHS) plt.figure() plt.plot(epochs, loss, 'r', label='Training loss') plt.plot(epochs, val_loss, 'bo', label='Validation loss') plt.title('Training and Validation Loss') plt.xlabel('Epoch') plt.ylabel('Loss Value') plt.ylim([0, 1]) plt.legend() plt.show() ``` ## 예측하기 몇 가지 예측을 해봅시다. 시간을 절약하기 위해 에포크 수를 작게 유지했지만, 보다 정확한 결과를 얻기 위해 에포크를 더 높게 설정할 수 있습니다. 
``` show_predictions(test_dataset, 3) ``` ## 다음 단계 이제 이미지 분할이 무엇이고 어떻게 작동하는지 이해했으니, 이 튜토리얼을 다른 중간 계층 출력 또는 미리 조정한 다른 모델에도 사용해 보세요. 여러분은 또한 카글에서 주최하는 [Carvana](https://www.kaggle.com/c/carvana-image-masking-challenge/overview) 이미지 마스킹 챌린지을 시도함으로써 본인의 실력을 시험해볼 수 있습니다. 자신의 데이터를 재훈련할 수 있는 다른 모델의 [Tensorflow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection)를 참조하세요.
github_jupyter
# Scheduling Real-Time Events with Simpy

[Simpy](https://simpy.readthedocs.io/en/latest/index.html) is a Python package for discrete-event simulation in Python. Simpy includes a provision for [real-time simulation](https://simpy.readthedocs.io/en/latest/topical_guides/real-time-simulations.html) which provides a potentially useful tool for coding laboratory experiments with complex scheduling requirements.

Keep in mind that Python is not designed for real-time use, and Simpy should not be trusted for applications requiring time accuracy tighter than, say, 100ms. Further, it is not an asynchronous implementation, so your interpreter will be blocked during the course of the experiment. But for quick-and-dirty applications with modest performance requirements, Simpy real-time may be a simple solution.

```
!pip install simpy
```

## Blinkers

```
from pymata4 import pymata4
import simpy.rt
import time

led0 = 13
led1 = 9

def blinker(env, board, pin, period):
    board.set_pin_mode_digital_output(pin)
    while True:
        board.digital_write(pin, 1)
        end = time.perf_counter()
        print(f"led {pin:2d} on at {end-start:5.3f}")
        yield env.timeout(period/2)
        board.digital_write(pin, 0)
        end = time.perf_counter()
        print(f"led {pin:2d} off at {end-start:5.3f}")
        yield env.timeout(period/2)

board = pymata4.Pymata4()

env = simpy.rt.RealtimeEnvironment()
env.process(blinker(env, board, led0, 2.0))
env.process(blinker(env, board, led1, 2.0))

start = time.perf_counter()
env.run(until=20)
board.shutdown()
```

## Asyncio

```
import asyncio
from pymata4 import pymata4
import time

led0 = 13
led1 = 9

async def blinker(board, pin, period, start):
    board.set_pin_mode_digital_output(pin)
    while time.perf_counter() < start + 20:
        board.digital_write(pin, 1)
        print(f"led {pin:2d} on at {round(time.perf_counter()-start, 2)}")
        k = round((time.perf_counter() - start)/(period/2))
        dt = (k+1)*period/2 - (time.perf_counter() - start)
        await asyncio.sleep(dt-0.05)
        board.digital_write(pin, 0)
        print(f"led {pin:2d} off at 
{round(time.perf_counter()-start, 2)}") k = round((time.perf_counter() - start)/(period/2)) dt = (k+1)*period/2 - (time.perf_counter() - start) await asyncio.sleep(dt-0.05) board = pymata4.Pymata4() async def expt(): start = time.perf_counter() coroutines = [ blinker(board, led0, 2.0, start), blinker(board, led1, 2.0, start) ] await asyncio.gather(*coroutines) await expt() board.shutdown() ```
github_jupyter
# Residual analysis to determine the optimal cutoff frequency > Marcos Duarte > Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/)) > Federal University of ABC, Brazil A common problem in signal processing is to automatically determine the optimal cutoff frequency that should be employed in a low-pass filter to attenuate as much as possible the noise without compromising the signal content of the data. Before we continue, see [this Jupyter notebook](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DataFiltering.ipynb) for an overview about data filtering if needed. Unfortunately, there is no definite solution for this problem, but there are some techniques, with different degrees of success, to try to determine the optimal cutoff frequency. David Winter, in his classic book *Biomechanics and motor control of human movement*, proposed a method to find the optimal cutoff frequency based on residual analysis of the difference between filtered and unfiltered signals over a range of cutoff frequencies. The optimal cutoff frequency is the one where the residual starts to change very little because it is considered that from this point, it's being filtered mostly noise and minimally signal, ideally. This concept is straightforward to implement. The function `residual_analysis.py` (code at the end of this text) is an implementation of this method and it is divided in three parts (after the help section): first, the residuals over a range of cutoff frequencies are calculated; second, an algorithm tries to find the noisy region (with a supposed linear behavior in the frequency domain) of the residuals versus cutoff frequencies plot and finds the optimal cutoff frequency; and third, the results are plotted. The code is lengthy relatively to the simplicity of the idea because of the long help section, the implementation of the automatic search and a rich plot. 
Here is the function signature: ```python fc_opt = residual_analysis(y, freq=1, fclim=[], show=False, ax=None) ``` Let's test this function with benchmark data. In 1977, Pezzack, Norman and Winter published a paper where they investigated the effects of differentiation and filtering processes on experimental data (the angle of a bar manipulated in space). Since then, these data have become a benchmark to test new algorithms. Let's work with these data (available at [http://isbweb.org/data/pezzack/index.html](http://isbweb.org/data/pezzack/index.html)). The data have the angular displacement measured by video and the angular acceleration directly measured by an accelerometer, which we will consider as the true acceleration. Part of these data are shown next: ``` # Import the necessary libraries import numpy as np %matplotlib inline import matplotlib.pyplot as plt import sys sys.path.insert(1, r'./../functions') # add dir to pythonpath # load data file time, disp, disp2, aacc = np.loadtxt('./../data/Pezzack.txt', skiprows=6, unpack=True) dt = np.mean(np.diff(time)) # plot data fig, (ax1,ax2) = plt.subplots(1, 2, sharex = True, figsize=(11, 4)) plt.suptitle("Pezzack's benchmark data", fontsize=20) ax1.plot(time, disp, 'b') ax1.set_xlabel('Time [s]'); ax1.set_ylabel('Angular displacement [rad]') ax2.plot(time, aacc, 'g') ax2.set_xlabel('Time [s]'); ax2.set_ylabel('Angular acceleration [rad/s$^2$]') plt.subplots_adjust(wspace=0.3) ``` And using the residual analysis code: ``` from residual_analysis import residual_analysis freq = np.mean(1/np.diff(time)) fc_opt = residual_analysis(disp, freq=freq, show=True) ``` The optimal cutoff frequency found is 5.6 Hz. Note that the filtering process is relevant only for the derivative of the data; we cannot distinguish the unfiltered and filtered displacements (see that the RMSE residual is very small).
Let's employ this filter, differentiate the data twice and compare with the true acceleration as we did before: ``` from scipy.signal import butter, filtfilt # Butterworth filter # Correct the cutoff frequency for the number of passes in the filter C = 0.802 # for dual pass; C = (2**(1/npasses) - 1)**0.25 b, a = butter(2, (fc_opt/C)/(freq/2)) dispf = filtfilt(b, a, disp) aaccBW = np.diff(dispf, 2)*freq*freq # RMSE: rmseBW = np.sqrt(np.mean((aaccBW-aacc[1:-1])**2)) # plot data fig, ax1 = plt.subplots(1, 1, figsize=(11, 4)) plt.suptitle("Pezzack's benchmark data", fontsize=20) ax1.plot(time[1:-1], aacc[1:-1], 'g', label='Analog acceleration: (True value)') ax1.plot(time[1:-1], aaccBW, 'r', label='Butterworth %.3g Hz: RMSE = %0.2f' %(fc_opt,rmseBW)) ax1.set_xlabel('Time [s]'); ax1.set_ylabel('Angular acceleration [rad/s$^2$]'); plt.legend(frameon=False, fontsize=12, loc='upper left'); ``` The performance seems satisfactory (see [this Jupyter notebook](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DataFiltering.ipynb) for a comparison using other filters), but it is known that this residual analysis algorithm results in oversmoothing the kinematic data (see [http://www.clinicalgaitanalysis.com/faq/cutoff.html](http://www.clinicalgaitanalysis.com/faq/cutoff.html)). To read more about the determination of the optimal cutoff frequency, see the following papers: - Pezzack, Norman, & Winter (1977). [An assessment of derivative determining techniques used for motion analysis](http://www.health.uottawa.ca/biomech/courses/apa7305/JB-Pezzack-Norman-Winter-1977.pdf). Journal of Biomechanics, 10, 377-382. - Giakas & Baltizopoulos (1997) [A comparison of automatic filtering techniques applied to biomechanical walking data](http://www.pe.uth.gr/sk_cms/scriptlib/getblob.php?redir=../sk_cms/images/notfound.htm&table=pepublications&field=doc&id=30). J. Biomech. 30, 847-850. 
- Alonso, Salgado, Cuadrado & Pintado (2009) [Automatic smoothing of raw kinematic signals using SSA and cluster analysis](http://lim.ii.udc.es/docs/proceedings/2009_09_EUROMECH_Automatic.pdf). 7th EUROMECH Solid Mechanics Conference. - Kristianslund, Krosshaug & Bogert (2012) [Effect of low pass filtering on joint moments from inverse dynamics: Implications for injury prevention](http://www.klokavskade.no/upload/Publication/Kristianslund_2012_J%20Biomechan_Effect%20of%20low-pass%20filtering%20on%20joint%20moments%20from%20inverse%20dynamics.pdf). J. Biomech. 45, 666-671. ## References - Pezzack JC, Norman RW, & Winter DA (1977). [An assessment of derivative determining techniques used for motion analysis](http://www.health.uottawa.ca/biomech/courses/apa7305/JB-Pezzack-Norman-Winter-1977.pdf). Journal of Biomechanics, 10, 377-382. [PubMed](http://www.ncbi.nlm.nih.gov/pubmed/893476). - Winter DA (2009) [Biomechanics and motor control of human movement](http://books.google.com.br/books?id=_bFHL08IWfwC&printsec=frontcover&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false). 4 ed. Hoboken, EUA: Wiley. ## Function `residual_analysis.py` ``` # %load ./../functions/residual_analysis #!/usr/bin/env python """Automatic search of filter cutoff frequency based on residual analysis.""" import numpy as np from scipy.signal import butter, filtfilt __author__ = 'Marcos Duarte, https://github.com/demotu/BMC' __version__ = "1.0.5" __license__ = "MIT" def residual_analysis(y, freq=1, fclim=[], show=False, ax=None): """ Automatic search of filter cutoff frequency based on residual analysis. This method was proposed by Winter in his book [1]_. The 'optimal' cutoff frequency (in the sense that a filter with such cutoff frequency removes as much noise as possible without considerably affecting the signal) is found by performing a residual analysis of the difference between filtered and unfiltered signals over a range of cutoff frequencies. 
The optimal cutoff frequency is the one where the residual starts to change very little because it is considered that from this point, it's being filtered mostly noise and minimally signal, ideally. Parameters ---------- y : 1D array_like Data freq : float, optional (default = 1) sampling frequency of the signal y fclim : list with 2 numbers, optional (default = []) limit frequencies of the noisy part or the residuals curve show : bool, optional (default = False) True (1) plots data in a matplotlib figure False (0) to not plot ax : a matplotlib.axes.Axes instance, optional (default = None). Returns ------- fc_opt : float optimal cutoff frequency (None if not found) Notes ----- A second-order zero-phase digital Butterworth low-pass filter is used. # The cutoff frequency is correctyed for the number of passes: # C = (2**(1/npasses) - 1)**0.25. C = 0.802 for a dual pass filter. The matplotlib figure with the results will show a plot of the residual analysis with the optimal cutoff frequency, a plot with the unfiltered and filtered signals at this optimal cutoff frequency (with the RMSE of the difference between these two signals), and a plot with the respective second derivatives of these signals which should be useful to evaluate the quality of the optimal cutoff frequency found. Winter should not be blamed for the automatic search algorithm used here. The algorithm implemented is just to follow as close as possible Winter's suggestion of fitting a regression line to the noisy part of the residuals. This function performs well with data where the signal has frequencies considerably bellow the Niquist frequency and the noise is predominantly white in the higher frequency region. If the automatic search fails, the lower and upper frequencies of the noisy part of the residuals curve cam be inputed as a parameter (fclim). These frequencies can be chosen by viewing the plot of the residuals (enter show=True as input parameter when calling this function). 
It is known that this residual analysis algorithm results in oversmoothing kinematic data [2]_. Use it with moderation. This code is described elsewhere [3]_. References ---------- .. [1] Winter DA (2009) Biomechanics and motor control of human movement. .. [2] http://www.clinicalgaitanalysis.com/faq/cutoff.html .. [3] http://nbviewer.ipython.org/github/duartexyz/BMC/blob/master/ResidualAnalysis.ipynb Examples -------- >>> import numpy as np >>> from residual_analysis import residual_analysis >>> y = np.cumsum(np.random.randn(1000)) >>> # optimal cutoff frequency based on residual analysis and plot: >>> fc_opt = residual_analysis(y, freq=1000, show=True) >>> # sane analysis but specifying the frequency limits and plot: >>> residual_analysis(y, freq=1000, fclim=[200,400], show=True) >>> # It's not always possible to find an optimal cutoff frequency >>> # or the one found can be wrong (run this example many times): >>> y = np.random.randn(100) >>> residual_analysis(y, freq=100, show=True) """ from scipy.interpolate import UnivariateSpline # Correct the cutoff frequency for the number of passes in the filter C = 0.802 # for dual pass; C = (2**(1/npasses)-1)**0.25 # signal filtering freqs = np.linspace((freq/2) / 100, (freq/2)*C, 101, endpoint=False) res = [] for fc in freqs: b, a = butter(2, (fc/C) / (freq / 2)) yf = filtfilt(b, a, y) # residual between filtered and unfiltered signals res = np.hstack((res, np.sqrt(np.mean((yf - y)**2)))) # find the optimal cutoff frequency by fitting an exponential curve # y = A*exp(B*x)+C to the residual data and consider that the tail part # of the exponential (which should be the noisy part of the residuals) # decay starts after 3 lifetimes (exp(-3), 95% drop) if not len(fclim) or np.any(fclim < 0) or np.any(fclim > freq/2): fc1 = 0 fc2 = int(0.95*(len(freqs)-1)) # log of exponential turns the problem to first order polynomial fit # make the data always greater than zero before taking the logarithm reslog = 
np.log(np.abs(res[fc1:fc2 + 1] - res[fc2]) + 1000 * np.finfo(np.float).eps) Blog, Alog = np.polyfit(freqs[fc1:fc2 + 1], reslog, 1) fcini = np.nonzero(freqs >= -3 / Blog) # 3 lifetimes fclim = [fcini[0][0], fc2] if np.size(fcini) else [] else: fclim = [np.nonzero(freqs >= fclim[0])[0][0], np.nonzero(freqs >= fclim[1])[0][0]] # find fc_opt with linear fit y=A+Bx of the noisy part of the residuals if len(fclim) and fclim[0] < fclim[1]: B, A = np.polyfit(freqs[fclim[0]:fclim[1]], res[fclim[0]:fclim[1]], 1) # optimal cutoff frequency is the frequency where y[fc_opt] = A roots = UnivariateSpline(freqs, res - A, s=0).roots() fc_opt = roots[0] if len(roots) else None else: fc_opt = None if show: _plot(y, freq, freqs, res, fclim, fc_opt, B, A, ax) return fc_opt def _plot(y, freq, freqs, res, fclim, fc_opt, B, A, ax): """Plot results of the residual_analysis function, see its help.""" try: import matplotlib.pyplot as plt except ImportError: print('matplotlib is not available.') else: if ax is None: plt.figure(num=None, figsize=(10, 5)) ax = np.array([plt.subplot(121), plt.subplot(222), plt.subplot(224)]) plt.rc('axes', labelsize=12, titlesize=12) plt.rc('xtick', labelsize=12) plt.rc('ytick', labelsize=12) ax[0].plot(freqs, res, 'b.', markersize=9) time = np.linspace(0, len(y) / freq, len(y)) ax[1].plot(time, y, 'g', linewidth=1, label='Unfiltered') ydd = np.diff(y, n=2) * freq ** 2 ax[2].plot(time[:-2], ydd, 'g', linewidth=1, label='Unfiltered') if fc_opt: ylin = np.poly1d([B, A])(freqs) ax[0].plot(freqs, ylin, 'r--', linewidth=2) ax[0].plot(freqs[fclim[0]], res[fclim[0]], 'r>', freqs[fclim[1]], res[fclim[1]], 'r<', ms=9) ax[0].set_ylim(ymin=0, ymax=4 * A) ax[0].plot([0, freqs[-1]], [A, A], 'r-', linewidth=2) ax[0].plot([fc_opt, fc_opt], [0, A], 'r-', linewidth=2) ax[0].plot(fc_opt, 0, 'ro', markersize=7, clip_on=False, zorder=9, label='$Fc_{opt}$ = %.1f Hz' % fc_opt) ax[0].legend(fontsize=12, loc='best', numpoints=1, framealpha=.5) # Correct the cutoff frequency for the 
number of passes C = 0.802 # for dual pass; C = (2**(1/npasses) - 1)**0.25 b, a = butter(2, (fc_opt/C) / (freq / 2)) yf = filtfilt(b, a, y) ax[1].plot(time, yf, color=[1, 0, 0, .5], linewidth=2, label='Opt. filtered') ax[1].legend(fontsize=12, loc='best', framealpha=.5) ax[1].set_title('Signals (RMSE = %.3g)' % A) yfdd = np.diff(yf, n=2) * freq ** 2 ax[2].plot(time[:-2], yfdd, color=[1, 0, 0, .5], linewidth=2, label='Opt. filtered') ax[2].legend(fontsize=12, loc='best', framealpha=.5) resdd = np.sqrt(np.mean((yfdd - ydd) ** 2)) ax[2].set_title('Second derivatives (RMSE = %.3g)' % resdd) else: ax[0].text(.5, .5, 'Unable to find optimal cutoff frequency', horizontalalignment='center', color='r', zorder=9, transform=ax[0].transAxes, fontsize=12) ax[1].set_title('Signal') ax[2].set_title('Second derivative') ax[0].set_xlabel('Cutoff frequency [Hz]') ax[0].set_ylabel('Residual RMSE') ax[0].set_title('Residual analysis') ax[0].grid() # ax2.set_xlabel('Time [s]') ax[1].set_xlim(0, time[-1]) ax[1].grid() ax[2].set_xlabel('Time [s]') ax[2].set_xlim(0, time[-1]) ax[2].grid() plt.tight_layout() plt.show() ```
github_jupyter
# 1장. 소개 *아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.* <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://nbviewer.org/github/rickiepark/intro_ml_with_python_2nd_revised/blob/main/01-introduction.ipynb"><img src="https://jupyter.org/assets/share.png" width="60" />주피터 노트북 뷰어로 보기</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/rickiepark/intro_ml_with_python_2nd_revised/blob/main/01-introduction.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> </td> </table> ``` # 노트북이 코랩에서 실행 중인지 체크합니다. import sys if 'google.colab' in sys.modules: # 사이킷런 최신 버전을 설치합니다. !pip install -q --upgrade scikit-learn # mglearn을 다운받고 압축을 풉니다. !wget -q -O mglearn.tar.gz https://bit.ly/mglearn-tar-gz !tar -xzf mglearn.tar.gz import sklearn from preamble import * ``` ## 1.1 왜 머신 러닝인가? ### 1.1.1 머신 러닝으로 풀 수 있는 문제 ### 1.1.2 문제와 데이터를 이해하기 ## 1.2 왜 파이썬인가? ## 1.3 scikit-learn ### 1.3.1 Scikit-learn 설치 ## 1.4 필수 라이브러리와 도구들 ### 1.4.1 주피터 노트북 ### 1.4.2 NumPy ``` import numpy as np x = np.array([[1, 2, 3], [4, 5, 6]]) print("x:\n", x) ``` ### 1.4.3 SciPy ``` from scipy import sparse # 대각선 원소는 1이고 나머지는 0인 2차원 NumPy 배열을 만듭니다. eye = np.eye(4) print("NumPy 배열:\n", eye) # NumPy 배열을 CSR 포맷의 SciPy 희박 행렬로 변환합니다. # 0이 아닌 원소만 저장됩니다. sparse_matrix = sparse.csr_matrix(eye) print("\nSciPy의 CSR 행렬:\n", sparse_matrix) data = np.ones(4) row_indices = np.arange(4) col_indices = np.arange(4) eye_coo = sparse.coo_matrix((data, (row_indices, col_indices))) print("COO 표현:\n", eye_coo) ``` ### 1.4.4 matplotlib ``` import matplotlib.pyplot as plt # -10에서 10까지 100개의 간격으로 나뉘어진 배열을 생성합니다. x = np.linspace(-10, 10, 100) # 사인 함수를 사용하여 y 배열을 생성합니다. y = np.sin(x) # plot 함수는 한 배열의 값을 다른 배열에 대응해서 선 그래프를 그립니다. plt.plot(x, y, marker="x") plt.show() # 책에는 없음 ``` ### 1.4.5 pandas ``` import pandas as pd # 회원 정보가 들어간 간단한 데이터셋을 생성합니다. 
data = {'Name': ["John", "Anna", "Peter", "Linda"], 'Location' : ["New York", "Paris", "Berlin", "London"], 'Age' : [24, 13, 53, 33] } data_pandas = pd.DataFrame(data) # 주피터 노트북은 Dataframe을 미려하게 출력해줍니다. data_pandas # Age 열의 값이 30 이상인 모든 행을 선택합니다. data_pandas[data_pandas.Age > 30] ``` ### 1.4.6 mglearn ## 1.5 파이썬 2 vs. 파이썬 3 ## 1.6 이 책에서 사용하는 소프트웨어 버전 ``` import sys print("Python 버전:", sys.version) import pandas as pd print("pandas 버전:", pd.__version__) import matplotlib print("matplotlib 버전:", matplotlib.__version__) import numpy as np print("NumPy 버전:", np.__version__) import scipy as sp print("SciPy 버전:", sp.__version__) import IPython print("IPython 버전:", IPython.__version__) import sklearn print("scikit-learn 버전:", sklearn.__version__) ``` ## 1.7 첫 번째 애플리케이션: 붓꽃의 품종 분류 ### 1.7.1 데이터 적재 ``` from sklearn.datasets import load_iris iris_dataset = load_iris() print("iris_dataset의 키:\n", iris_dataset.keys()) print(iris_dataset['DESCR'][:193] + "\n...") print("타깃의 이름:", iris_dataset['target_names']) print("특성의 이름:\n", iris_dataset['feature_names']) print("data의 타입:", type(iris_dataset['data'])) print("data의 크기:", iris_dataset['data'].shape) print("data의 처음 다섯 행:\n", iris_dataset['data'][:5]) print("target의 타입:", type(iris_dataset['target'])) print("target의 크기:", iris_dataset['target'].shape) print("타깃:\n", iris_dataset['target']) ``` ### 1.7.2 성과 측정: 훈련 데이터와 테스트 데이터 ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( iris_dataset['data'], iris_dataset['target'], random_state=0) print("X_train 크기:", X_train.shape) print("y_train 크기:", y_train.shape) print("X_test 크기:", X_test.shape) print("y_test 크기:", y_test.shape) ``` ### 1.7.3 가장 먼저 할 일: 데이터 살펴보기 ``` # X_train 데이터를 사용해서 데이터프레임을 만듭니다. # 열의 이름은 iris_dataset.feature_names에 있는 문자열을 사용합니다. iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names) # 데이터프레임을 사용해 y_train에 따라 색으로 구분된 산점도 행렬을 만듭니다. 
pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o', hist_kwds={'bins': 20}, s=60, alpha=.8, cmap=mglearn.cm3) plt.show() # 책에는 없음 ``` ### 1.7.4 첫 번째 머신 러닝 모델: k-최근접 이웃 알고리즘 ``` from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=1) knn.fit(X_train, y_train) ``` ### 1.7.5 예측하기 ``` X_new = np.array([[5, 2.9, 1, 0.2]]) print("X_new.shape:", X_new.shape) prediction = knn.predict(X_new) print("예측:", prediction) print("예측한 타깃의 이름:", iris_dataset['target_names'][prediction]) ``` ### 1.7.6 모델 평가하기 ``` y_pred = knn.predict(X_test) print("테스트 세트에 대한 예측값:\n", y_pred) print("테스트 세트의 정확도: {:.2f}".format(np.mean(y_pred == y_test))) print("테스트 세트의 정확도: {:.2f}".format(knn.score(X_test, y_test))) ``` ## 1.8 요약 ``` X_train, X_test, y_train, y_test = train_test_split( iris_dataset['data'], iris_dataset['target'], random_state=0) knn = KNeighborsClassifier(n_neighbors=1) knn.fit(X_train, y_train) print("테스트 세트의 정확도: {:.2f}".format(knn.score(X_test, y_test))) ```
github_jupyter
## More Tesseract ``` # In the previous example, we were using a clear, unambiguous image for conversion. Sometimes there will # be noise in images you want to OCR, making it difficult to extract the text. Luckily, there are # techniques we can use to increase the efficacy of OCR with pytesseract and Pillow. # # Let's use a different image this time, with the same text as before but with added noise in the picture. # We can view this image using the following code. from PIL import Image img = Image.open("readonly/Noisy_OCR.PNG") display(img) # As you can see, this image had shapes of different opacities behind the text, which can confuse # the tesseract engine. Let's see if OCR will work on this noisy image import pytesseract text = pytesseract.image_to_string(Image.open("readonly/Noisy_OCR.PNG")) print(text) # This is a bit surprising given how nicely tesseract worked previously! Let's experiment on the image # using techniqes that will allow for more effective image analysis. First up, lets change the size of # the image # First we will import PIL import PIL # Then set the base width of our image basewidth = 600 # Now lets open it img = Image.open("readonly/Noisy_OCR.PNG") # We want to get the correct aspect ratio, so we can do this by taking the base width and dividing # it by the actual width of the image wpercent = (basewidth / float(img.size[0])) # With that ratio we can just get the appropriate height of the image. hsize = int((float(img.size[1]) * float(wpercent))) # Finally, lets resize the image. antialiasing is a specific way of resizing lines to try and make them # appear smooth img = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS) # Now lets save this to a file img.save('resized_nois.png') # save the image as a jpg # And finally, lets display it display(img) # and run OCR text = pytesseract.image_to_string(Image.open('resized_nois.png')) print(text) # hrm, no improvement for resizing the image. Let's convert the image to greyscale. 
Converting images # can be done in many different ways. If we poke around in the PILLOW documentation we find that one of # the easiest ways to do this is to use the convert() function and pass in the string 'L' img = Image.open('readonly/Noisy_OCR.PNG') img = img.convert('L') # Now lets save that image img.save('greyscale_noise.jpg') # And run OCR on the greyscale image text = pytesseract.image_to_string(Image.open('greyscale_noise.jpg')) print(text) # Wow, that worked really well! If we look at the help documentation using the help function # as in help(img.convert) we see that the conversion mechanism is the ITU-R 601-2 luma transform. # There's more information about this out there, but this method essentially takes a three channel image, # where there is information for the amount of red, green, and blue (R, G, and B), and reduces it # to a single channel to represent luminosity. This method actually comes from how standard # definition television sets encoded color onto black and while images. If you get really interested # in image manipulation and recognition, learning about color spaces and how we represent color, both # computationally and through human perception, is really an interesting field. # Even though we have now the complete text of the image, there are a few other techniques # we could use to help improve OCR detection in the event that the above two don't help. # The next approach I would use is called binarization, which means to separate into two # distinct parts - in this case, black and white. Binarization is enacted through a process # called thresholding. If a pixel value is greater than a threshold value, it will be converted # to a black pixel; if it is lower than the threshold it will be converted to a white pixel. # This process eliminates noise in the OCR process allowing greater image recognition accuracy. # With Pillow, this process is straightforward. 
# Lets open the noisy impage and convert it using binarization img = Image.open('readonly/Noisy_OCR.PNG').convert('1') # Now lets save and display that image img.save('black_white_noise.jpg') display(img) # So, that was a bit magical, and really required a fine reading of the docs to figure out # that the number "1" is a string parameter to the convert function actually does the binarization. # But you actually have all of the skills you need to write this functionality yourself. # Lets walk through an example. First, lets define a function called binarize, which takes in # an image and a threshold value: def binarize(image_to_transform, threshold): # now, lets convert that image to a single greyscale image using convert() output_image=image_to_transform.convert("L") # the threshold value is usually provided as a number between 0 and 255, which # is the number of bits in a byte. # the algorithm for the binarization is pretty simple, go through every pixel in the # image and, if it's greater than the threshold, turn it all the way up (255), and # if it's lower than the threshold, turn it all the way down (0). # so lets write this in code. First, we need to iterate over all of the pixels in the # image we want to work with for x in range(output_image.width): for y in range(output_image.height): # for the given pixel at w,h, lets check its value against the threshold if output_image.getpixel((x,y))< threshold: #note that the first parameter is actually a tuple object # lets set this to zero output_image.putpixel( (x,y), 0 ) else: # otherwise lets set this to 255 output_image.putpixel( (x,y), 255 ) #now we just return the new image return output_image # lets test this function over a range of different thresholds. Remember that you can use # the range() function to generate a list of numbers at different step sizes. range() is called # with a start, a stop, and a step size. 
So lets try range(0, 257, 64), which should generate 5 # images of different threshold values for thresh in range(0,257,64): print("Trying with threshold " + str(thresh)) # Lets display the binarized image inline display(binarize(Image.open('readonly/Noisy_OCR.PNG'), thresh)) # And lets use tesseract on it. It's inefficient to binarize it twice but this is just for # a demo print(pytesseract.image_to_string(binarize(Image.open('readonly/Noisy_OCR.PNG'), thresh))) # We can see from this that a threshold of 0 essentially turns everything white, # that the text becomes more bold as we move towards a higher threshold, and that # the shapes, which have a filled in grey color, become more evident at higher # thresholds. In the next lecture we'll look a bit more at some of the challenges # you can expect when doing OCR on real data ```
github_jupyter
``` import arviz as az import matplotlib.pyplot as plt import pandas as pd import numpy as onp from scipy.interpolate import BSpline from jax import lax, vmap import jax.numpy as np from jax.random import PRNGKey import numpyro from numpyro.contrib.autoguide import (AutoContinuousELBO, AutoLaplaceApproximation) from numpyro.diagnostics import hpdi, print_summary import numpyro.distributions as dist from numpyro.infer import Predictive, SVI, init_to_value import numpyro.optim as optim az.style.use("arviz-darkgrid") %matplotlib inline from IPython.display import set_matplotlib_formats set_matplotlib_formats('svg') ``` # Chapter 4 Practice Problems are labeled Easy (E), Medium (M), and Hard (H). #### 4E1. In the model definition below, which line is the likelihood? - [x] $y_i \sim \text{Normal}(\mu, \sigma)$ - [ ] $\mu \sim \text{Normal}(0, 10)$ - [ ] $\sigma \sim \text{Exponential}(1)$ #### 4E2. In the model definition just above, how many parameters are in the posterior distribution? - 2 #### 4E3. Using the model definition above, write down the appropriate form of Bayes’ theorem that includes the proper likelihood and priors. $$ Pr(\mu,\sigma|y_i) = \frac{\Pi_i\text{Normal}(y_i|\mu,\sigma)\text{Normal}(\mu|0,10)\text{Exponential}(\sigma|1)}{\int\int\Pi_i\text{Normal}(y_i|\mu,\sigma)\text{Normal}(\mu|0,10)\text{Exponential}(\sigma|1)d\mu d\sigma} $$ #### 4E4. In the model definition below, which line is the linear model? - [ ] $y_i \sim \text{Normal}(\mu, \sigma)$ - [x] $\mu_i = \alpha + \beta x_i $ - [ ] $\alpha \sim \text{Normal}(0, 10)$ - [ ] $\beta \sim \text{Normal}(0,1)$ - [ ] $\sigma \sim \text{Exponential}(2)$ #### 4E5. In the model definition just above, how many parameters are in the posterior distribution? - 3 #### 4M1. For the model definition below, simulate observed y values from the prior (not the posterior). 
$$\begin{matrix} y_i &\sim \text{Normal}(\mu, \sigma)\\ \mu &\sim \text{Normal}(0, 10)\\ \sigma &\sim \text{Exponential}(1) \end{matrix}$$ ``` with numpyro.handlers.seed(rng=100): N = 10000 mu = numpyro.sample("mu", dist.Normal(0, 10), sample_shape=(N,)) sigma = numpyro.sample("sigma", dist.Exponential(1), sample_shape=(N,)) y = numpyro.sample('y', dist.Normal(mu,sigma)) print_summary({"y":y}, 0.89, 0) ``` #### 4M2. Translate the model just above into a quap formula. ``` def model_4m2(y): mu = numpyro.sample("mu", dist.Normal(0, 10), sample_shape=(N,)) sigma = numpyro.sample("sigma", dist.Exponential(1), sample_shape=(N,)) y = numpyro.sample('y', dist.Normal(mu,sigma), obs=y) ``` #### 4M3. Translate the quap model formula below into a mathematical model definition. y ~ dnorm( mu , sigma ) mu <- a + b*x a ~ dnorm( 0 , 10 ) b ~ dunif( 0 , 1 ) sigma ~ dexp( 1 ) $$ Pr(\alpha,\beta,\sigma|y_i,x_i) = \frac{\Pi_i\text{Normal}\big(y_i|\mu=(\text{Normal}(\alpha|0,10)+ \text{Uniform}(\beta|0,1)\times x_i),\sigma \big)\text{Exponential}(\sigma|1)}{\int\int\int\Pi_i\text{Normal}\big(y_i|\mu=(\text{Normal}(\alpha|0,10)+ \text{Uniform}(\beta|0,1)\times x_i),\sigma \big)\text{Exponential}(\sigma|1)\;d\alpha\;d\beta\; d\sigma} $$ #### 4M4. A sample of students is measured for height each year for 3 years. After the third year, you want to fit a linear regression predicting height using year as a predictor. Write down the mathematical model definition for this regression, using any variable names and priors you choose. Be prepared to defend your choice of priors. h ~ ( mu, sigma) mu = alpha + beta * year alpha ~ Normal(165, 10) beta ~ Log-Normal(0, 10) sigma ~ Exponential(0.05) Explanation: - Suppose this is sample of young adult - alpha is the mean of height, as a Vietnamese, which is quite smaller than other countries. The mean should be around 160 -170. - beta is slope for `year`, and it should be postive as taller as people get older, so i use log normal. 
- sigma is positive, the rate of exponenial is inverse of scale, so 1/0.05 = 20, so height can be 145 or 185. $$ Pr(\alpha,\beta,\sigma|h_i,y_i) = \frac{\Pi_i\text{Normal}\big(h_i|\mu=(\text{Normal}(\alpha|165,10)+ \text{Log-Normal}(\beta|0,1)\times y_i),\sigma \big)\text{Exponential}(\sigma|0.05)}{\int\int\int\Pi_i\text{Normal}\big(h_i|\mu=(\text{Normal}(\alpha|165,10)+ \text{Log-Normal}(\beta|0,1)\times y_i),\sigma \big)\text{Exponential}(\sigma|0.05)\;d\alpha\;d\beta\; d\sigma} $$ #### 4M5. Now suppose I remind you that every student got taller each year. Does this information lead you to change your choice of priors? How? No, as slope of weight is already positive. #### 4M6. Now suppose I tell you that the variance among heights for students of the same age is never more than 64cm. How does this lead you to revise your priors? It means sigma <= 8. So sigma prior maybe exponential (rate =1/8 = 0.125) Let's try if it's true. ``` for rate in np.arange(0.1, 2, 0.1): samples = dist.Exponential(rate).sample(PRNGKey(10), (10000,)) max_variance = np.max(samples)**2 print("rate=", rate, "maxvar=", np.round(max_variance,2), end='') if max_variance > 64: print('-'*10, '> not accepted') else: print('-'*10, '> accepted') az.plot_dist(dist.Exponential(1.6).sample(PRNGKey(10), (1000,))) ``` -> I now choose prior for sigma ~ Exponential (1.6)
github_jupyter
##### Copyright 2018 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); ``` # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Channel Attribution -- Building Blocks of Interpretability This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well! This notebook demonstrates **Channel Attribution**, a technique for exploring how different detectors in the network effected its output. <br> <img src="https://storage.googleapis.com/lucid-static/building-blocks/notebook_heroes/channel-attribution.jpeg" width="648"></img> <br> This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research. **Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going: > **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU** Thanks for trying Lucid! 
# Install / Import / Load This code depends on [Lucid](https://github.com/tensorflow/lucid) (our visualization library), and [svelte](https://svelte.technology/) (a web framework). The following cell will install both of them, and dependancies such as TensorFlow. And then import them as appropriate. ``` !pip install --quiet lucid==0.0.5 !npm install -g svelte-cli@2.2.0 import numpy as np import tensorflow as tf import lucid.modelzoo.vision_models as models from lucid.misc.io import show import lucid.optvis.objectives as objectives import lucid.optvis.param as param import lucid.optvis.render as render import lucid.optvis.transform as transform from lucid.misc.io import show, load from lucid.misc.io.reading import read from lucid.misc.io.showing import _image_url, _display_html import lucid.scratch.web.svelte as lucid_svelte model = models.InceptionV1() model.load_graphdef() ``` # Setup (feel free to skip) **ChannelAttrWidget** Let's make a little widget for showing all our channels and attribution values. ``` %%html_define_svelte ChannelAttrWidget <div class="figure"> <div class="channel_list" > {{#each attrsPos as attr}} <div class="entry"> <div class="sprite" style="background-image: url({{spritemap_url}}); width: {{sprite_size}}px; height: {{sprite_size}}px; background-position: -{{sprite_size*(attr.n%sprite_n_wrap)}}px -{{sprite_size*Math.floor(attr.n/sprite_n_wrap)}}px;"></div> <div class="value" style="background-color: hsl({{(attr.v > 0)? 
210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)">{{attr.v}}</div> </div> {{/each}} {{#if attrsPos.length > 5}} <br style="clear:both;"> <br style="clear:both;"> {{/if}} <div class="gap">...</div> {{#each attrsNeg as attr}} <div class="entry"> <div class="sprite" style="background-image: url({{spritemap_url}}); width: {{sprite_size}}px; height: {{sprite_size}}px; background-position: -{{sprite_size*(attr.n%sprite_n_wrap)}}px -{{sprite_size*Math.floor(attr.n/sprite_n_wrap)}}px;"></div> <div class="value" style="background-color: hsl({{(attr.v > 0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)">{{attr.v}}</div> </div> {{/each}} </div> <br style="clear:both"> </div> <style> .entry{ float: left; margin-right: 4px; } .gap { float: left; margin: 8px; font-size: 400%; } </style> <script> function range(n){ return Array(n).fill().map((_, i) => i); } export default { data () { return { spritemap_url: "", sprite_size: 110, sprite_n_wrap: 22, attrsPos: [], attrsNeg: [], }; }, computed: { }, helpers: {range} }; </script> ``` **BarsWidget** It would also be nice to see the distribution of attribution magnitudes. Let's make another widget for that. ``` %%html_define_svelte BarsWidget <div class="figure"> <div class="channel_list" > {{#each vals as val}} <div class="bar" style="height: {{15*Math.abs(val)}}px; background-color: hsl({{(val > 0)? 210 : 0}}, {{Math.max(90, 110*Math.abs(val)/1.8)}}%, {{Math.min(80, 100-40*Math.abs(val)/1.8)}}%);"> </div> {{/each}} </div> <br style="clear:both"> </div> <style> .channel_list { background-color: #FEFEFE; } .bar { width: 1.5px; height: 10px; display: inline-block; } </style> <script> export default { data () { return { vals: [] }; } }; </script> ``` ## **Spritemaps** In order to show the channels, we need "spritemaps" of channel visualizations. 
These visualization spritemaps are large grids of images (such as [this one](https://storage.googleapis.com/lucid-static/building-blocks/sprite_mixed4d_channel.jpeg)) that visualize every channel in a layer. We provide spritemaps for GoogLeNet because making them takes a few hours of GPU time, but you can make your own channel spritemaps to explore other models. Check out other notebooks on how to make your own neuron visualizations. It's also worth noting that GoogLeNet has unusually semantically meaningful neurons. We don't know why this is -- although it's an active area of research for us. More sophisticated interfaces, such as neuron groups, may work better for networks where meaningful ideas are more entangled or less aligned with the neuron directions. ``` layer_spritemap_sizes = { 'mixed3a' : 16, 'mixed3b' : 21, 'mixed4a' : 22, 'mixed4b' : 22, 'mixed4c' : 22, 'mixed4d' : 22, 'mixed4e' : 28, 'mixed5a' : 28, } def googlenet_spritemap(layer): assert layer in layer_spritemap_sizes size = layer_spritemap_sizes[layer] url = "https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_%s_channel_alpha.jpeg" % layer return size, url ``` **Attribution Code** ``` def score_f(logit, name): if name is None: return 0 elif name == "logsumexp": base = tf.reduce_max(logit) return base + tf.log(tf.reduce_sum(tf.exp(logit-base))) elif name in model.labels: return logit[model.labels.index(name)] else: raise RuntimeError("Unsupported") def channel_attr_simple(img, layer, class1, class2, n_show=4): # Set up a graph for doing attribution... 
with tf.Graph().as_default(), tf.Session() as sess: t_input = tf.placeholder_with_default(img, [None, None, 3]) T = render.import_model(model, t_input, t_input) # Compute activations acts = T(layer).eval() # Compute gradient logit = T("softmax2_pre_activation")[0] score = score_f(logit, class1) - score_f(logit, class2) t_grad = tf.gradients([score], [T(layer)])[0] grad = t_grad.eval() # Let's do a very simple linear approximation attribution. # That is, we say the attribution of y to x is # the rate at which x changes y times the value of x. attr = (grad*acts)[0] # Then we reduce down to channels. channel_attr = attr.sum(0).sum(0) # Now we just need to present the results. # Get spritemaps spritemap_n, spritemap_url = googlenet_spritemap(layer) # Let's show the distribution of attributions print "Distribution of attribution accross channels:" print "" lucid_svelte.BarsWidget({"vals" : [float(v) for v in np.sort(channel_attr)[::-1]]}) # Let's pick the most extreme channels to show ns_pos = list(np.argsort(-channel_attr)[:n_show]) ns_neg = list(np.argsort(channel_attr)[:n_show][::-1]) # ... 
and show them with ChannelAttrWidget print "" print "Top", n_show, "channels in each direction:" print "" lucid_svelte.ChannelAttrWidget({ "spritemap_url": spritemap_url, "sprite_size": 110, "sprite_n_wrap": spritemap_n, "attrsPos": [{"n": n, "v": str(float(channel_attr[n]))[:5]} for n in ns_pos], "attrsNeg": [{"n": n, "v": str(float(channel_attr[n]))[:5]} for n in ns_neg] }) ``` # Channel attributions from article teaser ``` img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png") channel_attr_simple(img, "mixed4d", "Labrador retriever", "tiger cat", n_show=3) img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png") channel_attr_simple(img, "mixed4d", "vase", "lemon", n_show=3) img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/sunglasses_tux.png") channel_attr_simple(img, "mixed4d", "bow tie", "sunglasses", n_show=3) ``` # Bigger channel attribution!!! ``` img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png") channel_attr_simple(img, "mixed4d", "Labrador retriever", "tiger cat", n_show=30) ``` # Channel Attribution - Path Integrated ``` def channel_attr_path(img, layer, class1, class2, n_show=4, stochastic_path=False, N = 100): # Set up a graph for doing attribution... 
with tf.Graph().as_default(), tf.Session() as sess: t_input = tf.placeholder_with_default(img, [None, None, 3]) T = render.import_model(model, t_input, t_input) # Compute activations acts = T(layer).eval() # Compute gradient logit = T("softmax2_pre_activation")[0] score = score_f(logit, class1) - score_f(logit, class2) t_grad = tf.gradients([score], [T(layer)])[0] # Inegrate on a path from acts=0 to acts=acts attr = np.zeros(acts.shape[1:]) for n in range(N): acts_ = acts * float(n) / N if stochastic_path: acts_ *= (np.random.uniform(0, 1, [528])+np.random.uniform(0, 1, [528]))/1.5 grad = t_grad.eval({T(layer): acts_}) attr += 1.0 / N * (grad*acts)[0] # Then we reduce down to channels. channel_attr = attr.sum(0).sum(0) # Now we just need to present the results. # Get spritemaps spritemap_n, spritemap_url = googlenet_spritemap(layer) # Let's show the distribution of attributions print "Distribution of attribution accross channels:" print "" lucid_svelte.BarsWidget({"vals" : [float(v) for v in np.sort(channel_attr)[::-1]]}) # Let's pick the most extreme channels to show ns_pos = list(np.argsort(-channel_attr)[:n_show]) ns_neg = list(np.argsort(channel_attr)[:n_show][::-1]) # ... 
and show them with ChannelAttrWidget print "" print "Top", n_show, "channels in each direction:" print "" lucid_svelte.ChannelAttrWidget({ "spritemap_url": spritemap_url, "sprite_size": 110, "sprite_n_wrap": spritemap_n, "attrsPos": [{"n": n, "v": str(float(channel_attr[n]))[:5]} for n in ns_pos], "attrsNeg": [{"n": n, "v": str(float(channel_attr[n]))[:5]} for n in ns_neg] }) def compare_attr_methods(img, class1, class2): _display_html("<h2>Linear Attribtuion</h2>") channel_attr_simple(img, "mixed4d", class1, class2, n_show=10) _display_html("<br><br><h2>Path Integrated Attribtuion</h2>") channel_attr_path(img, "mixed4d", class1, class2, n_show=10) _display_html("<br><br><h2>Stochastic Path Integrated Attribtuion</h2>") channel_attr_path(img, "mixed4d", class1, class2, n_show=10, stochastic_path=True) img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png") compare_attr_methods(img, "Labrador retriever", "tiger cat") img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png") compare_attr_methods(img, "vase", "lemon") img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/pig.jpeg") compare_attr_methods(img, "hog", "dalmatian") ```
github_jupyter
## Linear Regression to predict Olympic Medal count for top 25 countries --- ``` import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt ``` ### Top 25 countries that won most medals --- ``` top_df = pd.read_csv('top_25.csv') print(top_df.shape) top_df.head() # Assigning Id to each Country top_df['CountryId'] = [(x+1) for x in range(25)] print(top_df.shape) top_df.head() top_df = top_df[['CountryId','Country']] print(top_df.shape) top_df.head() ``` ### Summer Olympic data with athlete, sports, events and medals count --- ``` df = pd.read_csv('summer_athlete_medals_count.csv') print(df.shape) df.head() # Adding country_id column to summer df df = pd.merge(df,top_df, how = 'left', on = 'Country') print(df.shape) df.head() # Dropping the countries that are not in top 25 df = df.dropna() print(df.shape) df.head() df['Athletes per sport'] = round(df['Athletes']/df['Sports'], 2) print(df.shape) df.head() # Bringing 'CountryId' column to the front col = df.pop('CountryId') df.insert(0,'CountryId', col) print(df.shape) df.head() df = df.sort_values(['Year', 'Medals', 'Country'], ascending = [True, False, True]) print(df.shape) df.head() df = pd.get_dummies(df, columns = ['Country']) print(df.shape) df.head() ``` ## Predicting medals for 2020 --- ``` predict_year = 2020 ``` ### Train data --- ``` train_df = df[df['Year'] < predict_year] print(train_df.shape) train_df.head() X = train_df.drop(['Gold', 'Silver', 'Bronze', 'Medals'], axis=1) y1 = train_df['Gold'].values.reshape(-1, 1) y2 = train_df['Silver'].values.reshape(-1, 1) y3 = train_df['Bronze'].values.reshape(-1, 1) y4 = train_df['Medals'].values.reshape(-1, 1) print(X.shape, y1.shape, y2.shape, y3.shape, y4.shape) ``` ### 2020 Test data #### Read athlete sport data for the year 2020 ``` df_2020 = pd.read_csv('2020_athlete_sport_count.csv') print(df_2020.shape) df_2020.head() df_2020['Athletes per sport'] = round(df_2020['Athletes']/df_2020['Sports'], 2) print(df_2020.shape) 
df_2020.head() # Bringing 'CountryId' column to the front col = df_2020.pop('CountryId') df_2020.insert(0,'CountryId', col) print(df_2020.shape) df_2020.head() df_2020 = pd.get_dummies(df_2020, columns = ['Country']) print(df_2020.shape) df_2020.head() ``` ### Test Train Split & Standard Scaler --- ``` # Use train_test_split to create training and testing data from sklearn.model_selection import train_test_split # Gold X1_train, X1_test, y1_train, y1_test = train_test_split(X, y1, random_state=2) # Silver X2_train, X2_test, y2_train, y2_test = train_test_split(X, y2, random_state=2) # Bronze X3_train, X3_test, y3_train, y3_test = train_test_split(X, y3, random_state=2) # Total Medals X4_train, X4_test, y4_train, y4_test = train_test_split(X, y4, random_state=2) print(X1_train.shape, y1_train.shape, X1_test.shape, y1_test.shape) print(X2_train.shape, y2_train.shape, X2_test.shape, y2_test.shape) print(X3_train.shape, y3_train.shape, X3_test.shape, y3_test.shape) print(X4_train.shape, y4_train.shape, X4_test.shape, y4_test.shape) ``` ## Linear Regression Model --- ``` from sklearn.linear_model import LinearRegression ``` ### Gold --- ``` model1 = LinearRegression() model1.fit(X1_train, y1_train) training_score1 = model1.score(X1_train, y1_train) testing_score1 = model1.score(X1_test, y1_test) print('Gold Medals:') print(f"Gold Training Score: {training_score1}") print(f"Gold Testing Score: {testing_score1}") ``` ### Silver --- ``` model2 = LinearRegression() model2.fit(X2_train, y2_train) training_score2 = model1.score(X2_train, y2_train) testing_score2 = model1.score(X2_test, y2_test) print('Silver Medals:') print(f"Silver Training Score: {training_score2}") print(f"Silver Testing Score: {testing_score2}") ``` ### Bronze --- ``` model3 = LinearRegression() model3.fit(X3_train, y3_train) training_score3 = model3.score(X3_train, y3_train) testing_score3 = model3.score(X3_test, y3_test) print('Bronze Medals:') print(f"Bronze Training Score: {training_score3}") 
print(f"Bronze Testing Score: {testing_score3}") ``` ### Total Medals --- ``` model4 = LinearRegression() model4.fit(X4_train, y4_train) training_score4 = model4.score(X4_train, y4_train) testing_score4 = model4.score(X4_test, y4_test) print('Total Medals Medals:') print(f"Total Medals Training Score: {training_score4}") print(f"Total Medals Testing Score: {testing_score4}") ``` ### Using the models to predict medals for 2020 --- ``` test_data = df_2020 gold_predictions = model1.predict(test_data) gold_predictions = np.ravel(gold_predictions) gold_predictions = np.around(gold_predictions, decimals =0).astype(int) gold_predictions silver_predictions = model2.predict(test_data) silver_predictions = np.ravel(silver_predictions) silver_predictions = np.around(silver_predictions, decimals =0).astype(int) silver_predictions bronze_predictions = model3.predict(test_data) bronze_predictions = np.ravel(bronze_predictions) bronze_predictions = np.around(bronze_predictions, decimals =0).astype(int) bronze_predictions total_medals_predictions = model4.predict(test_data) total_medals_predictions = np.ravel(total_medals_predictions) total_medals_predictions = np.around(total_medals_predictions, decimals =0).astype(int) total_medals_predictions ``` ### Creating a Dataframe to show all the predictions --- ``` top_df['Gold Predicted'] = gold_predictions top_df['Silver Predicted'] = silver_predictions top_df['Bronze Predicted'] = bronze_predictions # Not using total_medals_predictions as the below option gave slightly better results #top_df['Total Medals Predicted'] = total_medals_predictions top_df['Total Medals Predicted'] = top_df['Gold Predicted'] + \ top_df['Silver Predicted'] + \ top_df['Silver Predicted'] top_df # Rearranging the columns top_df = top_df[['Country',\ 'Gold Predicted',\ 'Silver Predicted',\ 'Bronze Predicted',\ #'Total Medals Predicted',\ 'Total Medals Predicted' ]] top_df top_df = top_df.sort_values(['Total Medals Predicted'], ascending = [False])\ 
.reset_index(drop=True) top_df top_df.to_csv('Predictions-2020.csv', index = False) top_df['Gold Actual'] = '' top_df['Silver Actual'] = '' top_df['Bronze Actual'] = '' top_df['Total Medals Actual'] = '' top_df # Rearranging the columns top_df = top_df[['Country',\ 'Gold Actual', 'Gold Predicted',\ 'Silver Actual','Silver Predicted',\ 'Bronze Actual', 'Bronze Predicted',\ 'Total Medals Actual', 'Total Medals Predicted']] top_df top_df.to_csv('Predictions-2020-copy.csv', index = False) ```
github_jupyter
# T<sub>2</sub> Ramsey Characterization The purpose of the $T_2$Ramsey experiment is to determine two of the qubit's properties: *Ramsey* or *detuning frequency* and $T_2^\ast$. The rough frequency of the qubit was already determined previously. The control pulses are based on this frequency. In this experiment, we would like to get a more precise estimate of the qubit's frequency. The difference between the frequency used for the control rotation pulses, and the precise frequency is called the *detuning frequency*. This part of the experiment is called a *Ramsey Experiment*. $T_2^\ast$ represents the rate of decay toward a mixed state, when the qubit is initialized to the $\left|1\right\rangle$ state. Since the detuning frequency is relatively small, we add a phase gate to the circuit to enable better measurement. The actual frequency measured is the sum of the detuning frequency and the user induced *oscillation frequency* (`osc_freq` parameter). ``` import qiskit from qiskit_experiments.library import T2Ramsey ``` The circuit used for the experiment comprises the following: 1. Hadamard gate 2. delay 3. RZ gate that rotates the qubit in the x-y plane 4. Hadamard gate 5. measurement The user provides as input a series of delays and the time unit for the delays, e.g., seconds, milliseconds, etc. In addition, the user provides the oscillation frequency in Hz. During the delay, we expect the qubit to precess about the z-axis. If the p gate and the precession offset each other perfectly, then the qubit will arrive at the $\left|0\right\rangle$ state (after the second Hadamard gate). By varying the extension of the delays, we get a series of oscillations of the qubit state between the $\left|0\right\rangle$ and $\left|1\right\rangle$ states. We can draw the graph of the resulting function, and can analytically extract the desired values. 
``` # set the computation units to microseconds unit = "us" # microseconds qubit = 0 # set the desired delays delays = list(range(1, 50, 1)) # Create a T2Ramsey experiment. Print the first circuit as an example exp1 = T2Ramsey(qubit, delays, unit=unit, osc_freq=1e5) print(exp1.circuits()[0]) ``` We run the experiment on a simple, simulated backend, created specifically for this experiment's tutorial. ``` from qiskit_experiments.test.t2ramsey_backend import T2RamseyBackend # FakeJob is a wrapper for the backend, to give it the form of a job from qiskit_experiments.test.utils import FakeJob conversion_factor = 1e-6 # The behavior of the backend is determined by the following parameters backend = T2RamseyBackend( p0={ "A": [0.5], "T2star": [20.0], "f": [100100], "phi": [0.0], "B": [0.5], }, initial_prob_plus=[0.0], readout0to1=[0.02], readout1to0=[0.02], conversion_factor=conversion_factor, ) ``` The resulting graph will have the form: $f(t) = a^{-t/T_2*} \cdot \cos(2 \pi f t + \phi) + b$ where *t* is the delay, $T_2^\ast$ is the decay factor, and *f* is the detuning frequency. `conversion_factor` is a scaling factor that depends on the measurement units used. It is 1E-6 here, because the unit is microseconds. ``` expdata1 = exp1.run(backend=backend, shots=2000) expdata1.block_for_results() # Wait for job/analysis to finish. # Display the figure display(expdata1.figure(0)) # Print results for result in expdata1.analysis_results(): print(result) ``` Additional fitter result data is stored in the `result.extra` field ``` expdata1.analysis_results("T2star").extra ``` ### Providing initial user estimates The user can provide initial estimates for the parameters to help the analysis process. Because the curve is expected to decay toward $0.5$, the natural choice for parameters $A$ and $B$ is $0.5$. Varying the value of $\phi$ will shift the graph along the x-axis. Since this is not of interest to us, we can safely initialize $\phi$ to 0. 
In this experiment, `t2ramsey` and `f` are the parameters of interest. Good estimates for them are values computed in previous experiments on this qubit or a similar values computed for other qubits. ``` from qiskit_experiments.library.characterization import T2RamseyAnalysis user_p0={ "A": 0.5, "T2star": 20.0, "f": 110000, "phi": 0, "B": 0.5 } exp_with_p0 = T2Ramsey(qubit, delays, unit=unit, osc_freq=1e5) exp_with_p0.set_analysis_options(p0=user_p0) expdata_with_p0 = exp_with_p0.run(backend=backend, shots=2000) expdata_with_p0.block_for_results() # Display fit figure display(expdata_with_p0.figure(0)) # Print results for result in expdata_with_p0.analysis_results(): print(result) ``` The units can be changed, but the output in the result is always given in seconds. The units in the backend must be adjusted accordingly. ``` from qiskit.utils import apply_prefix unit = "ns" delays = list(range(1000, 50000, 1000)) conversion_factor = apply_prefix(1, unit) print(conversion_factor) p0 = { "A": [0.5], "T2star": [20000], "f": [100000], "phi": [0.0], "B": [0.5], } backend_in_ns = T2RamseyBackend( p0=p0, initial_prob_plus=[0.0], readout0to1=[0.02], readout1to0=[0.02], conversion_factor=conversion_factor, ) exp_in_ns = T2Ramsey(qubit, delays, unit=unit, osc_freq=1e5) user_p0_ns = { "A": 0.5, "T2star": 20000.0, "f": 110000, "phi": 0, "B": 0.5 } exp_in_ns.set_analysis_options(p0=user_p0_ns) # Run experiment expdata_in_ns = exp_in_ns.run(backend=backend_in_ns, shots=2000).block_for_results() # Display Figure display(expdata_in_ns.figure(0)) # Print Results for result in expdata_in_ns.analysis_results(): print(result) import qiskit.tools.jupyter %qiskit_copyright ```
github_jupyter
# Seaborn plots ``` # Hide all warnings import warnings warnings.simplefilter('ignore') # Data import pandas as pd import numpy as np # Graphics import matplotlib.pyplot as plt import seaborn as sns sns.set(font_scale=2, rc={'figure.figsize':(16,9)}) # Show plots directly within Jupyter Out %matplotlib inline # https://www.kaggle.com/c/titanic/download/train.csv df = pd.read_csv('../../day_1/2_seminar_basics/data/train.csv') df.head() df.shape df.info() ``` # Heatmap ``` f, ax = plt.subplots(figsize=(15, 8)) cmap = sns.diverging_palette(220, 10, as_cmap=True) corr = df[['Age', 'Fare', 'Pclass']].corr() sns.heatmap(corr, cmap=cmap, annot=True, fmt='.2f', annot_kws={"size": 36}); # TODO: plot correlations between all variables ``` Heatmap ignores non-numerical data, so you don't see _Sex_ variable here. ``` # TODO: add new numerical variable fSex encoded variable Sex and replot heatmap # use e.g. df['Sex'].factorize() df['fSex'] = ``` Variable _Survived_ is correlated to _fSex_, _Fare_ and is anticorrelated to _Pclass_, but _Pclass_ depends on _Fare_ obviously. ## How much missing data we have ``` fig, ax = plt.subplots(figsize=(25,10)) sns.heatmap(df.notnull(), cbar=False); fig, ax = plt.subplots(figsize=(25,10)) sns_heatmap = sns.heatmap(df.notnull(), yticklabels=False, cbar=False) fig, ax = plt.subplots(figsize=(25,10)) df_clean = df.dropna() sns_heatmap = sns.heatmap(df_clean.isnull(), yticklabels=False, cbar=False) # TODO: What fraction of data (observations) is rested after cleaning? # use df.shape and df_clean.shape ``` 0.2053872053872054 - not so much. 
Let's drop only not sparse columns _Age_ and _Embarked_ and exclude _Cabin_ column at all ``` df_clean = df.drop('Cabin', axis=1).dropna(subset=['Age', 'Embarked']) df_clean.shape ``` # Countplot (bar plot counts of variable values) ``` sns.set(font_scale=2, rc={'figure.figsize':(16,9)}) sns.countplot(df['Sex']); sns.set(font_scale=2, rc={'figure.figsize':(16,9)}) sns.countplot(df['Pclass']); sns.distplot(df['Fare']); sns.distplot(df_clean['Age']); sns.distplot(df_clean['Age'], bins=20, kde=False, rug=True); sns.distplot(df_clean['Age'][df['Survived'] == 0], bins=20, kde=False, label='Unsurvived') sns.distplot(df_clean['Age'][df['Survived'] == 1], bins=20, kde=False, label='Survived') plt.legend(); df['Pclass'].unique() # How the Age of unsurvived passengers depends on Pclass? # TODO: Plot histograms of Age for each pClass of unsurvived to see how it is distributed # use filter rule (df['Survived'] == 0) & (df['Pclass'] == pclass) # TODO: Enable norm_hist = True ``` ### Hometask What the class of passenges is the most unsurvived? # Box vs violin plots ``` sns.boxplot(data=df[['Age']]); ``` Change orientation ``` sns.boxplot(x='Age', data=df[['Age']]); ``` Violinplot draws a combination of boxplot and kernel density estimate. ``` sns.kdeplot(data=df['Age'], shade=True); sns.violinplot(x='Age', data=df[['Age']]); ``` # Jointplot ``` sns.jointplot(data=df, x='Age', y='Fare'); ``` --- # Homework Download [sample dataset](https://github.com/oreillymedia/doing_data_science/raw/master/dds_datasets.zip). Unzip it, unzip `dds_datasets/dds_ch2_rollingsales.zip`. 
Load Bronx in dataframe: ``` df = pd.read_excel('dds_datasets/dds_ch2_rollingsales/rollingsales_bronx.xls', skiprows=4) df.head() df.info() ``` ## Regression plot within jointplot You can call just `regplot()` ``` f, ax = plt.subplots(figsize=(8, 8)) jnt = sns.regplot(data=df, x='SALE\nPRICE', y='GROSS SQUARE FEET') ``` But try to understand the anatomy of `jointplot()` ``` sns.jointplot(data=df, x='SALE\nPRICE', y='GROSS SQUARE FEET', height=8); ``` There are three subplots (`axes`-objects) within jointplot: ``` [member for member in filter(lambda m: '__' not in m, dir(jnt))] ``` Do the following: * Filter outliers by 0.5e8 for x and by 6e5 for y * Plot regression line onto scatterplot (pass certain parameter to `jointplot`) * Disable tick labels of both X&Y `distplot`s to hide "1e8" (get x axis from x distplot (marginal plot) and use `set_visible()`) * Low case both labels of scatterplot axis X&Y (e.g. `'GROSS' -> 'Gross'`) * Pad label of scatterplot axis Y ``` # Your code here ``` Optional: * Replace format of axis Y to exponential (get y axis from y distplot and use its method `set_major_formatter(matplotlib.tickerFuncFormatter(lambda string, position: ...))`) ``` # Your code here ```
github_jupyter
# MATH 100.2 Project 1: VaR for Currencies and Bonds ### Instructors: Jakov Ivan S. Dumbrique (jdumbrique@ateneo.edu) and Juan Carlo F. Mallari (jmallari@ateneo.edu) MATH 100.2: Topics in Financial Mathematics II \ First Semester, S.Y. 2021-2022 \ Ateneo de Manila University # Submission Mechanics 1. Upload the complete Jupyter notebook to the appropriate submission bin on Canvas on or before **November 12, 2021 (Friday) 11:59 PM**. 2. Make sure that your notebook is organized, well-documented and clearly-annotated. 3. Your notebook should show all codes required for Parts 1, 2, and 3. The answers for Part 2 should be displayed in your Notebook. For questions requiring some analysis/explanation (Part 3), make sure to write your detailed paragraphs (and display your graphs) in [markdown cells](https://www.tutorialspoint.com/jupyter/jupyter_notebook_markdown_cells.htm). 4. The filename for your Jupyter notebook should follow this format: `MATH100.2_[lastnameofTeamMember1]_[lastnameofTeamMember2]_[lastnameofTeamMember3 (if any)]_Project1.ipynb` 5. Include comments (using the # symbol) in your codes so that it will be easier for the instructors to understand them. For your helper functions, use descriptive [docstrings](https://www.programiz.com/python-programming/docstrings) to clarify what they do. 6. Make sure to include the following in a markdown cell at the start of your notebook: * Your names * Names of the people you asked help from * Resources (e.g., websites, books) you used/consulted for this project (Sample markdown cell) Submitted by: 1. Jakov Ivan S. Dumbrique 2. Juan Carlo F. Mallari People I asked help from: 1. Elon Musk 2. Jeff Bezos 3. Warren Buffett Resources used for completing this project: 1. https://numpy.org/doc/stable/reference/generated/numpy.cov.html Project 1 has three parts: # Part 1: Functional Implementation In Part 1, you are tasked to create these functions: 1. 
`undiversified_VaR_delta_normal_fx()` - Returns the undiversified d-day p% VaR of a portfolio of currencies using the Delta-Normal Approach - This should include the option to implement EWMA in estimating volatilities of exchange-rate returns - Uses N-day logarithmic returns for calculating the N-day VaR 2. `diversified_VaR_delta_normal_fx()` - Returns the diversified d-day p% VaR of a portfolio of currencies using the Delta-Normal Approach - When you want to use EWMA for an N-asset portfolio, this function will only calculate the EWMA for a two-asset portfolio - Uses N-day logarithmic returns in calculating the N-day VaR 3. `VaR_hs_fx()` - Returns the d-day p% VaR of a portfolio of currencies using the Historical Simulation (HS) Approach - Implements the normal HS method (which simulates scenarios for the return of each individual currency), not the "alternative" HS method (which runs scenarios for the returns of the entire portfolio) 4. `VaR_brw_fx()` - Returns the d-day p% VaR of a portfolio of currencies using the Boudoukh-Richardson-Whitelaw (BRW) Approach - Similar to `VaR_hs_fx()`, this function implements the normal method (which simulates scenarios for the return of each individual currency), not the "alternative" method (which runs scenarios for the returns of the entire portfolio) 5. `VaR_cfm_bonds()` - Returns the d-day p% VaR of a portfolio of bonds using Cash Flow Mapping (CFM) 6. `undiversified_VaR_delta_normal_bonds()` - Returns the undiversified d-day p% VaR of a portfolio of bonds using the Delta-Normal Approach 7. `diversified_VaR_delta_normal_bonds()` - Returns the diversified d-day p% VaR of a portfolio of bonds using the Delta-Normal Approach 8. `VaR_hs_bonds()` - Returns the d-day p% VaR of a portfolio of bonds using the Historical Simulation Approach The arguments and other specifics of these functions are up to your team's discretion. Just make sure you document them properly using docstrings. 
You may create helper functions useful for repeated computations. # Part 2: Validation In Part 2, you will be using the functions you created in Part 1 to answer the following questions. The historical data for these items can be found in the `data` folder of our [class Github repository](https://github.com/ateneomathdept/math100.2_2021Sem1). The historical data for currencies cover the trading period from October 17, 2011 to October 18, 2021. Assume that today is October 18, 2021. You are a portfolio risk manager who is assigned to analyze the market risk associated to the following portfolios (A and B) of currencies: <table> <tr> <td> Portfolio </td> <td> USD </td> <td> EUR </td> <td> JPY </td> <td> GBP </td> <td> CHF </td> </tr> <tr> <td> Position </td> <td> long </td> <td> short </td> <td> long </td> <td> short </td> <td> long </td> </tr> <tr> <td> A </td> <td> 10,362 </td> <td> 17,135 </td> <td> 2,235,292 </td> <td> 14,937 </td> <td> 9,465 </td> </tr> <tr> <td> B </td> <td> - </td> <td> - </td> <td> - </td> <td> 14,937 </td> <td> 9,465 </td> </tr> </table> ## Question A Suppose market risk metrics are requested for **Portfolio A**. 1. Calculate the undiversified and diversified one-day 99% VaR for the portfolio using the delta-normal approach. 2. Use the historical simulation approach to compute the portfolio's one-day 99% VaR. 3. Use the BRW approach with a decay factor of $\lambda$= 0.85 to calculate the portfolio's one-day 99% VaR. ## Question B We now look at the value-at-risk metrics for **Portfolio B**. Assume that the five-day volatility of each currency and the covariance of their returns follow exponentially weighted moving average models with the following decay parameters: <table> <tr> <td> Data </td> <td> GBP </td> <td> CHF </td> <td> Covariance </td> </tr> <tr> <td> Decay Parameter </td> <td> 0.87 </td> <td> 0.90 </td> <td> 0.95 </td> </tr> </table> Determine the undiversified and diversified five-day 95\% VaR for the portfolio. 
What is the benefit of diversification? ## Question C Assume that today is October 18, 2021. Suppose market risk metrics are requested for **Portfolio C** consisting of long positions on the following four bonds: 1. Roquefort Bond - A corporate bond with Php 1,000,000 face value, 8% p.a. coupon rate that pays interest annually, with maturity date on December 31, 2026. 2. Camembert Bond - A corporate bond with Php 1,000,000 face value, 7.5% p.a. coupon rate that pays interest semiannually, with maturity date on June 1, 2022. 3. Feta Bond - A T-bond with Php 1,000,000 face value, 6% p.a. coupon rate that pays interest quarterly, with maturity date on January 15, 2035. 4. Gouda Bond - A zero-coupon bond with Php 1,000,000 face value, with maturity date on September 28, 2030. The historical **continuously-compounded** market yields for the four bonds can be found in the `portfolio_c_yield_data.csv` file. Assume an actual/360 day-count convention. 1. Determine the dollar duration (DV01) for each bond. 2. Calculate the portfolio's undiversified and diversified 10-day 99% VaR using Delta-Normal Approach. What is the benefit of diversification? 4. Consider the subportfolio consisting only of Roquefort Bond and Camembert Bond. Assume that the ten-day bond volatilities and the covariance of their returns follow exponentially weighted moving average models with the following decay parameters: <table> <tr> <td> Data </td> <td> Roquefort </td> <td> Camembert </td> <td> Covariance </td> </tr> <tr> <td> Decay Parameter </td> <td> 0.85 </td> <td> 0.80 </td> <td> 0.975 </td> </tr> </table> Determine the undiversified and diversified 10-day 99\% VaR for the subportfolio. What is the benefit of diversification? ## Question D Assume that today is October 18, 2021. Suppose market risk metrics are requested for **Portfolio D** consisting solely of a peso-denominated bond corporate bond with a principal of Php 10,000,000 and with maturity date on December 31, 2024. 
Suppose that the bond provides a coupon of 10% per annum payable semiannually. The historical continuously-compounded zero rates for benchmark tenors are provided in the `portfolio_d_zero_rate_data.csv` file.

### Historical Simulation Approach

1. Generate possible zero rates 10 days from now for the relevant tenors and determine the portfolio's 10-day 99% VaR using the historical simulation approach.

### Cash Flow Mapping

1. Estimate the volatilities and correlations of zero-coupon bonds maturing on the benchmark tenors using the given data. Print out the volatilities and the correlation matrix.
2. Map the portfolio's cash flows to the benchmark tenors/standard buckets. Print out the weight $\alpha_i$ for each cash flow.
3. Print out the present values of the mapped cash flows at each benchmark tenor/standard bucket.
4. Determine the portfolio's 10-day 99% VaR.

# Part 3: Analysis

For Part 3, you will analyze the effect of changing various parameters on the resulting Value-at-Risk of a portfolio.

## Question A on Currencies

Consider **Portfolio A**. Explore, analyze, **and** discuss the effect of changing the following parameters on the resulting VaR of **each of the three methods** (Delta-Normal, HS, BRW):

1. confidence level
2. value of N for N-day VaR
3. time window of historical data used to calculate VaR
4. decay parameter for BRW Approach

Based on your sensitivity analysis, which of the three methods would best capture the market risk of the portfolio? Justify your choice.

## Question B on Bonds

Consider **Portfolio C** and its undiversified 10-day 99% VaR which you calculated using the Delta-Normal Approach in Part 2. Which of the four bonds in the fixed-income portfolio has the highest contribution to the undiversified 10-day 99% VaR? Why? Compare the individual VaRs and determine which factors and bond features led to the bond's high VaR contribution. Explore and discuss the effect of these factors and features on the resulting VaR.
github_jupyter
## Simulate expected misclassification rate

```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from scipy.stats import multivariate_normal
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

%matplotlib inline
%config InlineBackend.figure_formats = {'retina',}
plt.style.use('seaborn-white')
```

#### Model to be used for generating data with two classes: <BR>

$y_i = 0,\quad x_i \sim N_{10}(0, I_{10})$

$y_i = 1,\quad x_i \sim N_{10}(\mu, I_{10})\,$ with $\mu = (1,1,1,1,1,0,0,0,0,0)$

$x_i \in \mathbb{R^{10}}$ normally distributed

$y_i$ equally divided between the two classes (balanced dataset)
<BR>

### Create Gaussian Normal

```
# Class means and the shared identity covariance.
# NOTE(review): the text above states mu = (1,1,1,1,1,0,0,0,0,0) while the code
# uses (0,0,0,0,0,1,1,1,1,1); the two are equivalent up to a permutation of
# coordinates, so the simulated error rates are unaffected.
mu1 = np.repeat(0, 10)      # mean of class 0
mu2 = np.repeat([0, 1], 5)  # mean of class 1
sigma = np.identity(10)

# Draw a small sample just to eyeball the generator's output
X = multivariate_normal(mean=mu1, cov=sigma).rvs(5)

mu1  # fixed: the original inspected an undefined name `mu` (NameError)

mu2

sigma

X

def simulate_clf_error(clf, train_sample_n=100, test_sample_n=2000):
    """Fit `clf` on a fresh balanced training sample and return its
    misclassification rate (1 - accuracy) on an independent balanced
    test sample drawn from the same two-Gaussian model."""
    # Generate training sample and train classifier
    X_train_0 = multivariate_normal(mean=np.repeat(0, 10), cov=np.identity(10)).rvs(train_sample_n//2)
    X_train_1 = multivariate_normal(mean=np.repeat([0, 1], 5), cov=np.identity(10)).rvs(train_sample_n//2)
    X_train = np.r_[X_train_0, X_train_1]
    y_train = np.repeat([0, 1], train_sample_n//2)
    clf.fit(X_train, y_train)

    # Generate large set of test data and return error rate of classifier
    X_test_0 = multivariate_normal(mean=np.repeat(0, 10), cov=np.identity(10)).rvs(test_sample_n//2)
    X_test_1 = multivariate_normal(mean=np.repeat([0, 1], 5), cov=np.identity(10)).rvs(test_sample_n//2)
    X_test = np.r_[X_test_0, X_test_1]
    y_test = np.repeat([0, 1], test_sample_n//2)
    return 1 - clf.score(X_test, y_test)
```

#### Run simulations

```
repeats = 1000
svm_radial = [simulate_clf_error(SVC(kernel='rbf')) for i in np.arange(repeats)]
svm_linear = [simulate_clf_error(SVC(kernel='linear')) for i in np.arange(repeats)]
log_regr = [simulate_clf_error(LogisticRegression(C=100)) for i in np.arange(repeats)]
```

#### Average error rate

```
print('SVM - radial kernel:  mean: {} sd: {}'.format(np.mean(svm_radial).round(3), np.var(svm_radial)**.5))
print('SVM - linear kernel:  mean: {} sd: {}'.format(np.mean(svm_linear).round(3), np.var(svm_linear)**.5))
print('Logistic regression:  mean: {} sd: {}'.format(np.mean(log_regr).round(3), np.var(log_regr)**.5))
```

### Visualise

```
plt.plot(svm_radial, 'g', alpha=0.4, label='SVM Radial')
plt.plot(svm_linear, 'r', alpha=0.4, label='SVM Linear')
plt.plot(log_regr, 'b', alpha=0.4, label='Logistic Regression')
plt.hlines(np.mean(np.c_[svm_radial, svm_linear, log_regr], axis=0), 0, repeats, colors=['g', 'r', 'b'])
plt.xlabel('Simulation')
plt.ylabel('Error rate')
plt.title('Simulation: expected misclassification rates')
plt.legend();

fig, (ax1, ax2, ax3) = plt.subplots(1,3, sharey=True, figsize=(18,5))
_ = ax1.plot(svm_radial, 'g', alpha=0.4, label='SVM Radial')
_ = ax1.hlines(np.mean(svm_radial), 0, repeats, colors='g')
_ = ax1.set_ylabel('Error rate')
_ = ax2.plot(svm_linear, 'r', alpha=0.4, label='SVM Linear')
_ = ax2.hlines(np.mean(svm_linear), 0, repeats, colors='r')
_ = ax3.plot(log_regr, 'b', alpha=0.4, label='Logistic Regression')
_ = ax3.hlines(np.mean(log_regr), 0, repeats, colors='b');

_ = fig.suptitle('Simulation: expected misclassification rates', fontsize=16)
_ = fig.subplots_adjust(wspace=0.02)

for ax in fig.axes:
    _ = ax.set_xlabel('{} simulations'.format(repeats))
    # fixed: tick_params expects a boolean here; the string 'off' is not a
    # valid value in modern Matplotlib
    _ = ax.tick_params(labelbottom=False)
    _ = ax.legend()
```
github_jupyter
# AWS Elastic Kubernetes Service (EKS) Deep MNIST In this example we will deploy a tensorflow MNIST model in Amazon Web Services' Elastic Kubernetes Service (EKS). This tutorial will break down in the following sections: 1) Train a tensorflow model to predict mnist locally 2) Containerise the tensorflow model with our docker utility 3) Send some data to the docker model to test it 4) Install and configure AWS tools to interact with AWS 5) Use the AWS tools to create and setup EKS cluster with Seldon 6) Push and run docker image through the AWS Container Registry 7) Test our Elastic Kubernetes deployment by sending some data Let's get started! 🚀🔥 ## Dependencies: * Helm v3.0.0+ * A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM) * kubectl v1.14+ * EKS CLI v0.1.32 * AWS Cli v1.16.163 * Python 3.6+ * Python DEV requirements ## 1) Train a tensorflow model to predict mnist locally We will load the mnist images, together with their labels, and then train a tensorflow model to predict the right labels ``` from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) import tensorflow as tf if __name__ == "__main__": x = tf.placeholder(tf.float32, [None, 784], name="x") W = tf.Variable(tf.zeros([784, 10])) b = tf.Variable(tf.zeros([10])) y = tf.nn.softmax(tf.matmul(x, W) + b, name="y") y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = tf.reduce_mean( -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]) ) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) init = tf.initialize_all_variables() sess = tf.Session() sess.run(init) for i in range(1000): batch_xs, batch_ys = mnist.train.next_batch(100) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(sess.run(accuracy, feed_dict={x: 
mnist.test.images, y_: mnist.test.labels})) saver = tf.train.Saver() saver.save(sess, "model/deep_mnist_model") ``` ## 2) Containerise the tensorflow model with our docker utility First you need to make sure that you have added the .s2i/environment configuration file in this folder with the following content: ``` !cat .s2i/environment ``` Now we can build a docker image named "deep-mnist" with the tag 0.1 ``` !s2i build . seldonio/seldon-core-s2i-python36:1.12.0-dev deep-mnist:0.1 ``` ## 3) Send some data to the docker model to test it We first run the docker image we just created as a container called "mnist_predictor" ``` !docker run --name "mnist_predictor" -d --rm -p 5000:5000 deep-mnist:0.1 ``` Send some random features that conform to the contract ``` import matplotlib.pyplot as plt # This is the variable that was initialised at the beginning of the file i = [0] x = mnist.test.images[i] y = mnist.test.labels[i] plt.imshow(x.reshape((28, 28)), cmap="gray") plt.show() print("Expected label: ", np.sum(range(0, 10) * y), ". 
One hot encoding: ", y) import math import numpy as np from seldon_core.seldon_client import SeldonClient # We now test the REST endpoint expecting the same result endpoint = "0.0.0.0:5000" batch = x payload_type = "ndarray" sc = SeldonClient(microservice_endpoint=endpoint) # We use the microservice, instead of the "predict" function client_prediction = sc.microservice( data=batch, method="predict", payload_type=payload_type, names=["tfidf"] ) for proba, label in zip( client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0, 10), ): print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %") !docker rm mnist_predictor --force ``` ## 4) Install and configure AWS tools to interact with AWS First we install the awscli ``` !pip install awscli --upgrade --user ``` ### Configure aws so it can talk to your server (if you are getting issues, make sure you have the permmissions to create clusters) ``` %%bash # You must make sure that the access key and secret are changed aws configure << END_OF_INPUTS YOUR_ACCESS_KEY YOUR_ACCESS_SECRET us-west-2 json END_OF_INPUTS ``` ### Install EKCTL *IMPORTANT*: These instructions are for linux Please follow the official installation of ekctl at: https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html ``` !curl --silent --location "https://github.com/weaveworks/eksctl/releases/download/latest_release/eksctl_$(uname -s)_amd64.tar.gz" | tar xz !chmod 755 ./eksctl !./eksctl version ``` ## 5) Use the AWS tools to create and setup EKS cluster with Seldon In this example we will create a cluster with 2 nodes, with a minimum of 1 and a max of 3. You can tweak this accordingly. If you want to check the status of the deployment you can go to AWS CloudFormation or to the EKS dashboard. It will take 10-15 minutes (so feel free to go grab a ☕). *IMPORTANT*: If you get errors in this step it is most probably IAM role access requirements, which requires you to discuss with your administrator. 
``` %%bash ./eksctl create cluster \ --name demo-eks-cluster \ --region us-west-2 \ --nodes 2 ``` ### Configure local kubectl We want to now configure our local Kubectl so we can actually reach the cluster we've just created ``` !aws eks --region us-west-2 update-kubeconfig --name demo-eks-cluster ``` And we can check if the context has been added to kubectl config (contexts are basically the different k8s cluster connections) You should be able to see the context as "...aws:eks:eu-west-1:27...". If it's not activated you can activate that context with kubectlt config set-context <CONTEXT_NAME> ``` !kubectl config get-contexts ``` ## Setup Seldon Core Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Install Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core). Instructions [also online](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html). ## Push docker image In order for the EKS seldon deployment to access the image we just built, we need to push it to the Elastic Container Registry (ECR). 
If you have any issues please follow the official AWS documentation: https://docs.aws.amazon.com/AmazonECR/latest/userguide/what-is-ecr.html ### First we create a registry You can run the following command, and then see the result at https://us-west-2.console.aws.amazon.com/ecr/repositories?# ``` !aws ecr create-repository --repository-name seldon-repository --region us-west-2 ``` ### Now prepare docker image We need to first tag the docker image before we can push it ``` %%bash export AWS_ACCOUNT_ID="" export AWS_REGION="us-west-2" if [ -z "$AWS_ACCOUNT_ID" ]; then echo "ERROR: Please provide a value for the AWS variables" exit 1 fi docker tag deep-mnist:0.1 "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/seldon-repository" ``` ### We now login to aws through docker so we can access the repository ``` !`aws ecr get-login --no-include-email --region us-west-2` ``` ### And push the image Make sure you add your AWS Account ID ``` %%bash export AWS_ACCOUNT_ID="" export AWS_REGION="us-west-2" if [ -z "$AWS_ACCOUNT_ID" ]; then echo "ERROR: Please provide a value for the AWS variables" exit 1 fi docker push "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/seldon-repository" ``` ## Running the Model We will now run the model. Let's first have a look at the file we'll be using to trigger the model: ``` !cat deep_mnist.json ``` Now let's trigger seldon to run the model. We basically have a yaml file, where we want to replace the value "REPLACE_FOR_IMAGE_AND_TAG" for the image you pushed ``` %%bash export AWS_ACCOUNT_ID="" export AWS_REGION="us-west-2" if [ -z "$AWS_ACCOUNT_ID" ]; then echo "ERROR: Please provide a value for the AWS variables" exit 1 fi sed 's|REPLACE_FOR_IMAGE_AND_TAG|'"$AWS_ACCOUNT_ID"'.dkr.ecr.'"$AWS_REGION"'.amazonaws.com/seldon-repository|g' deep_mnist.json | kubectl apply -f - ``` And let's check that it's been created. You should see an image called "deep-mnist-single-model...". 
We'll wait until STATUS changes from "ContainerCreating" to "Running" ``` !kubectl get pods ``` ## Test the model Now we can test the model, let's first find out what is the URL that we'll have to use: ``` !kubectl get svc ambassador -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' ``` We'll use a random example from our dataset ``` import matplotlib.pyplot as plt # This is the variable that was initialised at the beginning of the file i = [0] x = mnist.test.images[i] y = mnist.test.labels[i] plt.imshow(x.reshape((28, 28)), cmap="gray") plt.show() print("Expected label: ", np.sum(range(0, 10) * y), ". One hot encoding: ", y) ``` We can now add the URL above to send our request: ``` import math import numpy as np from seldon_core.seldon_client import SeldonClient host = "a68bbac487ca611e988060247f81f4c1-707754258.us-west-2.elb.amazonaws.com" port = "80" # Make sure you use the port above batch = x payload_type = "ndarray" sc = SeldonClient( gateway="ambassador", ambassador_endpoint=host + ":" + port, namespace="default" ) client_prediction = sc.predict( data=batch, deployment_name="deep-mnist", names=["text"], payload_type=payload_type ) print(client_prediction) ``` ### Let's visualise the probability for each label It seems that it correctly predicted the number 7 ``` for proba, label in zip( client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0, 10), ): print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %") ```
github_jupyter
# Shadow Rollout with Seldon and Ambassador

This notebook shows how you can deploy "shadow" deployments to direct traffic not only to the main Seldon Deployment but also to a shadow deployment whose response will be discarded. This allows you to test new models in a production setting and with production traffic and analyse how they perform before putting them live.

These are useful when you want to test a new model or higher-latency inference pipeline (e.g., with explanation components) with production traffic but without affecting the live deployment.

## Setup Seldon Core

Use the setup notebook to [Setup Cluster](../../seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html).

## Set up Port Forward

**Ensure you port forward to Grafana**

```
kubectl port-forward $(kubectl get pods -n seldon -l app=grafana-prom-server -o jsonpath='{.items[0].metadata.name}') -n seldon 3000:3000
```

## Launch main model

We will create a very simple Seldon Deployment with a dummy model image `seldonio/mock_classifier:1.0`. This deployment is named `example`.

```
!pygmentize model.json

!kubectl create -f model.json

!kubectl rollout status deploy/production-model-single-7cd068f
```

### Get predictions

```
from seldon_core.seldon_client import SeldonClient

sc = SeldonClient(deployment_name="example",namespace="seldon")
```

#### REST Request

```
r = sc.predict(gateway="ambassador",transport="rest")
print(r)
```

#### gRPC Request

```
r = sc.predict(gateway="ambassador",transport="grpc")
print(r)
```

## Launch Shadow

We will now create a new Seldon Deployment for our Shadow deployment with a new model `seldonio/mock_classifier_rest:1.1`. 
To make it a shadow of the original `example` deployment we add two annotations ``` "annotations": { "seldon.io/ambassador-service-name":"example", "seldon.io/ambassador-shadow":"true" }, ``` The first says to use `example` as our service endpoint rather than the default which would be our deployment name - in this case `example-shadow`. This will ensure that this Ambassador setting will apply to the same prefix as the previous one. The second states we want to use Ambassador's shadow functionality. ``` !pygmentize shadow.json !kubectl create -f shadow.json !kubectl rollout status deploy/shadow-model-single-4c8805f ``` Let's send a bunch of requests to the endpoint. ``` for i in range(1000): r = sc.predict(gateway="ambassador",transport="rest") ``` Now view the analytics dashboard at http://localhost:3000 You should see a dashboard view like below showing the two models production and shadow both receiving requests. ![shadow](shadow.png)
github_jupyter
# Stats ## Questions - [Q1](#Q1): Total number of trip legs by gender and distribution of worthwhileness ratings - [Q2](#Q2): Gender distribution by country (needed for data interpretation later on) - [Q3](#Q3): Modal split for all transport modes in a pie chart (not categories) – count of all trip legs - [Q4](#Q4): Modal split for all transport mode categories – count of all trip legs – by country - [Q5](#Q5): Total travel time per mode - [Q6](#Q6): Total distance per mode - [Q7](#Q7): Age distribution by country: number of users and trips ``` import os import sys import pandas as pd import numpy as np import importlib import itertools from pandas.io.json import json_normalize import sklearn.metrics as metrics import matplotlib.pyplot as plt import seaborn as sns import datetime from matplotlib import rcParams import json import math %matplotlib inline from IPython.core.display import display, HTML display(HTML("<style>.container { width:95% !important; }</style")) ``` **READ DATA** ``` # Global variables meta_data_path = "../../data-campaigns/meta-data/" legs = "all_legs_merged_no_outlier_0.01.pkl" input_path = "../../2019-12-16.out/" out_path = "../../2019-12-16.out/D5.2/" img_path = "../../2019-12-16.out/D5.2/" # Graphical parameters rcParams["axes.titlepad"] = 45 rcParams["font.size"] = 16 rcParams["figure.figsize"] = 12, 8 sns.set_style("whitegrid") try: os.makedirs(os.path.abspath(out_path)) except FileExistsError: print("Directory '{}' already exists".format(out_path), file=sys.stderr) all_legs = pd.read_pickle(input_path + legs) trips_users_df = pd.read_pickle(input_path + "trips_users_df.pkl") trips_df = pd.read_pickle(input_path + "trips_df.pkl") ## select only trips in all_legs # trips_df = trips_df[trips_df['tripid'].isin(all_legs['tripid'])] # transport categories with open(input_path + "category_transp_mode_dict.json", "r") as f: category_transp_mode_dict = json.load(f) inverted_category_transp_mode_dict = dict( (v, k) for k in 
category_transp_mode_dict for v in category_transp_mode_dict[k] ) #### remove "unknown" as transport category (?) print("Legs:", all_legs.shape[0]) print("Trips: ", len(all_legs.tripid.unique())) print("Users:", len(all_legs.userid.unique())) print() ## Divide between male and female users all_legs_M = all_legs[all_legs.gender == "Male"] print("Legs of male users:", all_legs_M.shape[0]) print("Trips of male users:", all_legs_M.tripid.nunique()) print("Male users:", len(all_legs_M.userid.unique())) print() all_legs_F = all_legs[all_legs.gender == "Female"] print("Legs of female users:", all_legs_F.shape[0]) print("Trips of female users:", all_legs_F.tripid.nunique()) print("Female users:", len(all_legs_F.userid.unique())) print() all_legs_O = all_legs[all_legs.gender == "Other"] print("Legs of other users:", all_legs_O.shape[0]) print("Trips of other users:", all_legs_O.tripid.nunique()) print("Other users:", len(all_legs_O.userid.unique())) # define for plots age_range = list(all_legs.age.unique()) # assign 'CHE' to the class Other (AAA) all_legs["onCampaigns"] = all_legs["onCampaigns"].apply( lambda x: "AAA" if x == "CHE" else x ) top10 = list(all_legs.onCampaigns.unique()) ``` <a id='Q1' ></a> ### Q1: Total number of trip legs by gender and distribution of worthwhileness ratings ``` # histogram of wastedTime def wt_histo(data, color, users, filepath): bins = np.arange(1, data.wastedTime.max() + 1.5) - 0.5 hist = data.hist(column="wastedTime", bins=bins, color=color) plt.title("Distribution of Worthhileness rating by {} users".format(users)) plt.ylabel("Number of legs") plt.xlabel("Worthwhileness Rating") plt.tight_layout() plt.savefig(filepath) # take only values in 1-5 all_legs_tmp = all_legs[ (all_legs["wastedTime"] > 0) & (all_legs["wastedTime"] <= 5) ].copy() # round to integer all_legs_tmp["wastedTime"] = all_legs_tmp["wastedTime"].apply(lambda x: int(x)) filepath = img_path + "D5.2_start_dist_worthwhileness_rating_all.png" wt_histo(all_legs_tmp, "green", 
"all", filepath) # take only values in 1-5 all_legs_M_tmp = all_legs_M[ (all_legs_M["wastedTime"] > 0) & (all_legs_M["wastedTime"] <= 5) ].copy() # round to integer all_legs_M_tmp["wastedTime"] = all_legs_M_tmp["wastedTime"].apply(lambda x: int(x)) filepath = img_path + "D5.2_start_dist_worthwhileness_rating_male.png" wt_histo(all_legs_M_tmp, "blue", "male", filepath) # take only values in 1-5 all_legs_F_tmp = all_legs_F[ (all_legs_F["wastedTime"] > 0) & (all_legs_F["wastedTime"] <= 5) ].copy() # round to integer all_legs_F_tmp["wastedTime"] = all_legs_F_tmp["wastedTime"].apply(lambda x: int(x)) filepath = img_path + "D5.2_start_dist_worthwhileness_rating_female.png" wt_histo(all_legs_F_tmp, "red", "female", filepath) all_legs_tmp fig, ax = plt.subplots(nrows=1, ncols=1) bins = np.arange(1, all_legs_tmp.wastedTime.max() + 1.5) - 0.5 colors = ["green", "blue", "red"] labels = ["All", "Male", "Female"] ax.hist( [all_legs_tmp.wastedTime, all_legs_M_tmp.wastedTime, all_legs_F_tmp.wastedTime,], bins, histtype="bar", color=colors, label=labels, ) ax.legend(prop={"size": 10}) ax.set_title("Distribution of worthwhileness ratings by gender") plt.ylabel("Number of trips") plt.xlabel("Worthwhileness Rating") plt.tight_layout() filepath = img_path + "D5.2_start_dist_worthwhileness_rating_multibar.png" plt.savefig(filepath) trips_users_df.columns # take only values in 1-5 all_trips_tmp = trips_df.loc[ (trips_df["overallScore"] > 0) & (trips_df["overallScore"] <= 5) ].copy() # round to integer all_trips_tmp["overallScore"] = all_trips_tmp["overallScore"].apply(lambda x: int(x)) all_trips_users_tmp = all_trips_tmp.merge(trips_users_df, on="tripid")[ ["tripid", "userid", "overallScore"] ] all_legs_gender = ( all_legs[["userid", "gender"]].drop_duplicates(keep="first").reset_index() ) all_trips_users_tmp = all_trips_users_tmp.merge(all_legs_gender, on="userid") all_trips_users_M = all_trips_users_tmp.loc[all_trips_users_tmp.gender == "Male"] all_trips_users_F = 
all_trips_users_tmp.loc[all_trips_users_tmp.gender == "Female"] fig, ax = plt.subplots(nrows=1, ncols=1) bins = np.arange(1, all_trips_users_tmp.overallScore.max() + 1.5) - 0.5 colors = ["green", "blue", "red"] labels = ["All", "Male", "Female"] ax.hist( [ all_trips_users_tmp.overallScore, all_trips_users_M.overallScore, all_trips_users_F.overallScore, ], bins, histtype="bar", color=colors, label=labels, ) ax.legend(prop={"size": 10}) ax.set_title("Distribution of mood ratings by gender") plt.ylabel("Number of trips") plt.xlabel("Mood Rating") plt.tight_layout() filepath = img_path + "D5.2_start_dist_mood_rating_multibar.png" plt.savefig(filepath) ``` <a id='Q2' ></a> ### Q2: Gender distribution by country (needed for data interpretation later on) ``` all_legs_M_country = ( all_legs_M[["userid", "onCampaigns"]].groupby("onCampaigns").size().reset_index() ) all_legs_M_country.columns = ["campaign_country", "nusers"] all_legs_M_country.set_index("campaign_country") all_legs_F_country = ( all_legs_F[["userid", "onCampaigns"]].groupby("onCampaigns").size().reset_index() ) all_legs_F_country.columns = ["campaign_country", "nusers"] all_legs_F_country.set_index("campaign_country") df = pd.DataFrame( { "male": all_legs_M_country.nusers, "female": all_legs_F_country.nusers, "countries": all_legs_M_country.campaign_country.values, } ) ax = df.plot.bar(x="countries", rot=45) plt.tight_layout() filepath = img_path + "D5.2_start_dist_legs_by_country_gender.png" plt.savefig(filepath) all_legs_M_country.set_index("campaign_country") all_legs_F_country.set_index("campaign_country") all_legs_country = ( all_legs[["userid", "onCampaigns"]].groupby("onCampaigns").size().reset_index() ) all_legs_country.columns = ["campaign_country", "nusers"] all_legs_country.set_index("campaign_country") all_legs_M_country = ( all_legs_M[["userid", "onCampaigns"]] .groupby("onCampaigns") .nunique()["userid"] .reset_index() ) all_legs_M_country.columns = ["campaign_country", "nusers"] 
all_legs_M_country.set_index("campaign_country") all_legs_F_country = ( all_legs_F[["userid", "onCampaigns"]] .groupby("onCampaigns") .nunique()["userid"] .reset_index() ) all_legs_F_country.columns = ["campaign_country", "nusers"] all_legs_F_country.set_index("campaign_country") df = pd.DataFrame( { "male": all_legs_M_country.nusers, "female": all_legs_F_country.nusers, "countries": all_legs_M_country.campaign_country.values, } ) ax = df.plot.bar(x="countries", rot=45) plt.tight_layout() filepath = img_path + "D5.2_start_dist_users_by_country_gender.png" plt.savefig(filepath) ``` <a id='Q3' ></a> ### Q3: Modal split for all transport modes in a pie chart (not categories) – count of all trip legs ``` tm_legs = ( all_legs[["legid", "correctedModeOfTransport_str"]] .groupby("correctedModeOfTransport_str") .size() .reset_index() ) tm_legs.columns = ["transport_mode", "nlegs"] tm_legs = tm_legs.loc[tm_legs["transport_mode"] != "unknown"] tm_dict = dict(zip(tm_legs.transport_mode, tm_legs.nlegs)) tm_legs_top = tm_legs.sort_values(by="nlegs", ascending=False)[:10] from collections import defaultdict tm_top = tm_legs_top.transport_mode.values.tolist() new_tm_dict = defaultdict(int) for tm, nlegs in tm_dict.items(): if tm in tm_top: new_tm_dict[tm] += nlegs else: new_tm_dict["other"] += nlegs sorted_tm_dict = { k: v for k, v in sorted(new_tm_dict.items(), key=lambda item: item[1]) } old_keys = [ "motorcycle", "tram", "electricBike", "subway", "other", "train", "carPassenger", "bus", "bicycle", "carDriver", "walking", ] new_keys = [ "motorcycle", "tram", "electric bike", "subway", "other", "train", "car passenger", "bus", "bicycle", "car driver", "walking", ] for oldkey, newkey in zip(old_keys, new_keys): sorted_tm_dict[newkey] = sorted_tm_dict.pop(oldkey) # Plot # https://mycolor.space/?hex=%23845EC2&sub=1 colors = [ "#2c73d2", "#845ec2", "#d83121", "#d65db1", "#ff6f91", "#c493ff", "#f3c5ff", "#ff9671", "#ffc75f", "#f9f871", "#4b4453", ][::-1] explode = np.arange(0.1, 0.6, 
0.1).tolist()[::-1] + [0] * 6 # explode 1st slice plt.pie( sorted_tm_dict.values(), explode=explode, labels=sorted_tm_dict.keys(), colors=colors, autopct="%.1f%%", startangle=90, ) plt.axis("equal") filepath = img_path + "D5.2_mode_split_pie.png" plt.savefig(filepath) ``` <a id='Q4' ></a> ### Q4: Modal split for all transport mode categories – count of all trip legs – by country ``` all_legs_country_tc = ( all_legs[["legid", "onCampaigns", "transp_category"]] .groupby(["onCampaigns", "transp_category"]) .size() .reset_index() ) all_legs_country_tc.columns = ["campaign_country", "transp_category", "nlegs"] all_legs_country_tc.head() top10countries = ( all_legs[["legid", "onCampaigns"]].groupby("onCampaigns").size().keys().tolist() ) transport_categories = ( all_legs[["legid", "transp_category"]] .groupby("transp_category") .size() .keys() .tolist() ) transport_categories_short = { "cycling_emerging_micromobility": "C", "private_motorized": "Pm", "public_transp_long_dist": "PTl", "public_transp_short_dist": "PTs", "walking": "W", } tcs = [transport_categories_short[tc] for tc in transport_categories] def plot_country(values_count, axid, title_str): sns.barplot( data=values_count, x="transp_category", y="count", ax=axes[axid], ) axes[axid].set_xticks(range(5)) axes[axid].set_xticklabels(tcs) for item in axes[axid].get_xticklabels(): item.set_rotation(45) axes[axid].tick_params(labelsize=10) axes[axid].set_title(title_str, fontsize=14) axes[axid].set_xlabel("") axes[axid].set_ylabel("") tmp = all_legs.loc[all_legs.onCampaigns == "ITA"] tmp.groupby("transp_category").size().reset_index(name="count") ### BY COUNTRY fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(12, 7)) axes = axes.ravel() axid = 0 for c in top10countries: tmp = all_legs.loc[all_legs.onCampaigns == c].copy() val_count = tmp.groupby("transp_category").size().reset_index(name="count") plot_country(val_count, axid=axid, title_str=c) if axid == 0: fig.legend(loc="best", fontsize="x-small") axid += 1 
plt.tight_layout() plt.savefig( img_path + "D5.2_stat_ct_country.png", bbox_to_anchor=True, bbox_inches="tight" ) ``` <a id='Q5' ></a> ### Q5: Total travel time per mode ``` # 1. wastedTime should be integer between 1 and 5 (stars). # Remove values outside this range and round all values to integer. # double values are because of the merging of the legs. # take only values in 1-5 all_legs_tmp = all_legs[ (all_legs["wastedTime"] > 0) & (all_legs["wastedTime"] <= 5) ].copy() # round to integer all_legs_tmp["wastedTime"] = all_legs["wastedTime"].apply(lambda x: int(x)) print("useful legs:", len(all_legs_tmp)) # all_legs_tmp.groupby("wastedTime").size().reset_index(name="count") # create a df with sum of inferred_leg_duration_min (PER TRIP) and avg wastedTime (PER TRIP) trips_tt_wt = ( all_legs_tmp.groupby("tripid")["inferred_leg_duration_min"] .sum() .reset_index(name="total_tt") ) # total travel time tmp_wt = ( all_legs_tmp.groupby("tripid")["wastedTime"].mean().reset_index(name="avg_wt") ) # average wasted time trips_tt_wt = trips_tt_wt.merge(tmp_wt) trips_tt_wt = trips_tt_wt[trips_tt_wt["total_tt"] > 0] print("useful trips:", trips_tt_wt.shape) trips_tt_wt.head() # histogram of total travel time #### TODO: plot the mean # create short-medium-long trips dist_segs = trips_tt_wt["total_tt"].quantile([0.33, 0.66]).values medium_threshold = dist_segs[0] long_threshold = dist_segs[1] print("medium_threshold:", medium_threshold) print("long_threshold:", long_threshold) print() # fig = plt.figure(figsize=(12,12)) hist = trips_tt_wt.hist( column="total_tt", bins=[i * 2 for i in range(0, math.ceil(long_threshold * 4))] ) plt.title("Histogram of total travel time") plt.ylabel("Number of trips") plt.xlabel("Minutes") plt.tight_layout() plt.savefig(img_path + "D5.2_total_trip_travel_time.png") ``` <a id='Q6' ></a> ### Q6: Total distance per mode ``` # create a df with sum of inferred_leg_duration_min (PER TRIP) and avg wastedTime (PER TRIP) trips_td_wt = ( 
all_legs_tmp.groupby("tripid")["trueDistance"].sum().reset_index(name="total_td") ) # total travel time tmp_wt = ( all_legs_tmp.groupby("tripid")["wastedTime"].mean().reset_index(name="avg_wt") ) # average wasted time trips_td_wt = trips_td_wt.merge(tmp_wt) trips_td_wt = trips_td_wt[trips_td_wt["total_td"] > 0] print("useful trips:", trips_td_wt.shape) trips_td_wt.head() # histogram of total travel time #### TODO: plot the mean # create short-medium-long trips dist_segs = trips_td_wt["total_td"].quantile([0.33, 0.66]).values medium_threshold = dist_segs[0] long_threshold = dist_segs[1] print("medium_threshold:", medium_threshold) print("long_threshold:", long_threshold) print() # fig = plt.figure(figsize=(12,12)) hist = trips_td_wt.hist(column="total_td", bins=range(0, 25000, 500),) plt.title("Histogram of total travel distance") plt.ylabel("Number of trips") plt.xlabel("Distance") plt.tight_layout() plt.savefig(img_path + "D5.2_total_trip_travel_distance.png") trips_td_wt["total_td"].mean() trips_td_wt["total_td"].median() ``` <a id='Q7' ></a> ### Q7: Age distribution by country: number of users and trips ``` all_legs_age_country = all_legs[ ["legid", "tripid", "userid", "onCampaigns", "age"] ].copy() all_legs_age_country.columns = [ "legid", "tripid", "userid", "campaign_country", "age_range", ] all_legs_age_country.head() age_country_ntrips = ( all_legs_age_country[["tripid", "campaign_country", "age_range"]] .drop_duplicates("tripid", keep="first") .groupby(["campaign_country", "age_range"]) .size() .reset_index() ) age_country_ntrips.columns = ["campaign_country", "age_range", "ntrips"] age_country_ntrips.head(3) age_country_nusers = ( all_legs_age_country[["userid", "campaign_country", "age_range"]] .drop_duplicates("userid", keep="first") .groupby(["campaign_country", "age_range"]) .size() .reset_index() ) age_country_nusers.columns = ["campaign_country", "age_range", "nusers"] age_country_nusers.head(3) age_country_ntrips_nusers = age_country_ntrips.merge( 
age_country_nusers, on=["campaign_country", "age_range"] ) age_country_ntrips_nusers.head() # top10countries # ['AAA', 'BEL', 'ESP', 'FIN', 'FRA', 'HRV', 'ITA', 'NOR', 'PRT', 'SVK'] top10countries = ( all_legs[["legid", "onCampaigns"]].groupby("onCampaigns").size().keys().tolist() ) # age ranges # ['16-24', '25-49', '50-64', '65+'] age_ranges = sorted(all_legs["age"].unique().tolist()) def plot_var_age_country(var, values_count, axid, title_str): sns.barplot( data=values_count, x="age_range", y=var, ax=axes[axid], ) axes[axid].set_xticks(range(5)) axes[axid].set_xticklabels(age_ranges) for item in axes[axid].get_xticklabels(): item.set_rotation(45) axes[axid].tick_params(labelsize=10) axes[axid].set_title(title_str, fontsize=14) axes[axid].set_xlabel("") axes[axid].set_ylabel("") tmp = age_country_ntrips_nusers.loc[age_country_ntrips_nusers.campaign_country == "ITA"] tmp # plot ntrips fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(12, 7)) axes = axes.ravel() axid = 0 for c in top10countries: tmp = age_country_ntrips.loc[age_country_ntrips.campaign_country == c] val_count = tmp plot_var_age_country("ntrips", val_count, axid=axid, title_str=c) if axid == 0: fig.legend(loc="best", fontsize="x-small") axid += 1 plt.tight_layout() plt.savefig( img_path + "D5.2_stat_ntrips_age_country.png", bbox_to_anchor=True, bbox_inches="tight", ) # plot nusers fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(12, 7)) axes = axes.ravel() axid = 0 for c in top10countries: tmp = age_country_nusers.loc[age_country_nusers.campaign_country == c] val_count = tmp plot_var_age_country("nusers", val_count, axid=axid, title_str=c) if axid == 0: fig.legend(loc="best", fontsize="x-small") axid += 1 plt.tight_layout() plt.savefig( img_path + "D5.2_stat_nusers_age_country.png", bbox_to_anchor=True, bbox_inches="tight", ) def plot_grouped_age_country(values_count, axid, title_str): values_count.plot.bar() axes[axid].set_xticks(range(5)) axes[axid].set_xticklabels(age_ranges) for item in 
axes[axid].get_xticklabels(): item.set_rotation(45) axes[axid].tick_params(labelsize=10) axes[axid].set_title(title_str, fontsize=14) axes[axid].set_xlabel("") axes[axid].set_ylabel("") country_axes = dict( el for el in zip(top10countries, itertools.product(range(2), range(5))) ) country_axes # plot grouped fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(12, 7)) axid = 0 country_axes = dict( el for el in zip(top10countries, itertools.product(range(2), range(5))) ) for c in top10countries: tmp = age_country_ntrips_nusers.loc[age_country_ntrips_nusers.campaign_country == c] tmp.plot.bar(x="age_range", ax=axes[country_axes[c]], legend=False, title=c) if axid == 0: fig.legend(loc="best", fontsize="x-small") axid += 1 # handles, labels = ax.get_legend_handles_labels() # fig.legend(handles, labels, loc='upper center') plt.tight_layout() plt.savefig( img_path + "D5.2_stat_grouped_age_country.png", bbox_to_anchor=True, bbox_inches="tight", ) ```
github_jupyter
**Chapter 13 – Loading and Preprocessing Data with TensorFlow** _This notebook contains all the sample code and solutions to the exercises in chapter 13._ <table align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/13_loading_and_preprocessing_data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> </table> # Setup First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0. ``` # Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x !pip install -q -U tfx==0.21.2 print("You can safely ignore the package incompatibility errors.") except Exception: pass # TensorFlow ≥2.0 is required import tensorflow as tf from tensorflow import keras assert tf.__version__ >= "2.0" # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "data" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) os.makedirs(IMAGES_PATH, exist_ok=True) def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." 
+ fig_extension) print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) ``` ## Datasets ``` X = tf.range(10) dataset = tf.data.Dataset.from_tensor_slices(X) dataset ``` Equivalently: ``` dataset = tf.data.Dataset.range(10) for item in dataset: print(item) dataset = dataset.repeat(3).batch(7) for item in dataset: print(item) dataset = dataset.map(lambda x: x * 2) for item in dataset: print(item) #dataset = dataset.apply(tf.data.experimental.unbatch()) # Now deprecated dataset = dataset.unbatch() dataset = dataset.filter(lambda x: x < 10) # keep only items < 10 for item in dataset.take(3): print(item) tf.random.set_seed(42) dataset = tf.data.Dataset.range(10).repeat(3) dataset = dataset.shuffle(buffer_size=3, seed=42).batch(7) for item in dataset: print(item) ``` ## Split the California dataset to multiple CSV files Let's start by loading and preparing the California housing dataset. We first load it, then split it into a training set, a validation set and a test set, and finally we scale it: ``` from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler housing = fetch_california_housing() X_train_full, X_test, y_train_full, y_test = train_test_split( housing.data, housing.target.reshape(-1, 1), random_state=42) X_train, X_valid, y_train, y_valid = train_test_split( X_train_full, y_train_full, random_state=42) scaler = StandardScaler() scaler.fit(X_train) X_mean = scaler.mean_ X_std = scaler.scale_ ``` For a very large dataset that does not fit in memory, you will typically want to split it into many files first, then have TensorFlow read these files in parallel. 
To demonstrate this, let's start by splitting the housing dataset and save it to 20 CSV files: ``` def save_to_multiple_csv_files(data, name_prefix, header=None, n_parts=10): housing_dir = os.path.join("datasets", "housing") os.makedirs(housing_dir, exist_ok=True) path_format = os.path.join(housing_dir, "my_{}_{:02d}.csv") filepaths = [] m = len(data) for file_idx, row_indices in enumerate(np.array_split(np.arange(m), n_parts)): part_csv = path_format.format(name_prefix, file_idx) filepaths.append(part_csv) with open(part_csv, "wt", encoding="utf-8") as f: if header is not None: f.write(header) f.write("\n") for row_idx in row_indices: f.write(",".join([repr(col) for col in data[row_idx]])) f.write("\n") return filepaths train_data = np.c_[X_train, y_train] valid_data = np.c_[X_valid, y_valid] test_data = np.c_[X_test, y_test] header_cols = housing.feature_names + ["MedianHouseValue"] header = ",".join(header_cols) train_filepaths = save_to_multiple_csv_files(train_data, "train", header, n_parts=20) valid_filepaths = save_to_multiple_csv_files(valid_data, "valid", header, n_parts=10) test_filepaths = save_to_multiple_csv_files(test_data, "test", header, n_parts=10) ``` Okay, now let's take a peek at the first few lines of one of these CSV files: ``` import pandas as pd pd.read_csv(train_filepaths[0]).head() ``` Or in text mode: ``` with open(train_filepaths[0]) as f: for i in range(5): print(f.readline(), end="") train_filepaths ``` ## Building an Input Pipeline ``` filepath_dataset = tf.data.Dataset.list_files(train_filepaths, seed=42) for filepath in filepath_dataset: print(filepath) n_readers = 5 dataset = filepath_dataset.interleave( lambda filepath: tf.data.TextLineDataset(filepath).skip(1), cycle_length=n_readers) for line in dataset.take(5): print(line.numpy()) ``` Notice that field 4 is interpreted as a string. 
``` record_defaults=[0, np.nan, tf.constant(np.nan, dtype=tf.float64), "Hello", tf.constant([])] parsed_fields = tf.io.decode_csv('1,2,3,4,5', record_defaults) parsed_fields ``` Notice that all missing fields are replaced with their default value, when provided: ``` parsed_fields = tf.io.decode_csv(',,,,5', record_defaults) parsed_fields ``` The 5th field is compulsory (since we provided `tf.constant([])` as the "default value"), so we get an exception if we do not provide it: ``` try: parsed_fields = tf.io.decode_csv(',,,,', record_defaults) except tf.errors.InvalidArgumentError as ex: print(ex) ``` The number of fields should match exactly the number of fields in the `record_defaults`: ``` try: parsed_fields = tf.io.decode_csv('1,2,3,4,5,6,7', record_defaults) except tf.errors.InvalidArgumentError as ex: print(ex) n_inputs = 8 # X_train.shape[-1] @tf.function def preprocess(line): defs = [0.] * n_inputs + [tf.constant([], dtype=tf.float32)] fields = tf.io.decode_csv(line, record_defaults=defs) x = tf.stack(fields[:-1]) y = tf.stack(fields[-1:]) return (x - X_mean) / X_std, y preprocess(b'4.2083,44.0,5.3232,0.9171,846.0,2.3370,37.47,-122.2,2.782') def csv_reader_dataset(filepaths, repeat=1, n_readers=5, n_read_threads=None, shuffle_buffer_size=10000, n_parse_threads=5, batch_size=32): dataset = tf.data.Dataset.list_files(filepaths).repeat(repeat) dataset = dataset.interleave( lambda filepath: tf.data.TextLineDataset(filepath).skip(1), cycle_length=n_readers, num_parallel_calls=n_read_threads) dataset = dataset.shuffle(shuffle_buffer_size) dataset = dataset.map(preprocess, num_parallel_calls=n_parse_threads) dataset = dataset.batch(batch_size) return dataset.prefetch(1) tf.random.set_seed(42) train_set = csv_reader_dataset(train_filepaths, batch_size=3) for X_batch, y_batch in train_set.take(2): print("X =", X_batch) print("y =", y_batch) print() train_set = csv_reader_dataset(train_filepaths, repeat=None) valid_set = csv_reader_dataset(valid_filepaths) test_set = 
csv_reader_dataset(test_filepaths) keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential([ keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]), keras.layers.Dense(1), ]) model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3)) batch_size = 32 model.fit(train_set, steps_per_epoch=len(X_train) // batch_size, epochs=10, validation_data=valid_set) model.evaluate(test_set, steps=len(X_test) // batch_size) new_set = test_set.map(lambda X, y: X) # we could instead just pass test_set, Keras would ignore the labels X_new = X_test model.predict(new_set, steps=len(X_new) // batch_size) optimizer = keras.optimizers.Nadam(lr=0.01) loss_fn = keras.losses.mean_squared_error n_epochs = 5 batch_size = 32 n_steps_per_epoch = len(X_train) // batch_size total_steps = n_epochs * n_steps_per_epoch global_step = 0 for X_batch, y_batch in train_set.take(total_steps): global_step += 1 print("\rGlobal step {}/{}".format(global_step, total_steps), end="") with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) optimizer = keras.optimizers.Nadam(lr=0.01) loss_fn = keras.losses.mean_squared_error @tf.function def train(model, n_epochs, batch_size=32, n_readers=5, n_read_threads=5, shuffle_buffer_size=10000, n_parse_threads=5): train_set = csv_reader_dataset(train_filepaths, repeat=n_epochs, n_readers=n_readers, n_read_threads=n_read_threads, shuffle_buffer_size=shuffle_buffer_size, n_parse_threads=n_parse_threads, batch_size=batch_size) for X_batch, y_batch in train_set: with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) 
gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) train(model, 5) keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) optimizer = keras.optimizers.Nadam(lr=0.01) loss_fn = keras.losses.mean_squared_error @tf.function def train(model, n_epochs, batch_size=32, n_readers=5, n_read_threads=5, shuffle_buffer_size=10000, n_parse_threads=5): train_set = csv_reader_dataset(train_filepaths, repeat=n_epochs, n_readers=n_readers, n_read_threads=n_read_threads, shuffle_buffer_size=shuffle_buffer_size, n_parse_threads=n_parse_threads, batch_size=batch_size) n_steps_per_epoch = len(X_train) // batch_size total_steps = n_epochs * n_steps_per_epoch global_step = 0 for X_batch, y_batch in train_set.take(total_steps): global_step += 1 if tf.equal(global_step % 100, 0): tf.print("\rGlobal step", global_step, "/", total_steps) with tf.GradientTape() as tape: y_pred = model(X_batch) main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred)) loss = tf.add_n([main_loss] + model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) train(model, 5) ``` Here is a short description of each method in the `Dataset` class: ``` for m in dir(tf.data.Dataset): if not (m.startswith("_") or m.endswith("_")): func = getattr(tf.data.Dataset, m) if hasattr(func, "__doc__"): print("● {:21s}{}".format(m + "()", func.__doc__.split("\n")[0])) ``` ## The `TFRecord` binary format A TFRecord file is just a list of binary records. 
You can create one using a `tf.io.TFRecordWriter`: ``` with tf.io.TFRecordWriter("my_data.tfrecord") as f: f.write(b"This is the first record") f.write(b"And this is the second record") ``` And you can read it using a `tf.data.TFRecordDataset`: ``` filepaths = ["my_data.tfrecord"] dataset = tf.data.TFRecordDataset(filepaths) for item in dataset: print(item) ``` You can read multiple TFRecord files with just one `TFRecordDataset`. By default it will read them one at a time, but if you set `num_parallel_reads=3`, it will read 3 at a time in parallel and interleave their records: ``` filepaths = ["my_test_{}.tfrecord".format(i) for i in range(5)] for i, filepath in enumerate(filepaths): with tf.io.TFRecordWriter(filepath) as f: for j in range(3): f.write("File {} record {}".format(i, j).encode("utf-8")) dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=3) for item in dataset: print(item) options = tf.io.TFRecordOptions(compression_type="GZIP") with tf.io.TFRecordWriter("my_compressed.tfrecord", options) as f: f.write(b"This is the first record") f.write(b"And this is the second record") dataset = tf.data.TFRecordDataset(["my_compressed.tfrecord"], compression_type="GZIP") for item in dataset: print(item) ``` ### A Brief Intro to Protocol Buffers For this section you need to [install protobuf](https://developers.google.com/protocol-buffers/docs/downloads). In general you will not have to do so when using TensorFlow, as it comes with functions to create and parse protocol buffers of type `tf.train.Example`, which are generally sufficient. However, in this section we will learn about protocol buffers by creating our own simple protobuf definition, so we need the protobuf compiler (`protoc`): we will use it to compile the protobuf definition to a Python module that we can then use in our code. 
First let's write a simple protobuf definition: ``` %%writefile person.proto syntax = "proto3"; message Person { string name = 1; int32 id = 2; repeated string email = 3; } ``` And let's compile it (the `--descriptor_set_out` and `--include_imports` options are only required for the `tf.io.decode_proto()` example below): ``` !protoc person.proto --python_out=. --descriptor_set_out=person.desc --include_imports !ls person* from person_pb2 import Person person = Person(name="Al", id=123, email=["a@b.com"]) # create a Person print(person) # display the Person person.name # read a field person.name = "Alice" # modify a field person.email[0] # repeated fields can be accessed like arrays person.email.append("c@d.com") # add an email address s = person.SerializeToString() # serialize to a byte string s person2 = Person() # create a new Person person2.ParseFromString(s) # parse the byte string (27 bytes) person == person2 # now they are equal ``` #### Custom protobuf In rare cases, you may want to parse a custom protobuf (like the one we just created) in TensorFlow. For this you can use the `tf.io.decode_proto()` function: ``` person_tf = tf.io.decode_proto( bytes=s, message_type="Person", field_names=["name", "id", "email"], output_types=[tf.string, tf.int32, tf.string], descriptor_source="person.desc") person_tf.values ``` For more details, see the [`tf.io.decode_proto()`](https://www.tensorflow.org/api_docs/python/tf/io/decode_proto) documentation. 
### TensorFlow Protobufs Here is the definition of the tf.train.Example protobuf: ```proto syntax = "proto3"; message BytesList { repeated bytes value = 1; } message FloatList { repeated float value = 1 [packed = true]; } message Int64List { repeated int64 value = 1 [packed = true]; } message Feature { oneof kind { BytesList bytes_list = 1; FloatList float_list = 2; Int64List int64_list = 3; } }; message Features { map<string, Feature> feature = 1; }; message Example { Features features = 1; }; ``` **Warning**: there's currently a bug preventing `from tensorflow.train import X` so we work around it by writing `X = tf.train.X`. See https://github.com/tensorflow/tensorflow/issues/33289 for more details. ``` #from tensorflow.train import BytesList, FloatList, Int64List #from tensorflow.train import Feature, Features, Example BytesList = tf.train.BytesList FloatList = tf.train.FloatList Int64List = tf.train.Int64List Feature = tf.train.Feature Features = tf.train.Features Example = tf.train.Example person_example = Example( features=Features( feature={ "name": Feature(bytes_list=BytesList(value=[b"Alice"])), "id": Feature(int64_list=Int64List(value=[123])), "emails": Feature(bytes_list=BytesList(value=[b"a@b.com", b"c@d.com"])) })) with tf.io.TFRecordWriter("my_contacts.tfrecord") as f: f.write(person_example.SerializeToString()) feature_description = { "name": tf.io.FixedLenFeature([], tf.string, default_value=""), "id": tf.io.FixedLenFeature([], tf.int64, default_value=0), "emails": tf.io.VarLenFeature(tf.string), } for serialized_example in tf.data.TFRecordDataset(["my_contacts.tfrecord"]): parsed_example = tf.io.parse_single_example(serialized_example, feature_description) parsed_example parsed_example parsed_example["emails"].values[0] tf.sparse.to_dense(parsed_example["emails"], default_value=b"") parsed_example["emails"].values ``` ### Putting Images in TFRecords ``` from sklearn.datasets import load_sample_images img = load_sample_images()["images"][0] 
plt.imshow(img) plt.axis("off") plt.title("Original Image") plt.show() data = tf.io.encode_jpeg(img) example_with_image = Example(features=Features(feature={ "image": Feature(bytes_list=BytesList(value=[data.numpy()]))})) serialized_example = example_with_image.SerializeToString() # then save to TFRecord feature_description = { "image": tf.io.VarLenFeature(tf.string) } example_with_image = tf.io.parse_single_example(serialized_example, feature_description) decoded_img = tf.io.decode_jpeg(example_with_image["image"].values[0]) ``` Or use `decode_image()` which supports BMP, GIF, JPEG and PNG formats: ``` decoded_img = tf.io.decode_image(example_with_image["image"].values[0]) plt.imshow(decoded_img) plt.title("Decoded Image") plt.axis("off") plt.show() ``` ### Putting Tensors and Sparse Tensors in TFRecords Tensors can be serialized and parsed easily using `tf.io.serialize_tensor()` and `tf.io.parse_tensor()`: ``` t = tf.constant([[0., 1.], [2., 3.], [4., 5.]]) s = tf.io.serialize_tensor(t) s tf.io.parse_tensor(s, out_type=tf.float32) serialized_sparse = tf.io.serialize_sparse(parsed_example["emails"]) serialized_sparse BytesList(value=serialized_sparse.numpy()) dataset = tf.data.TFRecordDataset(["my_contacts.tfrecord"]).batch(10) for serialized_examples in dataset: parsed_examples = tf.io.parse_example(serialized_examples, feature_description) parsed_examples ``` ## Handling Sequential Data Using `SequenceExample` ```proto syntax = "proto3"; message FeatureList { repeated Feature feature = 1; }; message FeatureLists { map<string, FeatureList> feature_list = 1; }; message SequenceExample { Features context = 1; FeatureLists feature_lists = 2; }; ``` **Warning**: there's currently a bug preventing `from tensorflow.train import X` so we work around it by writing `X = tf.train.X`. See https://github.com/tensorflow/tensorflow/issues/33289 for more details. 
``` #from tensorflow.train import FeatureList, FeatureLists, SequenceExample FeatureList = tf.train.FeatureList FeatureLists = tf.train.FeatureLists SequenceExample = tf.train.SequenceExample context = Features(feature={ "author_id": Feature(int64_list=Int64List(value=[123])), "title": Feature(bytes_list=BytesList(value=[b"A", b"desert", b"place", b"."])), "pub_date": Feature(int64_list=Int64List(value=[1623, 12, 25])) }) content = [["When", "shall", "we", "three", "meet", "again", "?"], ["In", "thunder", ",", "lightning", ",", "or", "in", "rain", "?"]] comments = [["When", "the", "hurlyburly", "'s", "done", "."], ["When", "the", "battle", "'s", "lost", "and", "won", "."]] def words_to_feature(words): return Feature(bytes_list=BytesList(value=[word.encode("utf-8") for word in words])) content_features = [words_to_feature(sentence) for sentence in content] comments_features = [words_to_feature(comment) for comment in comments] sequence_example = SequenceExample( context=context, feature_lists=FeatureLists(feature_list={ "content": FeatureList(feature=content_features), "comments": FeatureList(feature=comments_features) })) sequence_example serialized_sequence_example = sequence_example.SerializeToString() context_feature_descriptions = { "author_id": tf.io.FixedLenFeature([], tf.int64, default_value=0), "title": tf.io.VarLenFeature(tf.string), "pub_date": tf.io.FixedLenFeature([3], tf.int64, default_value=[0, 0, 0]), } sequence_feature_descriptions = { "content": tf.io.VarLenFeature(tf.string), "comments": tf.io.VarLenFeature(tf.string), } parsed_context, parsed_feature_lists = tf.io.parse_single_sequence_example( serialized_sequence_example, context_feature_descriptions, sequence_feature_descriptions) parsed_context parsed_context["title"].values parsed_feature_lists print(tf.RaggedTensor.from_sparse(parsed_feature_lists["content"])) ``` # The Features API Let's use the variant of the California housing dataset that we used in Chapter 2, since it contains 
categorical features and missing values: ``` import os import tarfile import urllib DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/" HOUSING_PATH = os.path.join("datasets", "housing") HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz" def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH): os.makedirs(housing_path, exist_ok=True) tgz_path = os.path.join(housing_path, "housing.tgz") urllib.request.urlretrieve(housing_url, tgz_path) housing_tgz = tarfile.open(tgz_path) housing_tgz.extractall(path=housing_path) housing_tgz.close() fetch_housing_data() import pandas as pd def load_housing_data(housing_path=HOUSING_PATH): csv_path = os.path.join(housing_path, "housing.csv") return pd.read_csv(csv_path) housing = load_housing_data() housing.head() housing_median_age = tf.feature_column.numeric_column("housing_median_age") age_mean, age_std = X_mean[1], X_std[1] # The median age is column in 1 housing_median_age = tf.feature_column.numeric_column( "housing_median_age", normalizer_fn=lambda x: (x - age_mean) / age_std) median_income = tf.feature_column.numeric_column("median_income") bucketized_income = tf.feature_column.bucketized_column( median_income, boundaries=[1.5, 3., 4.5, 6.]) bucketized_income ocean_prox_vocab = ['<1H OCEAN', 'INLAND', 'ISLAND', 'NEAR BAY', 'NEAR OCEAN'] ocean_proximity = tf.feature_column.categorical_column_with_vocabulary_list( "ocean_proximity", ocean_prox_vocab) ocean_proximity # Just an example, it's not used later on city_hash = tf.feature_column.categorical_column_with_hash_bucket( "city", hash_bucket_size=1000) city_hash bucketized_age = tf.feature_column.bucketized_column( housing_median_age, boundaries=[-1., -0.5, 0., 0.5, 1.]) # age was scaled age_and_ocean_proximity = tf.feature_column.crossed_column( [bucketized_age, ocean_proximity], hash_bucket_size=100) latitude = tf.feature_column.numeric_column("latitude") longitude = tf.feature_column.numeric_column("longitude") 
bucketized_latitude = tf.feature_column.bucketized_column( latitude, boundaries=list(np.linspace(32., 42., 20 - 1))) bucketized_longitude = tf.feature_column.bucketized_column( longitude, boundaries=list(np.linspace(-125., -114., 20 - 1))) location = tf.feature_column.crossed_column( [bucketized_latitude, bucketized_longitude], hash_bucket_size=1000) ocean_proximity_one_hot = tf.feature_column.indicator_column(ocean_proximity) ocean_proximity_embed = tf.feature_column.embedding_column(ocean_proximity, dimension=2) ``` ### Using Feature Columns for Parsing ``` median_house_value = tf.feature_column.numeric_column("median_house_value") columns = [housing_median_age, median_house_value] feature_descriptions = tf.feature_column.make_parse_example_spec(columns) feature_descriptions with tf.io.TFRecordWriter("my_data_with_features.tfrecords") as f: for x, y in zip(X_train[:, 1:2], y_train): example = Example(features=Features(feature={ "housing_median_age": Feature(float_list=FloatList(value=[x])), "median_house_value": Feature(float_list=FloatList(value=[y])) })) f.write(example.SerializeToString()) keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) def parse_examples(serialized_examples): examples = tf.io.parse_example(serialized_examples, feature_descriptions) targets = examples.pop("median_house_value") # separate the targets return examples, targets batch_size = 32 dataset = tf.data.TFRecordDataset(["my_data_with_features.tfrecords"]) dataset = dataset.repeat().shuffle(10000).batch(batch_size).map(parse_examples) ``` **Warning**: the `DenseFeatures` layer currently does not work with the Functional API, see [TF issue #27416](https://github.com/tensorflow/tensorflow/issues/27416). Hopefully this will be resolved before the final release of TF 2.0. 
``` columns_without_target = columns[:-1] model = keras.models.Sequential([ keras.layers.DenseFeatures(feature_columns=columns_without_target), keras.layers.Dense(1) ]) model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) model.fit(dataset, steps_per_epoch=len(X_train) // batch_size, epochs=5) some_columns = [ocean_proximity_embed, bucketized_income] dense_features = keras.layers.DenseFeatures(some_columns) dense_features({ "ocean_proximity": [["NEAR OCEAN"], ["INLAND"], ["INLAND"]], "median_income": [[3.], [7.2], [1.]] }) ``` # TF Transform ``` try: import tensorflow_transform as tft def preprocess(inputs): # inputs is a batch of input features median_age = inputs["housing_median_age"] ocean_proximity = inputs["ocean_proximity"] standardized_age = tft.scale_to_z_score(median_age - tft.mean(median_age)) ocean_proximity_id = tft.compute_and_apply_vocabulary(ocean_proximity) return { "standardized_median_age": standardized_age, "ocean_proximity_id": ocean_proximity_id } except ImportError: print("TF Transform is not installed. 
Try running: pip3 install -U tensorflow-transform") ``` # TensorFlow Datasets ``` import tensorflow_datasets as tfds datasets = tfds.load(name="mnist") mnist_train, mnist_test = datasets["train"], datasets["test"] print(tfds.list_builders()) plt.figure(figsize=(6,3)) mnist_train = mnist_train.repeat(5).batch(32).prefetch(1) for item in mnist_train: images = item["image"] labels = item["label"] for index in range(5): plt.subplot(1, 5, index + 1) image = images[index, ..., 0] label = labels[index].numpy() plt.imshow(image, cmap="binary") plt.title(label) plt.axis("off") break # just showing part of the first batch datasets = tfds.load(name="mnist") mnist_train, mnist_test = datasets["train"], datasets["test"] mnist_train = mnist_train.repeat(5).batch(32) mnist_train = mnist_train.map(lambda items: (items["image"], items["label"])) mnist_train = mnist_train.prefetch(1) for images, labels in mnist_train.take(1): print(images.shape) print(labels.numpy()) keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) datasets = tfds.load(name="mnist", batch_size=32, as_supervised=True) mnist_train = datasets["train"].repeat().prefetch(1) model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28, 1]), keras.layers.Lambda(lambda images: tf.cast(images, tf.float32)), keras.layers.Dense(10, activation="softmax")]) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) model.fit(mnist_train, steps_per_epoch=60000 // 32, epochs=5) ``` # TensorFlow Hub ``` keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) import tensorflow_hub as hub hub_layer = hub.KerasLayer("https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1", output_shape=[50], input_shape=[], dtype=tf.string) model = keras.Sequential() model.add(hub_layer) model.add(keras.layers.Dense(16, activation='relu')) model.add(keras.layers.Dense(1, activation='sigmoid')) model.summary() sentences = tf.constant(["It was a 
great movie", "The actors were amazing"]) embeddings = hub_layer(sentences) embeddings ``` # Exercises ## 1. to 8. See Appendix A ## 9. ### a. _Exercise: Load the Fashion MNIST dataset (introduced in Chapter 10); split it into a training set, a validation set, and a test set; shuffle the training set; and save each dataset to multiple TFRecord files. Each record should be a serialized `Example` protobuf with two features: the serialized image (use `tf.io.serialize_tensor()` to serialize each image), and the label. Note: for large images, you could use `tf.io.encode_jpeg()` instead. This would save a lot of space, but it would lose a bit of image quality._ ``` (X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data() X_valid, X_train = X_train_full[:5000], X_train_full[5000:] y_valid, y_train = y_train_full[:5000], y_train_full[5000:] keras.backend.clear_session() np.random.seed(42) tf.random.set_seed(42) train_set = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(len(X_train)) valid_set = tf.data.Dataset.from_tensor_slices((X_valid, y_valid)) test_set = tf.data.Dataset.from_tensor_slices((X_test, y_test)) def create_example(image, label): image_data = tf.io.serialize_tensor(image) #image_data = tf.io.encode_jpeg(image[..., np.newaxis]) return Example( features=Features( feature={ "image": Feature(bytes_list=BytesList(value=[image_data.numpy()])), "label": Feature(int64_list=Int64List(value=[label])), })) for image, label in valid_set.take(1): print(create_example(image, label)) ``` The following function saves a given dataset to a set of TFRecord files. The examples are written to the files in a round-robin fashion. To do this, we enumerate all the examples using the `dataset.enumerate()` method, and we compute `index % n_shards` to decide which file to write to. We use the standard `contextlib.ExitStack` class to make sure that all writers are properly closed whether or not an I/O error occurs while writing. 
``` from contextlib import ExitStack def write_tfrecords(name, dataset, n_shards=10): paths = ["{}.tfrecord-{:05d}-of-{:05d}".format(name, index, n_shards) for index in range(n_shards)] with ExitStack() as stack: writers = [stack.enter_context(tf.io.TFRecordWriter(path)) for path in paths] for index, (image, label) in dataset.enumerate(): shard = index % n_shards example = create_example(image, label) writers[shard].write(example.SerializeToString()) return paths train_filepaths = write_tfrecords("my_fashion_mnist.train", train_set) valid_filepaths = write_tfrecords("my_fashion_mnist.valid", valid_set) test_filepaths = write_tfrecords("my_fashion_mnist.test", test_set) ``` ### b. _Exercise: Then use tf.data to create an efficient dataset for each set. Finally, use a Keras model to train these datasets, including a preprocessing layer to standardize each input feature. Try to make the input pipeline as efficient as possible, using TensorBoard to visualize profiling data._ ``` def preprocess(tfrecord): feature_descriptions = { "image": tf.io.FixedLenFeature([], tf.string, default_value=""), "label": tf.io.FixedLenFeature([], tf.int64, default_value=-1) } example = tf.io.parse_single_example(tfrecord, feature_descriptions) image = tf.io.parse_tensor(example["image"], out_type=tf.uint8) #image = tf.io.decode_jpeg(example["image"]) image = tf.reshape(image, shape=[28, 28]) return image, example["label"] def mnist_dataset(filepaths, n_read_threads=5, shuffle_buffer_size=None, n_parse_threads=5, batch_size=32, cache=True): dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=n_read_threads) if cache: dataset = dataset.cache() if shuffle_buffer_size: dataset = dataset.shuffle(shuffle_buffer_size) dataset = dataset.map(preprocess, num_parallel_calls=n_parse_threads) dataset = dataset.batch(batch_size) return dataset.prefetch(1) train_set = mnist_dataset(train_filepaths, shuffle_buffer_size=60000) valid_set = mnist_dataset(train_filepaths) test_set = 
mnist_dataset(train_filepaths) for X, y in train_set.take(1): for i in range(5): plt.subplot(1, 5, i + 1) plt.imshow(X[i].numpy(), cmap="binary") plt.axis("off") plt.title(str(y[i].numpy())) keras.backend.clear_session() tf.random.set_seed(42) np.random.seed(42) class Standardization(keras.layers.Layer): def adapt(self, data_sample): self.means_ = np.mean(data_sample, axis=0, keepdims=True) self.stds_ = np.std(data_sample, axis=0, keepdims=True) def call(self, inputs): return (inputs - self.means_) / (self.stds_ + keras.backend.epsilon()) standardization = Standardization(input_shape=[28, 28]) # or perhaps soon: #standardization = keras.layers.Normalization() sample_image_batches = train_set.take(100).map(lambda image, label: image) sample_images = np.concatenate(list(sample_image_batches.as_numpy_iterator()), axis=0).astype(np.float32) standardization.adapt(sample_images) model = keras.models.Sequential([ standardization, keras.layers.Flatten(), keras.layers.Dense(100, activation="relu"), keras.layers.Dense(10, activation="softmax") ]) model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"]) from datetime import datetime logs = os.path.join(os.curdir, "my_logs", "run_" + datetime.now().strftime("%Y%m%d_%H%M%S")) tensorboard_cb = tf.keras.callbacks.TensorBoard( log_dir=logs, histogram_freq=1, profile_batch=10) model.fit(train_set, epochs=5, validation_data=valid_set, callbacks=[tensorboard_cb]) ``` **Warning:** The profiling tab in TensorBoard works if you use TensorFlow 2.2+. You also need to make sure `tensorboard_plugin_profile` is installed (and restart Jupyter if necessary). ``` %load_ext tensorboard %tensorboard --logdir=./my_logs --port=6006 ``` ## 10. _Exercise: In this exercise you will download a dataset, split it, create a `tf.data.Dataset` to load it and preprocess it efficiently, then build and train a binary classification model containing an `Embedding` layer._ ### a. 
_Exercise: Download the [Large Movie Review Dataset](https://homl.info/imdb), which contains 50,000 movies reviews from the [Internet Movie Database](https://imdb.com/). The data is organized in two directories, `train` and `test`, each containing a `pos` subdirectory with 12,500 positive reviews and a `neg` subdirectory with 12,500 negative reviews. Each review is stored in a separate text file. There are other files and folders (including preprocessed bag-of-words), but we will ignore them in this exercise._ ``` from pathlib import Path DOWNLOAD_ROOT = "http://ai.stanford.edu/~amaas/data/sentiment/" FILENAME = "aclImdb_v1.tar.gz" filepath = keras.utils.get_file(FILENAME, DOWNLOAD_ROOT + FILENAME, extract=True) path = Path(filepath).parent / "aclImdb" path for name, subdirs, files in os.walk(path): indent = len(Path(name).parts) - len(path.parts) print(" " * indent + Path(name).parts[-1] + os.sep) for index, filename in enumerate(sorted(files)): if index == 3: print(" " * (indent + 1) + "...") break print(" " * (indent + 1) + filename) def review_paths(dirpath): return [str(path) for path in dirpath.glob("*.txt")] train_pos = review_paths(path / "train" / "pos") train_neg = review_paths(path / "train" / "neg") test_valid_pos = review_paths(path / "test" / "pos") test_valid_neg = review_paths(path / "test" / "neg") len(train_pos), len(train_neg), len(test_valid_pos), len(test_valid_neg) ``` ### b. _Exercise: Split the test set into a validation set (15,000) and a test set (10,000)._ ``` np.random.shuffle(test_valid_pos) test_pos = test_valid_pos[:5000] test_neg = test_valid_neg[:5000] valid_pos = test_valid_pos[5000:] valid_neg = test_valid_neg[5000:] ``` ### c. 
_Exercise: Use tf.data to create an efficient dataset for each set._ Since the dataset fits in memory, we can just load all the data using pure Python code and use `tf.data.Dataset.from_tensor_slices()`: ``` def imdb_dataset(filepaths_positive, filepaths_negative): reviews = [] labels = [] for filepaths, label in ((filepaths_negative, 0), (filepaths_positive, 1)): for filepath in filepaths: with open(filepath) as review_file: reviews.append(review_file.read()) labels.append(label) return tf.data.Dataset.from_tensor_slices( (tf.constant(reviews), tf.constant(labels))) for X, y in imdb_dataset(train_pos, train_neg).take(3): print(X) print(y) print() %timeit -r1 for X, y in imdb_dataset(train_pos, train_neg).repeat(10): pass ``` It takes about 20 seconds to load the dataset and go through it 10 times. But let's pretend the dataset does not fit in memory, just to make things more interesting. Luckily, each review fits on just one line (they use `<br />` to indicate line breaks), so we can read the reviews using a `TextLineDataset`. If they didn't, we would have to preprocess the input files (e.g., converting them to TFRecords). For very large datasets, it would make sense to use a tool like Apache Beam for that. ``` def imdb_dataset(filepaths_positive, filepaths_negative, n_read_threads=5): dataset_neg = tf.data.TextLineDataset(filepaths_negative, num_parallel_reads=n_read_threads) dataset_neg = dataset_neg.map(lambda review: (review, 0)) dataset_pos = tf.data.TextLineDataset(filepaths_positive, num_parallel_reads=n_read_threads) dataset_pos = dataset_pos.map(lambda review: (review, 1)) return tf.data.Dataset.concatenate(dataset_pos, dataset_neg) %timeit -r1 for X, y in imdb_dataset(train_pos, train_neg).repeat(10): pass ``` Now it takes about 34 seconds to go through the dataset 10 times. That's much slower, essentially because the dataset is not cached in RAM, so it must be reloaded at each epoch.
If you add `.cache()` just before `.repeat(10)`, you will see that this implementation will be about as fast as the previous one. ``` %timeit -r1 for X, y in imdb_dataset(train_pos, train_neg).cache().repeat(10): pass batch_size = 32 train_set = imdb_dataset(train_pos, train_neg).shuffle(25000).batch(batch_size).prefetch(1) valid_set = imdb_dataset(valid_pos, valid_neg).batch(batch_size).prefetch(1) test_set = imdb_dataset(test_pos, test_neg).batch(batch_size).prefetch(1) ``` ### d. _Exercise: Create a binary classification model, using a `TextVectorization` layer to preprocess each review. If the `TextVectorization` layer is not yet available (or if you like a challenge), try to create your own custom preprocessing layer: you can use the functions in the `tf.strings` package, for example `lower()` to make everything lowercase, `regex_replace()` to replace punctuation with spaces, and `split()` to split words on spaces. You should use a lookup table to output word indices, which must be prepared in the `adapt()` method._ Let's first write a function to preprocess the reviews, cropping them to 300 characters, converting them to lower case, then replacing `<br />` and all non-letter characters to spaces, splitting the reviews into words, and finally padding or cropping each review so it ends up with exactly `n_words` tokens: ``` def preprocess(X_batch, n_words=50): shape = tf.shape(X_batch) * tf.constant([1, 0]) + tf.constant([0, n_words]) Z = tf.strings.substr(X_batch, 0, 300) Z = tf.strings.lower(Z) Z = tf.strings.regex_replace(Z, b"<br\\s*/?>", b" ") Z = tf.strings.regex_replace(Z, b"[^a-z]", b" ") Z = tf.strings.split(Z) return Z.to_tensor(shape=shape, default_value=b"<pad>") X_example = tf.constant(["It's a great, great movie! 
I loved it.", "It was terrible, run away!!!"]) preprocess(X_example) ``` Now let's write a second utility function that will take a data sample with the same format as the output of the `preprocess()` function, and will output the list of the top `max_size` most frequent words, ensuring that the padding token is first: ``` from collections import Counter def get_vocabulary(data_sample, max_size=1000): preprocessed_reviews = preprocess(data_sample).numpy() counter = Counter() for words in preprocessed_reviews: for word in words: if word != b"<pad>": counter[word] += 1 return [b"<pad>"] + [word for word, count in counter.most_common(max_size)] get_vocabulary(X_example) ``` Now we are ready to create the `TextVectorization` layer. Its constructor just saves the hyperparameters (`max_vocabulary_size` and `n_oov_buckets`). The `adapt()` method computes the vocabulary using the `get_vocabulary()` function, then it builds a `StaticVocabularyTable` (see Chapter 16 for more details). The `call()` method preprocesses the reviews to get a padded list of words for each review, then it uses the `StaticVocabularyTable` to lookup the index of each word in the vocabulary: ``` class TextVectorization(keras.layers.Layer): def __init__(self, max_vocabulary_size=1000, n_oov_buckets=100, dtype=tf.string, **kwargs): super().__init__(dtype=dtype, **kwargs) self.max_vocabulary_size = max_vocabulary_size self.n_oov_buckets = n_oov_buckets def adapt(self, data_sample): self.vocab = get_vocabulary(data_sample, self.max_vocabulary_size) words = tf.constant(self.vocab) word_ids = tf.range(len(self.vocab), dtype=tf.int64) vocab_init = tf.lookup.KeyValueTensorInitializer(words, word_ids) self.table = tf.lookup.StaticVocabularyTable(vocab_init, self.n_oov_buckets) def call(self, inputs): preprocessed_inputs = preprocess(inputs) return self.table.lookup(preprocessed_inputs) ``` Let's try it on our small `X_example` we defined earlier: ``` text_vectorization = TextVectorization() 
text_vectorization.adapt(X_example) text_vectorization(X_example) ``` Looks good! As you can see, each review was cleaned up and tokenized, then each word was encoded as its index in the vocabulary (all the 0s correspond to the `<pad>` tokens). Now let's create another `TextVectorization` layer and let's adapt it to the full IMDB training set (if the training set did not fit in RAM, we could just use a smaller sample of the training set by calling `train_set.take(500)`): ``` max_vocabulary_size = 1000 n_oov_buckets = 100 sample_review_batches = train_set.map(lambda review, label: review) sample_reviews = np.concatenate(list(sample_review_batches.as_numpy_iterator()), axis=0) text_vectorization = TextVectorization(max_vocabulary_size, n_oov_buckets, input_shape=[]) text_vectorization.adapt(sample_reviews) ``` Let's run it on the same `X_example`, just to make sure the word IDs are larger now, since the vocabulary is bigger: ``` text_vectorization(X_example) ``` Good! Now let's take a look at the first 10 words in the vocabulary: ``` text_vectorization.vocab[:10] ``` These are the most common words in the reviews. Now to build our model we will need to encode all these word IDs somehow. One approach is to create bags of words: for each review, and for each word in the vocabulary, we count the number of occurrences of that word in the review. For example: ``` simple_example = tf.constant([[1, 3, 1, 0, 0], [2, 2, 0, 0, 0]]) tf.reduce_sum(tf.one_hot(simple_example, 4), axis=1) ``` The first review has 2 times the word 0, 2 times the word 1, 0 times the word 2, and 1 time the word 3, so its bag-of-words representation is `[2, 2, 0, 1]`. Similarly, the second review has 3 times the word 0, 0 times the word 1, and so on. Let's wrap this logic in a small custom layer, and let's test it. We'll drop the counts for the word 0, since this corresponds to the `<pad>` token, which we don't care about.
``` class BagOfWords(keras.layers.Layer): def __init__(self, n_tokens, dtype=tf.int32, **kwargs): super().__init__(dtype=tf.int32, **kwargs) self.n_tokens = n_tokens def call(self, inputs): one_hot = tf.one_hot(inputs, self.n_tokens) return tf.reduce_sum(one_hot, axis=1)[:, 1:] ``` Let's test it: ``` bag_of_words = BagOfWords(n_tokens=4) bag_of_words(simple_example) ``` It works fine! Now let's create another `BagOfWord` with the right vocabulary size for our training set: ``` n_tokens = max_vocabulary_size + n_oov_buckets + 1 # add 1 for <pad> bag_of_words = BagOfWords(n_tokens) ``` We're ready to train the model! ``` model = keras.models.Sequential([ text_vectorization, bag_of_words, keras.layers.Dense(100, activation="relu"), keras.layers.Dense(1, activation="sigmoid"), ]) model.compile(loss="binary_crossentropy", optimizer="nadam", metrics=["accuracy"]) model.fit(train_set, epochs=5, validation_data=valid_set) ``` We get about 75% accuracy on the validation set after just the first epoch, but after that the model makes no progress. We will do better in Chapter 16. For now the point is just to perform efficient preprocessing using `tf.data` and Keras preprocessing layers. ### e. _Exercise: Add an `Embedding` layer and compute the mean embedding for each review, multiplied by the square root of the number of words (see Chapter 16). 
This rescaled mean embedding can then be passed to the rest of your model._ To compute the mean embedding for each review, and multiply it by the square root of the number of words in that review, we will need a little function: ``` def compute_mean_embedding(inputs): not_pad = tf.math.count_nonzero(inputs, axis=-1) n_words = tf.math.count_nonzero(not_pad, axis=-1, keepdims=True) sqrt_n_words = tf.math.sqrt(tf.cast(n_words, tf.float32)) return tf.reduce_mean(inputs, axis=1) * sqrt_n_words another_example = tf.constant([[[1., 2., 3.], [4., 5., 0.], [0., 0., 0.]], [[6., 0., 0.], [0., 0., 0.], [0., 0., 0.]]]) compute_mean_embedding(another_example) ``` Let's check that this is correct. The first review contains 2 words (the last token is a zero vector, which represents the `<pad>` token). The second review contains 1 word. So we need to compute the mean embedding for each review, and multiply the first one by the square root of 2, and the second one by the square root of 1: ``` tf.reduce_mean(another_example, axis=1) * tf.sqrt([[2.], [1.]]) ``` Perfect. Now we're ready to train our final model. It's the same as before, except we replaced the `BagOfWords` layer with an `Embedding` layer followed by a `Lambda` layer that calls the `compute_mean_embedding` layer: ``` embedding_size = 20 model = keras.models.Sequential([ text_vectorization, keras.layers.Embedding(input_dim=n_tokens, output_dim=embedding_size, mask_zero=True), # <pad> tokens => zero vectors keras.layers.Lambda(compute_mean_embedding), keras.layers.Dense(100, activation="relu"), keras.layers.Dense(1, activation="sigmoid"), ]) ``` ### f. _Exercise: Train the model and see what accuracy you get. Try to optimize your pipelines to make training as fast as possible._ ``` model.compile(loss="binary_crossentropy", optimizer="nadam", metrics=["accuracy"]) model.fit(train_set, epochs=5, validation_data=valid_set) ``` The model is not better using embeddings (but we will do better in Chapter 16). 
The pipeline looks fast enough (we optimized it earlier). ### g. _Exercise: Use TFDS to load the same dataset more easily: `tfds.load("imdb_reviews")`._ ``` import tensorflow_datasets as tfds datasets = tfds.load(name="imdb_reviews") train_set, test_set = datasets["train"], datasets["test"] for example in train_set.take(1): print(example["text"]) print(example["label"]) ```
github_jupyter
# Simple Pump Example Script This script shows basic I/O operations that can be performed with this toolkit, and gives a very brief overview of basic fault modelling. This script runs these basic operations on the simple model defined in pump_script.py. ``` #First, import the fault propagation library as well as the model #since the package is in a parallel location to examples... import sys sys.path.append('../') import fmdtools.faultsim.propagate as propagate import fmdtools.resultdisp as rd from ex_pump import * from IPython.display import HTML mdl = Pump() ``` ### Initial Model Checks Before seeing how faults propagate, it's useful to see how the model performs in the nominal state to check to see that the model has been defined correctly. Some things worth checking: - are all functions on the graph? - are the functions connected with the correct flows? - do any faults occur in the nominal state? - do all the flow states proceed as desired over time? The following code runs the model with no faults to let us do that. The inputs are: - mdl (the model we imported at the start of the script) - track (whether or not we would like to track the flows) The outputs are: - endresults (a dictionary of the degraded flows, resulting faults, and fault classification at final t) - resgraph (the results superimposed on the graph at final t) - mdlhist (the states of the functions and flows over time) ``` endresults, resgraph, mdlhist=propagate.nominal(mdl, track=True) ``` With these results, we can now plot the graph of results resgraph using: ``` rd.graph.show(resgraph) ``` As can be seen, this gives a graphical representation of the functional model with the various flows. Since all of the functions are *green*, no faults were accidentally introduced in this run.
We can further look at the state of the model using: ``` rd.plot.mdlhist(mdlhist, 'Nominal') ``` As we can see, the state of these flows does exactly what we would expect--when the switch turns on at $t=5$, the pump switches on and there is a flow of water in and out of the model. ### Tables If we want to see this data in tabular form, we can use `rd.tabulate.hist()`: ``` nominal_histtable = rd.tabulate.hist(mdlhist) nominal_histtable[:10] #only displaying 10 ``` This table is a pandas dataframe. We can save this dataframe to a .csv using `nominal_histtable.to_csv("filename.csv")` ### Propagating and Viewing Results for Individual Faults It is often necessary to see how the system reacts to individual faults. This can give us a better understanding of how the system behaves under individual faults and can let us iterate with the model better. The following code runs the model with a single fault in a single function. In this case, we are initiating a short in the 'Move Water' function at 10 hours into the system's operation. The inputs are: - mdl (the model we imported at the start of the script) - Function (the function the fault we're interested in propagating occurs in) - faultmode (the fault to initiate) - time (the time when the fault is initiated) - track (whether or not we want to track flows) The outputs are (the same as propagate.nominal): - endresults (a dictionary of the degraded flows, resulting faults, and fault classification at final t) - resgraph (the results superimposed on the graph at final t) - mdlhist (the states of the model over time) ``` endresults, resgraph, mdlhist=propagate.one_fault(mdl, 'MoveWater', 'short', time=10) ``` `rd.process.hist(mdlhist)` compares the results over time so we can see what functions and flows were degraded over time. We can then use the summary to view a list of the functions and flows that were impacted over time.
``` reshist,diff, summary = rd.process.hist(mdlhist) #summarytable = fp.makesummarytable(summary) tab = rd.tabulate.result(endresults, summary) tab ``` We can also see what happens with the graph view: ``` rd.graph.show(resgraph) ``` As can be seen, at the final t, the short causes a degraded flow of electricity as well as a fault in the Import EE function. However, we would imagine that the short would cause the water to stop moving also--so why is it green? The answer is that the results graph gives the values of the variables at the final time, which is the same both for the failed model and the nominal model, since the pump is switched "off." In this case we might be more interested in looking at how the graph looks in operation, rather than at the end. We can do that by constructing graphs based on the history of the plot. Below we use `reshist` to plot the state of the graph at a particular time. ``` rd.graph.result_from(mdl, reshist, 20, gtype = 'normal') ``` We can view an animation over time using: ``` #%matplotlib notebook #ani = rp.animate_resultsgraphs_from(mdl, reshist, 'all', faultscen='MoveWater Short (10)', gtype='normal') #HTML(ani.to_jshtml()) #saving plot (if desired--.gif does not seem to work) #ani.save('test.mp4') ``` Bipartite representations of the graphs can also be made, see: ``` pos=nx.spring_layout(mdl.bipartite) #(use this option to keep node locations consistent) rd.graph.result_from(mdl, reshist, 20, gtype = 'bipartite', scale=2, pos=pos) ``` We can also plot the states of this against the nominal run using: ``` rd.plot.mdlhist(mdlhist, 'short', time=10) ``` As you can see, the system begins nominal until the fault is injected at $t=10$. At this moment, not only are the electrical energy flows degraded, the flow of water is degraded also. However, at $t=55$ when the system is supposed to be turned off, this flow of water is no longer "degraded" because it is in the same state as the nominal system.
We can look at a table to see more precisely what happened (and export, if needed). Note that we need to give the plotting function the mode ('short') and the time for it to plot properly. Here we can see that the short dropped the voltage to zero (this was because an open circuit resulted in the Import EE function), causing the water to stop flowing. Below, we use the processed model history to show the faults and *degradation* of states over time. In this case, 1 means nominal while 0 means degraded. ``` short_histtable = rd.tabulate.hist(reshist) short_histtable ``` If we want a simpler view of just the degraded faults and flows (rather than the specific faults, etc), we can use: ``` short_deghisttable = rd.tabulate.deghist(reshist) short_deghisttable[1:20] ``` We can also look at statistics of degradation over time using: ``` short_statstable = rd.tabulate.stats(reshist) short_statstable[:20] ``` We can also look at other faults. The results below are for a blockage of the pipe. In this case we're only interested in the effect on the water going through, so only those flows are tracked. ``` endresults2, resgraph2, mdlhist2=propagate.one_fault(mdl, 'ExportWater', 'block', time=10) reshist2,diff2, summary2 = rd.process.hist(mdlhist2) restab2 = rd.tabulate.result(endresults2, summary2) restab2 rd.graph.show(resgraph2) rd.plot.mdlhist(mdlhist2, 'blockage', time=10) ``` ### Visualization of resilience metrics We can use the processed time history to now make visualizations of the resilience of the system over time.
Below we use the "makeheatmaps" function, which calculates the following metrics of interest: - "degtime," the percentage of the time the function/flow of the system was degraded - "maxdeg," the number of flow values that were degraded at a given time - "intdeg," the number of flow values that were degraded at a given time * the time degraded - "maxfaults," the maximum number of faults in each function at any given time - "maxdiff," the max distance between states of functions/flows and the nominal - "intdiff," the distance between states of functions/flows and the nominal * the time off-nominal ``` heatmaps = rd.process.heatmaps(reshist2, diff2) heatmapstable = rd.tabulate.heatmaps(heatmaps) heatmapstable ``` Note: not all of these maps will display values for all functions and flows, as shown by the NaN's in the table. I'll use "degtime" to illustrate. ``` rd.graph.show(mdl.bipartite,gtype='bipartite', heatmap=heatmaps['degtime'], scale=2, pos=pos) ``` These maps can also be plotted on the graph view, where only those for functions will be shown. Here the maximum number of faults is plotted. ``` rd.graph.show(mdl.graph, heatmap=heatmaps['maxfaults']) ``` ### Running a List of Faults Finally, to get the results of all of the single-fault scenarios defined in the model, we can run them all at once using the `single_faults()` function. Note that this will propagate faults based on the times vector put in the model, e.g. if mdl.times=[0,3,15,55], it will propagate the faults at the beginning, end, and at t=3 and t=15. This function only takes in the model mdl and outputs two similar kinds of output--resultsdict (the results in a python dictionary) and resultstab (the results in a nice tabular form). Note that the rates provided for this table do not use the opportunity vector information, instead using the assumption that the fault scenario has the rate provided over the entire simulation.
See below: ``` endclasses, mdlhists=propagate.single_faults(mdl, staged=True) simplefmea = rd.tabulate.simplefmea(endclasses) simplefmea[:5] ``` To process these results, use `rd.process.hists(mdlhists)`, which will calculate the degradation of the system over time in the model for all scenarios. ``` reshists, diffs, summaries = rd.process.hists(mdlhists) fullfmea = rd.tabulate.fullfmea(endclasses, summaries) fullfmea[:10] ``` ### Running a Fault Sampling Approach Note that this only gives accurate results for costs and fault responses--in order to get an accurate idea of *expected cost*, we instead run an Approach, which develops an underlying probability model for faults. See below. ``` app = SampleApproach(mdl) #using default parameters--note there are a variety of options for this approach endclasses, mdlhists=propagate.approach(mdl, app, staged=True) simplefmea = rd.tabulate.simplefmea(endclasses) #note the costs are the same, but the rates and expected costs are not simplefmea[:5] ``` We can now summarize the risks of faults over the operational phases and overall. ``` phasefmea = rd.tabulate.phasefmea(endclasses, app) phasefmea summfmea = rd.tabulate.summfmea(endclasses, app) summfmea ``` To visualize the results, the histories need to be processed. ``` reshists, diffs, summaries = rd.process.hists(mdlhists) ``` Now that these results have been processed, we can use them to visualize the expected resilience of the model to the fault scenarios. Here we will use the average percentage of time degraded. ``` heatmap1 = rd.process.avgdegtimeheatmap(reshists) rd.tabulate.dicttab(heatmap1) rd.graph.show(mdl.bipartite, gtype='bipartite', heatmap=heatmap1, scale=2, pos=pos) ``` Using this table (and the visualization) we would conclude that in our set of fault scenarios the Wat_1, Wat_2, and EE_1 flows degrade as often as each other. However, this does not tell us which flows are most likely to be degraded based on our simulations.
In order to determine that, rate information must be used to get the *expected* degradation of each node. ``` heatmap2 = rd.process.expdegtimeheatmap(reshists, endclasses) rd.tabulate.dicttab(heatmap2) ``` The results here are roughly the same, though. The expected degradation of the EE_1 flow is less here than the Wat_1 and Wat_2 flows. ``` rd.graph.show(mdl.bipartite,gtype='bipartite', heatmap=heatmap2, scale=2, pos=pos) ``` We can do the same looking at the maximum number of faults occurring in each scenario. ``` heatmap3= rd.process.faultsheatmap(reshists) rd.tabulate.dicttab(heatmap3) rd.graph.show(mdl.graph, heatmap=heatmap3) ``` MoveWater and ImportEE most commonly have a high number of faults in the list of scenarios. Again, we may be more interested in the expected number, however. ``` heatmap4= rd.process.expfaultsheatmap(reshists, endclasses) rd.tabulate.dicttab(heatmap4) rd.graph.show(mdl.graph, heatmap=heatmap4) ``` So even though ImportEE has faults very commonly in the set of scenarios, when weighted by the occurrence of scenarios, the MoveWater function has the most faults.
github_jupyter
# Intro In this example, we'll use a combination of Jupyter notebooks, Pandas, and Pachyderm to analyze Citi Bike sales data. ``` %matplotlib inline import os import datetime from io import StringIO import pandas as pd import python_pachyderm from python_pachyderm.service import pps_proto ``` # Insert Data First, we'll create a couple of repos and populate them: - **trips** - This repo is populated with a daily file that records the number of bicycle trips recorded by NYC's citibike bike sharing company on that particular day (data from [here](https://www.citibikenyc.com/system-data)). - **weather** - This repo is populated daily with a JSON file representing the weather forecast for that day from [forecast.io](https://darksky.net/forecast/40.7127,-74.0059/us12/en). ``` client = python_pachyderm.Client() # First create the repos/pipelines client.create_repo("trips") client.create_repo("weather") client.create_pipeline( "jupyter", transform=pps_proto.Transform( image="pachyderm/pachyderm_jupyter:2019", cmd=["python3", "merge.py"], ), input=pps_proto.Input(cross=[ pps_proto.Input(pfs=pps_proto.PFSInput(glob="/", repo="weather")), pps_proto.Input(pfs=pps_proto.PFSInput(glob="/", repo="trips")), ]) ) # Populate the input repos def insert_data(name): print("Inserting {} data...".format(name)) with client.commit(name, "master") as c: data_dir = "{}_data".format(name) python_pachyderm.put_files(client, data_dir, c, "/") return c trips_commit = insert_data("trips") weather_commit = insert_data("weather") # Wait for the commits to finish print("Waiting for commits to finish...") for commit in [client.wait_commit(c.id)[0] for c in [trips_commit, weather_commit]]: print(commit) file = client.get_file(("jupyter", "master"), "data.csv") contents = "\n".join([chunk.decode("utf8") for chunk in file]) df = pd.read_csv(StringIO(contents), names=["Date", "Precipitation", "Trips", "Sales"], index_col="Date") df.index = pd.to_datetime(df.index) df.sort_index(inplace=True) # Get just 
July 2016 df = df[datetime.datetime(year=2016, month=7, day=1):datetime.datetime(year=2016, month=7, day=31)] print(df) ``` # Visualize the sales in the context of weather Finally, we confirm our suspicions by visualizing the precipitation probabilities with the sales data: ``` ax = df.plot(secondary_y=["Precipitation"], figsize=(10, 8)) ax.set_ylabel("Sales ($), # Trips") ax.right_ax.set_ylabel("Precipitation probability") ax.right_ax.legend(loc="best") ax.legend(loc="upper left") ``` We can see that there was a probability of precipitation in NYC above 70% on both of the days in question. This is likely to be the explanation for the poor sales. Of course, we can attach our Jupyter notebook to other parts of the data to explore other unexpected behavior, develop further analyses, etc.
github_jupyter
## Amazon SageMaker Processing jobs With Amazon SageMaker Processing jobs, you can leverage a simplified, managed experience to run data pre- or post-processing and model evaluation workloads on the Amazon SageMaker platform. A processing job downloads input from Amazon Simple Storage Service (Amazon S3), then uploads outputs to Amazon S3 during or after the processing job. <img src="Processing-1.jpg"> This notebook shows how you can: 1. Run a processing job to run a scikit-learn script that cleans, pre-processes, performs feature engineering, and splits the input data into train and test sets. 2. Run a training job on the pre-processed training data to train a model 3. Run a processing job on the pre-processed test data to evaluate the trained model's performance 4. Use your own custom container to run processing jobs with your own Python libraries and dependencies. The dataset used here is the [Census-Income KDD Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29). You select features from this dataset, clean the data, and turn the data into features that the training algorithm can use to train a binary classification model, and split the data into train and test sets. The task is to predict whether rows representing census responders have an income greater than `$50,000`, or less than `$50,000`. The dataset is heavily class imbalanced, with most records being labeled as earning less than `$50,000`. After training a logistic regression model, you evaluate the model against a hold-out test dataset, and save the classification evaluation metrics, including precision, recall, and F1 score for each label, and accuracy and ROC AUC for the model. ## Data pre-processing and feature engineering To run the scikit-learn preprocessing script as a processing job, create a `SKLearnProcessor`, which lets you run scripts inside of processing jobs using the scikit-learn image provided. 
``` import boto3 import sagemaker from sagemaker import get_execution_role from sagemaker.sklearn.processing import SKLearnProcessor region = boto3.session.Session().region_name role = get_execution_role() sklearn_processor = SKLearnProcessor(framework_version='0.20.0', role=role, instance_type='ml.m5.xlarge', instance_count=1) ``` Before introducing the script you use for data cleaning, pre-processing, and feature engineering, inspect the first 20 rows of the dataset. The target is predicting the `income` category. The features from the dataset you select are `age`, `education`, `major industry code`, `class of worker`, `num persons worked for employer`, `capital gains`, `capital losses`, and `dividends from stocks`. ``` import pandas as pd input_data = 's3://sagemaker-sample-data-{}/processing/census/census-income.csv'.format(region) df = pd.read_csv(input_data, nrows=10) df.head(n=10) ``` This notebook cell writes a file `preprocessing.py`, which contains the pre-processing script. You can update the script, and rerun this cell to overwrite `preprocessing.py`. You run this as a processing job in the next cell. In this script, you * Remove duplicates and rows with conflicting data * transform the target `income` column into a column containing two labels. * transform the `age` and `num persons worked for employer` numerical columns into categorical features by binning them * scale the continuous `capital gains`, `capital losses`, and `dividends from stocks` so they're suitable for training * encode the `education`, `major industry code`, `class of worker` so they're suitable for training * split the data into training and test datasets, and saves the training features and labels and test features and labels. Our training script will use the pre-processed training features and labels to train a model, and our model evaluation script will use the trained model and pre-processed test features and labels to evaluate the model. 
``` %%writefile preprocessing.py import argparse import os import warnings import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelBinarizer, KBinsDiscretizer from sklearn.preprocessing import PolynomialFeatures from sklearn.compose import make_column_transformer from sklearn.exceptions import DataConversionWarning warnings.filterwarnings(action='ignore', category=DataConversionWarning) columns = ['age', 'education', 'major industry code', 'class of worker', 'num persons worked for employer', 'capital gains', 'capital losses', 'dividends from stocks', 'income'] class_labels = [' - 50000.', ' 50000+.'] def print_shape(df): negative_examples, positive_examples = np.bincount(df['income']) print('Data shape: {}, {} positive examples, {} negative examples'.format(df.shape, positive_examples, negative_examples)) if __name__=='__main__': parser = argparse.ArgumentParser() parser.add_argument('--train-test-split-ratio', type=float, default=0.3) args, _ = parser.parse_known_args() print('Received arguments {}'.format(args)) input_data_path = os.path.join('/opt/ml/processing/input', 'census-income.csv') print('Reading input data from {}'.format(input_data_path)) df = pd.read_csv(input_data_path) df = pd.DataFrame(data=df, columns=columns) df.dropna(inplace=True) df.drop_duplicates(inplace=True) df.replace(class_labels, [0, 1], inplace=True) negative_examples, positive_examples = np.bincount(df['income']) print('Data after cleaning: {}, {} positive examples, {} negative examples'.format(df.shape, positive_examples, negative_examples)) split_ratio = args.train_test_split_ratio print('Splitting data into train and test sets with ratio {}'.format(split_ratio)) X_train, X_test, y_train, y_test = train_test_split(df.drop('income', axis=1), df['income'], test_size=split_ratio, random_state=0) preprocess = make_column_transformer( (['age', 'num persons worked for employer'], 
KBinsDiscretizer(encode='onehot-dense', n_bins=10)), (['capital gains', 'capital losses', 'dividends from stocks'], StandardScaler()), (['education', 'major industry code', 'class of worker'], OneHotEncoder(sparse=False)) ) print('Running preprocessing and feature engineering transformations') train_features = preprocess.fit_transform(X_train) test_features = preprocess.transform(X_test) print('Train data shape after preprocessing: {}'.format(train_features.shape)) print('Test data shape after preprocessing: {}'.format(test_features.shape)) train_features_output_path = os.path.join('/opt/ml/processing/train', 'train_features.csv') train_labels_output_path = os.path.join('/opt/ml/processing/train', 'train_labels.csv') test_features_output_path = os.path.join('/opt/ml/processing/test', 'test_features.csv') test_labels_output_path = os.path.join('/opt/ml/processing/test', 'test_labels.csv') print('Saving training features to {}'.format(train_features_output_path)) pd.DataFrame(train_features).to_csv(train_features_output_path, header=False, index=False) print('Saving test features to {}'.format(test_features_output_path)) pd.DataFrame(test_features).to_csv(test_features_output_path, header=False, index=False) print('Saving training labels to {}'.format(train_labels_output_path)) y_train.to_csv(train_labels_output_path, header=False, index=False) print('Saving test labels to {}'.format(test_labels_output_path)) y_test.to_csv(test_labels_output_path, header=False, index=False) ``` Run this script as a processing job. Use the `SKLearnProcessor.run()` method. You give the `run()` method one `ProcessingInput` where the `source` is the census dataset in Amazon S3, and the `destination` is where the script reads this data from, in this case `/opt/ml/processing/input`. These local paths inside the processing container must begin with `/opt/ml/processing/`. Also give the `run()` method a `ProcessingOutput`, where the `source` is the path the script writes output data to. 
For outputs, the `destination` defaults to an S3 bucket that the Amazon SageMaker Python SDK creates for you, following the format `s3://sagemaker-<region>-<account_id>/<processing_job_name>/output/<output_name>/`.
``` from sagemaker.sklearn.estimator import SKLearn sklearn = SKLearn( entry_point='train.py', train_instance_type="ml.m5.xlarge", role=role) ``` The training script `train.py` trains a logistic regression model on the training data, and saves the model to the `/opt/ml/model` directory, which Amazon SageMaker tars and uploads into a `model.tar.gz` file into S3 at the end of the training job. ``` %%writefile train.py import os import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.externals import joblib if __name__=="__main__": training_data_directory = '/opt/ml/input/data/train' train_features_data = os.path.join(training_data_directory, 'train_features.csv') train_labels_data = os.path.join(training_data_directory, 'train_labels.csv') print('Reading input data') X_train = pd.read_csv(train_features_data, header=None) y_train = pd.read_csv(train_labels_data, header=None) model = LogisticRegression(class_weight='balanced', solver='lbfgs') print('Training LR model') model.fit(X_train, y_train) model_output_directory = os.path.join('/opt/ml/model', "model.joblib") print('Saving model to {}'.format(model_output_directory)) joblib.dump(model, model_output_directory) ``` Run the training job using `train.py` on the preprocessed training data. ``` sklearn.fit({'train': preprocessed_training_data}) training_job_description = sklearn.jobs[-1].describe() model_data_s3_uri = '{}{}/{}'.format( training_job_description['OutputDataConfig']['S3OutputPath'], training_job_description['TrainingJobName'], 'output/model.tar.gz') ``` ## Model Evaluation `evaluation.py` is the model evaluation script. Since the script also runs using scikit-learn as a dependency, run this using the `SKLearnProcessor` you created previously. This script takes the trained model and the test dataset as input, and produces a JSON file containing classification evaluation metrics, including precision, recall, and F1 score for each label, and accuracy and ROC AUC for the model. 
``` %%writefile evaluation.py import json import os import tarfile import pandas as pd from sklearn.externals import joblib from sklearn.metrics import classification_report, roc_auc_score, accuracy_score if __name__=="__main__": model_path = os.path.join('/opt/ml/processing/model', 'model.tar.gz') print('Extracting model from path: {}'.format(model_path)) with tarfile.open(model_path) as tar: tar.extractall(path='.') print('Loading model') model = joblib.load('model.joblib') print('Loading test input data') test_features_data = os.path.join('/opt/ml/processing/test', 'test_features.csv') test_labels_data = os.path.join('/opt/ml/processing/test', 'test_labels.csv') X_test = pd.read_csv(test_features_data, header=None) y_test = pd.read_csv(test_labels_data, header=None) predictions = model.predict(X_test) print('Creating classification evaluation report') report_dict = classification_report(y_test, predictions, output_dict=True) report_dict['accuracy'] = accuracy_score(y_test, predictions) report_dict['roc_auc'] = roc_auc_score(y_test, predictions) print('Classification report:\n{}'.format(report_dict)) evaluation_output_path = os.path.join('/opt/ml/processing/evaluation', 'evaluation.json') print('Saving classification report to {}'.format(evaluation_output_path)) with open(evaluation_output_path, 'w') as f: f.write(json.dumps(report_dict)) import json from sagemaker.s3 import S3Downloader sklearn_processor.run(code='evaluation.py', inputs=[ProcessingInput( source=model_data_s3_uri, destination='/opt/ml/processing/model'), ProcessingInput( source=preprocessed_test_data, destination='/opt/ml/processing/test')], outputs=[ProcessingOutput(output_name='evaluation', source='/opt/ml/processing/evaluation')] ) evaluation_job_description = sklearn_processor.jobs[-1].describe() ``` Now retrieve the file `evaluation.json` from Amazon S3, which contains the evaluation report. 
``` evaluation_output_config = evaluation_job_description['ProcessingOutputConfig'] for output in evaluation_output_config['Outputs']: if output['OutputName'] == 'evaluation': evaluation_s3_uri = output['S3Output']['S3Uri'] + '/evaluation.json' break evaluation_output = S3Downloader.read_file(evaluation_s3_uri) evaluation_output_dict = json.loads(evaluation_output) print(json.dumps(evaluation_output_dict, sort_keys=True, indent=4)) ``` ## Running processing jobs with your own dependencies Above, you used a processing container that has scikit-learn installed, but you can run your own processing container in your processing job as well, and still provide a script to run within your processing container. Below, you walk through how to create a processing container, and how to use a `ScriptProcessor` to run your own code within a container. Create a scikit-learn container and run a processing job using the same `preprocessing.py` script you used above. You can provide your own dependencies inside this container to run your processing script with. ``` !mkdir docker ``` This is the Dockerfile to create the processing container. Install `pandas` and `scikit-learn` into it. You can install your own dependencies. ``` %%writefile docker/Dockerfile FROM python:3.7-slim-buster RUN pip3 install pandas==0.25.3 scikit-learn==0.21.3 ENV PYTHONUNBUFFERED=TRUE ENTRYPOINT ["python3"] ``` This block of code builds the container using the `docker` command, creates an Amazon Elastic Container Registry (Amazon ECR) repository, and pushes the image to Amazon ECR. 
``` import boto3 account_id = boto3.client('sts').get_caller_identity().get('Account') ecr_repository = 'sagemaker-processing-container' tag = ':latest' processing_repository_uri = '{}.dkr.ecr.{}.amazonaws.com/{}'.format(account_id, region, ecr_repository + tag) # Create ECR repository and push docker image !docker build -t $ecr_repository docker !$(aws ecr get-login --region $region --registry-ids $account_id --no-include-email) !aws ecr create-repository --repository-name $ecr_repository !docker tag {ecr_repository + tag} $processing_repository_uri !docker push $processing_repository_uri ``` The `ScriptProcessor` class lets you run a command inside this container, which you can use to run your own script. ``` from sagemaker.processing import ScriptProcessor script_processor = ScriptProcessor(command=['python3'], image_uri=processing_repository_uri, role=role, instance_count=1, instance_type='ml.m5.xlarge') ``` Run the same `preprocessing.py` script you ran above, but now, this code is running inside of the Docker container you built in this notebook, not the scikit-learn image maintained by Amazon SageMaker. You can add the dependencies to the Docker image, and run your own pre-processing, feature-engineering, and model evaluation scripts inside of this container. ``` script_processor.run(code='preprocessing.py', inputs=[ProcessingInput( source=input_data, destination='/opt/ml/processing/input')], outputs=[ProcessingOutput(output_name='train_data', source='/opt/ml/processing/train'), ProcessingOutput(output_name='test_data', source='/opt/ml/processing/test')], arguments=['--train-test-split-ratio', '0.2'] ) script_processor_job_description = script_processor.jobs[-1].describe() print(script_processor_job_description) ```
github_jupyter
# Exercise 7.1 Flowers with data augmentation We've covered enough to create a model from scratch. Let's apply what we've learned to the `flowers` dataset. First we'll want to import all the packages needed. Notice that this time, layers are imported explicitly so we don't have to keep appending `tf.keras.layers.` when defining our model. ``` import os import numpy as np import glob import shutil import matplotlib.pyplot as plt # Import layers explicitly to keep our code compact import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras.preprocessing.image import ImageDataGenerator ``` The dataset can be downloaded with the code below. ``` _URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz" zip_file = tf.keras.utils.get_file(origin=_URL, fname="flower_photos.tgz", extract=True) base_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos') ``` The dataset we downloaded contains images of 5 types of flowers: 1. Rose 2. Daisy 3. Dandelion 4. Sunflowers 5. Tulips So, let's create the labels for these 5 classes: ``` classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips'] ``` Also, the dataset we have downloaded has following directory structure. n <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>flower_photos</b> |__ <b>daisy</b> |__ <b>dandelion</b> |__ <b>roses</b> |__ <b>sunflowers</b> |__ <b>tulips</b> </pre> **As you can see there are no folders containing training and validation data. Therefore, we will have to create our own training and validation set. Let's write some code that will do this.** * Create a `train` and a `val` folder each containing 5 folders (one for each type of flower). 
* Move the images from the original folders to these new folders such that 80% of the images go to the training set and 20% of the images go into the validation set. * In the end our directory will have the following structure: <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>flower_photos</b> |__ <b>daisy</b> |__ <b>dandelion</b> |__ <b>roses</b> |__ <b>sunflowers</b> |__ <b>tulips</b> |__ <b>train</b> |______ <b>daisy</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>dandelion</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>roses</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>sunflowers</b>: [1.jpg, 2.jpg, 3.jpg ....] |______ <b>tulips</b>: [1.jpg, 2.jpg, 3.jpg ....] |__ <b>val</b> |______ <b>daisy</b>: [507.jpg, 508.jpg, 509.jpg ....] |______ <b>dandelion</b>: [719.jpg, 720.jpg, 721.jpg ....] |______ <b>roses</b>: [514.jpg, 515.jpg, 516.jpg ....] |______ <b>sunflowers</b>: [560.jpg, 561.jpg, 562.jpg .....] |______ <b>tulips</b>: [640.jpg, 641.jpg, 642.jpg ....] </pre> ``` # TODO - Build a training and validation dataset as specified above ``` **With the dataset downloaded, we're now ready to build and train our model, you'll need to:** * Create image generators for your trainind and validation dataset, apply all previously mentioned transformation: * Flip * Rotation * Zoom * Define your model * Compile the model * Print out a summary of your model (optional) * Train the model on the created image generators * Plot the output statistics ``` # TODO - Create a model as specified above ``` ## Exercise 7.1 Solution The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E7.1.ipynb)
github_jupyter
### *** Name: [Insert Your Name Here]*** # Prelab 3 - Return Statements and Plotting Basics ## Prelab 3 Contents 1. Python Return Statements * Functions Without Returns * Return Statements as Code Breaks * Return Statements for Assigned Output 2. Basic Python Plotting ``` import numpy as np ``` # 1. Python Return Statements ## 1.1 Functions Without Returns Thus far we've not paid much attention to function return statements. As many of you have noticed, you don't HAVE to have a return statement at the end of a function. For example: ``` def dummy_func(x=0, y=0): """This function takes two optional arguments (x and y) with default values of zero and adds them together to create a new variable z, then prints it.""" z=x+y z dummy_func() ``` There are a few things to note here. First that the function did not print out z, even though in an ordinary notebook cell (without a function definition) typing the variable name alone usually prints the value stored in it. That's not the case with functions because functions will only return to output what you tell them to. If you want to print the value of something within a function, then you must use a print statement. For example: ``` def dummy_func(x=0, y=0): """This function takes two optional arguments (x and y) with default values of zero and adds them together to create a new variable z, then prints it.""" print("x is", x) print("y is", y) z=x+y print("z is", z) dummy_func(2,3) ``` This brings up another subtelty that it's worth reemphasizing here - optional arguments can be specified in a function call in any order ***but*** if you don't specify which variable is which in the function call, it will assume that you are specifying them in the order that you defined them in the function. For example, as I've written the function, you should be able to specify none, one or two arguments in any order. 
Make sure you understand what each of the following cells is doing
Default is C, but F and K are also options. """ #ask the user to enter a temperature input_temp = input("enter a temperature (set system keyword if not Celsius):") # default input type is string. Convert it to a Float input_temp=float(input_temp) #Convert all input temperatures to Celsius if system == "C": input_temp = input_temp elif system == "F": input_temp = (input_temp-32)*5/9 elif system == "K": input_temp = input_temp - 273 else: #if system keyword is not C, F or K, exit without any output and print a warning print("unrecognized system - please specify C, F or K") return #Convert and print the temperatures print('Temperature in Celsius is ', str(input_temp)) temp_f = input_temp*9/5 + 32 print('Temperature in Farenheit is ', str(temp_f)) temp_k = input_temp + 273 print('Temperature in Kelvin is ', str(temp_k)) return temp_convert(system="B") ``` In the example above, a return statement was used mid-function in order to break out of it when a bad temperature system is specified. This returns the prompt to the user BEFORE reaching the print functions below, which are bound to fail. In this way, the return statement acts a bit like a "break" statement but break is only useable inside of loops (for, while) and it does not exit the entire function, but only the loop. For example, consider the following function (also from Lab #2 solutions). ``` def order_please(names): orders='' foods='' names.sort(key=len) for a in names: negg = input(a +": How many eggs would you like?") spam = input(a +": Would you like Spam (yes or no)?") if spam == "yes": spam = "" spam_print = "SPAM!" elif spam == "no": spam_print = "NO SPAM!" else: print('unrecognized answer. 
Please specify yes or no') break order =a +" wants "+negg + " eggs, and " + spam + " Spam" food = a+": "+"egg "*int(negg)+"and "+spam_print orders = orders+'\n\n'+order foods = foods+'\n'+food print(orders) print(foods) ``` Like the return statement in the temperature conversion function, break is used here to handle unrecognized input, but here it functions only to end the for loop and not to exit the function. It will still reach the two print statements at the bottom and print any orders that were collected before the bad input. To verify this, exectute the cell below. For Rowan's order, enter 2 for eggs and no for spam. For Tasha's order, enter 3 eggs and yuck for spam. At the end, the function will still print Rowan's order, and skip Tasha's. ``` order_please(['Rowan','Tasha']) ``` Note though that break will break you out of the for loop entirely. If you enter an unrecognized answer for spam in Rowan's order, it will not then ask Tasha for hers. Verify this by reexecuting the cell above and entering a bad value for spam in Rowan's order. Note too that entering return in place of break in the function above would have stopped the code entirely at that point, and would not have printed the good order before exiting. Try swapping it out in the function definition to verify this. ## 1.3 Return Statements for Assigned Output Return statements are not only useful as code breaks, however. Their main purpose is to return calculated values, arrays, etc. to the user so that they can be referenced in future code cells. So far our code has mostly involved printing things, but sometimes we want to use and manupulate the output of a function and so it needs to be passed back to the user. 
For example: ``` def dummy_func(x=0, y=0): """This function takes two optional arguments (x and y) with default values of zero and adds them together to create a new variable z, then prints it.""" print("x is", x) print("y is", y) z=x+y print("z is", z) return z dummy_func() ``` Note in executing the cell above, you now have output (z in this case). Nice, but stil not that useful since it just tells you what it is. The function returns z as output, but does not store anything in a variable called z. So even though you defined z within the function and returned it, python will not recognize the variable. You can see this by executing the cell below, which will return an error. ``` z ``` This is an important higher-order thing to note about functions - variables only have meaning ***within*** them, not outside them. If I want the function to return whatever I've told it to return (z) ***and*** store it in a variable, I have to tell it so via the following syntax: ``` z = dummy_func() ``` This time, I've told python that it should take the output of dummy_func and store it in the variable z. Functions can return arbitrary numbers of things as well ``` def dummy_func(x=0, y=0): """This function takes two optional arguments (x and y) with default values of zero and adds them together to create a new variable z, then prints it.""" print("x is", x) print("y is", y) z=x+y print("z is", z) return x, y, z dummy_func() ``` And when you want to assign them to variables, you use a similar syntax, though it seems a bit funny. ``` x, y, z = dummy_func() x y z ``` Note that when you define a function with multiple return variables and you assign those returned variables into stored variables with = you ***must have an equal number of assigned variables as returned variables***. For example, the following will return an error. 
``` x, y = dummy_func() ``` <div class=hw> ### Exercise 1 --------------------- Write a function that does the following: 1) Has one required numerical input - the distance to a galaxy 2) Has one optional input - a string specifying the units for that distance, with a default value of "Mpc" 3) If the unit specified is "lyr" (light years), "pc" (parsecs) or "km" (kilometers), converts the value of the numerical input to Megaparsecs 4) If the unit is not one of the above or Mpc, prints a reasonable error message and exits 5) Uses Hubble's law, one version of which is shown below, to compute the redshift (z) of the galaxy. c is the speed of light, d is the distance to the galaxy in Mpc, and $H_0$ is Hubble's constant, 73.5km/sec/Mpc. $$z=\frac{dH_0}{c}$$ 6) If the computed redshift is greater than 1, print: "The linear form of hubble's law does not provide an accurate distance estimate beyond z=1" and exit 7) Returns the redshift as a variable ``` #function definition #test statement ``` # 2. Basic Python Plotting You've already made some plots for this course, however we have not yet manipulated their appearance in any way. For example, in Prelab #2, you made plots of the sine function that looked like this: ![](sin.png "sin plot") As you can see, python (unlike many languages) generally does a lovely job with coloring, line thickness etc. with simple plot commands. It does not, however, add titles, axis labels, legends, etc. and these are ***very*** important things to include. From now on, any plots that you make in Labs or homeworks should always, at a minimum, include: axis labels (including units), a plot title, and a legend in any case where there's more than one line on the same plot. There are many useful optional inputs to the plot command that allow you to tweak the appearance of the plot, including: linestyle, color, placement of the legend, etc. So let's learn the basics by plotting some things. 
So far we have been using the command %pylab inline to allow jupyter to insert inline plots in our notebooks. Now we are going to do this more properly by importing the matplotlib library's plotting module pyplot an then telling the notebook that you still want it to display any plots inline (inside the notebook) with the magic function %matplotlib inline with the following lines ``` import matplotlib.pyplot as plt %matplotlib inline ``` This gives you access to all of pyplot's functions in the usual way of calling modules (plt.functionname). For example: ``` x=np.arange(-10,10,0.01) y=x**2 plt.plot(x, y) ``` Here are some especially useful pyplot functions, called with plt.functionname(input(s)): * xlim and ylim set the range of the x and y axes, respectively and they have two required inputs - a minimum and maximum for the range. By default, pylab will set axis ranges to encompass all of the values in the x and y arrays that you specified in the plot call, but there are many cases where you might want to "zoom in" on certain regions of the plot. * xlabel and ylabel set the labels for the x and y axes, respectively and have a required string input. * title sets the title of the plot and also requires a string input. Line properties are controlled with optional keywords to the plot function, namely the commands *color*, *linestyle* and *linewidth*. The first two have required string arguments (lists of the options are available here), and the third (linewidth) requires a numerical argument in multiples of the default linewidth (1). These can be specified either in the call to the function or separately before or after the plot call. 
See the cell below for an example of all of these at play ``` plt.xlim(-5,5) plt.ylim(0,20) plt.xlabel("the independent variable (no units)") plt.ylabel("the dependent variable (no units)") plt.title("The Quadratic Function") plt.plot(x, y, color='red',linestyle='--', linewidth=2.5) ``` Note these functions can be set before or after the plot command as long as they're within the same cell. ``` plt.plot(x, y, color='red',linestyle='--', linewidth=2.5) plt.xlim(-5,5) plt.ylim(0,20) plt.xlabel("the independent variable (no units)") plt.ylabel("the dependent variable (no units)") plt.title("The Quadratic Function") ``` As you have already encountered in your labs and homeworks, it is often useful to overplot functions on top of one another, which we do with multiple plot commands. In this case, what you need to make a quality graphic is a legend to label which is which. For example, let's plot the cubic function on top of the quadratic. In doing so below, note that we don't actually **have** to specify a separate variable, but that the arguments to a plot command can be combinations of variables ``` plt.plot(x,x**2) plt.plot(x,x**3) ``` To add a legend to a plot, you use the pyplot function legend. Perhaps the simplest way to use this function is to assign labels to each line plot with the label keyword, as below, and then call legend with no input. Note that the label keyword requires string input and that you can use LaTeX syntax within those labels ``` plt.plot(x,x**2, label='$x^2$') plt.plot(x,x**3, label='$x^3$') plt.legend() ``` As you can see above, the default for a legend is to place it at the upper right of the plot, even when it obscures the underlying lines and to draw a solid line around it (bounding box). Generally speaking, bounding boxes are rather ugly, so you should nearly always (unless you really want to set the legend apart) use the optional Boolean (True or False) keyword "frameon" to turn this off. 
Legend also takes the optional keyword loc to set the location of the legend. Loc should be a string, and you can see the full list of options by accessing help for the legend function by hitting shift+tab+tab inside of the plt.legend parentheses below. ``` plt.plot(x,x**2, label='$x^2$') plt.plot(x,x**3, label='$x^3$') plt.legend(loc="lower right", frameon=False) ``` <div class=hw> ### Exercise 2 --------------------- Make a plot that meets the following criteria: * plots $x^a$ for all integer values of a between 0 and 5 * labels the x and y axes appropriately * includes an appropriate descriptive title * zooms in on a region of the plot where you find the differences between the functions to be the most interesting * sets linestyles and colors to be distinct for each function * makes a legend without a bounding box at an appropriate location ``` # plotting code here from IPython.core.display import HTML def css_styling(): styles = open("../../custom.css", "r").read() return HTML(styles) css_styling() ```
github_jupyter
#### Chapter 8 in the 2nd edition, Chapter 9 in the 1st edition ##### 8.1* The nonplanning method looks particularly poor in Figure 8.4 because it is a one-step method; a method using multi-step bootstrapping would do better. Do you think one of the multi-step bootstrapping methods from Chapter 7 could do as well as the Dyna method? Explain why or why not. The multi-step bootstrapping method could possibly do as well as the Dyna method with one-step updates, since both multi-step bootstrapping and Dyna algorithms assign values to all states previously visited in order to speed up learning. It's not clear to me which one would do better without an experiment. ##### 8.2 Why did the Dyna agent with exploration bonus, Dyna-Q+, perform better in the first phase as well as in the second phase of the blocking and shortcut experiments? First phase: Dyna-Q+ performs better in the first phase than Dyna-Q because of the footnote description and since Dyna-Q+ explores more than Dyna-Q due to the augmented reward (thus more likely to find optimal path): > "The Dyna-Q+ agent was changed in two other ways as well. First, actions that had never been tried before from a state were allowed to be considered in the planning step (f) of Figure 8.2. Second, the initial model for such actions was that they would lead back to the same state with a reward of zero." Second phase: Dyna-Q+ performs better in the second phase since the new shorter path is learned under Dyna-Q+ but not under Dyna-Q. Dyna-Q+ augments the reward for unvisited states by $\kappa \sqrt{\tau}$, whereas during planning Dyna-Q discounts the value for unvisited states if they were not previously optimal. This forces Dyna-Q+ to keep exploring and eventually find the shorter path, while Dyna-Q has no reason to believe that a shorter path exists during planning. ##### 8.3 Careful inspection of Figure 8.6 reveals that the difference between Dyna-Q+ and Dyna-Q narrowed slightly over the first part of the experiment. 
What is the reason for this? I suspect that the narrowing in Figure 8.6 occurs since Dyna-Q learns episodes with $\epsilon$-greedy actions, and thus eventually finds the optimal path that Dyna-Q+ found a lot quicker due to Dyna-Q+ increased exploration. ##### 8.4 The exploration bonus described above actually changes the estimated values of states and actions. Is this necessary? Suppose the bonus κ√τ was used not in backups, but solely in action selection. That is, suppose the action selected was always that for which Q(S, a) + $\kappa\sqrt{\tau_{S_a}}$ was maximal. Carry out a gridworld experiment that tests and illustrates the strengths and weaknesses of this alternate approach. ``` # Maze code obtained from # https://github.com/ShangtongZhang/reinforcement-learning-an-introduction/blob/master/chapter08/VariousMaze.py # Additional requirements from the problem statement were added to re-create Figure 8.6 # --------------------------------------------------------------------# ####################################################################### # Copyright (C) 2016 Shangtong Zhang(zhangshangtong.cpp@gmail.com) # # Permission given to modify the code as long as you keep this # # declaration at the top # ####################################################################### import numpy as np import matplotlib.pyplot as plt %matplotlib inline class Maze: def __init__(self): # maze width self.WORLD_WIDTH = 9 # maze height self.WORLD_HEIGHT = 6 # all possible actions self.ACTION_UP = 0 self.ACTION_DOWN = 1 self.ACTION_LEFT = 2 self.ACTION_RIGHT = 3 self.actions = [self.ACTION_UP, self.ACTION_DOWN, self.ACTION_LEFT, self.ACTION_RIGHT] # start state self.START_STATE = [2, 0] # goal state self.GOAL_STATES = [[0, 8]] # all obstacles self.obstacles = [[1, 2], [2, 2], [3, 2], [0, 7], [1, 7], [2, 7], [4, 5]] self.oldObstacles = None self.newObstacles = None # time to change obstacles self.changingPoint = None # initial state action pair values self.stateActionValues = 
np.zeros((self.WORLD_HEIGHT, self.WORLD_WIDTH, len(self.actions))) # max steps self.maxSteps = float('inf') # track the resolution for this maze self.resolution = 1 def takeAction(self, state, action): x, y = state if action == self.ACTION_UP: x = max(x - 1, 0) elif action == self.ACTION_DOWN: x = min(x + 1, self.WORLD_HEIGHT - 1) elif action == self.ACTION_LEFT: y = max(y - 1, 0) elif action == self.ACTION_RIGHT: y = min(y + 1, self.WORLD_WIDTH - 1) if [x, y] in self.obstacles: x, y = state if [x, y] in self.GOAL_STATES: reward = 1.0 else: reward = 0.0 return [x, y], reward class PlanningModel(object): def __init__(self, environment, time_weight=0): self.time = 0 self.model = dict() self.time_weight = time_weight self.environment = environment self.rand = np.random.RandomState(0) def save_experience(self, current_state, action, new_state, reward): """ Deterministic environment model """ self.time += 1 self.create_state_action_value(current_state, action) self.model[tuple(current_state)][action] = [list(new_state), reward, self.time] def sample(self): stateIndex = self.rand.choice(range(0, len(self.model.keys()))) state = list(self.model.keys())[stateIndex] actionIndex = self.rand.choice(range(0, len(self.model[state].keys()))) action = list(self.model[state].keys())[actionIndex] newState, reward, time = self.model[state][action] # adjust reward with elapsed time since last vist time_reward = self.time_weight * np.sqrt(self.time - time) return list(state), action, list(newState), reward, time_reward def get_time_reward(self, state, action): self.create_state_action_value(state, action=None) newState, reward, time = self.model[tuple(state)][action] return self.time_weight * np.sqrt(max(0, self.time - time)) def create_state_action_value(self, current_state, action): if tuple(current_state) not in self.model: self.model[tuple(current_state)] = dict() # Actions that had never been tried before from a state # were allowed to be considered in the planning step for 
action_ in self.environment.actions: if action_ != action: # Such actions would lead back to the same state with a reward of zero # Notice that the minimum time stamp is 1 instead of 0 self.model[tuple(current_state)][action_] = [list(current_state), 0, 1] class DynaAgent(object): def __init__(self, gamma=0.95, epsilon=0.1, alpha=0.1, planning_steps=50): # discount factor self.gamma = gamma # probability for exploration self.epsilon = epsilon # step size self.alpha = alpha # planning steps self.planning_steps = planning_steps def epsilon_greedy_action(self, state, state_action_values, environment): if np.random.binomial(1, self.epsilon) == 1: return np.random.choice(environment.actions) return np.argmax(state_action_values[state[0], state[1], :]) def epsilon_greedy_action_with_time_reward(self, state, state_action_values, environment, planning_model): if np.random.binomial(1, self.epsilon) == 1: return np.random.choice(environment.actions) Q = [planning_model.get_time_reward(state, a) for a in environment.actions] Q += state_action_values[state[0], state[1], :] return np.argmax(Q) def play_episode(self, state_action_values, planning_model, environment, action_with_time_reward=False): steps = 0 current_state = environment.START_STATE while current_state not in environment.GOAL_STATES: steps += 1 if action_with_time_reward: action = self.epsilon_greedy_action_with_time_reward( current_state, state_action_values, environment, planning_model ) else: action = self.epsilon_greedy_action( current_state, state_action_values, environment ) new_state, reward = environment.takeAction(current_state, action) # Q-Learning update state_action_values[current_state[0], current_state[1], action] += \ self.alpha * (reward + self.gamma * np.max( state_action_values[new_state[0], new_state[1], :]) - state_action_values[current_state[0], current_state[1], action]) # feed the model with experience planning_model.save_experience(current_state, action, new_state, reward) # sample 
experience from the model for t in range(0, self.planning_steps): state_sample, action_sample, new_state_sample, reward_sample, time_reward = planning_model.sample() if not action_with_time_reward: # only use the time_reward update if we don't select the actions # using the time rewards reward_sample += time_reward state_action_values[state_sample[0], state_sample[1], action_sample] += \ self.alpha * (reward_sample + self.gamma * np.max( state_action_values[new_state_sample[0], new_state_sample[1], :]) - state_action_values[state_sample[0], state_sample[1], action_sample]) current_state = new_state # check whether it has exceeded the step limit if steps > environment.maxSteps: break return steps def changingMazeExperiment(environment, planning_model, agent, num_runs=1, with_time_reward=False): """ Runs an experiment given a changing environmnet, with a planning agent """ # set up max steps maxSteps = environment.maxSteps # track the cumulative rewards # rewards = np.zeros((maxSteps)) rewards_ = np.zeros((num_runs, maxSteps)) for run in range(0, num_runs): # print('Run ', run) # initialize state action values stateActionValues = environment.stateActionValues # set old obstacles for the maze environment.obstacles = environment.oldObstacles steps = 0 lastSteps = steps while steps < maxSteps: # play for an episode steps += agent.play_episode( stateActionValues, planning_model, environment, action_with_time_reward=with_time_reward ) # update cumulative rewards steps_ = min(steps, maxSteps - 1) rewards_[run, lastSteps: steps_] = rewards_[run, lastSteps] rewards_[run, steps_] = rewards_[run, lastSteps] + 1 lastSteps = steps if steps > environment.changingPoint: # change the obstacles environment.obstacles = environment.newObstacles # averaging over runs rewards = np.mean(rewards_, axis=0) return rewards # set up a shortcut maze instance shortcutMaze = Maze() shortcutMaze.START_STATE = [5, 3] shortcutMaze.GOAL_STATES = [[0, 8]] shortcutMaze.oldObstacles = [[3, i] for i in 
range(1, 9)] # new obstacles will have a shorter path shortcutMaze.newObstacles = [[3, i] for i in range(1, 8)] # step limit shortcutMaze.maxSteps = 6000 shortcutMaze.changingPoint = 3000 # setup up the planning model and Agent agent = DynaAgent(planning_steps=50, alpha=0.7) agent2 = DynaAgent(planning_steps=0, alpha=0.7) dynaq_model = PlanningModel(environment=shortcutMaze, time_weight=0) dynaq_plus_model = PlanningModel(environment=shortcutMaze, time_weight=5e-4) # run experiments dynaq_rewards = changingMazeExperiment( shortcutMaze, planning_model=dynaq_model, agent=agent, num_runs=2 ) dynaq_plus_rewards = changingMazeExperiment( shortcutMaze, planning_model=dynaq_plus_model, agent=agent, num_runs=2 ) dynaq_plus_84_rewards = changingMazeExperiment( shortcutMaze, planning_model=dynaq_plus_model, agent=agent2, num_runs=2, with_time_reward=True ) plt.plot(dynaq_rewards, label='dyna-Q') plt.plot(dynaq_plus_rewards, label='dyna-Q+') plt.plot(dynaq_plus_84_rewards, label='dyna-Q-8.4') plt.xlabel('time steps') plt.ylabel('cumulative reward') plt.legend(loc='best') ``` For the short-cut maze problem, we see that the 8.4 method (using $\kappa \sqrt{\tau}$ for action selection only and not for planning) results in similar performance as dyna-Q if the number of planning steps is large (as the planning steps are identical). If the number of planning steps is close to 0, the 8.4 method does better than dyna-Q because of the exploration step in the action selection. Overall, the 8.4 method in the problem doesn't seem to add much value to dyna-Q, since the action selection is not as exploratory as dyna-Q+ for the maze shortcut. ##### 9.5 The analysis above assumed that all of the possible next states were equally likely to occur. Suppose instead that the distribution was highly skewed, that some of the states were much more likely to occur than most. Would this strengthen or weaken the case for sample backups over full backups? Support your answer. 
If the distribution of next states was highly skewed, full backups would be better on states more likely to occur rather than sample backups on several states, some of which were unlikely to occur. ##### 9.6** Some of the graphs in Figure 9.14 seem to be scalloped in their early portions, particularly the upper graph for $b=1$ and the uniform distribution. Why do you think this is? What aspects of the data shown support your hypothesis? I'm not sure at all why this is happening and wasn't able to reproduce it for some reason. ##### 9.7 If you have access to a moderately large computer, try replicating the experiment whose results are shown in the lower part of Figure 9.14. Then try the same experiment but with b=3. Discuss the meaning of your results. ``` import numpy as np from matplotlib import pyplot as plt %matplotlib inline class TrajectorySampling(object): def __init__(self, n=0, k=0, gamma=0.9, alpha=0.1, epsilon=0.1, max_num_tasks=2000): """ Args: n (int): Number of states k (int): Branching factor for each state gamma (float): discount factor epsilon (float): probability of choosing a non-greedy action alpha (float): probability of landing in terminal state """ self.gamma = gamma self.alpha = alpha self.epsilon = epsilon self.max_num_tasks = max_num_tasks def setup(self, n_arg, k_arg): """ Setup the reward, Q-value, and policy arrays """ self.n = n_arg self.k = k_arg self.successor = np.zeros((self.n, 2, self.k)).astype(int) self.R = np.zeros((self.n, self.k + 1, 2)) self.Q = np.zeros((self.n, 2)) self.policy = np.zeros((self.n)).astype(int) self.V = np.zeros((self.n)) self.randomness = np.zeros((self.max_num_tasks)).astype(int) for t in range(self.max_num_tasks): self.randomness[t] = np.random.randint(low=0, high=self.max_num_tasks * 9999) def init(self, task_num): """ Initialize the Q, R, and successor array. 
The successor array determines the next state given an action in a given state """ np.random.seed(seed=self.randomness[task_num]) self.Q = np.zeros_like(self.Q) self.R = np.random.rand(*self.R.shape) for s in range(self.n): for a in range(2): for i, sp in enumerate(np.random.choice(self.n, self.k)): self.successor[s, a, i] = sp def next_state(self, s, a): if np.random.rand() > self.alpha: k = np.random.randint(0, high=self.k) return self.successor[s, a, k] return self.n def full_backup(self, s, a): backup = 0 for i in range(self.k): sp = self.successor[s, a, i] backup += self.R[s, i, a] + self.gamma * np.max(self.Q[sp, :]) backup = backup * (1 - self.alpha) / self.k + self.alpha * self.R[s, self.k, a] return backup def run_sweeps(self, n_arg, k_arg, num_runs, num_sweeps, sweeps_per_measurement): self.setup(n_arg, k_arg) backups_per_measurement = (sweeps_per_measurement * 2 * self.n) backups_per_sweep = self.n * 2 num_backups = num_sweeps * backups_per_sweep num_measurements = num_backups // backups_per_measurement perf = np.zeros((num_runs, num_measurements)) for run in range(num_runs): self.init(run) backups = 0 for ns in range(num_sweeps): for s in range(self.n): for a in range(2): if backups % backups_per_measurement == 0: perf[run, backups // backups_per_measurement] = self.measure_performance() self.Q[s, a] = self.full_backup(s, a) backups += 1 return perf def run_trajectories(self, n_arg, k_arg, num_runs, num_sweeps, sweeps_per_measurement): self.setup(n_arg, k_arg) backups_per_measurement = (sweeps_per_measurement * 2 * self.n) backups_per_sweep = self.n * 2 num_backups = num_sweeps * backups_per_sweep num_measurements = num_backups // backups_per_measurement perf = np.zeros((num_runs, num_measurements)) for run in range(num_runs): self.init(run) backups = 0 state = 0 while state != self.n: action = np.random.choice(2) if np.random.rand() < self.epsilon\ else np.argmax(self.Q[state, :]) nxt_state = self.next_state(state, action) if backups % 
backups_per_measurement == 0: perf[run, backups // backups_per_measurement] += self.measure_performance() self.Q[state, action] = self.full_backup(state, action) backups += 1 if backups == num_backups: break state = nxt_state if state == self.n: state = 0 return perf def measure_performance(self): for s in range(self.n): self.V[s] = 0.0 self.policy[s] = np.argmax(self.Q[s, :]) delta = 1 while delta > 0.001: delta = 0 for s in range(self.n): old_V = self.V[s] self.V[s] = self.full_backup(s, self.policy[s]) delta += np.sum(np.abs(old_V - self.V[s])) return self.V[0] ts = TrajectorySampling() sweep_results = ts.run_sweeps( n_arg=1000, k_arg=1, num_runs=200, num_sweeps=10, sweeps_per_measurement=1 ) trajectory_results = ts.run_trajectories( n_arg=1000, k_arg=1, num_runs=200, num_sweeps=10, sweeps_per_measurement=1 ) sweep_results3 = ts.run_sweeps( n_arg=1000, k_arg=3, num_runs=200, num_sweeps=10, sweeps_per_measurement=1 ) trajectory_results3 = ts.run_trajectories( n_arg=1000, k_arg=3, num_runs=200, num_sweeps=10, sweeps_per_measurement=1 ) x = (np.ones(sweep_results.shape[1]).cumsum() - 1) * ts.n * 2 plt.plot(x, np.mean(sweep_results, axis=0)) plt.plot(x, np.mean(trajectory_results, axis=0)) plt.plot(x, np.mean(sweep_results3, axis=0)) plt.plot(x, np.mean(trajectory_results3, axis=0)) plt.xlabel('Computation time in full backups') plt.ylabel('Value of start state under greedy policy') plt.legend(['uniform, k=1', 'on-policy, k=1', 'uniform, k=3', 'on-policy, k=3'], loc='best') sweep_results = ts.run_sweeps( n_arg=10000, k_arg=1, num_runs=200, num_sweeps=10, sweeps_per_measurement=1 ) trajectory_results = ts.run_trajectories( n_arg=10000, k_arg=1, num_runs=200, num_sweeps=10, sweeps_per_measurement=1 ) sweep_results3 = ts.run_sweeps( n_arg=10000, k_arg=3, num_runs=200, num_sweeps=10, sweeps_per_measurement=1 ) trajectory_results3 = ts.run_trajectories( n_arg=10000, k_arg=3, num_runs=200, num_sweeps=10, sweeps_per_measurement=1 ) x = 
(np.ones(sweep_results.shape[1]).cumsum() - 1) * ts.n * 2 plt.plot(x, np.mean(sweep_results, axis=0)) plt.plot(x, np.mean(trajectory_results, axis=0)) plt.plot(x, np.mean(sweep_results3, axis=0)) plt.plot(x, np.mean(trajectory_results3, axis=0)) plt.xlabel('Computation time in full backups') plt.ylabel('Value of start state under greedy policy') plt.legend(['uniform, k=1', 'on-policy, k=1', 'uniform, k=3', 'on-policy, k=3'], loc='best') ``` We see that the results for 10k states are similar to those with 1k states, where the k=3 branching factor has a similar effect on the value function as before.
github_jupyter
``` import numpy as np import scipy.stats as ss import jax import jax.numpy as jnp import jax.scipy.stats as jss import jax.scipy.special as jsss import matplotlib.pyplot as plt from matplotlib.patches import Ellipse import matplotlib.transforms as transforms import pathfinder def ellipse_confidence(mu, cov, ax, c, n_std=2.): lambda_, v = np.linalg.eig(cov) lambda_ = np.sqrt(lambda_) ellipse = Ellipse(xy=(*mu,), width=lambda_[0]*n_std*2, height=lambda_[1]*n_std*2, angle=np.degrees(np.arctan2(*v[:,0][::-1])), facecolor=c,edgecolor="b", alpha=0.1) return ax.add_artist(ellipse) ``` ### Pathfinder: parallel quasi-newton variational inference *(from abstact)* Starting from a random initialization, Pathfinder locates normal approximations to the target density along a quasi-Newton optimization path, with local covariance estimated using the inverse Hessian estimates produced by the optimizer. Pathfinder returns draws from the approximation with the lowest estimated Kullback-Leibler (KL) divergence to the true posterior. Here we show the full L-BFGS optimization path with 95% central region of pathfinder samples and estimate ELBO wrt target distribution for each step of the algorithm. 
#### Bivariate normal: ``` rng_key = jax.random.PRNGKey(0) mu = jnp.zeros(2) cov = jnp.array([[1.0, -0.9], [-0.9, 1.0]]) x0 = jnp.array([1.75,2.05]) logp_fn = lambda x: jax.scipy.stats.multivariate_normal.logpdf(x, mu, cov) maxcor, maxiter = 10, 10 elbos, xs, phis = pathfinder.pathfinder(rng_key, logp_fn, x0, maxiter, maxcor, 10000, output='all') xs = jnp.stack(xs) step = 0.01 x, y = jnp.mgrid[-3:3:step, -3:3:step] pos = jnp.dstack((x, y)) rv = ss.multivariate_normal.pdf(pos, mu, cov) rows = int(np.ceil((len(xs)-1) / 3)) fig,axs = plt.subplots(rows, 3, figsize=(15, 5*rows)) for i,ax in zip(range(1,len(xs)), axs.flatten()): ax.contour(x,y,rv) xlim, ylim = ax.get_xlim(), ax.get_ylim() ax.plot(xs[0:i+1,0], xs[0:i+1,1], marker="*", linestyle="--",markersize=10) mu_i, cov_i = phis[i].mean(0), jnp.cov(phis[i],rowvar=False) ellipse_confidence(mu_i,cov_i, ax, "r") ax.set_title(f"Iteration: {i}\nEstimated ELBO: {elbos[i]:.2f}") ax.set_xlim(*xlim), ax.set_ylim(*ylim) ``` #### Posterior of binomial model: (chart should resemble *figure 1* of the paper) ``` n, p = 1000, 2 true_beta_mu = jnp.array([0.6,-0.4]) X = np.random.normal(size=(n,p)) betas = np.random.multivariate_normal(true_beta_mu, np.eye(p), size=n) Y = np.random.binomial(1,jsss.expit((betas*X).sum(1))) prior_beta_logp = lambda beta: jss.multivariate_normal.logpdf(beta, mean=jnp.zeros(p), cov=jnp.eye(p)) logp_model = jax.vmap(lambda beta: jss.bernoulli.logpmf(Y,jsss.expit(X@beta)).sum(-1) + prior_beta_logp(beta)) logp_model_ = lambda beta: logp_model(jnp.atleast_2d(beta)) # lbfgs routine needs 1d arrays, vmap 2d's... 
maxcor, maxiter = 10, 20 sample = jnp.array([[1.5, -1.5]]) elbos, xs, phis = pathfinder.pathfinder(rng_key, logp_model_, sample[0], maxiter, maxcor, 1000, output='all') xs = jnp.stack(xs) step, size = 0.05, 1.5 x, y = jnp.mgrid[true_beta_mu[0]-size:true_beta_mu[0]+size:step, true_beta_mu[1]-size:true_beta_mu[1]+size:step,] pos = jnp.dstack((x, y)) rv = logp_model(pos.reshape(-1,p)).reshape(*x.shape) levels = np.percentile(rv.flatten(),np.linspace(70,100,10)) rows = int(np.ceil((len(xs)-1) / 3)) fig,axs = plt.subplots(rows, 3, figsize=(15, 5*rows)) for i,ax in zip(range(1,len(xs)), axs.flatten()): ax.contour(x,y,rv,levels=levels) xlim, ylim = ax.get_xlim(), ax.get_ylim() ax.plot(xs[0:i+1,0], xs[0:i+1,1], marker="*", linestyle="--",markersize=10) mu_i, cov_i = phis[i].mean(0), jnp.cov(phis[i],rowvar=False) ellipse_confidence(mu_i,cov_i, ax, "r") ax.set_title(f"Iteration: {i}\nEstimated ELBO: {elbos[i]:.2f}") ax.set_xlim(*xlim), ax.set_ylim(*ylim) ``` #### Funnel-like posterior distribution (chart should resemble *figure 2* of the paper) ``` n, p = 1, 2 logp_tao_prior = lambda tao : jss.norm.logpdf(tao, 0, 1.) logp_mu_prior = lambda mu: jss.norm.logpdf(mu, 0., 1.) 
logp_obs = lambda obs, mu, tao: jss.norm.logpdf(obs, mu, jnp.exp(tao)) obs = jax.random.normal(rng_key) * 2e-2 logp_model = lambda x : logp_obs(obs, jnp.take(x,0,-1), jnp.take(x,1,-1)) \ + logp_mu_prior(jnp.take(x,0,-1)) + logp_tao_prior(jnp.take(x,1,-1)) x0 = jnp.array([2.,1.]) elbos, xs, phis = pathfinder.pathfinder(rng_key, logp_model, x0, maxiter, maxcor, 1000, output='all') xs = jnp.stack(xs) step, size = 0.05, 15 x, y = jnp.mgrid[0.-size:0.+size:step, -5:2:step] pos = jnp.dstack((x, y)) rv = logp_model(pos.reshape(-1,p)).reshape(*x.shape) levels = np.percentile(rv.flatten(),np.linspace(50,95,10)) rows = int(np.ceil((len(xs)-1) / 3)) fig,axs = plt.subplots(rows, 3, figsize=(15, 5*rows)) for i,ax in zip(range(1,len(xs)), axs.flatten()): ax.contour(x,y,rv,levels=levels) xlim, ylim = ax.get_xlim(), ax.get_ylim() ax.plot(xs[0:i+1,0], xs[0:i+1,1], marker="*", linestyle="--",markersize=10) mu_i, cov_i = phis[i].mean(0), jnp.cov(phis[i],rowvar=False) ellipse_confidence(mu_i,cov_i, ax, "r") ax.set_title(f"Iteration: {i}\nEstimated ELBO: {elbos[i]:.2f}") ax.set_xlim(*xlim), ax.set_ylim(*ylim) ```
github_jupyter
# Analysis Notebook author: Martin Saveski (msaveski@mit.edu) Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. ``` # load libraries suppressMessages(library(tidyverse)) suppressMessages(library(cowplot)) suppressMessages(library(ggsci)) suppressMessages(library(scales)) # setup setwd("~/code/social-catalysts") source("scripts/utils.R") dta_root <- "data/" plt_root <- "figs_cscw/" theme_set(theme_light()) colors = c("Catalyst" = "#ee0001", "Matched" = "#3b4992") ``` # Posts Analysis ## Topics (Fig 2) ``` df_empath <- readRDS(str_c(dta_root, "post_analysis/empath.rds")) df_empath <- df_empath %>% ungroup() %>% mutate(post_cm = str_to_title(post_cm)) # (A) counts plt_empath <- df_empath %>% ggplot(aes( x = fct_reorder(topic, m), y = m, fill = fct_rev(post_cm) )) + geom_bar(stat = "identity", position = position_dodge(), width = 0.7) + geom_errorbar( aes(ymin = low_ci, ymax = high_ci), position = position_dodge(width = 0.7), color = "black", size = 0.3, width = 0.5 ) + labs(x = NULL, y = "Average Count", fill = NULL) + scale_fill_manual(values = colors) + coord_flip() + theme( panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), legend.position = "none" ) # (B) differences df_empath_dd <- df_empath %>% select(post_cm, topic, m, se) %>% mutate(post_cm = str_to_lower(post_cm)) %>% multi_spread(post_cm, c(m, se)) %>% mutate( d_m = catalyst_m - matched_m, d_se = sqrt(catalyst_se^2 + matched_se^2), d_ci = 1.96 * d_se, d_m_low = d_m - d_ci, d_m_high = d_m + d_ci ) %>% arrange(desc(d_m)) # variables for coloring the CIs df_empath_dd <- df_empath_dd %>% mutate( b_start = case_when( d_m_low > 0 & d_m_high > 0 ~ d_m, d_m_low < 0 & d_m_high > 0 ~ d_m_low, d_m_low < 0 & d_m_high < 0 ~ d_m_low ), b_end = case_when( d_m_low > 0 & d_m_high > 0 ~ d_m_high, d_m_low < 0 & d_m_high > 0 ~ d_m_high, d_m_low < 0 & d_m_high < 0 ~ d_m ), w_start = 
case_when( d_m_low > 0 & d_m_high > 0 ~ d_m_low, d_m_low < 0 & d_m_high > 0 ~ 0, d_m_low < 0 & d_m_high < 0 ~ d_m ), w_end = case_when( d_m_low > 0 & d_m_high > 0 ~ d_m, d_m_low < 0 & d_m_high > 0 ~ d_m, d_m_low < 0 & d_m_high < 0 ~ d_m_high ) ) plt_empath_dd <- df_empath_dd %>% ggplot(aes(x = fct_reorder(topic, d_m), y = d_m)) + geom_bar(stat = "identity", width = 0.6) + geom_errorbar( aes(ymin = b_start, ymax = b_end), width = 0, color = "black", size = 0.8 ) + geom_errorbar( aes(ymin = w_start, ymax = w_end), width = 0, color = "white", size = 0.8 ) + labs(x = "", y = "Catalyst - Matched") + scale_x_discrete(position = "top") + scale_y_continuous(breaks = pretty_breaks(), limits = c(-0.01, 0.04)) + coord_flip() + theme(panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank()) p_row <- plot_grid(plt_empath, plt_empath_dd, ncol = 2, align = "h") legend <- get_legend( plt_empath + guides(fill = guide_legend(reverse = T)) + theme(legend.position="bottom") ) plt_empath_full <- plot_grid(p_row, legend, ncol=1, rel_heights = c(1, .08)) options(repr.plot.width=8, repr.plot.height=6.5) print(plt_empath_full) ``` # User Analysis ## Ego networks (Fig 3) ``` df_ego_stats <- readRDS(str_c(dta_root, "user_ego_nets_sample/ego_stats.rds")) df_ego_stats_inc <- df_ego_stats %>% ungroup() %>% select(is_catalyst, field, m, se) %>% multi_spread(is_catalyst, c(m, se)) %>% group_by(field) %>% do( per_change_delta_se( .$catalyst_m, .$matched_m, .$catalyst_se, .$matched_se ) ) ego_stats_fields <- c( "n_nodes", "density", "avg_clust", "avg_degree", "var_degrees", "deg_assortativity", "fiedler", "modularity" ) df_ego_stats_inc <- df_ego_stats_inc %>% ungroup() %>% filter(field %in% ego_stats_fields) %>% mutate( field = case_when( field == "n_nodes" ~ "Number of Nodes (Friends)", field == "density" ~ "Density", field == "avg_degree" ~ "Degree Average", field == "var_degrees" ~ "Degree Variance", field == "deg_assortativity" ~ "Degree Assortativity", field == "fiedler" ~ 
"Algebraic Connectivity", field == "avg_clust" ~ " Average Clustering Coefficient", field == "modularity" ~ "Modularity" ), field = factor( field, levels = rev(c( "Number of Nodes (Friends)", "Number of Edges", "Density", "Degree Average", "Degree Variance", "Degree Assortativity", " Average Clustering Coefficient", "Algebraic Connectivity", "Modularity" ) )) ) plt_ego_stats_inc <- df_ego_stats_inc %>% ggplot(aes( x = field, y = mean, ymin = lower95, ymax = upper95 )) + geom_point(size = 2) + geom_errorbar(width = 0, size = 0.6) + geom_hline(aes(yintercept = 0), linetype = "dashed") + labs(x = NULL, y = "Catalyst vs Matched users (% increase)") + scale_y_continuous(labels = percent_format(accuracy = 1), limits = c(-0.013, 0.3)) + scale_color_aaas() + coord_flip() + theme( axis.ticks = element_blank(), strip.text.y = element_text(color = "black", angle = 0), strip.background.y = element_rect(fill = "grey90"), legend.position = "none" ) # k-core df_ego_k_core <- readRDS(str_c(dta_root, "user_ego_nets_sample/k_core.rds")) df_ego_k_core <- df_ego_k_core %>% ungroup() %>% filter(threshold < 16) plt_ego_k_core <- df_ego_k_core %>% ggplot(aes(x = threshold, y = m, color=is_catalyst)) + geom_line() + geom_point() + geom_errorbar(aes(ymin=low_ci, ymax=high_ci), width=0.2) + scale_x_continuous(trans = log2_trans(), breaks = c(2, 4, 8), labels = c(expression(2^1), expression(2^2), expression(2^3))) + labs(x = "k", y = "Components in k-core", color = NULL) + expand_limits(y = 1) + guides(color = guide_legend(reverse = T)) + scale_color_aaas() + theme( panel.grid.minor.y = element_blank(), legend.position="bottom" ) # k brace df_ego_k_truss <- readRDS(str_c(dta_root, "user_ego_nets_sample/k_truss.rds")) df_ego_k_truss <- df_ego_k_truss %>% ungroup() %>% filter(threshold < 16) plt_ego_k_truss <- df_ego_k_truss %>% ggplot(aes(x = threshold, y = m, color=is_catalyst)) + geom_line() + geom_point() + geom_errorbar(aes(ymin=low_ci, ymax=high_ci), width=0.2) + scale_x_continuous(trans 
= log2_trans(), breaks = c(2, 4, 8), labels = c(expression(2^1), expression(2^2), expression(2^3))) + scale_y_continuous(breaks = seq(1, 8, by = 1)) + labs(x = "k", y = "Components in k-brace", color = NULL) + expand_limits(y = 1) + guides(color = guide_legend(reverse = T)) + scale_color_aaas() + theme( panel.grid.minor.y = element_blank(), legend.position="bottom" ) # group figures plt_ego_nets_all <- plot_grid( plt_ego_stats_inc, plt_ego_k_core + theme(legend.position="none"), plt_ego_k_truss + theme(legend.position="right"), labels = c('A', 'B', 'C'), nrow = 1, rel_widths = c(1, 0.615, 0.82), align = "h" ) options(repr.plot.width=11, repr.plot.height=3) print(plt_ego_nets_all) ``` # Survey ## Questions Overlap (Fig 5) ``` df_overlap <- readRDS(str_c(dta_root, "survey/overlap.rds")) df_overlap <- df_overlap %>% mutate(col = ifelse(v > 0.345, "white", "black")) plt_overlap <- df_overlap %>% ggplot(aes(q_i, fct_rev(q_j), fill = v)) + geom_tile() + geom_tile(color = "black", linetype = 1, size = 0.2) + geom_text(aes(label = v, color = col)) + scale_fill_material("grey", na.value = 'white') + scale_color_manual(values = c("black", "white")) + scale_x_discrete(position = "top") + labs( x = expression(paste("Question ", italic("i"))), y = expression(paste("Question ", italic("j"))) ) + guides( color = F, fill = guide_colourbar( draw.ulim = FALSE, draw.llim = FALSE, label.theme = element_text(colour = "black", size = 8, margin = margin(l=5)) )) + theme( axis.ticks = element_blank(), panel.border = element_blank(), panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), panel.grid.major.y = element_blank(), panel.grid.minor.y = element_blank(), legend.title = element_blank(), legend.position = "right" ) options(repr.plot.width=4, repr.plot.height=3) print(plt_overlap) ``` ## Nominated Percentiles (Fig 6) ``` df_nom_percentiles <- readRDS( str_c( dta_root, "survey_nominated_percentiles/nominated_percentile_per_nomination.rds" ) ) df_nom_percentiles <- 
df_nom_percentiles %>% ungroup() %>% filter(field != 'catalyst comments (per post)') %>% mutate( field = case_when( field == "posts" ~ "Number of Posts", field == "catalyst comments (total)" ~ "Number of Catalyst Comments", field == "mutual friends" ~ "Number of Mutual Friends", field == "friends" ~ "Number of Friends" ), field = factor( field, levels = c( "Number of Posts", "Number of Friends", "Number of Mutual Friends", "Number of Catalyst Comments" ) ), nomination_number = case_when( nomination_number == "0" ~ "1", nomination_number == "1" ~ "2", nomination_number == "2" ~ "3" ), question_code = str_to_upper(question_code) ) plt_nom_percentiles <- df_nom_percentiles %>% ggplot(aes(x = question_code, y = m, color = nomination_number)) + geom_point(size = 2, position = position_dodge(0.9)) + geom_errorbar(aes(ymin = low_ci, ymax = high_ci), width = 0.8, position = position_dodge(0.9)) + geom_hline(aes(yintercept = 0.5), linetype = "dashed") + facet_wrap(~ field, ncol = 4) + labs(x = NULL, y = "Mean Percentile Rank of Nominated Users", color = "Nomination Number") + scale_y_continuous(labels = percent_format(accuracy = 1), breaks = seq(0, 0.8, 0.1)) + expand_limits(y = 0) + scale_color_aaas() + theme( panel.grid.major.x = element_blank(), panel.grid.minor.y = element_blank(), strip.text.x = element_text(color = "black", size = 8, face = "bold"), strip.background.x = element_rect(fill = "grey95"), legend.position = "bottom" ) options(repr.plot.width=11, repr.plot.height=4) print(plt_nom_percentiles) ``` ## Percent Increase in Mean Catalystness (Fig 7) ``` df_tot_cat_per_q <- readRDS(str_c( dta_root, "survey_catalystness_per_question/tot_catalystness.rds" )) df_tot_cat_per_q <- df_tot_cat_per_q %>% ungroup() %>% mutate(question_code = str_to_upper(question_code)) %>% rename( mean = avg_total_cat, std = std_total_cat ) %>% multi_spread(is_nominated, c(mean, std, n)) df_tot_cat_per_q_inc <- df_tot_cat_per_q %>% group_by(question_code) %>% do( per_change_delta( 
.$nominated_mean, .$matched_mean, .$nominated_n, .$matched_n, .$nominated_std, .$matched_std ) ) plt_cat_total_inc <- df_tot_cat_per_q_inc %>% ggplot(aes(x = fct_rev(question_code), y = mean)) + geom_point(size = 3) + geom_errorbar(aes(ymin = lower95, ymax = upper95), width = 0, size = 0.6) + geom_hline(aes(yintercept = 0), linetype = "dashed") + labs(x = NULL, y = "Catalysts Comments of Nominated vs Matched users \n (% increase)") + scale_y_continuous(labels = percent) + expand_limits(y = 0) + coord_flip() + theme( axis.ticks = element_blank() ) options(repr.plot.width=4.5, repr.plot.height=3) print(plt_cat_total_inc) ```
github_jupyter
``` %reset import keras from keras import models from keras.models import Sequential, Model from keras import layers from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense from keras.layers.advanced_activations import LeakyReLU from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard from keras.optimizers import SGD, Adam import matplotlib.pyplot as plt import numpy as np import scipy.io import random import os %matplotlib inline exec(open("tiny-yolo/utils.py").read()) NORM_H, NORM_W = 416, 416 GRID_H, GRID_W = 13 , 13 BATCH_SIZE = 64 BOX = 5 ORIG_CLASS = 20 LABELS = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] ANCHORS = '1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52' ANCHORS = [float(ANCHORS.strip()) for ANCHORS in ANCHORS.split(',')] SCALE_NOOB, SCALE_CONF, SCALE_COOR, SCALE_PROB = 0.5, 5.0, 5.0, 1.0 model = Sequential() # Layer 1 model.add(Conv2D(16, (3,3), strides=(1,1), padding='same', use_bias=False, input_shape=(416,416,3))) model.add(BatchNormalization()) model.add(LeakyReLU(alpha=0.1)) model.add(MaxPooling2D(pool_size=(2, 2))) # Layer 2 - 5 for i in range(0,4): model.add(Conv2D(32*(2**i), (3,3), strides=(1,1), padding='same', use_bias=False)) model.add(BatchNormalization()) model.add(LeakyReLU(alpha=0.1)) model.add(MaxPooling2D(pool_size=(2, 2))) # Layer 6 model.add(Conv2D(512, (3,3), strides=(1,1), padding='same', use_bias=False)) model.add(BatchNormalization()) model.add(LeakyReLU(alpha=0.1)) model.add(MaxPooling2D(pool_size=(2, 2), strides=(1,1), padding='same')) # Layer 7 - 8 for _ in range(0,2): model.add(Conv2D(1024, (3,3), strides=(1,1), padding='same', use_bias=False)) model.add(BatchNormalization()) model.add(LeakyReLU(alpha=0.1)) # Layer 9 model.add(Conv2D(BOX * (4 + 1 + ORIG_CLASS), (1, 1), strides=(1, 1), 
kernel_initializer='he_normal')) model.add(Activation('linear')) model.add(Reshape((GRID_H, GRID_W, BOX, 4 + 1 + ORIG_CLASS))) model.load_weights('tiny-yolo.h5') #model.load_weights('face-weights.hdf5') ann_dir = 'FDDB_2010/Annotations/' img_dir = 'FDDB_2010/JPEGImages/' anns, labels = parse_annotation(ann_dir, LABELS) CLASS = 1 import random for i in range(0,5): idx = random.randint(0, 2767) print(idx) print(anns[idx]) #ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828] def custom_loss(y_true, y_pred): ### Adjust prediction # adjust x and y pred_box_xy = tf.sigmoid(y_pred[:,:,:,:,:2]) # adjust w and h pred_box_wh = tf.exp(y_pred[:,:,:,:,2:4]) * np.reshape(ANCHORS, [1,1,1,BOX,2]) pred_box_wh = tf.sqrt(pred_box_wh / np.reshape([float(GRID_W), float(GRID_H)], [1,1,1,1,2])) # adjust confidence pred_box_conf = tf.expand_dims(tf.sigmoid(y_pred[:, :, :, :, 4]), -1) # adjust probability pred_box_prob = tf.nn.softmax(y_pred[:, :, :, :, 5:]) y_pred = tf.concat([pred_box_xy, pred_box_wh, pred_box_conf, pred_box_prob], 4) print("Y_pred shape: {}".format(y_pred.shape)) ### Adjust ground truth # adjust x and y center_xy = .5*(y_true[:,:,:,:,0:2] + y_true[:,:,:,:,2:4]) center_xy = center_xy / np.reshape([(float(NORM_W)/GRID_W), (float(NORM_H)/GRID_H)], [1,1,1,1,2]) true_box_xy = center_xy - tf.floor(center_xy) # adjust w and h true_box_wh = (y_true[:,:,:,:,2:4] - y_true[:,:,:,:,0:2]) true_box_wh = tf.sqrt(true_box_wh / np.reshape([float(NORM_W), float(NORM_H)], [1,1,1,1,2])) # adjust confidence pred_tem_wh = tf.pow(pred_box_wh, 2) * np.reshape([GRID_W, GRID_H], [1,1,1,1,2]) pred_box_area = pred_tem_wh[:,:,:,:,0] * pred_tem_wh[:,:,:,:,1] pred_box_ul = pred_box_xy - 0.5 * pred_tem_wh pred_box_bd = pred_box_xy + 0.5 * pred_tem_wh true_tem_wh = tf.pow(true_box_wh, 2) * np.reshape([GRID_W, GRID_H], [1,1,1,1,2]) true_box_area = true_tem_wh[:,:,:,:,0] * true_tem_wh[:,:,:,:,1] true_box_ul = true_box_xy - 0.5 * true_tem_wh true_box_bd = 
true_box_xy + 0.5 * true_tem_wh intersect_ul = tf.maximum(pred_box_ul, true_box_ul) intersect_br = tf.minimum(pred_box_bd, true_box_bd) intersect_wh = intersect_br - intersect_ul intersect_wh = tf.maximum(intersect_wh, 0.0) intersect_area = intersect_wh[:,:,:,:,0] * intersect_wh[:,:,:,:,1] iou = tf.truediv(intersect_area, true_box_area + pred_box_area - intersect_area) best_box = tf.equal(iou, tf.reduce_max(iou, [3], True)) best_box = tf.to_float(best_box) true_box_conf = tf.expand_dims(best_box * y_true[:,:,:,:,4], -1) # adjust confidence true_box_prob = y_true[:,:,:,:,5:] y_true = tf.concat([true_box_xy, true_box_wh, true_box_conf, true_box_prob], 4) print("Y_true shape: {}".format(y_true.shape)) #y_true = tf.Print(y_true, [true_box_wh], message='DEBUG', summarize=30000) ### Compute the weights weight_coor = tf.concat(4 * [true_box_conf], 4) weight_coor = SCALE_COOR * weight_coor weight_conf = SCALE_NOOB * (1. - true_box_conf) + SCALE_CONF * true_box_conf weight_prob = tf.concat(CLASS * [true_box_conf], 4) weight_prob = SCALE_PROB * weight_prob weight = tf.concat([weight_coor, weight_conf, weight_prob], 4) print("Weight shape: {}".format(weight.shape)) ### Finalize the loss loss = tf.pow(y_pred - y_true, 2) loss = loss * weight loss = tf.reshape(loss, [-1, GRID_W*GRID_H*BOX*(4 + 1 + CLASS)]) loss = tf.reduce_sum(loss, 1) loss = .5 * tf.reduce_mean(loss) return loss # freeze first 8 layers for layer in model.layers: layer.trainable = False connecting_layer = model.layers[-4].output top_model = Conv2D(BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), kernel_initializer='he_normal') (connecting_layer) top_model = Activation('linear') (top_model) top_model = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS)) (top_model) new_model = Model(model.input, top_model) new_model.summary() early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=3, mode='min', verbose=1) checkpoint = ModelCheckpoint('face-weights.hdf5', monitor='loss', verbose=1, save_best_only=True, 
mode='min', period=1) optimizer = SGD(lr=0.5e-4, decay=0.0005, momentum=0.9) #optimizer = Adam(lr=0.5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) new_model.compile(loss=custom_loss, optimizer=optimizer) exec(open("tiny-yolo/utils.py").read()) history = new_model.fit_generator(data_gen(anns, BATCH_SIZE), int(len(anns)/BATCH_SIZE), epochs = 200, verbose = 1, #callbacks = [checkpoint, early_stop]) callbacks = [checkpoint]) new_model.load_weights('face-weights.hdf5') image = cv2.imread('test01.jpg') plt.figure(figsize=(10,10)) input_image = cv2.resize(image, (416, 416)) input_image = input_image / 255. input_image = input_image[:,:,::-1] input_image = np.expand_dims(input_image, 0) netout = new_model.predict(input_image) #print netout THRESHOLD = 0.2 image = interpret_netout(image, netout[0]) plt.imshow(image[:,:,::-1]); plt.show() image = cv2.imread('test02.jpg') plt.figure(figsize=(10,10)) input_image = cv2.resize(image, (416, 416)) input_image = input_image / 255. input_image = input_image[:,:,::-1] input_image = np.expand_dims(input_image, 0) netout = new_model.predict(input_image) #print netout THRESHOLD = 0.65 image = interpret_netout(image, netout[0]) plt.imshow(image[:,:,::-1]); plt.show() ```
github_jupyter
``` !pip install tensorflow==2.2.0 !python --version import tensorflow import tensorflow_hub print(tensorflow.__version__) print(tensorflow_hub.__version__) !pip3 install sentencepiece !pip3 install tf-sentencepiece import pandas as pd import tensorflow as tf import tensorflow_hub as hub import numpy as np import tf_sentencepiece import tensorflow.compat.v1 as tf tf.disable_v2_behavior() # Some texts of different lengths. english_sentences = ["dog", "Puppies are nice.", "I enjoy taking long walks along the beach with my dog."] german_sentences = ["Hund", "Welpen sind nett.", "Ich genieße lange Spaziergänge am Strand entlang mit meinem Hund."] # Set up graph. g = tf.Graph() with g.as_default(): text_input = tf.placeholder(dtype=tf.string, shape=[None]) en_de_embed = hub.Module('https://tfhub.dev/google/universal-sentence-encoder-xling/en-de/1') embedded_text = en_de_embed(text_input) init_op = tf.group([tf.global_variables_initializer(), tf.tables_initializer()]) g.finalize() # Initialize session. session = tf.Session(graph=g) session.run(init_op) # Compute embeddings. en_result = session.run(embedded_text, feed_dict={text_input: [english_sentences[0]]}) de_result = session.run(embedded_text, feed_dict={text_input: [german_sentences[0]]}) # Compute similarity. Higher score indicates greater similarity. similarity_score = np.dot(np.squeeze(en_result), np.squeeze(de_result)) tuples = [] for english_sentence, german_sentence in zip(english_sentences, german_sentences): # Compute embeddings. en_result = session.run(embedded_text, feed_dict={text_input: [english_sentence]}) de_result = session.run(embedded_text, feed_dict={text_input: [german_sentence]}) # Compute similarity. Higher score indicates greater similarity. 
similarity_score = np.dot(np.squeeze(en_result), np.squeeze(de_result)) # tuples.append((english_sentence, german_sentence, similarity_score, en_result, de_result)) tuples.append((english_sentence, german_sentence, similarity_score)) # pd.DataFrame(tuples, columns = ['english_sentence', 'german_sentence', 'similarity_score', 'en_embedding', 'de_embedding']) pd.set_option('display.max_colwidth', None) pd.DataFrame(tuples, columns = ['english_sentence', 'german_sentence', 'similarity_score']) from google.colab import drive drive.mount('/content/drive') import numpy as np en_embedding_file = '/content/drive/My Drive/Colab Notebooks/media-agenda/refactorization/data/en_embeddings.npy' de_embedding_file = '/content/drive/My Drive/Colab Notebooks/media-agenda/refactorization/data/de_embeddings.npy' en_embeddings = np.load(en_embedding_file) de_embeddings = np.load(de_embedding_file) # embeddings = np.concatenate((en_embeddings, de_embeddings), axis = 0) # print('Shape of embeddings:', embeddings.shape) %cd '/content/drive/My Drive/Colab Notebooks/media-agenda/' import json path = '/content/drive/My Drive/Colab Notebooks/media-agenda/data/sentence_dict.json' with open(path, 'r') as f: sentences = json.load(f) print(len(sentences)) print(sentences[0]) import pandas as pd result = [] count = 5 for en_index, en_embedding in enumerate(en_embeddings): tuples = [] for de_index, de_embedding in enumerate(de_embeddings): result.append(pd.DataFrame(tuples, columns = ['en_index', 'de_index', 'similarity_score']).sort_values('similarity_score', ascending = False).head(count).to_numpy()) similarity_score = np.dot(np.squeeze(en_embedding), np.squeeze(de_embedding)) de_index = en_embeddings.shape[0] + de_index tuples.append((en_index, de_index, similarity_score)) if en_index%10000 == 0: print('Now...', en_index) result # tuples = np.asarray(tuples) # pd.set_option('display.max_colwidth', None) np.save('cs_result', result) result ```
github_jupyter
<!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* *No changes were made to the contents of this notebook from the original.* <!--NAVIGATION--> < [Aggregations: Min, Max, and Everything In Between](02.04-Computation-on-arrays-aggregates.ipynb) | [Contents](Index.ipynb) | [Comparisons, Masks, and Boolean Logic](02.06-Boolean-Arrays-and-Masks.ipynb) > # Computation on Arrays: Broadcasting We saw in the previous section how NumPy's universal functions can be used to *vectorize* operations and thereby remove slow Python loops. Another means of vectorizing operations is to use NumPy's *broadcasting* functionality. Broadcasting is simply a set of rules for applying binary ufuncs (e.g., addition, subtraction, multiplication, etc.) on arrays of different sizes. ## Introducing Broadcasting Recall that for arrays of the same size, binary operations are performed on an element-by-element basis: ``` import numpy as np a = np.array([0, 1, 2]) b = np.array([5, 5, 5]) a + b ``` Broadcasting allows these types of binary operations to be performed on arrays of different sizes–for example, we can just as easily add a scalar (think of it as a zero-dimensional array) to an array: ``` a + 5 ``` We can think of this as an operation that stretches or duplicates the value ``5`` into the array ``[5, 5, 5]``, and adds the results. 
The advantage of NumPy's broadcasting is that this duplication of values does not actually take place, but it is a useful mental model as we think about broadcasting. We can similarly extend this to arrays of higher dimension. Observe the result when we add a one-dimensional array to a two-dimensional array: ``` M = np.ones((3, 3)) M M + a ``` Here the one-dimensional array ``a`` is stretched, or broadcast across the second dimension in order to match the shape of ``M``. While these examples are relatively easy to understand, more complicated cases can involve broadcasting of both arrays. Consider the following example: ``` a = np.arange(3) b = np.arange(3)[:, np.newaxis] print(a) print(b) a + b ``` Just as before we stretched or broadcasted one value to match the shape of the other, here we've stretched *both* ``a`` and ``b`` to match a common shape, and the result is a two-dimensional array! The geometry of these examples is visualized in the following figure (Code to produce this plot can be found in the [appendix](06.00-Figure-Code.ipynb#Broadcasting), and is adapted from source published in the [astroML](http://astroml.org) documentation. Used by permission). ![Broadcasting Visual](figures/02.05-broadcasting.png) The light boxes represent the broadcasted values: again, this extra memory is not actually allocated in the course of the operation, but it can be useful conceptually to imagine that it is. ## Rules of Broadcasting Broadcasting in NumPy follows a strict set of rules to determine the interaction between the two arrays: - Rule 1: If the two arrays differ in their number of dimensions, the shape of the one with fewer dimensions is **padded** with ones on its leading (left) side. - Rule 2: If the shape of the two arrays does not match in any dimension, the array with shape equal to 1 in that dimension is **stretched** to match the other shape. - Rule 3: If in any dimension the sizes disagree and neither is equal to 1, an error is raised. 
To make these rules clear, let's consider a few examples in detail. ### Broadcasting example 1 Let's look at adding a two-dimensional array to a one-dimensional array: ``` M = np.ones((2, 3)) a = np.arange(3) ``` Let's consider an operation on these two arrays. The shape of the arrays are - ``M.shape = (2, 3)`` - ``a.shape = (3,)`` We see by rule 1 that the array ``a`` has fewer dimensions, so we pad it on the left with ones: - ``M.shape -> (2, 3)`` - ``a.shape -> (1, 3)`` By rule 2, we now see that the first dimension disagrees, so we stretch this dimension to match: - ``M.shape -> (2, 3)`` - ``a.shape -> (2, 3)`` The shapes match, and we see that the final shape will be ``(2, 3)``: ``` M + a ``` ### Broadcasting example 2 Let's take a look at an example where both arrays need to be broadcast: ``` a = np.arange(3).reshape((3, 1)) b = np.arange(3) ``` Again, we'll start by writing out the shape of the arrays: - ``a.shape = (3, 1)`` - ``b.shape = (3,)`` Rule 1 says we must pad the shape of ``b`` with ones: - ``a.shape -> (3, 1)`` - ``b.shape -> (1, 3)`` And rule 2 tells us that we upgrade each of these ones to match the corresponding size of the other array: - ``a.shape -> (3, 3)`` - ``b.shape -> (3, 3)`` Because the result matches, these shapes are compatible. We can see this here: ``` a + b ``` ### Broadcasting example 3 Now let's take a look at an example in which the two arrays are not compatible: ``` M = np.ones((3, 2)) a = np.arange(3) ``` This is just a slightly different situation than in the first example: the matrix ``M`` is transposed. How does this affect the calculation? 
The shape of the arrays are - ``M.shape = (3, 2)`` - ``a.shape = (3,)`` Again, rule 1 tells us that we must pad the shape of ``a`` with ones: - ``M.shape -> (3, 2)`` - ``a.shape -> (1, 3)`` By rule 2, the first dimension of ``a`` is stretched to match that of ``M``: - ``M.shape -> (3, 2)`` - ``a.shape -> (3, 3)`` Now we hit rule 3–the final shapes do not match, so these two arrays are incompatible, as we can observe by attempting this operation: ``` M + a ``` Note the potential confusion here: you could imagine making ``a`` and ``M`` compatible by, say, padding ``a``'s shape with ones on the right rather than the left. But this is not how the broadcasting rules work! That sort of flexibility might be useful in some cases, but it would lead to potential areas of ambiguity. If right-side padding is what you'd like, you can do this explicitly by reshaping the array (we'll use the ``np.newaxis`` keyword introduced in [The Basics of NumPy Arrays](02.02-The-Basics-Of-NumPy-Arrays.ipynb)): ``` a[:, np.newaxis].shape M + a[:, np.newaxis] ``` Also note that while we've been focusing on the ``+`` operator here, these broadcasting rules apply to *any* binary ``ufunc``. For example, here is the ``logaddexp(a, b)`` function, which computes ``log(exp(a) + exp(b))`` with more precision than the naive approach: ``` np.logaddexp(M, a[:, np.newaxis]) ``` For more information on the many available universal functions, refer to [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb). ## Broadcasting in Practice Broadcasting operations form the core of many examples we'll see throughout this book. We'll now take a look at a couple simple examples of where they can be useful. ### Centering an array In the previous section, we saw that ufuncs allow a NumPy user to remove the need to explicitly write slow Python loops. Broadcasting extends this ability. One commonly seen example is when centering an array of data. 
Imagine you have an array of 10 observations, each of which consists of 3 values. Using the standard convention (see [Data Representation in Scikit-Learn](05.02-Introducing-Scikit-Learn.ipynb#Data-Representation-in-Scikit-Learn)), we'll store this in a $10 \times 3$ array: ``` X = np.random.random((10, 3)) ``` We can compute the mean of each feature using the ``mean`` aggregate across the first dimension: ``` Xmean = X.mean(0) Xmean ``` And now we can center the ``X`` array by subtracting the mean (this is a broadcasting operation): ``` X_centered = X - Xmean ``` To double-check that we've done this correctly, we can check that the centered array has near zero mean: ``` X_centered.mean(0) ``` To within machine precision, the mean is now zero. ### Plotting a two-dimensional function One place that broadcasting is very useful is in displaying images based on two-dimensional functions. If we want to define a function $z = f(x, y)$, broadcasting can be used to compute the function across the grid: ``` # x and y have 50 steps from 0 to 5 x = np.linspace(0, 5, 50) y = np.linspace(0, 5, 50)[:, np.newaxis] z = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x) ``` We'll use Matplotlib to plot this two-dimensional array (these tools will be discussed in full in [Density and Contour Plots](04.04-Density-and-Contour-Plots.ipynb)): ``` %matplotlib inline import matplotlib.pyplot as plt plt.imshow(z, origin='lower', extent=[0, 5, 0, 5], cmap='viridis') plt.colorbar(); ``` The result is a compelling visualization of the two-dimensional function. <!--NAVIGATION--> < [Aggregations: Min, Max, and Everything In Between](02.04-Computation-on-arrays-aggregates.ipynb) | [Contents](Index.ipynb) | [Comparisons, Masks, and Boolean Logic](02.06-Boolean-Arrays-and-Masks.ipynb) >
github_jupyter
# Creating your own dataset from Google Images *by: Francisco Ingham and Jeremy Howard. Inspired by [Adrian Rosebrock](https://www.pyimagesearch.com/2017/12/04/how-to-create-a-deep-learning-dataset-using-google-images/)* In this tutorial we will see how to easily create an image dataset through Google Images. **Note**: You will have to repeat these steps for any new category you want to Google (e.g. once for dogs and once for cats). ``` from fastai.vision import * ``` ## Get a list of URLs ### Search and scroll Go to [Google Images](http://images.google.com) and search for the images you are interested in. The more specific you are in your Google Search, the better the results and the less manual pruning you will have to do. Scroll down until you've seen all the images you want to download, or until you see a button that says 'Show more results'. All the images you scrolled past are now available to download. To get more, click on the button, and continue scrolling. The maximum number of images Google Images shows is 700. It is a good idea to put things you want to exclude into the search query, for instance if you are searching for the Eurasian wolf, "canis lupus lupus", it might be a good idea to exclude other variants: "canis lupus lupus" -dog -arctos -familiaris -baileyi -occidentalis You can also limit your results to show only photos by clicking on Tools and selecting Photos from the Type dropdown. ### Download into file Now you must run some Javascript code in your browser which will save the URLs of all the images you want for your dataset. In Google Chrome press <kbd>Ctrl</kbd><kbd>Shift</kbd><kbd>j</kbd> on Windows/Linux and <kbd>Cmd</kbd><kbd>Opt</kbd><kbd>j</kbd> on macOS, and a small window, the JavaScript 'Console', will appear. In Firefox press <kbd>Ctrl</kbd><kbd>Shift</kbd><kbd>k</kbd> on Windows/Linux or <kbd>Cmd</kbd><kbd>Opt</kbd><kbd>k</kbd> on macOS. That is where you will paste the JavaScript commands.
You will need to get the urls of each of the images. Before running the following commands, you may want to disable ad blocking extensions (uBlock, AdBlockPlus etc.) in Chrome. Otherwise the window.open() command doesn't work. Then you can run the following commands: ```javascript urls=Array.from(document.querySelectorAll('.rg_i')).map(el=> el.hasAttribute('data-src')?el.getAttribute('data-src'):el.getAttribute('data-iurl')); window.open('data:text/csv;charset=utf-8,' + escape(urls.join('\n'))); ``` ### Create directory and upload urls file into your server Choose an appropriate name for your labeled images. You can run these steps multiple times to create different labels. ``` folder = 'black' file = 'urls_black.csv' folder = 'teddys' file = 'urls_teddys.csv' folder = 'grizzly' file = 'urls_grizzly.csv' ``` You will need to run this cell once per each category. ``` path = Path('data/bears') dest = path/folder dest.mkdir(parents=True, exist_ok=True) path.ls() ``` Finally, upload your urls file. You just need to press 'Upload' in your working directory and select your file, then click 'Upload' for each of the displayed files. ![uploaded file](images/download_images/upload.png) ## Download images Now you will need to download your images from their respective urls. fast.ai has a function that allows you to do just that. You just have to specify the urls filename as well as the destination folder and this function will download and save all images that can be opened. If they have some problem in being opened, they will not be saved. Let's download our images! Notice you can choose a maximum number of images to be downloaded. In this case we will not download all the urls. You will need to run this line once for every category. 
``` classes = ['teddys','grizzly','black'] download_images(path/file, dest, max_pics=200) # If you have problems download, try with `max_workers=0` to see exceptions: download_images(path/file, dest, max_pics=20, max_workers=0) ``` Then we can remove any images that can't be opened: ``` for c in classes: print(c) verify_images(path/c, delete=True, max_size=500) ``` ## View data ``` np.random.seed(42) data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2, ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats) # If you already cleaned your data, run this cell instead of the one before # np.random.seed(42) # data = ImageDataBunch.from_csv(path, folder=".", valid_pct=0.2, csv_labels='cleaned.csv', # ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats) ``` Good! Let's take a look at some of our pictures then. ``` data.classes data.show_batch(rows=3, figsize=(7,8)) data.classes, data.c, len(data.train_ds), len(data.valid_ds) ``` ## Train model ``` learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.lr_find() # If the plot is not showing try to give a start and end learning rate # learn.lr_find(start_lr=1e-5, end_lr=1e-1) learn.recorder.plot() learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4)) learn.save('stage-2') ``` ## Interpretation ``` learn.load('stage-2'); interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix() ``` ## Cleaning Up Some of our top losses aren't due to bad performance by our model. There are images in our data set that shouldn't be. Using the `ImageCleaner` widget from `fastai.widgets` we can prune our top losses, removing photos that don't belong. ``` from fastai.widgets import * ``` First we need to get the file paths from our top_losses. We can do this with `.from_toplosses`. We then feed the top losses indexes and corresponding dataset to `ImageCleaner`. 
Notice that the widget will not delete images directly from disk but it will create a new csv file `cleaned.csv` from where you can create a new ImageDataBunch with the corrected labels to continue training your model. In order to clean the entire set of images, we need to create a new dataset without the split. The video lecture demonstrated the use of the `ds_type` param which no longer has any effect. See [the thread](https://forums.fast.ai/t/duplicate-widget/30975/10) for more details. ``` db = (ImageList.from_folder(path) .split_none() .label_from_folder() .transform(get_transforms(), size=224) .databunch() ) # If you already cleaned your data using indexes from `from_toplosses`, # run this cell instead of the one before to proceed with removing duplicates. # Otherwise all the results of the previous step would be overwritten by # the new run of `ImageCleaner`. # db = (ImageList.from_csv(path, 'cleaned.csv', folder='.') # .split_none() # .label_from_df() # .transform(get_transforms(), size=224) # .databunch() # ) ``` Then we create a new learner to use our new databunch with all the images. ``` learn_cln = cnn_learner(db, models.resnet34, metrics=error_rate) learn_cln.load('stage-2'); ds, idxs = DatasetFormatter().from_toplosses(learn_cln) ``` Make sure you're running this notebook in Jupyter Notebook, not Jupyter Lab. That is accessible via [/tree](/tree), not [/lab](/lab). Running the `ImageCleaner` widget in Jupyter Lab is [not currently supported](https://github.com/fastai/fastai/issues/1539). ``` # Don't run this in google colab or any other instances running jupyter lab. # If you do run this on Jupyter Lab, you need to restart your runtime and # runtime state including all local variables will be lost. ImageCleaner(ds, idxs, path) ``` If the code above does not show any GUI (contains images and buttons) rendered by widgets but only text output, that may be caused by a configuration problem of ipywidgets.
Try the solution in this [link](https://github.com/fastai/fastai/issues/1539#issuecomment-505999861) to solve it. Flag photos for deletion by clicking 'Delete'. Then click 'Next Batch' to delete flagged photos and keep the rest in that row. `ImageCleaner` will show you a new row of images until there are no more to show. In this case, the widget will show you images until there are none left from `top_losses.ImageCleaner(ds, idxs)` You can also find duplicates in your dataset and delete them! To do this, you need to run `.from_similars` to get the potential duplicates' ids and then run `ImageCleaner` with `duplicates=True`. The API works in a similar way as with misclassified images: just choose the ones you want to delete and click 'Next Batch' until there are no more images left. Make sure to recreate the databunch and `learn_cln` from the `cleaned.csv` file. Otherwise the file would be overwritten from scratch, losing all the results from cleaning the data from toplosses. ``` ds, idxs = DatasetFormatter().from_similars(learn_cln) ImageCleaner(ds, idxs, path, duplicates=True) ``` Remember to recreate your ImageDataBunch from your `cleaned.csv` to include the changes you made in your data! ## Putting your model in production First thing first, let's export the content of our `Learner` object for production: ``` learn.export() ``` This will create a file named 'export.pkl' in the directory where we were working that contains everything we need to deploy our model (the model, the weights but also some metadata like the classes or the transforms/normalization used). You probably want to use CPU for inference, except at massive scale (and you almost certainly don't need to train in real-time). If you don't have a GPU that happens automatically. 
You can test your model on CPU like so: ``` defaults.device = torch.device('cpu') img = open_image(path/'black'/'00000021.jpg') img ``` We create our `Learner` in production enviromnent like this, just make sure that `path` contains the file 'export.pkl' from before. ``` learn = load_learner(path) pred_class,pred_idx,outputs = learn.predict(img) pred_class ``` So you might create a route something like this ([thanks](https://github.com/simonw/cougar-or-not) to Simon Willison for the structure of this code): ```python @app.route("/classify-url", methods=["GET"]) async def classify_url(request): bytes = await get_bytes(request.query_params["url"]) img = open_image(BytesIO(bytes)) _,_,losses = learner.predict(img) return JSONResponse({ "predictions": sorted( zip(cat_learner.data.classes, map(float, losses)), key=lambda p: p[1], reverse=True ) }) ``` (This example is for the [Starlette](https://www.starlette.io/) web app toolkit.) ## Things that can go wrong - Most of the time things will train fine with the defaults - There's not much you really need to tune (despite what you've heard!) - Most likely are - Learning rate - Number of epochs ### Learning rate (LR) too high ``` learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(1, max_lr=0.5) ``` ### Learning rate (LR) too low ``` learn = cnn_learner(data, models.resnet34, metrics=error_rate) ``` Previously we had this result: ``` Total time: 00:57 epoch train_loss valid_loss error_rate 1 1.030236 0.179226 0.028369 (00:14) 2 0.561508 0.055464 0.014184 (00:13) 3 0.396103 0.053801 0.014184 (00:13) 4 0.316883 0.050197 0.021277 (00:15) ``` ``` learn.fit_one_cycle(5, max_lr=1e-5) learn.recorder.plot_losses() ``` As well as taking a really long time, it's getting too many looks at each image, so may overfit. 
### Too few epochs ``` learn = cnn_learner(data, models.resnet34, metrics=error_rate, pretrained=False) learn.fit_one_cycle(1) ``` ### Too many epochs ``` np.random.seed(42) data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.9, bs=32, ds_tfms=get_transforms(do_flip=False, max_rotate=0, max_zoom=1, max_lighting=0, max_warp=0 ),size=224, num_workers=4).normalize(imagenet_stats) learn = cnn_learner(data, models.resnet50, metrics=error_rate, ps=0, wd=0) learn.unfreeze() learn.fit_one_cycle(40, slice(1e-6,1e-4)) ```
github_jupyter
``` import numpy as np import mxnet as mx from mxnet import nd, autograd, gluon ``` ![image.png](../assets/data_prep_wf.png) # Raw Data - FER2013 * https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data * 48 X 48 gray scale images * 28,709 training samples * 3,589 validation data * 3,589 test data * 7 emotion labels (0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral) * ~60-65% human accuracy in labelling ![image.png](../assets/test_raw_data.png) # Data Preparation is Critical * State of the Art accuracy using a Convolutional Neural Network (CNN) model directly on this raw data is **63%** [1]. * State of the Art accuracy using a slightly modified Convolutional Neural Network (CNN) model after processing the data is **83%** [2]. # Processed Data - FER+ Follow the instructions listed in the README file - https://github.com/TalkAI/facial-emotion-recognition-gluon#before-you-start * FER+ has new corrected labels * FER+ has 8 emotions - (0: 'neutral', 1: 'happiness', 2: 'surprise', 3: 'sadness', 4: 'anger', 5: 'disgust', 6: 'fear',7: 'contempt') * Image augmentations: * Crop faces in the images – bounding box in the FER+ dataset * Scale image size from 48 X 48 -> 64 X 64 * Shift image * Flip image * Rotate (angle) image * Normalize the pixels in the image | Emotion | Train | Val | Test | |---|---|---|---| | neutral |8733 | 1180 | 1083 | | happiness | 7284 | 862 | 892 | | surprise | 3136 | 411 | 394 | | sadness | 3022 | 348 | 382 | | anger | 2098 | 289 | 269 | | disgust | 116 | 25 | 16 | | fear | 536 | 60 | 86 | | comtempt | 120 | 16 | 15 | ``` # 8 Emotions we want to recognize emotion_table = {0: 'neutral', 1: 'happiness', 2: 'surprise', 3: 'sadness', 4: 'anger', 5: 'disgust', 6: 'fear', 7: 'contempt'} processed_train_images = np.load('../data/fer_train_processed_images.npy') processed_train_labels = np.load('../data/fer_train_processed_labels.npy') print(processed_train_images.shape, 
processed_train_labels.shape) from matplotlib import pyplot as plt plt.imshow(processed_train_images[987].reshape(64,64), cmap='gray') processed_test_images = np.load('../data/fer_test_processed_images.npy') processed_test_labels = np.load('../data/fer_test_processed_labels.npy') processed_val_images = np.load('../data/fer_val_processed_images.npy') processed_val_labels = np.load('../data/fer_val_processed_labels.npy') print(processed_test_images.shape, processed_test_labels.shape) print(processed_val_images.shape, processed_val_labels.shape) # Sets context to GPU, if available. Otherwise sets to CPU. # Note that training on one GPU takes approx. 1.1 minute per epoch, and may take longer on CPU ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu() ``` ![image.png](../assets/model_training_prep_wf.png) # Step 1 – Construct the Neural Network ### 13 layer VGGNet presented in the paper [2] ![image.png](../assets/network.png) ``` # We use HybridSequential network type to able to save the trained model as symbols and params. 
# More Info - https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html net = gluon.nn.HybridSequential() # Construct 13 layer VGGNet suggested in the paper with net.name_scope(): net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2)) net.add(gluon.nn.Dropout(0.25)) net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2)) net.add(gluon.nn.Dropout(0.25)) net.add(gluon.nn.Conv2D(channels=256, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.Conv2D(channels=256, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.Conv2D(channels=256, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2)) net.add(gluon.nn.Dropout(0.25)) net.add(gluon.nn.Conv2D(channels=256, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.Conv2D(channels=256, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.Conv2D(channels=256, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2)) net.add(gluon.nn.Dropout(0.25)) net.add(gluon.nn.Flatten()) net.add(gluon.nn.Dense(1024, activation='relu')) net.add(gluon.nn.Dropout(0.5)) net.add(gluon.nn.Dense(1024, activation='relu')) net.add(gluon.nn.Dropout(0.5)) net.add(gluon.nn.Dense(8)) # We Hybridize the HybridSequential network to able to save the trained model as symbols and params. 
# More Info - https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html net.hybridize() ``` # Step 2 – Initialize the parameters in Neural Network ``` net.collect_params().initialize(mx.init.Xavier(), ctx=ctx) nd.waitall() # Use MXBOARD here to visualize network x = mx.sym.var('data') sym = net(x) mx.viz.plot_network(sym) ``` # Step 3 – Prepare the Trainer with optimizer ``` softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() batch_size = 32 trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.0025, 'momentum': 0.9}) ``` # Step 4 – Prepare the model evaluation strategy ``` def evaluate_accuracy(data_iterator, net): acc = mx.metric.Accuracy() for i, (data, label) in enumerate(data_iterator): data = data.as_in_context(ctx) label = label.as_in_context(ctx) output = net(data) predictions = nd.argmax(output, axis=1) acc.update(preds=predictions, labels=label) return acc.get()[1] ``` # Step 5 – Prepare data loaders ``` train_labels = np.argmax(processed_train_labels, axis=1) val_labels = np.argmax(processed_val_labels, axis=1) train_data = gluon.data.DataLoader(gluon.data.ArrayDataset(processed_train_images, train_labels), batch_size = batch_size, shuffle=True) val_data = gluon.data.DataLoader(gluon.data.ArrayDataset(processed_val_images, val_labels), batch_size = batch_size) ``` # Step 6 – Train the Neural Network ``` epochs = 25 train_accuracies = [] losses = [] val_accuracies = [] for e in range(epochs): batch = 0 for i, (data, label) in enumerate(train_data): data = data.as_in_context(ctx) label = label.as_in_context(ctx) with autograd.record(): output = net(data) loss = softmax_cross_entropy(output, label) loss.backward() trainer.step(data.shape[0]) curr_loss = nd.mean(loss).asscalar() batch +=1 val_accuracy = evaluate_accuracy(val_data, net) train_accuracy = evaluate_accuracy(train_data, net) losses.append(curr_loss) train_accuracies.append(train_accuracy) val_accuracies.append(val_accuracy) print("Epoch %s. 
Loss: %s, Train_acc %s, Val_acc %s" % (e, curr_loss, train_accuracy, val_accuracy)) ``` # Step 7 - Evaluate on Test Data ``` # Test accuracy acc = mx.metric.Accuracy() test_labels = np.argmax(processed_test_labels, axis=1) data_iterator = gluon.data.DataLoader(gluon.data.ArrayDataset(processed_test_images, test_labels), batch_size = 32) for i, (data, label) in enumerate(data_iterator): data = data.as_in_context(ctx) label = label.as_in_context(ctx) output = net(data) predictions = nd.argmax(output, axis=1) acc.update(preds=predictions, labels=label) print("Test Accuracy - ", acc.get()[1]) # for plotting purposes %matplotlib inline import matplotlib import matplotlib.pyplot as plt epochs = range(len(train_accuracies)) f = plt.figure(figsize=(12,6)) fg1 = f.add_subplot(121) fg2 = f.add_subplot(122) fg1.set_xlabel('epoch',fontsize=14) fg1.set_title('Loss over Training') fg1.grid(True, which="both") fg1.plot(epochs, losses) fg2.set_title('Comparing accuracy') fg2.set_xlabel('epoch', fontsize=14) fg2.grid(True, which="both") p1, = fg2.plot(epochs, train_accuracies) p2, = fg2.plot(epochs, val_accuracies) fg2.legend([p1, p2], ['training accuracy', 'validation accuracy'],fontsize=14) # Example Inference idx = 98 plt.imshow(processed_test_images[idx].reshape(64,64), cmap='gray') print("Actual Emotion - ", emotion_table[test_labels[idx]]) # Perform Inference output = net(mx.nd.array(processed_test_images[idx].reshape(1,1,64,64)).as_in_context(ctx)) print("Predicted Emotion - ", emotion_table[nd.argmax(output, axis=1).asnumpy()[0]]) ``` ![image.png](../assets/inference_wf.png) # Step 8 - Export the model for Production ``` # Export the model for production deployment. 
# There will be 2 files exported: # 1) gluon_ferplus-symbol.json => Contains the network definition # 2) gluon_ferplus-0000.params => Contains the weights in the network net.export('fer') ``` # Step 9 - Install pre-requisites for model serving We will need the PyPi packages listed below to test model server locally, and to perform image pre-processing prior to the model inference. ``` !pip install mxnet-model-server !pip install model-archiver !pip install scikit-image !pip install opencv-python ``` # Step 10 - Export a model archive We will now prepare the model archive, which encapsulates all of the resources required by MMS for serving the model. To learn more about the model archive, visit [MMS Export docs](https://github.com/awslabs/mxnet-model-server/blob/master/docs/export.md). ``` # As a first step, we will download the pre-trained model. # You can skip this step if you have just trained your model, but then you will need to copy the model files into ferplus directory !mkdir fer %cd fer !curl -O https://s3.amazonaws.com/mxnet-demo-models/models/fer/fer-0000.params !curl -O https://s3.amazonaws.com/mxnet-demo-models/models/fer/fer-symbol.json # Now let's pull in and review the other resources needed for the model archive %cp ../../model_archive_resources/* . # We define the model's input and output type and shape via signature.json !cat signature.json # We define the model's class label names via synset.txt !cat synset.txt # And lastly, we define custom code for request handling via python code other auxiliary files !cat fer_service.py # Let's package everything up into a Model Archive bundle % cd .. !model-archiver --model-name fer --model-path ./fer --handler fer_service:handle !ls -l fer.mar ``` # Step 11 - Serving the model archive with MXNet Model Server Now that we have the model archive ready, we can start the server and ask it to setup HTTP endpoints to serve the model, emit real-time operational metrics and more. 
To learn more about serving, check out the [MMS Serving docs](https://github.com/awslabs/mxnet-model-server/blob/master/docs/server.md). To start the server and load the FER model on startup, go to the console and run: ` $ mxnet-model-server --models fer=fer.mar --model-store . ` ``` # Check out the health endpoint to make sure model server is running !curl http://127.0.0.1:8080/ping # Call MMS management API to see list of loaded models !curl http://localhost:8081/models # Query the loaded FER model !curl http://localhost:8081/models/fer ``` Now let's make a prediction request with a test image: ![Neutral](../assets/neutral-sad.png) ``` !curl -X POST http://127.0.0.1:8080/fer/predict -T "../assets/neutral-sad.png" ``` Lastly, to stop the server we will go to the console and run: ` $ mxnet-model-server --stop ` # References 1. I. J. Goodfellow, D. Erhan, P. L. Carrier, A. Courville, M. Mirza, B. Hamner, W. Cukierski, Y. Tang, D. Thaler, D.-H. Lee, et al. Challenges in representation learning: A report on three machine learning contests. In Neural information processing, pages 117–124. Springer, 2013 2. Training Deep Networks for Facial Expression Recognition with Crowd-Sourced Label Distribution Emad Barsoum et. al. https://arxiv.org/abs/1608.01041
github_jupyter
``` %load_ext autoreload %autoreload 2 import sys sys.path.append('/home/vera0519/vera_911') from src.cities.new_orleans import NewOrleans from src.cities.seattle import Seattle from src.cities.dallas import Dallas from src.cities.detroit import Detroit from src.cities.charleston import Charleston import pandas as pd pd.set_option('display.max_rows', 999) pd.set_option('display.max_columns', 500) %load_ext autoreload %autoreload 2 import sys sys.path.append('/home/vera0519/vera_911') import pandas as pd # import cenpy import src.features.call_types as call_types from src.cities.new_orleans import NewOrleans from src.cities.seattle import Seattle from src.cities.dallas import Dallas from src.cities.detroit import Detroit from src.cities.charleston import Charleston import matplotlib.pyplot as plt import src.features.geo as Geo from src.features.call_types import load_call_mappings, assign_disposition, process import src.visualization.visualize as vis %matplotlib inline import pandas as pd import geopandas as gp import matplotlib.pyplot as plt import seaborn as sns import math #from ..utils import VIS_DIR import numpy as np np.set_printoptions(threshold=np.inf) def plot_call_volme_by_CFS_by_self_initiated(city, call_type=None, ax=None): try: if(not ax): fig = plt.figure() ax = fig.add_subplot(111) if (call_type == None) : data = city.clean_data().groupby(['year', 'self_initiated']).count().reset_index() data[['year']] = data[['year']].astype(int) data = data.pivot_table(index='year', columns='self_initiated', values='index') else : data = city.clean_data().groupby(['year', 'self_initiated', 'call_type']).count().reset_index() data[['year']] = data[['year']].astype(int) data = data.pivot_table(index=['call_type', 'year'], columns='self_initiated', values='index').loc[call_type] if (city.name() == 'Charleston' or city.name() == 'Dallas') : data = data.assign(unknown = lambda x : x.unknown/1000) data.plot(kind='barh', stacked=True, label='Self Initiated',ax=ax, 
colors=['#D1D4C9']) ax.legend(['N/A'],loc='lower right') if (city.name() == 'NewOrleans') : data = data.assign(No = lambda x : x.No/1000, Yes = lambda x : x.Yes/1000, other = lambda x : x.other/1000 ) data.plot(kind='barh', stacked=True, label='Self Initiated',ax=ax, colors=['#159BA3','#F9BA16','#D1D4C9']) ax.legend(['Non self initiated', 'Self initiated','N/A'],loc='lower left') if (city.name() == 'Detroit' or city.name() == 'Seattle') : data = data.assign(No = lambda x : x.No/1000, Yes = lambda x : x.Yes/1000 ) data.plot(kind='barh', stacked=True, label='Self Initiated',ax=ax, colors=['#159BA3','#F9BA16','#D1D4C9']) ax.legend(['Non self initiated', 'Self initiated','N/A'],loc='lower right') ax.set_xlabel('Number of Calls (thousands)') ax.set_ylabel('') ax.set_title('Number of calls - '+ ('all CFS types' if call_type == None else call_type)) except: show_no_self_initiated_error(ax) return ax CFS_codes = [None, 'Accidents/Traffic Safety', 'Alarms', 'Assisting the Public', 'Behavioral Health', 'Call Related Issues', 'Callback', 'Complaints/Environmental Conditions', 'Domestic Violence', 'Drugs', 'Fire', 'Liquor Violations', 'Medical Emergencies', 'Missing Persons', 'Officer Needs Help', 'Other (Not Crime)', 'Other Crimes', 'Property Crime', 'Sex Offenses', 'Status Offenses', 'Statuses', 'Suspicion', 'Training Academy', 'Violent Crime', 'Warrants', 'other'] city = Detroit() plot_call_volme_by_CFS_by_self_initiated(city, call_type = CFS_codes[0]) %load_ext autoreload %autoreload 2 import geopandas as gp import pandas as pd pd.set_option('display.max_rows', 999) pd.set_option('display.max_columns', 500) from tqdm import tqdm import logging from pathlib import Path from dotenv import find_dotenv, load_dotenv #from geo_reference import get_city_dir, load_calls_for_city, generate_points_city,rename_positional_columns,assign_point_to_census_tract,load_tracts_for_city #from shapely.geometry import Point SeattleCalls = load_calls_for_city("Seattle") callCounts = 
SeattleCalls.groupby('Initial Call Type').agg('count').reset_index() callCounts.sort_values('index', ascending = False)[['Initial Call Type', 'index']] SeattleCalls = load_calls_for_city("Seattle") callCounts = SeattleCalls.groupby('Final Call Type').agg('count').reset_index() callCounts.sort_values('index', ascending = False)[['Final Call Type', 'index']] calls = load_calls_for_city("Detroit") calls.head() callCounts = calls.groupby(['Call Description', 'Category']).agg('count').reset_index() callCounts.sort_values('index', ascending = False)[['Call Description', 'Category', 'index']] calls = load_calls_for_city("Dallas") calls.head() callCounts = calls.groupby(['Call (911) Problem']).agg('count').reset_index() callCounts.sort_values('index', ascending = False)[['Call (911) Problem', 'index']] calls = load_calls_for_city("Dallas") calls.head() callCounts = calls.groupby(['Type of Incident']).agg('count').reset_index() callCounts.sort_values('index', ascending = False)[['Type of Incident', 'index']] calls = load_calls_for_city("NewOrleans") calls.head() ##callCounts = calls.groupby(['Call (911) Problem']).agg('count').reset_index() ##callCounts.sort_values('index', ascending = False)[['Call (911) Problem', 'index']] calls = load_calls_for_city("NewOrleans") calls.head() callCounts = calls.groupby(['TypeText']).agg('count').reset_index() callCounts.sort_values('index', ascending = False)[['TypeText', 'index']] calls = load_calls_for_city("Seattle") calls.head() #callCounts = calls.groupby(['Event Clearance Description']).agg('count').reset_index() #callCounts.sort_values('index', ascending = False)[['Event Clearance Description', 'index']] callCounts = calls.groupby(['Call Type']).agg('count').reset_index() callCounts.sort_values('index', ascending = False)[['Call Type', 'index']] calls = load_calls_for_city("Dallas") calls.head() #callCounts = calls.groupby(['Offense Status']).agg('count').reset_index() #callCounts.sort_values('index', ascending = False)[['Offense 
Status', 'index']] callCounts = calls.groupby(['UCR Disposition']).agg('count').reset_index() callCounts.sort_values('index', ascending = False)[['UCR Disposition', 'index']] ##callCounts = calls.groupby(['Call Type']).agg('count').reset_index() ##callCounts.sort_values('index', ascending = False)[['Call Type', 'index']] calls = load_calls_for_city("Detroit") calls.head() #callCounts = calls.groupby(['Officer Initiated']).agg('count').reset_index() #callCounts.sort_values('index', ascending = False)[['Officer Initiated', 'index']] calls = load_calls_for_city("NewOrleans") calls.head() #callCounts = calls.groupby(['SelfInitiated']).agg('count').reset_index() #callCounts.sort_values('index', ascending = False)[['SelfInitiated', 'index']] #callCounts = calls.groupby(['DispositionText']).agg('count').reset_index() #callCounts.sort_values('index', ascending = False)[['DispositionText', 'index']] ```
github_jupyter
# Notebook for some basic data analysis for the Replica Dataset ``` replica_root = '/datasets01/replica/061819/18_scenes' import os import json scenes = os.listdir(replica_root) scenes = ['apartment_0', 'office_2', 'room_0', 'frl_apartment_5', # 'office_0', 'frl_apartment_4', 'frl_apartment_1', 'frl_apartment_0'] import matplotlib.pyplot as plt %matplotlib inline plt.rcParams["figure.figsize"] = (20,3) # exclude categories - wall, wall-plug, ceiling, floor exc = ['blinds', 'wall', 'wall-plug', 'ceiling', 'floor', 'undefined', 'other-leaf', 'anonymize_picture', 'anonymize_text', 'wall-cabinet', 'base_cabinet'] # exc = [] # oc = {} cur = None import pandas as pd for x in scenes: print(f"\nScene {x}") habitat_semantic_json = os.path.join(replica_root, x, 'habitat', 'info_semantic.json') print(habitat_semantic_json) with open(habitat_semantic_json, "r") as f: habitat_semantic_data = json.load(f) # print(type(habitat_semantic_data)) # save id_to_category cats = {} for obj_cls in habitat_semantic_data["classes"]: cats[obj_cls["id"]] = obj_cls["name"] # cats.append({"id": obj_cls["id"], "name": obj_cls["name"], "supercategory": "shape"}) # if obj_cls['name'] in ('floor', 'wall', 'ceiling', 'wall-plug'): # print(obj_cls['id'], obj_cls['name']) if cur is None: cur = cats df = pd.DataFrame(index=cur.values(), columns=scenes) df = df.fillna(0) # with 0s rather than NaNs else: if cur != cats: print("Unequal Categories") def count_objects(h): c = {} for x in h['objects']: cid = x['class_name'] if cid not in exc: if cid in c: c[cid] += 1 else: c[cid] = 1 c = {k:v for k,v in sorted(c.items(), key=lambda kv: kv[1], reverse=True) if v > 0} print(c) plt.figure(figsize=(20,3), dpi=100) plt.bar(c.keys(), c.values(), color='green') plt.xticks(rotation = 90) plt.show() return c c = count_objects(habitat_semantic_data) # update overall count for k, v in c.items(): df.at[k, x] = v # if k in oc: # oc[k].append({x: v}) # else: # oc[k] = [{x:v}] # # break print(oc) # for each train test 
split, find the intersection of objects (take the instace count ) # find objects in each scene # for each row pick label if it has a minimum count in each scene minv = 1 ql = [] for index, row in df.iterrows(): q = True for ix, val in row.items(): # print(index, ix, val) if val < minv: q = False if q: ql.append(index) print(len(ql), ql) cur ```
github_jupyter
# Python Regular Expression Tutorial > Chung-Yu Shao, cshao@andrew.cmu.edu ## Introduction This tutorial was the summary for my experience to understand the famous **regular expression.** Although we've already had homework 1 which includes tasks related to regular expression, it's a very useful but forgettable tools that we, as a software engineer or researcher in the computer related fileds, should sharpen up on. It often looks scary as the first glance, hope we can learn regular expression better together through this tutorial. In this tutorial, I will mainly cover: * (1) What is regular expression, what functionality do python `re` suport. * (2) The `Match` object: what we get from calling the `re` find-like operations. * (3) The `Pattern` object: mainly, how do we write the pattern * (4) Advanced discussion * 4.1 `\b` and `\B` * 4.2 Raw string notation * 4.3 The "greedy" match concept * 4.4 The extension notation `(?...)` ## [What is regular expression?](https://en.wikipedia.org/wiki/Regular_expression) > a regular expression (sometimes called a rational expression) is a sequence of characters that define a search pattern, mainly for use in pattern matching with strings, or string matching, i.e. **"find and replace"-like operations**. So, what kind of find and replace"-like operations python `re` module support? * `re.search(pattern, string, flags=0)` * Checks for a match **anywhere in the string** * `re.match(pattern, string, flags=0)` * Checks for a match only **at the beginning of the string** * `re.split(pattern, string, maxsplit=0, flags=0)` * Split string by the occurrences of pattern. * `re.sub(pattern, repl, string, count=0, flags=0)` * Return the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl. * `re.findall(pattern, string, flags=0)`, `re.finditer(pattern, string, flags=0)` Let's see a quick example! 
``` import re string = "abcdba" abMatch = re.match(r"ab", string) noMatch = re.match(r"cd", string) cdMatch = re.search(r"cd", string) print "ab match:{}\nno match: {}\ncd match:{}".format(abMatch, noMatch, cdMatch) print "----" pattern = re.compile("ab") abCompileMatch = pattern.match(string) print "pattern:{}\nab compile match:{}".format(pattern, abCompileMatch) print "----" print "type of r\'ab\':{}\ntype of `pattern`: {}".format(type(r"ab"), type(pattern)) ``` So, what we can observe from the above snippet? (1) The difference between `re.match()` and `re.search()` is that `re.match()` only search from the beginning, whereas `re.search()` find the pattern in the target string!. (2) There are Mainly two types of objects we will use: `_sre.SRE_Match` and `_sre.SRE_Pattern`, which we will discuss in the later sections. (3) Mainly two styles to use `pattern` as below, which we will discuss later in the advanced discussion: Raw string notation. ```python abMatch = re.match(r"ab", string) # or pattern = re.compile("ab") abCompileMatch = pattern.match(string) ``` --- There are a lot of to discuss for the `pattern` object, especially the rules to build a pattern. So let's start from the `_sre.SRE_Match` object ## `Match` object Match object is **what we got from the provided find-like operations**. So, what would the user like to have as a result of a "find and replace"-like operations? It might be something like **The string(s) that matche the pattern(s)** and **The index(es) of each matched substring**! That's what `match` provide! That's take a deeper look of these two parts. Noted that there is an imporant concept call **group(s)** in the regular expression. Group means that by using `()` to "group" the pattern you want, you can get the group(s) of string you want to match, as well as the corresponding indexes! Formally speaking, we can concate different subpattern using parenthesis to a large pattern to match a string. 
Each sub-pattern match is returned in a different *group*, with the access function calls shown in the snippet below.
Most characters are **"ordinary characters"**, which **simply match themselves**. However, there are **special characters** which either * (1) stand for classes of ordinary characters, or * (2) affect how the regular expressions around them are interpreted. Among them, I mainly categorized the functionalities as * (1) setting anchor * (2) repetitions * (3) building logic
* `'+'`: Match 1 or more repetitions of the preceding RE
* `'?'`: Match 0 or 1 repetitions of the preceding RE
* `'{m}'`: Specifies that exactly m copies of the previous RE should be matched
* `'{m,n}'`: Causes the resulting RE to match from m to n (inclusive) repetitions of the preceding RE

#### Building Logic
* `'|'`: `A|B`, where A and B can be arbitrary REs, creates a regular expression that will match either A or B.

---
With the above definitions in mind, let's practice an easy but well known example.

### U.S. Phone number matching

#### (1) Suppose the phone number will only have numbers, ex: "6503352800"

```
print re.match(r"\d{10}", "6503352800").group() # use `\d` and {m} to specify m copies; a U.S. number has 10 digits, so \d{10} (with \d{9} only the first 9 digits would be matched)
```

#### (2) How about there might be `'-'` between the third, fourth number or sixth, seventh number? Ex: "650-335-2800", "650-3352800", "6503352800", "650335-2800"

```
print re.match(r"\d{3}-?\d{3}-?\d{4}", "6503352800").group()
print re.match(r"\d{3}-?\d{3}-?\d{4}", "650-335-2800").group()
print re.match(r"\d{3}-?\d{3}-?\d{4}", "650-3352800").group()
print re.match(r"\d{3}-?\d{3}-?\d{4}", "650335-2800").group()
```

#### (3) Wait, we do have "650-335-2800", "650-3352800", "6503352800", but I have never seen "650335-2800"?

```
print re.match(r"\d{3}-\d{3}-?\d{4}|\d{10}", "650335-2800")
print re.match(r"\d{3}-\d{3}-?\d{4}|\d{10}", "6503352800").group()
print re.match(r"\d{3}-\d{3}-?\d{4}|\d{10}", "650-3352800").group()
print re.match(r"\d{3}-\d{3}-?\d{4}|\d{10}", "650-335-2800").group()
```

#### (4) How about there might be parentheses around the first three characters instead of the first hyphen? Ex: "(650)3352800", "(650)335-2800", "650-335-2800", "650-3352800", "6503352800" will all work, but not the other case!

```
print re.match(r"\(\d{3}\)\d{3}-?\d{4}|\d{3}-\d{3}-?\d{4}|\d{10}", "650335-2800")
print re.match(r"\(\d{3}\)\d{3}-?\d{4}|\d{3}-\d{3}-?\d{4}|\d{10}", "(650)3352800").group()
print re.match(r"\(\d{3}\)\d{3}-?\d{4}|\d{3}-\d{3}-?\d{4}|\d{10}", "(650)335-2800").group()
print re.match(r"\(\d{3}\)\d{3}-?\d{4}|\d{3}-\d{3}-?\d{4}|\d{10}", "650-335-2800").group()
print re.match(r"\(\d{3}\)\d{3}-?\d{4}|\d{3}-\d{3}-?\d{4}|\d{10}", "650-3352800").group()
print re.match(r"\(\d{3}\)\d{3}-?\d{4}|\d{3}-\d{3}-?\d{4}|\d{10}", "6503352800").group()
```

Cool! We've used the simple concepts like `\d`, the character set that represents digits `[0-9]`, the repetition expressions `{m}` and `?`, the escape backslash to make special characters normal `\(\)`, and the OR `|` to build the matching logic. However, there are still lots of topics to be explored. The following are the parts that I found useful but confusing at first glance.

---

### Advanced discussion 1: `\b` and `\B`

#### `\b`
The usage of `\b` and `\B` is important! But it's not that intuitive on a first read of the documentation. You need to understand that these two will **match with zero length.**

> `\b` matches the empty string, but only at the beginning or end of a word; formally, `\b` is defined as the boundary between a `\w` (`[a-zA-Z0-9_]`) and a `\W` character (or vice versa), or between \w and the beginning/end of the string.

---

> `\B` matches the empty string, but only when it is not at the beginning or end of a word, which means `\B` matches at every position where `\b` does not. Effectively, `\B` matches at **any position between two word characters** as well as at any position between two non-word characters.

Reading the example and comments below will help you out!

```
# \b example
print re.search(r"\bfoo\b", "foo").group() #\w and the beginning/end of the string
print re.search(r"\bfoo\b", "foo.").group() #`.` is in the \W set!
print re.search(r"\bfoo\b", "(foo)").group() # `(` and `)` is in the \W set!
print re.search(r"\bfoo\b", "bar foo baz").group() # ` ` is in the \W set!
print re.search(r"\bfoo\b", "foobar") # no match: 'b' is a \w character, so there is no word boundary right after "foo"
print re.search(r"\bfoo\b", "foo3") # no match: '3' is a \w character, so there is no word boundary after "foo"
print "---"

# \B example
print re.search(r"py\B", "python").group() # the empty string was matched between p"yt"hon
print re.search(r"py\B", "py3").group() # the empty string was matched between p"y3"
print re.search(r"py\B", "py") # y's next is the end of the string, so we can't match
print re.search(r"py\B", "py.") # y's next is in \W, so we can't match
print re.search(r"py\B", "py!") # y's next is in \W, so we can't match
```

### Advanced discussion 2: Raw string notation (r"text")

Remember we mentioned there are two styles of function calls:

```python
abMatch = re.match(r"ab", string)
# or
pattern = re.compile("ab")
abCompileMatch = pattern.match(string)
```

So, what's the difference and why do we need the raw string notation (r"text")?

It's actually caused by the **backslash character (`'\'`)**. `'\'` indicates special forms or allows special characters to be treated as normal in a regular expression. However, this conflicts with string literals, where it is simply an escape sign.

In short, to match **a** literal backslash `'\'` in a string, one has to write **"\\\\"** as the RE string, because the regular expression must be \\, and each backslash must be expressed as \\ inside a Python string literal. The `r` at the start of the pattern string designates a python "raw" string which **passes through backslashes without change.**

Take a look at the following examples; note that the first parameter of `re.match` is a pattern string, whereas the second parameter is a plain string.
`"\\"` in a string means the single `"\"` ``` print re.match("\\\\", "\\").group() print re.match(r"\\", "\\").group() print re.match(r"\\", r"\\").group() print re.match("\\\\", r"\\").group() print re.match("\\\\w", "\\w").group() print re.match(r"\\w", "\\w").group() print re.match("\\\\section", "\\section").group() print re.match(r"\\section", "\\section").group() ``` ### Advanced discussion 3: The "greedy" match concept What is the "greedy" means in regular expression? ``` print re.match("ab*", "a").group() print re.match("ab*", "ab").group() print re.match("ab*", "abb").group() print re.match("ab*", "abbbbbbbbbbb").group() ``` From the example, we can see that `'*'` causes the resulting RE to match 0 or more repetitions of the preceding RE, **as many repetitions as are possible**. This is the so-called greedy in regular expression. However, sometimes greedy is not what we want. ``` print "greedy: " + re.match(r"p.*q", "pbq c pdq").group() # what if we only want the <a> to be matched? ``` In the above example, we just want the "pbq", however, the greedy `.*` will give us the whole "pbq c pdq". Followings list the methods/usages to escape from the greedy operations. #### (1) `*?`, `+?`, `??` ``` print "non greedy: " + re.match(r"p.*?q", "pbq c pdq").group() print "---" print "greedy: " + re.match(r"p.+q", "pbq c pdq").group() print "non greedy: " + re.match(r"p.+?q", "pbq c pdq").group() print "---" print "greedy: " + re.match(r"p?q", "pq").group() print "non greedy: " + re.match(r"p??q", "pq").group() print "greedy: " + re.match(r"p?q", "q").group() print "non greedy: " + re.match(r"p??q", "q").group() ``` The last case `??` and `?`, though has same result, it is actually different in how the regular engine runs. For the greedy match of `re.match("p?q", "pq")`, it will actually check "pq" first, then check "q". However, if you use `re.match("p??q", "pq")`, it will check "q" first, then check "pq". This concepts also apply on the `'|'`, OR operator. 
As the target string is scanned, REs separated by `'|'` are tried from left to right. When one pattern completely matches, that branch is accepted. This means that once A matches, B will not be tested further, even if it would produce a longer overall match. Therefore, the document said the **`'|'` operator is never greedy**. #### (2) `{m,n}?` ``` print "greedy: " + re.match(r"a{3,5}", "aaa").group() print "non greedy: " + re.match(r"a{3,5}?", "aaa").group() print "greedy: " + re.match(r"a{3,5}", "aaaaa").group() print "non greedy: " + re.match(r"a{3,5}?", "aaaaa").group() ``` ### Advanced discussion 4: `(?...)`, the extension notation `(?...)` is an extension notation. The **first character after the `'?'` determines** what the meaning and further syntax of the construct is. #### (1) `(?:...)` * A non-capturing version of regular parentheses. ``` print re.match(r"(ab)(cd)(e)", "abcde").group() print re.match(r"(ab)(cd)(e)", "abcde").groups() print re.match(r"(ab)(cd)(e)", "abcde").group() print re.match(r"(ab)(?:cd)(e)", "abcde").groups() ``` #### (2) `(?P<name>...)` and `(?P=name)` `(?P<name>...)` * the substring matched by the group is accessible via the symbolic group name `name`. `(?P=name)` * A backreference to a named group; it matches whatever text was matched by the earlier group named name. 
* There is another way to do the backreference: `\number`, which will be shown below ``` # example 1: set and access the symbolic group name `foo` and `bar` match = re.match(r"(?P<foo>ab)(c)(?P<bar>de)", "abcde") print match.group() print match.groupdict() print match.groupdict()['foo'] print match.groupdict()['bar'] print match.group(1) print match.group(2) print match.group(3) # example 2: use the back reference by symbolic group name print re.match(r"(?P<foo>ab)(c)(?P=foo)", "abcde") print "---" match = re.match(r"(?P<foo>ab)(c)(?P=foo)", "abcab") print match.groupdict() print match.groups() # the later group that been referenced by the symbolic name "foo" will not be shown print match.span(1) # example 3: use the back reference by \number match = re.match(r"(ab)(c)(\1)", "abcab") print match.groups() match = re.match(r"(ab)(c)(\1)(\2)", "abcabc") print match.groups() ``` ## Summary and references I've struggled a lot for the regular expression before, through rearranging, covering and testing the Python `re` documentation with this tutorial, I do gain a lot of confidence on using it. Hope the reader will also find it helpful! * [Python Document: 7.2. re — Regular expression operations](https://docs.python.org/2/library/re.html) * [Regular Expression HOWTO](https://docs.python.org/2/howto/regex.html#regex-howto) * [Regular Expression Info](http://www.regular-expressions.info/)
github_jupyter
<p align="center"> <img src="https://github.com/GeostatsGuy/GeostatsPy/blob/master/TCG_color_logo.png?raw=true" width="220" height="240" /> </p>

## Subsurface Data Analytics

### Bayesian Linear Regression for Subsurface Data Analytics in Python

#### Michael Pyrcz, Associate Professor, University of Texas at Austin

##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)

### PGE 383 Exercise: Bayesian Linear Regression for Subsurface Modeling in Python

Here's a simple workflow demonstrating Bayesian linear regression for subsurface modeling workflows. This should help you get started with building subsurface models with data analytics and machine learning.

Here's some basic details about Bayesian linear regression.

#### Bayesian Updating

The prediction for Bayesian linear regression is distributed:

\begin{equation}
y \sim N(\beta^{T}X, \sigma^{2} I)
\end{equation}

We estimate the model parameter distributions through Bayesian updating, inferring the model parameters from a prior and a likelihood from training data.

\begin{equation}
p(\beta | y, X) = \frac{p(y,X| \beta) p(\beta)}{p(y,X)}
\end{equation}

In general for continuous features, we must use a sampling method, such as Markov chain Monte Carlo, to sample the posterior.

#### Workflow Goals

Learn the basics of Bayesian linear regression in Python for analysis, modeling and prediction of porosity from grain size.
This includes: * Basic Python workflows and data preparation * Training / fitting a ridge regression model * Checking the model and learning about the impact of hyperparameters #### Objective Content to support the PGE 383: Subsurface Machine Learning class. #### Getting Started Here's the steps to get setup in Python with the GeostatsPy package: 1. Install Anaconda 3 on your machine (https://www.anaconda.com/download/). 2. From Anaconda Navigator (within Anaconda3 group), go to the environment tab, click on base (root) green arrow and open a terminal. 3. In the terminal type: pip install geostatspy. 4. Open Jupyter and in the top block get started by copy and pasting the code block below from this Jupyter Notebook to start using the geostatspy functionality. There are examples below with these functions. You can go here to see a list of the available functions, https://git.io/fh4eX, other example workflows and source code. #### Import Required Packages Let's import the GeostatsPy package. ``` import os # to set current working directory import numpy as np # arrays and matrix math import scipy.stats as st # statistical methods import pandas as pd # DataFrames import matplotlib.pyplot as plt # for plotting from sklearn.isotonic import IsotonicRegression from sklearn.model_selection import train_test_split # train and test split from sklearn.metrics import explained_variance_score import seaborn as sns seed = 73073 ``` If you get a package import error, you may have to first install some of these packages. This can usually be accomplished by opening up a command window on Windows and then typing 'python -m pip install [package-name]'. More assistance is available with the respective package docs. We will need to install a couple of packages that are not available in the standard Anaconda 3.7 build. 
* pymc3 - streamlines Markov chain Monte Carlo Sampling * arviz - exploratory analysis of Bayesian models This can be accomplished with 'pip install [package_name]' in a terminal window. ``` import warnings warnings.filterwarnings('ignore') import pymc3 as pm ``` #### Set the working directory I always like to do this so I don't lose files and to simplify subsequent read and writes (avoid including the full address each time). Also, in this case make sure to place the required (see below) data file in this working directory. ``` #os.chdir("d:\PGE383") # set the working directory ``` #### Loading Data Let's load the provided dataset. 'GrainSize_por.csv' is available at https://github.com/GeostatsGuy/GeoDataSets. It is a comma delimited file with 20 grain size ($mm$) and porosity (as a percentage) measures from the subsurface. We load the data file with the pandas 'read_csv' function into a data frame we called 'df'. We preview each with the head function from Pandas DataFrames. ``` #df = pd.read_csv("GrainSize_Por.csv") # read a .csv file in as a DataFrame df = pd.read_csv("https://raw.githubusercontent.com/GeostatsGuy/GeoDataSets/master/GrainSize_Por.csv") df.head() ``` #### Visualize the Data Looks like a reasonable dataset for a linear regression model. ``` X_data = df.iloc[:,0] y_data = df.iloc[:,1] plt.subplot(111) plt.scatter(X_data, y_data, color='black', s = 20, alpha = 0.5, label='sample data') plt.title('Porosity from Grainsize with Training Data'); plt.xlabel('Grain Size (mm)'); plt.ylabel('Porosity (%)') plt.xlim(0,100)#; plt.ylim(0,1500000) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.0, wspace=0.2, hspace=0.2) plt.legend() plt.show() ``` #### Least Squares Linear Regression Let's calculate the ordinary least squares (OLS) linear regression model for comparison. * for a non-informative prior our Bayesian linear regression model parameters will be centered on the OLS solution. 
``` OLS_slope, OLS_intercept, r_value, p_value, std_err = st.linregress(X_data,y_data) print('The model parameters are, slope (b1) = ' + str(round(OLS_slope,2)) + ', and the intercept (b0) = ' + str(round(OLS_intercept,2))) plt.subplot(111) plt.plot(X_data, y_data, 'o', label='sample data', color = 'red', alpha = 0.2, markeredgecolor = 'black') plt.plot(X_data, OLS_intercept + OLS_slope*X_data, label='model', color = 'black') plt.title('Porosity vs Grain Size') plt.xlabel('Grain Size (mm)') plt.ylabel('Porosity (%)') plt.legend() plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.0, wspace=0.2, hspace=0.2) plt.show() ``` The following code was based on the work of Will Koehrsen [blog post](https://towardsdatascience.com/introduction-to-bayesian-linear-regression-e66e60791ea7) and example workflow in [Jupyter Notebook](https://github.com/WillKoehrsen/Data-Analysis/blob/master/bayesian_lr/Bayesian%20Linear%20Regression%20Demonstration.ipynb). https://github.com/WillKoehrsen/Data-Analysis/blob/master/bayesian_lr/Bayesian%20Linear%20Regression%20Demonstration.ipynb #### Bayesian Linear Regression In the following code we take advantage of the pymc3 package's API to build a very readible and compact model. The parts include: * specify the prior models for $\beta_0$, $\beta_1$ and $\sigma^2$. * specify the model estimates * define the distribution of the predictions and the observated predictor features * specify the specific form for the MCMC steps * conduct MCMC sampling Note the default MCMC method in pymc3 is the Hamiltonian Monte Carlo (HMC). 
* HMC reduces correlation between succesive samples by including proposals for moves to distant states, $L$ * No U-Turn Sampler (NUTS) is an extension of HMC that auotmatically tunes L to avoid oscillation ``` with pm.Model() as linear_all_data: # prior distributions, Naive - set to Gaussian with very large variance intercept = pm.Normal('Intercept', mu = 0, sd = 10.0)# model B0 - Intercept Prior slope = pm.Normal('slope', mu = 0, sd = 10.0) # model B1 - Slope Prior sigma = pm.HalfNormal('sigma', sd = 10.0) # homoscedastic error in standard deviation # specify the model, linear regression mean = intercept + slope * X_data # estimate of model # define the distribution of the predictions from the model and provide the sampled response features Y_obs = pm.Normal('Y_obs', mu = mean, sd = sigma, observed = y_data.values[:]) # Observed values # specify the sampler step = pm.NUTS(target_accept = 0.95) # use the No-U-Turn Sampler (NUTS) for Markov Chains Monte Carlo MCMC, addaptive step size # Posterior distribution linear_trace_all_data = pm.sample(draws = 500, tune = 500, step = step, random_seed = seed) #linear_trace_all_data = pm.sample(draws = 500, tune = 500, step = step, random_seed = seed,discard_tuned_samples = False) ``` #### Summary Statistics for the Model Parameter Posterior Samples It is useful to assess the mean, dispersion and credible intervals for the model parameters. 
``` def trace_mean(x): return pd.Series(np.average(x, 0), name='mean') def trace_sd(x): return pd.Series(np.std(x, 0), name='sd') def trace_975(x): return pd.Series(np.percentile(x, 97.5), name='97.5') def trace_025(x): return pd.Series(np.percentile(x, 2.5), name='2.5') # Get the expectation / average for the model parameters from sampling the posterior distribution bayes_exp_intercept = pm.stats.summary(linear_trace_all_data, stat_funcs = [trace_mean]).iloc[0,0] bayes_exp_slope = pm.stats.summary(linear_trace_all_data, stat_funcs = [trace_mean]).iloc[1,0] pm.stats.summary(linear_trace_all_data, stat_funcs = [trace_mean, trace_sd, trace_025, trace_975]) pm.stats.summary(linear_trace_all_data, stat_funcs = [trace_mean]) ``` #### Diagnostic Plots For each of the model parameters, let's look at the results for the 4 Markov chains. * distributions / PDFs based on kernel density estimates (KDE) from the 1000 MCMC states * trace of the samples over each state to observe for burn-in, stuck in local minimums / sufficient variation ``` pm.traceplot(linear_trace_all_data) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=2.0, wspace=0.2, hspace=0.2) plt.show() ``` #### Posterior Distributions We can take our 1000 states from our 4 Markov chains as samples of the posterior distributions of our Bayesian linear regresion model parameters. * once again the distributions are represented as PDFs from kernel density estimation ``` pm.plot_posterior(linear_trace_all_data, credible_interval = 0.95) plt.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.2, hspace=0.2) plt.show() ``` #### Visualize the Credible Intervals for the Model Parameters We can visualize the credible intervals for all of the model parameters * one for each Markov chain ``` pm.forestplot(linear_trace_all_data, credible_interval = 0.95) plt.show() ``` #### Sampling the Bayesian Regression Model We can sample from the resulting uncertainty distributions for the model parameters. 
* we'll sample 1000 times and visualize the reuslting models ``` plt.subplot(111) plt.scatter(X_data, y_data, color='black', s = 20, alpha = 0.5, label='sample data', zorder = 3) pm.plot_posterior_predictive_glm(linear_trace_all_data, samples = 1000, eval=np.linspace(2, 100, 100), linewidth = 1, color = 'red', alpha = 0.02, label = 'Bayesian Posterior Models',lm = lambda x, sample: sample['Intercept'] + sample['slope'] * x, zorder = 1) plt.plot(X_data, bayes_exp_intercept + bayes_exp_slope*X_data, label='OLS Model', color = 'black', zorder= 2) plt.title('Porosity from Grainsize Bayesian Model'); plt.xlabel('Grain Size (mm)'); plt.ylabel('Porosity (%)') plt.xlim(0,100)#; plt.ylim(0,1500000) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.0, wspace=0.2, hspace=0.2) plt.legend() plt.show() ``` #### Posterior Prediction Let's pick a specific grain size and apply the above 1000 samples of the model to sample the Bayes posterior prediction. * note we would still need to add in the noise term, $\sigma$, to the model samples ``` grain_size = 40 nruns = linear_trace_all_data['slope'].shape[0] plt.subplot(121) plt.scatter(X_data, y_data, color='black', s = 20, alpha = 0.5, label='sample data', zorder = 3) pm.plot_posterior_predictive_glm(linear_trace_all_data, samples = 1000, eval=np.linspace(2, 100, 100), linewidth = 1, color = 'red', alpha = 0.02, label = 'Bayesian Posterior Models',lm = lambda x, sample: sample['Intercept'] + sample['slope'] * x, zorder = 1) plt.plot(X_data, OLS_intercept + OLS_slope*X_data, label='model', color = 'black', zorder= 2) plt.vlines(x = grain_size,ymin = 0, ymax = 30, label = 'OLS Prediction',colors = 'black', linestyles='--') plt.title('Porosity from Grainsize Bayesian Model'); plt.xlabel('Grain Size (mm)'); plt.ylabel('Porosity (%)') plt.xlim(0,100)#; plt.ylim(0,1500000) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.0, wspace=0.2, hspace=0.2) plt.legend() plt.subplot(122) model_uncert = 
linear_trace_all_data['Intercept'] + linear_trace_all_data['slope'] * grain_size sns.kdeplot(model_uncert, label = 'Bayes Model Uncertainty', color = 'blue') bayes_prediction = linear_trace_all_data['Intercept'] + linear_trace_all_data['slope'] * grain_size + np.random.normal(loc=0,scale=linear_trace_all_data['sigma'],size=nruns) sns.kdeplot(bayes_prediction, label = 'Bayes Posterior Prediction', color = 'black') plt.vlines(x = OLS_intercept + OLS_slope * grain_size,ymin = 0, ymax = 2.5, label = 'OLS Prediction',colors = 'red', linestyles='--') plt.xlim(0,30),plt.ylim(0,1.0) plt.xlabel('Porosity (%)'), plt.ylabel('Density'); plt.title('Posterior Prediction for Grain Size = ' + str(grain_size)); plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.0, wspace=0.2, hspace=0.2) plt.legend() plt.show() ``` #### Comments This was a basic demonstration of Bayesian linear regression. Motivation to work with Bayesian methods: * work with the model parameter uncertainty distributions * ingrate prior information. I hope this was helpful, *Michael* Michael Pyrcz, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin On twitter I'm the @GeostatsGuy. *** #### More on Michael Pyrcz and the Texas Center for Geostatistics: ### Michael Pyrcz, Associate Professor, University of Texas at Austin *Novel Data Analytics, Geostatistics and Machine Learning Subsurface Solutions* With over 17 years of experience in subsurface consulting, research and development, Michael has returned to academia driven by his passion for teaching and enthusiasm for enhancing engineers' and geoscientists' impact in subsurface resource development. 
For more about Michael check out these links: #### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) #### Want to Work Together? I hope this content is helpful to those that want to learn more about subsurface modeling, data analytics and machine learning. Students and working professionals are welcome to participate. * Want to invite me to visit your company for training, mentoring, project review, workflow design and / or consulting? I'd be happy to drop by and work with you! * Interested in partnering, supporting my graduate student research or my Subsurface Data Analytics and Machine Learning consortium (co-PIs including Profs. Foster, Torres-Verdin and van Oort)? My research combines data analytics, stochastic modeling and machine learning theory with practice to develop novel methods and workflows to add value. We are solving challenging subsurface problems! * I can be reached at mpyrcz@austin.utexas.edu. I'm always happy to discuss, *Michael* Michael Pyrcz, Ph.D., P.Eng. 
Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin #### More Resources Available at: [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)
github_jupyter
``` # IDEA: Add neck to the posture map? from IPython import get_ipython # QT for movable plots %load_ext autoreload %autoreload 2 import time, os, sys, shutil from utils.fitting_utils import * # for math and plotting import pandas as pd import numpy as np import scipy as sp import matplotlib.pyplot as plt # %matplotlib notebook # %matplotlib inline # %matplotlib widget %matplotlib qt from itertools import compress # for list selection with logical from tqdm import tqdm from multiprocessing import Process # ALLSO JIT STUFF from numba import jit, njit # and pytorch import torch import sys, os, pickle # import cv2 from colour import Color import h5py from tqdm import tqdm, tqdm_notebook import glob import itertools from utils.analysis_tools import adjust_spines,cmpl ``` # Load tracked data and plot a frame ``` #load the tracked data! data_folder = 'example_data/tracking/' top_folder_0 = '/media/chrelli/Data0/recording_20200806-171004' top_folder_1 = '/media/chrelli/Data1/recording_20200806-171004' data_folder = top_folder_0 with open(data_folder +'/tracked_behavior.pkl', 'rb') as f: tracked_behavior = pickle.load(f) print(tracked_behavior.keys()) # load ALL the frames as jagged lines with h5py.File(data_folder+'/pre_processed_frames.hdf5', mode='r') as hdf5_file: print(hdf5_file.keys()) print(len(hdf5_file['dataset'])) jagged_lines = hdf5_file['dataset'][...] 
from utils.cuda_tracking_utils import unpack_from_jagged, cheap4d # kill first 6 secs of the frames (delay is ~180) start_frame = 30*60 pos, pos_weights, keyp, pkeyp, ikeyp = unpack_from_jagged(jagged_lines[start_frame]) print(ikeyp) print(pos.shape) cheap4d(pos,keyp,ikeyp) # fig = plt.gcf() # plt.title("N positions is {}".format(pos.shape)) print(tracked_behavior['tracking_holder'].shape) print(tracked_behavior['var']) print(tracked_behavior['ivar']) print(tracked_behavior.keys()) ``` # Plot a raw frame, check that the plotter works ``` from utils.analysis_tools import VideoPlotMachine,PlotMachine,PlotMachine_noimpl # take a list of frames, calculate body supports and plot as a kind of decaying trail, with some decay and lengthm maybe the center of the nose?? plt.close('all') Plotter = PlotMachine_noimpl(tracked_behavior,jagged_lines) #Plotter.kernel_smoothing(9) example_frame = 20*60+2000 n_fine = 5 Plotter.make(example_frame,view_override = [55.0,90.]) ``` # Here we copy the smoothing functions, so they are easy to make plots from ``` sigma_process = .01 sigma_measure = .015 from filterpy.common import kinematic_kf,Q_discrete_white_noise from filterpy.kalman import FixedLagSmoother def kalman1_3D(tr,sigma_process,sigma_measure,dt = 1/60): # make first order kinematic kalman filter cv = kinematic_kf(dim=3, order=1, dt = dt) cv.R = np.eye(3) * sigma_measure**2 G = np.array([[0.5 * dt**2, dt]], np.float32).T Q0 = np.matmul(G, G.T) * sigma_process**2 for i in [0,2,4]: cv.Q[i:(i+2),i:(i+2)] = Q_discrete_white_noise(dim=2, dt=dt, var=sigma_process**2) # cv.Q[i:(i+2),i:(i+2)] = Q0 cv.P = np.ones((cv.dim_x,cv.dim_x))*0.001 +.0001 kalman_estimate = [] # initialize cv.x = np.array([[ tr[0,0],0,tr[0,1],0,tr[0,2] ,0 ]]).T cv.update(tr[i,:]) for i in tqdm(range(tr.shape[0])): cv.predict() cv.update(tr[i,:][:,np.newaxis]) kalman_estimate.append(cv.x) kalman_estimate = np.hstack(kalman_estimate) tr_filtered = kalman_estimate[[0,2,4],:].T return tr_filtered def 
fls1_3d(tr,sigma_process,sigma_measure,dt = 1/60,N_lag = 16): # make first order kinematic kalman filter cv = kinematic_kf(dim=3, order=1, dt = dt) cv.R = np.eye(3) * sigma_measure**2 G = np.array([[0.5 * dt**2, dt]], np.float32).T Q0 = np.matmul(G, G.T) * sigma_process**2 for i in [0,2,4]: cv.Q[i:(i+2),i:(i+2)] = Q_discrete_white_noise(dim=2, dt=dt, var=sigma_process**2) # cv.Q[i:(i+2),i:(i+2)] = Q0 cv.P = np.ones((cv.dim_x,cv.dim_x))*0.001 +.0001 kalman_estimate = [] # initialize cv.x = np.array([[ tr[0,0],0,tr[0,1],0,tr[0,2] ,0 ]]).T # also make an FLS smoother fls = FixedLagSmoother(dim_x=6, dim_z=3, N=N_lag) fls.x = np.copy(cv.x) fls.F = np.copy(cv.F) fls.H = np.copy(cv.H) fls.P = np.copy(cv.P) fls.R = np.copy(cv.R) fls.Q = np.copy(cv.Q) for i in tqdm(range(tr.shape[0])): cv.predict() cv.update(tr[i,:][:,np.newaxis]) fls.smooth(tr[i,:][:,np.newaxis]) kalman_estimate.append(cv.x) kalman_estimate = np.hstack(kalman_estimate) fls_estimate = np.hstack(fls.xSmooth) tr_filtered = kalman_estimate[[0,2,4],:].T tr_smoothed = fls_estimate[[0,2,4],:].T return tr_smoothed def fls2_3d(tr,sigma_process,sigma_measure,dt = 1/60,N_lag = 16): # make second order kinematic kalman filter cv = kinematic_kf(dim=3, order=2, dt = dt) cv.R = np.eye(3) * sigma_measure**2 G = np.array([[0.5 * dt**2, dt]], np.float32).T Q0 = np.matmul(G, G.T) * sigma_process**2 for i in [0,3,6]: cv.Q[i:(i+3),i:(i+3)] = Q_discrete_white_noise(dim=3, dt=dt, var=sigma_process**2) # cv.Q[i:(i+2),i:(i+2)] = Q0 cv.P = np.ones((cv.dim_x,cv.dim_x))*0.001 +.0001 kalman_estimate = [] # initialize cv.x = np.array([[ tr[0,0],0,0,tr[0,1],0,0,tr[0,2],0 ,0 ]]).T # also make an FLS smoother fls = FixedLagSmoother(dim_x=9, dim_z=3, N=N_lag) fls.x = np.copy(cv.x) fls.F = np.copy(cv.F) fls.H = np.copy(cv.H) fls.P = np.copy(cv.P) fls.R = np.copy(cv.R) fls.Q = np.copy(cv.Q) for i in tqdm(range(tr.shape[0])): cv.predict() cv.update(tr[i,:][:,np.newaxis]) fls.smooth(tr[i,:][:,np.newaxis]) kalman_estimate.append(cv.x) 
kalman_estimate = np.hstack(kalman_estimate) fls_estimate = np.hstack(fls.xSmooth) tr_filtered = kalman_estimate[[0,3,6],:].T tr_smoothed = fls_estimate[[0,3,6],:].T return tr_smoothed def kalman_1D(tr,sigma_process,sigma_measure,dt = 1/60): # make first order kinematic kalman filter cv = kinematic_kf(dim=1, order=1, dt = dt) cv.R = np.eye(1) * sigma_measure**2 G = np.array([[0.5 * dt**2, dt]], np.float32).T Q0 = np.matmul(G, G.T) * sigma_process**2 for i in [0]: cv.Q[i:(i+2),i:(i+2)] = Q_discrete_white_noise(dim=2, dt=dt, var=sigma_process**2) # cv.Q[i:(i+2),i:(i+2)] = Q0 cv.P = np.ones((cv.dim_x,cv.dim_x))*0.001 +.0001 kalman_estimate = [] # initialize cv.x = np.array([[ tr[0],0 ]]).T cv.update(tr[i]) for i in tqdm(range(tr.shape[0])): cv.predict() cv.update(tr[i]) kalman_estimate.append(cv.x) kalman_estimate = np.hstack(kalman_estimate) tr_filtered = kalman_estimate[[0],:].T return tr_filtered def kalman2_3D(tr,sigma_process,sigma_measure,dt = 1/60): # make second order kinematic kalman filter cv = kinematic_kf(dim=3, order=2, dt = dt) cv.R = np.eye(3) * sigma_measure**2 G = np.array([[0.5 * dt**2, dt]], np.float32).T Q0 = np.matmul(G, G.T) * sigma_process**2 for i in [0,3,6]: cv.Q[i:(i+3),i:(i+3)] = Q_discrete_white_noise(dim=3, dt=dt, var=sigma_process**2) # cv.Q[i:(i+2),i:(i+2)] = Q0 cv.P = np.ones((cv.dim_x,cv.dim_x))*0.001 +.0001 kalman_estimate = [] # initialize cv.x = np.array([[ tr[0,0],0,0,tr[0,1],0,0,tr[0,2],0 ,0 ]]).T cv.update(tr[i,:]) for i in tqdm(range(tr.shape[0])): cv.predict() cv.update(tr[i,:][:,np.newaxis]) kalman_estimate.append(cv.x) kalman_estimate = np.hstack(kalman_estimate) tr_filtered = kalman_estimate[[0,3,6],:].T return tr_filtered def fls2_1d(tr,sigma_process,sigma_measure,dt = 1/60,N_lag = 16): # make second order kinematic kalman filter cv = kinematic_kf(dim=1, order=2, dt = dt) cv.R = np.eye(1) * sigma_measure**2 G = np.array([[0.5 * dt**2, dt]], np.float32).T Q0 = np.matmul(G, G.T) * sigma_process**2 for i in [0]: 
cv.Q[i:(i+2),i:(i+2)] = Q_discrete_white_noise(dim=2, dt=dt, var=sigma_process**2) # cv.Q[i:(i+2),i:(i+2)] = Q0 cv.P = np.ones((cv.dim_x,cv.dim_x))*0.001 +.0001 kalman_estimate = [] # initialize cv.x = np.array([[ tr[0],0,0]]).T # also make an FLS smoother fls = FixedLagSmoother(dim_x=3, dim_z=1, N=N_lag) fls.x = np.copy(cv.x) fls.F = np.copy(cv.F) fls.H = np.copy(cv.H) fls.P = np.copy(cv.P) fls.R = np.copy(cv.R) fls.Q = np.copy(cv.Q) # print(cv) # print(fls) for i in tqdm(range(tr.shape[0])): cv.predict() cv.update(tr[i]) fls.smooth(tr[i]) kalman_estimate.append(cv.x) kalman_estimate = np.hstack(kalman_estimate) fls_estimate = np.hstack(fls.xSmooth) tr_filtered = kalman_estimate[[0],:].T tr_smoothed = fls_estimate[[0],:].T return tr_smoothed def fls1_1d(tr,sigma_process,sigma_measure,dt = 1/60,N_lag = 16): # make second order kinematic kalman filter cv = kinematic_kf(dim=1, order=1, dt = dt) cv.R = np.eye(1) * sigma_measure**2 G = np.array([[0.5 * dt**2, dt]], np.float32).T Q0 = np.matmul(G, G.T) * sigma_process**2 for i in [0]: cv.Q[i:(i+2),i:(i+2)] = Q_discrete_white_noise(dim=2, dt=dt, var=sigma_process**2) # cv.Q[i:(i+2),i:(i+2)] = Q0 cv.P = np.ones((cv.dim_x,cv.dim_x))*0.001 +.0001 kalman_estimate = [] # initialize cv.x = np.array([[ tr[0],0]]).T # also make an FLS smoother fls = FixedLagSmoother(dim_x=2, dim_z=1, N=N_lag) fls.x = np.copy(cv.x) fls.F = np.copy(cv.F) fls.H = np.copy(cv.H) fls.P = np.copy(cv.P) fls.R = np.copy(cv.R) fls.Q = np.copy(cv.Q) fls.x = np.array([[ .9,0]]).T fls.P = np.array([[2.73445008e-04 ,2.49619926e-05],[2.49619926e-05, 4.56088374e-06]]) for i in tqdm(range(tr.shape[0])): cv.predict() cv.update(tr[i]) fls.smooth(tr[i]) kalman_estimate.append(cv.x) kalman_estimate = np.hstack(kalman_estimate) fls_estimate = np.hstack(fls.xSmooth) tr_filtered = kalman_estimate[[0],:].T tr_smoothed = fls_estimate[[0],:].T return tr_smoothed ``` # Smooth the 3D skeleton data ``` # function for filtering and error correction! 
from utils.analysis_tools import particles_to_body_supports_cuda,VideoPlotMachine # overwrite HACK for now Plotter = VideoPlotMachine(tracked_behavior,jagged_lines) self = Plotter # get the raw tracking data! part = self.tracking_holder # unpack all the 3D coordinates! part = torch.from_numpy(part).float().cuda() part = torch.transpose(part,0,1) body_support_0 = particles_to_body_supports_cuda(part[:,:8],implant = False) body_support_1 = particles_to_body_supports_cuda(part[:,8:],implant = False) # and the spine length s_0 = part[:,2].cpu().numpy() s_1 = part[:,2+8].cpu().numpy() # and smooth the data from utils.analysis_tools import smooth_body_support body_support_0_smooth,s_0_smooth = smooth_body_support(body_support_0,s_0) body_support_1_smooth,s_1_smooth = smooth_body_support(body_support_1,s_1) ``` # And smooth the rotation matrices ``` # calculate rotation matrices from the smoothed skeleton points, to be used for smooth video playback def rotation_matrix_vec2vec_numpy(f,t): # from this paper, ffrom math stacj # but made batch-able for pytorch # https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311 #rotate vector f onto vector t # import numpy as np # v = np.cross(f, t) # u = v/np.linalg.norm(v) # c = np.dot(f, t) # h = (1 - c)/(1 - c**2) # vx, vy, vz = v # rot =[[c + h*vx**2, h*vx*vy - vz, h*vx*vz + vy], # [h*vx*vy+vz, c+h*vy**2, h*vy*vz-vx], # [h*vx*vz - vy, h*vy*vz + vx, c+h*vz**2]] # good disussion about smoothing rotation matrices later: https://www.cvl.isy.liu.se/education/graduate/geometry2010/lectures/Lecture7b.pdf # rotate f onto t # very fast, but slightly numerically unstable, so we add epsilon! 
epsilon = 1e-6 # f = x_pointer # t = nose_pointer # cross product v = np.cross(f,t) u = v/(np.linalg.norm(v,axis=1)[:,np.newaxis] + epsilon) # dot product c = np.einsum('i,ai->a', f,t) # the factor h h = (1 - c)/(1 - c**2 + epsilon) vx, vy, vz = v[:,0],v[:,1],v[:,2] R = np.stack([np.stack([c + h*vx**2, h*vx*vy - vz, h*vx*vz + vy], axis=1), np.stack([h*vx*vy+vz, c+h*vy**2, h*vy*vz-vx], axis=1), np.stack([h*vx*vz - vy, h*vy*vz + vx, c+h*vz**2], axis=1)], axis=1) return R # use quarterneons to smooth the body ellipsoid rotations # convert the rotation matrices to quarternions from pyquaternion import Quaternion def unpack_axis_angels(R): # calculate the axis-angle representation # https://stackoverflow.com/questions/15022630/how-to-calculate-the-angle-from-rotation-matrix angle_x = np.arctan2(R[:,2,1],R[:,2,2]) angle_y = np.arctan2(-R[:,2,0],np.sqrt(R[:,2,1]**2 + R[:,2,2]**2 ) ) angle_z = np.arctan2(R[:,1,0],R[:,0,0]) return np.stack((angle_x,angle_y,angle_z),axis = 1) def averageQuaternions(Q): # from https://github.com/christophhagen/averaging-quaternions/blob/master/averageQuaternions.py # Number of quaternions to average M = Q.shape[0] A = np.zeros(shape=(4,4)) for i in range(0,M): q = Q[i,:] # multiply q with its transposed version q' and add A A = np.outer(q,q) + A # scale A = (1.0/M)*A # compute eigenvalues and -vectors eigenValues, eigenVectors = np.linalg.eig(A) # Sort by largest eigenvalue eigenVectors = eigenVectors[:,eigenValues.argsort()[::-1]] # return the real part of the largest eigenvector (has only real part) return np.real(eigenVectors[:,0].ravel()) def quaternion_smoothing(R): angles_kalman = unpack_axis_angels(R) q_x = [ Quaternion(axis = (1.,0.,0.),radians = r) for r in angles_kalman[:,0] ] q_y = [ Quaternion(axis = (0.,1.,0.),radians = r) for r in angles_kalman[:,1] ] q_z = [ Quaternion(axis = (0.,0.,1.),radians = r) for r in angles_kalman[:,2] ] # now, smooth the rotations q_all = [] for i in range(len(q_x)): q_all.append(q_x[i]*q_y[i]*q_z[i]) 
    # convert to a matrix with columns w x y z
    Q = np.stack([q.elements for q in q_all],axis = 0)
    # try a running average first!
    Q_run_av = np.copy(Q)
    w_length = 9 # must be uneven
    h_length = int(np.floor(w_length/2))
    # centered running quaternion average; edge frames keep the raw values
    for i in tqdm(np.arange(np.floor(w_length/2),Q.shape[0]-np.ceil(w_length/2))):
        i = int(i)
        Q_run_av[i,:] = averageQuaternions(Q[(i-h_length):(i+h_length+1),:])
    # convert back to rotation matrices, to check that conversion is fine
    R_q_list = [q.rotation_matrix for q in q_all]
    R_q = np.stack(R_q_list,axis = 0)
    R_q_smooth_list = [Quaternion(Q_run_av[i,:]).rotation_matrix for i in range(Q_run_av.shape[0])]
    R_q_smooth = np.stack(R_q_smooth_list,axis = 0)
    return R_q,R_q_smooth

def smooth_rotation_matrices(body_support_0_smooth):
    """Recompute body/nose rotation matrices from the smoothed skeleton points.

    Takes the 9-element smoothed body-support list and returns the same list
    with R_body and R_nose replaced by rotations derived from the smoothed
    hip->mid and mid->nose direction vectors (R_head passed through untouched).
    """
    # first calculate body vectors from the smoothed skeleton points
    c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support_0_smooth
    #todo, maybe average across the both noisy estimates here, prob won't gain much though..
    v_nose = c_nose - c_mid
    v_hip = c_mid - c_hip
    v_nose = v_nose/np.linalg.norm(v_nose,axis=1)[:,np.newaxis]
    v_hip = v_hip/np.linalg.norm(v_hip,axis=1)[:,np.newaxis]
    # To calculate R_nose, we ask how we have to rotate a vector along x, so that it points along the hip or nose
    f = np.array([1,0,0])
    t = v_nose
    R_nose_smooth = rotation_matrix_vec2vec_numpy(f,t)
    t = v_hip
    R_body_smooth = rotation_matrix_vec2vec_numpy(f,t)
    return [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body_smooth,R_head,R_nose_smooth]

def smooth_rotation_matrices_quaternion(body_support_0_smooth):
    """Like smooth_rotation_matrices, plus a running quaternion average
    applied to the recomputed R_body and R_nose."""
    # first calculate body vectors from the smoothed skeleton points
    c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support_0_smooth
    #todo, maybe average across the both noisy estimates here, prob won't gain much though..
    v_nose = c_nose - c_mid
    v_hip = c_mid - c_hip
    v_nose = v_nose/np.linalg.norm(v_nose,axis=1)[:,np.newaxis]
    v_hip = v_hip/np.linalg.norm(v_hip,axis=1)[:,np.newaxis]
    # To calculate R_nose, we ask how we have to rotate a vector along x, so that it points along the hip or nose
    f = np.array([1,0,0])
    t = v_nose
    R_nose_smooth = rotation_matrix_vec2vec_numpy(f,t)
    t = v_hip
    R_body_smooth = rotation_matrix_vec2vec_numpy(f,t)
    R_q,R_body_smooth = quaternion_smoothing(R_body_smooth)
    R_q,R_nose_smooth = quaternion_smoothing(R_nose_smooth)
    return [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body_smooth,R_head,R_nose_smooth]

# Make example plots for smoothing!
# and add the smoothed and raw data to the video maker
from utils.analysis_tools import particles_to_body_supports_cuda,VideoPlotMachine,VideoPlotMachine_noimpl

Plotter = VideoPlotMachine_noimpl(tracked_behavior,jagged_lines)
self = Plotter

# add the smoothed coordinates as numpy arrays
self.body_support_0_raw = [i.cpu().numpy().squeeze() for i in body_support_0]
self.body_support_0_smooth = body_support_0_smooth
self.s_0_raw = s_0
self.s_0_smooth = s_0_smooth

self.body_support_1_raw = [i.cpu().numpy().squeeze() for i in body_support_1]
self.body_support_1_smooth = body_support_1_smooth
self.s_1_raw = s_1
self.s_1_smooth = s_1_smooth

# also smooth the body ellipsoid rotations (skeleton-based; the quaternion
# variant is kept commented out as an alternative)
self.body_support_0_smooth = smooth_rotation_matrices(body_support_0_smooth)
self.body_support_1_smooth = smooth_rotation_matrices(body_support_1_smooth)
# self.body_support_0_smooth = smooth_rotation_matrices_quaternion(body_support_0_smooth)
# self.body_support_1_smooth = smooth_rotation_matrices_quaternion(body_support_1_smooth)
```

# Make example videos for smoothing

```
# Example stills for the figure!
# render one example frame with smoothed and with raw data
example_frame = 20*60+2000
self.make_me(example_frame,cloud = True,skel='smooth',ellip='smooth', trace='smooth',view_override = [47,74],savepath='figs/example_smooth.pdf') #47.74, 25.86
self.make_me(example_frame,cloud = True,skel='raw',ellip='raw', trace='raw',view_override = [47,74],savepath='figs/example_raw.pdf') #47.74, 25.86

# Example videos for smoothing
frame_list = 20*60+np.arange(100)*30
self.video_me(frame_list=frame_list, savepath = 'videos/example_raw.mp4', trace='raw',skel='raw',ellip='raw',view_override = [47,74],fps = 10)
self.video_me(frame_list=frame_list, savepath = 'videos/example_smooth.mp4', trace='smooth',skel='smooth',ellip='smooth',view_override = [47,74],fps=10)
plt.close('all')

# merge the videos using opencv, from here: https://gist.github.com/nkint/8576156
import cv2
import os
# this two lines are for loading the videos.
# in this case the video are named as: cut1.mp4, cut2.mp4, ..., cut15.mp4
# videofiles = [n for n in os.listdir('.') if n[0]=='c' and n[-4:]=='.mp4']
# videofiles = sorted(videofiles, key=lambda item: int( item.partition('.')[0][3:]))
video_files = ['videos/example_raw.mp4', 'videos/example_smooth.mp4']
cap_0 = cv2.VideoCapture(video_files[0])
cap_1 = cv2.VideoCapture(video_files[1])
# video resolution: 1624x1234 px
# out = cv2.VideoWriter("video.avi",
#                       cv2.cv.CV_FOURCC('F','M','P', '4'),
#                       15, (1624, 1234), 1)
# get vcap property
width = cap_0.get(cv2.CAP_PROP_FRAME_WIDTH)   # float
height = cap_0.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
# output is the two clips side by side, hence 2*width
out = cv2.VideoWriter('videos/supplementary_video_smoothing.mp4', fourcc, 10, (int(2*width), int(height)))
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('videos/smoothing.avi', fourcc, 20, (int(2*width), int(height)))
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('cutout.avi', fourcc, 20.0, (640, 480))

# create a splash screen
# create blank image
img = np.zeros((int(height),int(2*width), 3), np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
font_color = (255, 255, 255)
h = height-.1*height
w = width
font_scale = 1
thickness = 2
text = 'Ebbesen & Froemke, 2020'

def put_centered_text(img,text,w,h,font, font_scale, font_color, thickness):
    """Draw `text` on `img` horizontally centered on x=w at baseline y=h (in place)."""
    # get boundary of this text
    textsize = cv2.getTextSize(text, font, font_scale, thickness)[0]
    cv2.putText(img, text, (int(w - textsize[0]/2),int(h) ), font, font_scale, font_color, thickness, cv2.LINE_AA)

put_centered_text(img,text,w,h,font, font_scale, font_color, thickness)
put_centered_text(img,'Supplementary video 3: State-space filtering',w,.2*height,font, font_scale, font_color, thickness)

# hold the splash screen for 30 frames
for _ in range(30):
    cv2.imshow('frame',img)
    cv2.waitKey(10)
    out.write(img)

# NOTE(review): if cap_0 runs out before cap_1, frame_0 is None and hconcat
# will fail — assumes both clips have the same number of frames.
while(cap_0.isOpened() | cap_1.isOpened()):
    ret, frame_0 = cap_0.read()
    ret, frame_1 = cap_1.read()
    if frame_1 is not None:
        stacked_frame = cv2.hconcat([frame_0,frame_1])
        # Add annotation!
        # centering is not easy, in cv2: https://gist.github.com/xcsrz/8938a5d4a47976c745407fe2788c813a
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_color = (0,0,0)
        h = height-.9*height
        w = width/2
        font_scale = 1
        thickness = 2
        # get boundary of this text
        put_centered_text(stacked_frame,'Raw frame-by-frame data',w,h,font, font_scale, font_color, thickness)
        put_centered_text(stacked_frame,'After state-space filtering',w+width,h,font, font_scale, font_color, thickness)
        cv2.imshow('frame',stacked_frame)
        out.write(stacked_frame)
    else:
        break

cap_0.release()
cap_1.release()
out.release()
cv2.destroyAllWindows()
print ("end.")

# Also make a gif for the website
# Example videos for smoothing
self.video_me(frame_list=34000+np.arange(-120,60)*30, savepath = 'videos/example_tracking_25.gif', trace='smooth',skel='smooth',ellip='smooth',view_override = [47,74],fps=10,dpi = 25)
plt.close('all')

# crop the gif a little bit
import imageio
import numpy as np
#Create reader object for the gif
# NOTE(review): writes example_tracking_25.gif above but reads
# example_tracking_30.gif here — presumably a file from an earlier run; confirm.
path_list = ['videos/example_tracking_30.gif']
gifs = [imageio.get_reader(f) for f in path_list]
number_of_frames = min([g.get_length()
for g in gifs])
#Create writer object
now = time.time()
new_gif = imageio.get_writer('videos/example_tracking_wide_30b.gif',fps =10)
for frame_number in tqdm(range(number_of_frames)):
    imgs = [g.get_next_data() for g in gifs]
    new_image = np.hstack(imgs)
    # crop 24 px off the top and bottom
    new_gif.append_data(new_image[24:-24,:,:])
for g in gifs:
    g.close()
new_gif.close()
```

# Make plots for a supplementary figure showing how we do the state-space filtering

```
# Make example plots for smoothing!
# and add the smoothed and raw data to the video maker
from utils.analysis_tools import particles_to_body_supports_cuda,VideoPlotMachine

Plotter = VideoPlotMachine(tracked_behavior,jagged_lines)
self = Plotter

# add the smoothed coordinates as numpy arrays
self.body_support_0_raw = [i.cpu().numpy().squeeze() for i in body_support_0]
self.body_support_0_smooth = body_support_0_smooth
self.s_0_raw = s_0
self.s_0_smooth = s_0_smooth

self.body_support_1_raw = [i.cpu().numpy().squeeze() for i in body_support_1]
self.body_support_1_smooth = body_support_1_smooth
self.s_1_raw = s_1
self.s_1_smooth = s_1_smooth

# also smooth the body ellipsoid rotations
self.body_support_0_smooth = smooth_rotation_matrices(body_support_0_smooth)
self.body_support_1_smooth = smooth_rotation_matrices(body_support_1_smooth)

# c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = self.body_support_0_raw
# angles_raw = unpack_axis_angels(R_body)
# c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = self.body_support_0_smooth
# angles_kalman = unpack_axis_angels(R_body)
# R_q,R_q_smooth = quaternion_smoothing(R_body)
# angles_q = unpack_axis_angels(R_q)
# angles_q_smooth = unpack_axis_angels(R_q_smooth)

# extract nose-rotation angles from raw, kalman-smoothed and
# quaternion-smoothed rotation matrices for the comparison figure
c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = self.body_support_0_raw
angles_raw = unpack_axis_angels(R_nose)
c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = self.body_support_0_smooth
angles_kalman = unpack_axis_angels(R_nose)
R_q,R_q_smooth = quaternion_smoothing(R_nose)
angles_q = unpack_axis_angels(R_q)
angles_q_smooth = unpack_axis_angels(R_q_smooth)

import matplotlib
%matplotlib inline
# Say, "the default sans-serif font is COMIC SANS"
matplotlib.rcParams['font.sans-serif'] = "Liberation Sans"
# Then, "ALWAYS use sans-serif fonts"
matplotlib.rcParams['font.family'] = "sans-serif"
matplotlib.rc('font', family='sans-serif')
matplotlib.rc('text', usetex='false')
matplotlib.rcParams.update({'font.size': 13})

from palettable.cmocean.sequential import Algae_6
cmpl = Algae_6.mpl_colors

# plot every tracked variable (both mice) over a window of frames,
# tracked vs guessed values
sc=2
plt.figure(figsize = (4,5) )
n_vars = len(tracked_behavior['var'])
n_frames = tracked_behavior['tracking_holder'].shape[1]
start_frame = 30000
frame_window = 2400
# per-variable scale-bar lengths and their units
scale_bar = [1,1,.1,1,1,1,.01,.01,.01,1,1,.1,1,1,.1,.1,.1]
units = ["rad","rad","a.u.",'rad','rad','rad',"m","m","m","rad","rad","a.u.",'rad','rad',"m","m","m"]
latex_vars = ['β', 'γ', 's', 'ψ', 'θ', 'φ', 'x', 'y', 'z', 'β', 'γ', 's', 'θ', 'φ', 'x', 'y', 'z']
for i in range(n_vars):
    plt.subplot(n_vars,1,1+i)
    plt.plot(tracked_behavior['tracking_holder'][i,start_frame:(start_frame+frame_window)],c=cmpl[1])
    plt.plot(tracked_behavior['guessing_holder'][i,start_frame:(start_frame+frame_window)],c=cmpl[3])
    ax = plt.gca()
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.gca().spines['left'].set_visible(False)
    if i < (n_vars-1):
        pass
        plt.gca().spines['bottom'].set_visible(False)
        plt.xticks([])
        # adjust_spines(ax,[])
    else:
        pass
        # adjust_spines(ax,['bottom'])
        plt.gca().spines['bottom'].set_bounds(0,600)
        plt.xticks([0,600])
    # first 9 variables belong to mouse 0 (grey), rest to mouse 1 (peru)
    if i < 9:
        ax.set_facecolor('k')
        ax.patch.set_alpha(0.13)
    else:
        ax.set_facecolor('peru')
        ax.patch.set_alpha(0.2)
    # adjust_spines(ax,['bottom','left'])
    plt.yticks([])
    # plt.ylabel(tracked_behavior['var'][i])
    # plt.ylabel(latex_vars[i])
    plt.xlim([0,frame_window+20])
    ax = plt.gca()
    ylim=ax.get_ylim()
    xlim=ax.get_xlim()
    # draw the vertical scale bar at the right edge
    plt.plot( -2+np.array([1,1])*xlim[1],ylim[0]+np.array([0,1])*scale_bar[i],'-k' )
    # plt.text(xlim[1],ylim[0]," "+str(scale_bar[i])+' '+units[i] )
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.)
# plt.xlabel('Frames',labelpad =-10)
# plt.tight_layout()
if False:
    plt.savefig('/home/chrelli/git/3d_sandbox/mouseposev0p2/figure_raw_pics'+ '/supplementary_body_points/raw_tracked.png',transparent=False, bbox_inches = 'tight',pad_inches =1)
plt.show()

def unlink_wrap(dat, lims=[-np.pi, np.pi], thresh = 0.95):
    # nice trick: https://stackoverflow.com/questions/27138751/preventing-plot-joining-when-values-wrap-in-matplotlib-plots
    # NOTE(review): mutable default for `lims` — never mutated here, so benign.
    """ Iterate over contiguous regions of `dat` (i.e. where it does not
    jump from near one limit to the other). This function returns an
    iterator object that yields slice objects, which index the contiguous
    portions of `dat`. This function implicitly assumes that all points in
    `dat` fall within `lims`. """
    jump = np.nonzero(np.abs(np.diff(dat)) > ((lims[1] - lims[0]) * thresh))[0]
    lasti = 0
    for ind in jump:
        yield slice(lasti, ind + 1)
        lasti = ind + 1
    yield slice(lasti, len(dat))

# compare raw, kalman and quaternion-smoothed angles
plt.figure(figsize = (4,10))
plt.subplot(7,1,1)
for i in range(3):
    plt.plot(angles_raw[:,i],'.',c=cmpl[i+1],markersize = .7)
plt.subplot(7,1,2)
for i in range(3):
    plt.plot(angles_kalman[:,i],'.',c=cmpl[i+1],markersize = 1.5)
# plt.plot(angles_q,'.')
#plt.plot(angles_q_smooth,'.')
for i in range(3):
    plt.subplot(7,1,3+i)
    plt.plot(angles_q[:,i],'.',c=cmpl[i+1])
for i in range(3):
    plt.subplot(7,1,3+i)
    # unlink_wrap avoids vertical lines where the angle wraps around +-pi
    for slc in unlink_wrap(angles_q_smooth[:,i]):
        xx = np.arange(angles_q_smooth.shape[0])
        yy = angles_q_smooth[:,i]
        plt.plot(xx[slc],yy[slc],'-',c='k',markersize=.3)
# plt.xlim(49000,53000)
for i in range(7):
    plt.subplot(7,1,1+i)
    plt.xlim(30000,36000-3600)
    sc = 1.2
    plt.ylim([-np.pi*sc,np.pi*sc])
    ax = plt.gca()
    ax.axis('off')
plt.subplots_adjust(hspace=0,wspace=0)
if False:
    plt.savefig('/home/chrelli/git/3d_sandbox/mouseposev0p2/figure_raw_pics'+ '/supplementary_body_points/smooth_angles.png',transparent=True, bbox_inches = 'tight',pad_inches =1)
plt.show()

# Make a figure to show the kalman smoothing!
# plt.close('all')
fsz = (4,10)
lw1 = 1
plt.figure(figsize=fsz)
axs = []
type_list = np.array(['hip','tail','mid','nose','tip','impl'])
# plot the six body points in a fixed display order: raw dots vs smoothed line
for sub_i,i in enumerate([1,0,2,3,5,4]):
    N_plots = 7
    plt.subplot(N_plots,1,1+sub_i)
    ax = plt.gca()
    axs.append(ax)
    for j in range(3):
        plt.plot(body_support_0[i].squeeze().cpu().numpy()[:,j],'.',c=cmpl[1+j],markersize = .7)
        plt.plot(body_support_0_smooth[i][:,j],c='k',lw=lw1)
        # plt.plot(body_support_0_smooth[i][:,j],c=cmpl[-1],lw=lw1)
    plt.ylabel(type_list[i])
    plt.ylim([-.22,.22])
# last subplot: the spine length s
plt.subplot(N_plots,1,7)
ax = plt.gca()
axs.append(ax)
plt.plot(s_0,c=cmpl[1])
plt.plot(s_0_smooth,c='k')
plt.ylabel('s')
plt.ylim([-1,1])
for ax in axs:
    ax.set_xlim(30000,36000-3600)
    ax.axis('off')
# plt.subplots_adjust(hspace=0,wspace=0)
if False:
    plt.savefig('/home/chrelli/git/3d_sandbox/mouseposev0p2/figure_raw_pics'+ '/supplementary_body_points/smooth_xyz0.png',transparent=True, bbox_inches = 'tight',pad_inches =1)
plt.show()

# re-compute the nose-tip and hip-tail end, after correcting the 3D rotations and smoothing s!
#c_hip = c_mid + R_body @ np.array([-s,0,0])
from utils.analysis_tools import a_hip_0,a_hip_delta,b_hip_0,b_hip_delta,a_nose

c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = self.body_support_0_smooth
s = self.s_0_smooth
# calculate vectors holding the hip values!
# the values are n_particles long a_hip = a_hip_0.cpu().numpy() + a_hip_delta.cpu().numpy() * s b_hip = b_hip_0.cpu().numpy() + b_hip_delta.cpu().numpy() * (1.-s) c_ass_smooth = np.einsum('aij,aj->ai',R_body,np.hstack([-a_hip,np.zeros_like(s),np.zeros_like(s)]) ) + c_mid c_ass_smooth = np.einsum('aij,aj->ai',R_body,np.hstack([-a_hip,np.zeros_like(s),np.zeros_like(s)]) ) + c_mid c_tip_smooth = R_nose@np.array([a_nose.cpu().numpy()[0],0.,0.]) + c_nose plt.close('all') axs = [] plt.figure(figsize = (4,10)) plt.subplot(7,1,1) axs.append(plt.gca()) for i in range(3): plt.plot(c_tip[:,i],'.',c = cmpl[1+i]) plt.plot(c_tip_smooth,'-k') plt.xlim([30000,32400]) plt.subplot(7,1,2) axs.append(plt.gca()) for i in range(3): plt.plot(c_ass[:,i],'.',c = cmpl[1+i]) plt.plot(c_ass_smooth,'-k') plt.xlim([30000,32400]) plt.subplot(7,1,7) axs.append(plt.gca()) for ax in axs: ax.set_xlim(30000,36000-3600) ax.axis('off') plt.subplots_adjust(hspace=0,wspace=0) if False: plt.savefig('/home/chrelli/git/3d_sandbox/mouseposev0p2/figure_raw_pics'+ '/supplementary_body_points/smooth_tipass.png',transparent=True, bbox_inches = 'tight',pad_inches =1) plt.show() ``` # Let's make a video with an examples of each social behavior ``` # Make example plots for smoothing! 
# and add the smoothed and raw data to the video maker
from utils.analysis_tools import particles_to_body_supports_cuda,VideoPlotMachine

Plotter = VideoPlotMachine(tracked_behavior,jagged_lines)
self = Plotter

# add the smoothed coordinates as numpy arrays
self.body_support_0_raw = [i.cpu().numpy().squeeze() for i in body_support_0]
self.body_support_0_smooth = body_support_0_smooth
self.s_0_raw = s_0
self.s_0_smooth = s_0_smooth

self.body_support_1_raw = [i.cpu().numpy().squeeze() for i in body_support_1]
self.body_support_1_smooth = body_support_1_smooth
self.s_1_raw = s_1
self.s_1_smooth = s_1_smooth

# also smooth the body ellipsoid rotations
# just by skeleton
self.body_support_0_smooth = smooth_rotation_matrices(body_support_0_smooth)
self.body_support_1_smooth = smooth_rotation_matrices(body_support_1_smooth)
# By skeleton & running quaternion
# self.body_support_0_smooth = smooth_rotation_matrices_quaternion(body_support_0_smooth)
# self.body_support_1_smooth = smooth_rotation_matrices_quaternion(body_support_1_smooth)
# and re-compute the nose tip and tail

# Make videos for example social events from the figure! (detected in previous notebook)
event_frames = [18969,36343,17100]
event_name = ['Nose0 <-> Nose1','Nose0 -> Tail1','Nose1 -> Tail0']

# Plot the examples
plt.close('all')
for i in range(len(event_frames)):
    Plotter.make_me(event_frames[i],cloud = True,skel='smooth',ellip='smooth', trace='smooth',view_override = [55.0,90.+18])

# now, we make the actual videos. They have three parts:
# - 1) a chunk of time before the social event
pre_time = 8 # frames (half frame rate)
# - 2) a rotation around the social event
# - 3) a chunk of time after the social event
post_time = 8 # frames

video_folder = 'videos'
dpi = 60
for event_number in tqdm(range(len(event_frames))):
    # before: every 3rd frame leading up to the event
    frame_window = np.hstack([np.arange(-pre_time*10,0)]).astype('int')*3
    frame_list = frame_window + event_frames[event_number]
    savepath = video_folder+'/pre_'+str(event_number) + '.mp4'
    if True:
        Plotter.video_me(frame_list=frame_list,cloud = True,skel='smooth',ellip='smooth', trace='smooth',savepath = savepath,view_override = [20,50],dpi=dpi,time_offset = -frame_window[0]/60)
    # after: every 3rd frame following the event
    frame_window = np.hstack([np.arange(0,post_time*10)]).astype('int')*3
    frame_list = frame_window + event_frames[event_number]
    savepath = video_folder+'/post_'+str(event_number) + '.mp4'
    if True:
        Plotter.video_me(frame_list=frame_list,cloud = True,skel='smooth',ellip='smooth', trace='smooth',savepath = savepath,view_override = [20,50],dpi=dpi)
    # rotation around the event frame
    savepath = video_folder+'/rotation_'+str(event_number) + '.mp4'
    if True:
        Plotter.rotation(frame_list=frame_list,cloud = True,skel='smooth',ellip='smooth', trace='smooth',savepath=savepath,view_override = [20,50],dpi=dpi)
plt.close('all')
```

# Now, stack all the videos together, using cv2

```
# merge the videos using opencv, from here: https://gist.github.com/nkint/8576156
import cv2
import os

n_videos = 3
video_folder = 'videos'
video_files = [video_folder+'/pre_{:01d}.mp4'.format(i) for i in range(n_videos)]
caps = [cv2.VideoCapture(video_files[i]) for i in range(n_videos)]

width = caps[0].get(cv2.CAP_PROP_FRAME_WIDTH)   # float
height = caps[0].get(cv2.CAP_PROP_FRAME_HEIGHT) # float
# crop margins: b_cut off the bottom, s_cut off each side of each clip
b_cut = 12
s_cut = 40
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out_fps = 20
out = cv2.VideoWriter('videos/supplementary_video_social.mp4', fourcc, out_fps, (int(n_videos*width-6*s_cut), int(height-b_cut)))

# create a splash screen
# create blank image
img = np.zeros((int(height-b_cut),int(n_videos*width-6*s_cut), 3), np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
font_color = (255, 255, 255)
h = height-.1*height
w = (n_videos*width-6*s_cut)/2
font_scale = 1.4
thickness = 2
text = 'Ebbesen & Froemke, 2020'

def put_centered_text(img,text,w,h,font, font_scale, font_color, thickness):
    """Draw `text` on `img` horizontally centered on x=w at baseline y=h (in place)."""
    # get boundary of this text
    textsize = cv2.getTextSize(text, font, font_scale, thickness)[0]
    cv2.putText(img, text, (int(w - textsize[0]/2),int(h) ), font, font_scale, font_color, thickness, cv2.LINE_AA)

put_centered_text(img,text,w,h,font, font_scale, font_color, thickness)
put_centered_text(img,'Supplementary video 4: Social events',w,.2*height,font, font_scale, font_color, thickness)

# hold the splash screen for 30 frames
for _ in range(30):
    cv2.imshow('frame',img)
    cv2.waitKey(10)
    out.write(img)

# Add annotation!
# centering is not easy, in cv2: https://gist.github.com/xcsrz/8938a5d4a47976c745407fe2788c813a
font = cv2.FONT_HERSHEY_SIMPLEX
font_color = (0,0,0)
h = .07*height
w = width/2
font_scale = 1
thickness = 2

def add_files(video_files,c=(0,0,0)):
    """Read the three clips in `video_files` frame by frame, crop, label and
    hconcat them, and append the result to the module-level writer `out`.

    NOTE(review): the parameter `c` is accepted but never used; the function
    also opens its own `caps` (shadowing the module-level list) and never
    releases them.
    """
    caps = [cv2.VideoCapture(video_files[i]) for i in range(n_videos)]
    while(caps[0].isOpened()):
        ret, frame_0 = caps[0].read()
        ret, frame_1 = caps[1].read()
        ret, frame_2 = caps[2].read()
        if frame_1 is not None:
            stacked_frame = cv2.hconcat([frame_0[:-b_cut,s_cut:-s_cut,:],frame_1[:-b_cut,s_cut:-s_cut,:],frame_2[:-b_cut,s_cut:-s_cut,:]])
            put_centered_text(stacked_frame,'Nose_0 <-> Nose_1',w - s_cut,h,font, font_scale, font_color, thickness)
            put_centered_text(stacked_frame,'Nose_0 -> Tail_1',w+width - 3* s_cut,h,font, font_scale, font_color, thickness)
            put_centered_text(stacked_frame,'Nose_1 -> Tail_0',w+2*width- 5 * s_cut,h,font, font_scale, font_color, thickness)
            cv2.imshow('frame',stacked_frame)
            cv2.waitKey(10)
            out.write(stacked_frame)
        else:
            break

# write the three phases in order: pre -> rotation -> post
cc = (10,10,10)
video_files = [video_folder+'/pre_{:01d}.mp4'.format(i) for i in range(n_videos)]
add_files(video_files, c = cc)
video_files = [video_folder+'/rotation_{:01d}.mp4'.format(i) for i in range(n_videos)]
add_files(video_files, c= cc)
video_files = [video_folder+'/post_{:01d}.mp4'.format(i) for i in range(n_videos)]
add_files(video_files,c = cc)

# NOTE(review): these release the splash-screen captures opened at the top,
# not the ones opened inside add_files (which leak).
caps[0].release()
caps[1].release()
caps[2].release()
out.release()
cv2.destroyAllWindows()
print ("end.")
```
github_jupyter
``` import spacy from spacy import displacy from collections import Counter from py2neo import * import matplotlib.pyplot as plt import en_core_web_sm nlp = en_core_web_sm.load() from spacy.lang.en.stop_words import STOP_WORDS ``` ###### 1. Who are the entities and officers in US? ###### 3. Which countries have the most addresses? ###### 4. Which jurisdictions is BOSHEN connected to? ###### 5. How are X and Y connected/related? ###### 6. Which jurisdictions has the least/most entities? / Most popular jurisdictions ###### 7. Which locations are in/come under X jurisdiction? ###### 8. Under which jurisdiction is location X? ###### 9. Most influential/important/ entities in the graph? ###### 10. Which officers have the same addresses? ###### 10. Which officers in X region have the same addresses? ``` def connect(): global graph graph = Graph("bolt://localhost:7687", auth = ("neo4j", "soham")) tx = graph.begin() print('Connected...') connect() def ask_question(): global question question = input("INPUT: ") print("\n") tag_question() def tag_question(): global tokens,ner doc = nlp(question) tokens = [token.text for token in doc] pos = [pos.pos_ for pos in doc] tags = zip(tokens,pos) tags = list(tags) ner = [(ner.text,ner.label_) for ner in doc.ents] print("Tokens: ", tokens) print("Tags are: ",tags,'\n',"Number of tags: ",len(tags),'\n',"NER: ",ner) displacy.render(doc,style='dep',jupyter=True) displacy.render(doc,style = 'ent',jupyter=True) parms_builder() def parms_builder(): global parms, parms_2 if len(ner) == 1: if (ner[0][1] == 'GPE') or (ner[0][1] == 'LOC'): if (ner[0][0] == "US") or (ner[0][0] == "USA"): country_ = 'United States' elif (ner[0][0] == "UK"): country_ = 'United Kingdom' else: country = ner[0][0] parms = {} parms["country"] = country print(parms) elif (ner[0][1] == 'ORG'): org = ner[0][0] parms = {} parms["org"] = org print(parms) elif (ner[0][1] == 'PERSON'): person = ner[0][0] parms = {} parms["person"] = person print(parms) elif len(ner) > 1: 
name1 = ner[0][0] name2 = ner[1][0] parms_2 = {"name1":name1, "name2":name2} print(parms_2) query_picker() def query_picker(): label_entity = "(entity:Entity)" label_officer = "(officer:Officer)" label_address = "(address:Address)" label_intermediary = "(interm:Intermediary)" query_sp_officer = "MATCH (o1:Officer{ name: $name1 }),(o2:Officer{ name: $name2 }), o = shortestPath((o1)-[*..15]-(o2)) RETURN o1.name, o2.name, o" query_sp_entity = "MATCH (e1:Entity{ name: $name1 }),(e2:Entity{ name: $name2 }), e = shortestPath((e1)-[*..15]-(e2)) RETURN e1.name, e2.name, e" for token in tokens: if token in ["officer","officers"] and token in ['entity','entities']: label_1 = label_officer label_2 = label_entity relationship_1 = "-[r:OFFICER_OF]->" match_1 = "MATCH + {} + {} + {}".format(label_1,relationship_1,label_2) query_skeleton_1 = match_1 + " RETURN collect(" + "{}".format(label_1) + ".name), " + "({}".format(label_1) + ".countries), " + "({}".format(label_entity) + ".name) LIMIT 10" print(graph.run(query_skeleton_1).to_table()) if token in ['country','countries'] and ['address','addresses']: label_1 = label_address match_0 = "MATCH {}".format(label_1) query_skeleton_0 = match_0 + " RETURN address.countries as Country, address.country_codes as Codes, count(*) AS Count ORDER BY Count DESC LIMIT 15" print(graph.run(query_skeleton_0).to_table()) if token in ['jurisdiction','jurisdictions'] and ['connected','related'] and (ner[0][1]=='ORG'): label_1 = label_officer label_2 = label_entity relationship_1 = "-[]->" match_1 = "MATCH {} {} {}".format(label_1,relationship_1,label_2) query_skeleton_1 = match_1 + " WHERE officer.name contains $org RETURN entity.jurisdiction_description as Juris, count(*) as Number_of_entities ORDER BY Number_of_entities DESC" print(query_skeleton_1) print(graph.run(query_skeleton_1,parms).to_table()) if token in ['connected'] and len(ner) > 1: print(query_sp,'\n') print(graph.run(query_sp,parms_2).to_table()) g = query_sp.get_graph() x.draw(g) if 
token in ['influential','important'] and ['entities']: label_1 = label_entity query_page_rank = "MATCH" + "{}".format(label_1) + "WHERE exists(entity.pagerank_g) RETURN entity.name AS entity, entity.jurisdiction_description AS jurisdiction, entity.pagerank_g AS pagerank ORDER BY pagerank DESC LIMIT 15" print(query_page_rank) print(graph.run(query_page_rank).to_table()) if token in (all(['come','under']) and ['jurisdiction']): label_1 = label_entity match_0 = "MATCH {}".format(label_1) query_skeleton_0 = match_0 + "WHERE entity.jurisdiction_description CONTAINS $country RETURN collect(distinct entity.countries) as Locations, entity.jurisdiction_description as Jurisdiction limit 10" print(query_skeleton_0,'\n') print(graph.run(query_skeleton_0,parms).to_table()) if token in ['Under'] and ['jurisdictions']: label_1 = label_entity match_0 = "MATCH {}".format(label_1) query_skeleton_0 = match_0 + "WHERE entity.countries CONTAINS $country RETURN distinct entity.countries as Location, entity.jurisdiction_description as Jurisdiction, collect(entity.name) limit 5" print(query_skeleton_0) print(graph.run(query_skeleton_0,parms).to_table()) if token in (all(['same','address']) and ['officers']): label_1 = label_officer label_2 = label_address relationship_1 = '-[r:REGISTERED_ADDRESS]->' match_1 = "MATCH {} {} {}".format(label_1,relationship_1,label_2) query = match_1 + "RETURN collect(officer.name), address.address" print(query) print(graph.run(query).to_table()) ask_question() ```
github_jupyter
# DenoiSeg Example: Fly Wing
This is an example notebook which illustrates how DenoiSeg should be trained. In this notebook we use a membrane-labeled developing Fly Wing dataset from our collaborators. We already split the data into train and test images. From the train images we then extracted 1428 training and 252 validation patches of size 128x128. The test set contains 50 images of size 512x512.
```
# Here we are just importing some libraries which are needed to run this notebook.
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from matplotlib import pyplot as plt
from scipy import ndimage

from denoiseg.models import DenoiSeg, DenoiSegConfig
from denoiseg.utils.misc_utils import combine_train_test_data, shuffle_train_data, augment_data
from denoiseg.utils.seg_utils import *
from denoiseg.utils.compute_precision_threshold import measure_precision

from csbdeep.utils import plot_history

import urllib
import os
import zipfile
```
## Downloading and Data Loading
We created three versions of this dataset by adding Gaussian noise with zero mean and standard deviations 10 and 20. The datasets are marked with the suffixes n0, n10 and n20 accordingly. In the next cell you can choose which `noise_level` you would like to investigate.
``` # Choose the noise level you would like to look at: # Values: 'n0', 'n10', 'n20' noise_level = 'n20' # create a folder for our data if not os.path.isdir('./data'): os.mkdir('data') if noise_level == 'n0': link = 'https://zenodo.org/record/5156991/files/Flywing_n0.zip?download=1' elif noise_level == 'n10': link = 'https://zenodo.org/record/5156993/files/Flywing_n10.zip?download=1' elif noise_level == 'n20': link = 'https://zenodo.org/record/5156995/files/Flywing_n20.zip?download=1' else: print('This noise level does not exist for this dataset.') # check if data has been downloaded already zipPath="data/Flywing_{}.zip".format(noise_level) if not os.path.exists(zipPath): #download and unzip data data = urllib.request.urlretrieve(link, zipPath) with zipfile.ZipFile(zipPath, 'r') as zip_ref: zip_ref.extractall("data") # Loading of the training images trainval_data = np.load('data/Flywing_{}/train/train_data.npz'.format(noise_level)) train_images = trainval_data['X_train'].astype(np.float32) train_masks = trainval_data['Y_train'] val_images = trainval_data['X_val'].astype(np.float32) val_masks = trainval_data['Y_val'] print("Shape of train_images: {}".format(train_images.shape)) print("Shape of train_masks: {}".format(train_masks.shape)) print("Shape of val_images: {}".format(val_images.shape)) print("Shape of val_masks: {}".format(val_masks.shape)) ``` ## Small Amounts of Annotated Training Data With DenoiSeg we present a solution to train deep neural networks if only few annotated ground truth segmentations are available. We simulate such a scenario by zeroing out all but a fraction of the available training data. In the next cell you can specify the percentage of training images for which ground truth annotations are available. ``` # Set the number of annotated training images. 
# Values: 0.0 (no annotated images) to total number of training images (all images have annotations) number_of_annotated_training_images = 5 assert number_of_annotated_training_images >= 0.0 and number_of_annotated_training_images <=train_images.shape[0] # Seed to shuffle training data (annotated GT and raw image pairs). seed = 1 # First we shuffle the training images to remove any bias. X_shuffled, Y_shuffled = shuffle_train_data(train_images, train_masks, random_seed=seed) # Here we convert the number of annotated images to be used for training as percentage of available training data. percentage_of_annotated_training_images = float((number_of_annotated_training_images/train_images.shape[0])*100.0) assert percentage_of_annotated_training_images >= 0.0 and percentage_of_annotated_training_images <=100.0 # Here we zero out all training images which are not part of the # selected percentage. X_frac, Y_frac = zero_out_train_data(X_shuffled, Y_shuffled, fraction = percentage_of_annotated_training_images) # Now we apply data augmentation to the training patches: # Rotate four times by 90 degree and add flipped versions. X, Y_train_masks = augment_data(X_frac, Y_frac) X_val, Y_val_masks = val_images, val_masks # Here we add the channel dimension to our input images. # Dimensionality for training has to be 'SYXC' (Sample, Y-Dimension, X-Dimension, Channel) X = X[...,np.newaxis] Y = convert_to_oneHot(Y_train_masks) X_val = X_val[...,np.newaxis] Y_val = convert_to_oneHot(Y_val_masks) print("Shape of X: {}".format(X.shape)) print("Shape of Y: {}".format(Y.shape)) print("Shape of X_val: {}".format(X_val.shape)) print("Shape of Y_val: {}".format(Y_val.shape)) ``` Next we look at a single sample. In the first column we show the input image, in the second column the background segmentation, in the third column the foreground segmentation and in the last column the border segmentation. With the parameter `sample` you can choose different training patches. 
You will notice that not all of them have a segmentation ground truth. ``` sample = 0 plt.figure(figsize=(20,5)) plt.subplot(1,4,1) plt.imshow(X[sample,...,0]) plt.axis('off') plt.title('Raw validation image') plt.subplot(1,4,2) plt.imshow(Y[sample,...,0], vmin=0, vmax=1, interpolation='nearest') plt.axis('off') plt.title('1-hot encoded background') plt.subplot(1,4,3) plt.imshow(Y[sample,...,1], vmin=0, vmax=1, interpolation='nearest') plt.axis('off') plt.title('1-hot encoded foreground') plt.subplot(1,4,4) plt.imshow(Y[sample,...,2], vmin=0, vmax=1, interpolation='nearest') plt.axis('off') plt.title('1-hot encoded border'); ``` ### Configure network parameters ``` train_batch_size = 128 train_steps_per_epoch = min(400, max(int(X.shape[0]/train_batch_size), 10)) ### In the next cell, you can choose how much relative importance (weight) to assign to denoising ### and segmentation tasks by choosing appropriate value for denoiseg_alpha (between 0 and 1; with 0 being ### only segmentation and 1 being only denoising. Here we choose denoiseg_alpha = 0.5) conf = DenoiSegConfig(X, unet_kern_size=3, n_channel_out=4, relative_weights = [1.0,1.0,5.0], train_steps_per_epoch=train_steps_per_epoch, train_epochs=10, batch_norm=True, train_batch_size=train_batch_size, unet_n_first = 32, unet_n_depth=4, denoiseg_alpha=0.5, train_tensorboard=False) vars(conf) model_name = 'DenoiSeg_Flywing_n20' basedir = 'models' model = DenoiSeg(conf, model_name, basedir) history = model.train(X, Y, (X_val, Y_val)) history.history.keys() plot_history(history, ['loss', 'val_loss']) ``` ## Computing Threshold Value The network predicts 4 output channels: 1. The denoised input. 2. The foreground likelihoods. 3. The background likelihoods. 4. The border likelihoods. We will threshold the foreground prediction image to obtain object segmentations. The optimal threshold is determined on the validation data. Additionally we can optimize the threshold for a given measure. 
In this case we choose the Average Precision (AP) measure. ``` threshold, val_score = model.optimize_thresholds(val_images.astype(np.float32), val_masks, measure=measure_precision()) print("The higest score of {} is achieved with threshold = {}.".format(np.round(val_score, 3), threshold)) ``` ## Test Data Finally we load the test data and run the prediction. ``` test_data = np.load('data/Flywing_{}/test/test_data.npz'.format(noise_level), allow_pickle=True) test_images = test_data['X_test'] test_masks = test_data['Y_test'] predicted_images, precision_result = model.predict_label_masks(test_images, test_masks, threshold, measure=measure_precision()) print("Average precision over all test images with threshold = {} is {}.".format(threshold, np.round(precision_result, 3))) ``` ### Visualize the results ``` sl = 6 fig = plt.figure() plt.figure(figsize=(20,10)) plt.subplot(1, 3, 1) plt.imshow(test_images[sl]) plt.title("Raw image") plt.subplot(1, 3, 2) plt.imshow(predicted_images[sl]) plt.title("Predicted segmentation") plt.subplot(1, 3, 3) plt.imshow(test_masks[sl]) plt.title("Ground truth segmentation") plt.show() print("Number of annotated images used for training:", number_of_annotated_training_images) print("Noise level:", noise_level) print("Considered alpha:", conf.denoiseg_alpha) ``` ### Expected results for this dataset <b>AP scores for Flywing n0 dataset</b> ||5 imgs|10 imgs|19 imgs|38 imgs|76 imgs| |--- |--- |--- |--- |--- |--- | |Alpha 0.5|0.737±0.041|0.808±0.013|0.849±0.012|0.894±0.008|0.925±0.001| |Alpha 0.3|0.772±0.030|0.786±0.040|0.838±0.028|0.912±0.003|0.919±0.004| |Alpha 0.7|0.680±0.040|0.795±0.009|0.811±0.006|0.871±0.012|0.913±0.007| |Alpha best|0.806±0.014|0.871±0.006|0.892±0.005|0.923±0.003|0.929±0.0008| <b>AP scores for Flywing n10 dataset</b> ||5 imgs|10 imgs|19 imgs|38 imgs|76 imgs| |--- |--- |--- |--- |--- |--- | |Alpha 0.5|0.782±0.026|0.855±0.008|0.874±0.012|0.891±0.009|0.898±0.012| |Alpha 
0.3|0.799±0.036|0.847±0.011|0.862±0.020|0.880±0.010|0.916±0.005| |Alpha 0.7|0.736±0.046|0.801±0.027|0.862±0.011|0.894±0.005|0.907±0.007| |Alpha best|0.857±0.010|0.878±0.010|0.893±0.005|0.919±0.003|0.929±0.0009| <b>AP scores for Flywing n20 dataset</b> ||5 imgs|10 imgs|19 imgs|38 imgs|76 imgs| |--- |--- |--- |--- |--- |--- | |Alpha 0.5|0.788±0.078|0.865±0.014|0.880±0.014|0.895±0.005|0.902±0.009| |Alpha 0.3|0.848±0.019|0.868±0.014|0.878±0.011|0.913±0.004|0.916±0.005| |Alpha 0.7|0.824±0.041|0.878±0.007|0.870±0.012|0.894±0.007|0.892±0.010| |Alpha best|0.882±0.014|0.907±0.003|0.899±0.005|0.917±0.003|0.929±0.001| ### Export your model for Fiji ``` model.export_TF(name='DenoiSeg - FlyWing Example', description='This is the 2D DenoiSeg example trained on FlyWing data in python.', authors=["Tim-Oliver Buchholz", "Mangal Prakash", "Alexander Krull", "Florian Jug"], test_img=X_val[0,...,0], axes='YX', patch_shape=(128, 128)) ```
github_jupyter
# Lab 03 - 1 by Nicholas Fong, worked with Vivian Duong ``` import os import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline df = pd.read_csv(os.path.join('advertising-raw.csv'), index_col = 0) df.head() ``` ## Let's do some boxplots! E.g., `df[ ['column(s) of interest'] ].plot(kind = 'box')` ``` print df[ ['TV'] ].plot(kind = 'box') print df[ ['Radio'] ].plot(kind = 'box') print df[ ['Newspaper'] ].plot(kind = 'box') print df[ ['Sales'] ].plot(kind = 'box') ``` ### What do we learn from TV, radio, newspaper and sales boxplots? Answer: All of the distributions seem fairly evenly distributed, except the Newspaper distribution is skewed right with a few outliers. ## Let's play with correlation! Compute the correlation matrix of your data and explain what you've learned from it. For instance, which variable can depict sales the best? (TV, radio or newspaper). What are the highest and lowest correlations? ``` df[['TV','Radio','Newspaper','Sales']].corr() ``` What did you learn from the correlation matrix? Answer: Sales increase with TV, Radio, and Newspaper, but TV has a higher impact on Sales than Radio, which has a higher impact than Newspaper. Thus TV can best predict Sales. The highest correlation (besides self correlation) is between TV and Sales at 0.782224. The lowest correlation is between TV and Radio at a mere 0.054809. ## Let's get rid of outliers! 1. Look over your boxplots and pick one variable that likely has outliers. 2. Find the Interquartile Range (IQR) of that variable. You can do this in two different ways. - Use df.describe() - Use df.quantile() 3. Remember, one definition for outliers is anything that is more than `1.5 * IQR` above Q3 or `1.5 * IQR` below Q1. For this exercise, solely focus on 1.5 * IQR above Q3) 4. Delete the rows for these outliers. ### Step 1: Which variable seems to have outliers? Answer: Newspaper is the only variable with outliers and its two outliers are visible from its boxplot. 
```
# Hint: Start checking how many observations are in your DataFrame
len(df)
```

### Step 2: IQR

```
df.describe()
```

### Steps 3 and 4: Remove the outliers

```
# For all the values in the Newspaper column that are 1.5 * the IQR (75%-25%) above the 75%ile, remove the row
for i in df[df['Newspaper'] > (1.5*(df['Newspaper'].quantile(0.75)-df['Newspaper'].quantile(0.25))+df['Newspaper'].quantile(0.75))].index:
    df.drop(i,inplace=True)

# Sanity check: Check the length of your data and see if you have correctly dropped the outliers.
len(df)

df.to_csv(os.path.join('advertising-tidy.csv'))
```

# Your last mission!

Our goal is to divide our dataset into high and low Sales. We would like to add a dummy variable called sales_dummy which is 0 if sales of that specific observation is below the median of all sales and 1 otherwise. We can do it in shorter steps - but for education purposes, first define a variable called `SalesCategory` which is `Low` when sales is below the median and `High` otherwise.

```
def SalesCategory(input):
    # Return True (-> 1, "High") for sales at or above the median and
    # False (-> 0, "Low") below it, matching the requested dummy:
    # 0 if below the median, 1 otherwise.
    # (The original `input < median` was inverted: it returned True for LOW sales.)
    return input >= df['Sales'].quantile(0.5)
```

We can now define a new variable called `SalesDummy` that serves our purpose. Please use `.map()`.

```
# I asked the Professor and he said True/False is ok to use instead of 0/1 or 'Low'/'High'
SalesDummy = map(SalesCategory, df['Sales'])
SalesDummy
len(SalesDummy)
```
github_jupyter
``` from __future__ import print_function, division, absolute_import ``` # Code Testing and CI **Version 0.1** The notebook contains problems about code testing and continuous integration. * * * E Tollerud (STScI) ## Problem 1: Set up py.test in you repo In this problem we'll aim to get the [py.test](https://docs.pytest.org/en/latest/) testing framework up and running in the code repository you set up in the last set of problems. We can then use it to collect and run tests of the code. ### 1a: Ensure py.test is installed Of course ``py.test`` must actually be installed before you can use it. The commands below should work for the Anaconda Python Distribution, but if you have some other Python installation you'll want to install `pytest` (and its coverage plugin) as directed in the install instructions for ``py.test``. ``` !conda install pytest pytest-cov ``` ### 1b: Ensure your repo has code suitable for unit tests Depending on what your code actually does, you might need to modify it to actually perform something testable. For example, if all it does is print something, you might find it difficult to write an effective unit test. Try adding a function that actually performs some operation and returns something different depending on various inputs. That tends to be the easiest function to unit-test: one with a clear "right" answer in certain situations. Also be sure you have `cd`ed to the *root* of the repo for `pytest` to operate correctly. ### 1c: Add a test file with a test function The test must be part of the package and follow the convention that the file and the function begin with ``test`` to get picked up by the test collection machinery. Inside the test function, you'll need some code that fails if the test condition fails. The easiest way to do this is with an ``assert`` statement, which raises an error if its first argument is False. 
*Hint: remember that to be a valid python package, a directory must have an ``__init__.py``* ``` !mkdir #complete !touch #complete %%file <yourpackage>/tests/test_something.py def test_something_func(): assert #complete ``` ### 1d: Run the test directly While this is not how you'd ordinarily run the tests, it's instructive to first try to execute the test *directly*, without using any fancy test framework. If your test function just runs, all is good. If you get an exception, the test failed (which in this case might be *good*). *Hint: you may need to use `reload` or just re-start your notebook kernel to get the cell below to recognize the changes.* ``` from <yourpackage>.tests import test_something test_something.test_something_func() ``` ### 1e: Run the tests with ``py.test`` Once you have an example test, you can try invoking ``py.test``, which is how you should run the tests in the future. This should yield a report that shows a dot for each test. If all you see are dots, the tests ran sucessfully. But if there's a failure, you'll see the error, and the traceback showing where the error happened. ``` !py.test ``` ### 1f: Make the test fail (or succeed...) If your test failed when you ran it, you should now try to fix the test (or the code...) to make it work. Try running (Modify your test to fail if it succeeded before, or vice versa) ``` !py.test ``` ### 1g: Check coverage The coverage plugin we installed will let you check which lines of your code are actually run by the testing suite. ``` !py.test --cov=<yourproject> tests/ #complete ``` This should yield a report, which you can use to decide if you need to add more tests to acheive complete coverage. Check out the command line arguments to see if you can get a more detailed line-by-line report. ## Problem 2: Implement some unit tests The sub-problems below each contain different unit testing complications. 
Place the code from the snippets in your repository (either using an editor or the ``%%file`` trick), and write tests to ensure the correctness of the functions. Try to achieve 100% coverage for all of them (especially to catch some hidden bugs!). Also, note that some of these examples are not really practical - that is, you wouldn't want to do this in *real* code because there's better ways to do it. But because of that, they are good examples of where something can go subtly wrong... and therefore where you want to make tests! ### 2a When you have a function with a default, it's wise to test both the with-default call (``function_b()``), and when you give a value (``function_b(1.2)``) *Hint: Beware of numbers that come close to 0... write your tests to accomodate floating-point errors!* ``` #%%file <yourproject>/<filename>.py #complete, or just use your editor # `math` here is for *scalar* math... normally you'd use numpy but this makes it a bit simpler to debug import math inf = float('inf') # this is a quick-and-easy way to get the "infinity" value def function_a(angle=180): anglerad = math.radians(angle) return math.sin(anglerad/2)/math.sin(anglerad) ``` ### 2b This test has an intentional bug... but depending how you right the test you *might* not catch it... Use unit tests to find it! (and then fix it...) ``` #%%file <yourproject>/<filename>.py #complete, or just use your editor def function_b(value): if value < 0: return value - 1 else: value2 = subfunction_b(value + 1) return value + value2 def subfunction_b(inp): vals_to_accum = [] for i in range(10): vals_to_accum.append(inp ** (i/10)) if vals_to_accum[-1] > 2: vals.append(100) # really you would use numpy to do this kind of number-crunching... but we're doing this for the sake of example right now return sum(vals_to_accum) ``` ### 2c There are (at least) *two* significant bugs in this code (one fairly apparent, one much more subtle). 
Try to catch them both, and write a regression test that covers those cases once you've found them. One note about this function: in real code you're probably better off just using [the Angle object from `astropy.coordinates`](http://docs.astropy.org/en/stable/coordinates/angles.html). But this example demonstrates one of the reasons *why* that was created, as it's very easy to write a buggy version of this code. *Hint: you might find it useful to use `astropy.coordinates.Angle` to create test cases...* ``` #%%file <yourproject>/<filename>.py #complete, or just use your editor import math # know that to not have to worry about this, you should just use `astropy.coordinates`. def angle_to_sexigesimal(angle_in_degrees, decimals=3): """ Convert the given angle to a sexigesimal string of hours of RA. Parameters ---------- angle_in_degrees : float A scalar angle, expressed in degrees Returns ------- hms_str : str The sexigesimal string giving the hours, minutes, and seconds of RA for the given `angle_in_degrees` """ if math.floor(decimals) != decimals: raise ValueError('decimals should be an integer!') hours_num = angle_in_degrees*24/180 hours = math.floor(hours_num) min_num = (hours_num - hours)*60 minutes = math.floor(min_num) seconds = (min_num - minutes)*60 format_string = '{}:{}:{:.' 
+ str(decimals) + 'f}' return format_string.format(hours, minutes, seconds) ``` ### 2d *Hint: numpy has some useful functions in [numpy.testing](https://docs.scipy.org/doc/numpy/reference/routines.testing.html) for comparing arrays.* ``` #%%file <yourproject>/<filename>.py #complete, or just use your editor import numpy as np def function_d(array1=np.arange(10)*2, array2=np.arange(10), operation='-'): """ Makes a matrix where the [i,j]th element is array1[i] <operation> array2[j] """ if operation == '+': return array1[:, np.newaxis] + array2 elif operation == '-': return array1[:, np.newaxis] - array2 elif operation == '*': return array1[:, np.newaxis] * array2 elif operation == '/': return array1[:, np.newaxis] / array2 else: raise ValueError('Unrecognized operation "{}"'.format(operation)) ``` ## Problem 3: Set up travis to run your tests whenever a change is made Now that you have a testing suite set up, you can try to turn on a continuous integration service to constantly check that any update you might send doesn't create a bug. We will the [Travis-CI](https://travis-ci.org/) service for this purpose, as it has one of the lowest barriers to entry from Github. ### 3a: Ensure the test suite is passing locally Seems obvious, but it's easy to forget to check this and only later realize that all the trouble you thought you had setting up the CI service was because the tests were actually broken... ``` !py.test ``` ### 3b: Set up an account on travis This turns out to be quite convenient. If you go to the [Travis web site](https://travis-ci.org/), you'll see a "Sign in with GitHub" button. You'll need to authorize Travis, but once you've done so it will automatically log you in and know which repositories are yours. ### 3c: Create a minimal ``.travis.yml`` file. Before we can activate travis on our repo, we need to tell travis a variety of metadata about what's in the repository and how to run it. The template below should be sufficient for the simplest needs. 
``` %%file .travis.yml language: python python: - "3.6" # command to install dependencies #install: "pip install numpy" #uncomment this if your code depends on numpy or similar # command to run tests script: pytest ``` Be sure to commit and push this to github before proceeding: ``` !git #complete ``` ### 3d: activate Travis You can now click on your profile picture in the upper-right and choose "accounts". You should see your repo listed there, presumably with a grey X next to it. Click on the X, which should slide the button over and therefore activate travis on that repository. Once you've done this, you should be able to click on the name of the reposository in the travis accounts dashboard, popping up a window showing the build already in progress (if not, just be a bit patient). Wait for the tests to complete. If all is good you should see a green check next to the repository name. Otherwise you'll need to go in and fix it and the tests will automatically trigger when you send a new update. ### 3e: Break the build Make a small change to the repository to break a test. If all else fails simply add the following test: ``` def test_fail(): assert False ``` Push that change up and go look at travis. It should automatically run the tests and result in them failing. ### 3f: Have your neighbor fix your repo Challenge your nieghbor to find the bug and fix it. Have them follow the Pull Request workflow, but do *not* merge the PR until Travis' tests have finished (they *should* run automatically, and leave note in the github PR page to that effect). Once the tests have finished, they will tell you if the fix really does cure the bug. If it does, merge it and say thank you. If it doesn't, ask your neighbor to try updating their fix with the feedback from Travis... 
*Hint: it may be late in the day, but keep being nice!* ## Challenge Problem 1: Use py.test "parametrization" ``py.test`` has a feature called test parametrization that can be extremely useful for writing easier-to-understand tests. The key idea is that you can use one simple test *function*, but with multiple inputs, and break that out into separate tests. At first glance this might appear similar to just one test where you interate over lots of inputs, but it's actually much more useful because it doesn't stop at the *first* failure. Rather it will run all the inputs ever time, helpinf you debug subtle problems where only certain inputs fail. For more info and how to actually *use* the feature, see [the py.test docs on the subject](https://docs.pytest.org/en/latest/parametrize.html). In this challenge problem, try adapting the Problem 2 cases to use this feature. 2c and 2d are particularly amenable to this approach. ## Challenge Problem 2: Test-driven development Test-driven development is a radically different approach to designing code from what we're generally used to. In test-driven design, you write the tests *first*. That is, you write how you expect your code to behave before writing the code. For this problem, try experimenting with test-driven desgin. Choose a problem (ideally from your science interests) where you know some clear cases that you could write tests for. Write the full testing suite (using the techniques you developed above). Then run the tests to ensure all the new ones are failing due to lack of implementation, and then write the new code. A few ideas are given below, but, again, for a real challenge try to come up with your own problem. * Compute the location of Lagrange points for two arbitrary mass bodies. (Good test cases are the Earth-Moon or Earth-Sun system, which you can probably find on wikipedia.) Consider solving the problem numerically instead of with formulae you can look up, but use the formulae to concoct the test cases. 
* Write a function that uses one of the a clustering algorithm in [scikit-learn](http://scikit-learn.org/stable/modules/clustering.html) to identify the centers of two 2D gaussian point-clouds. The tests are particularly easy to formulate before-hand because you know the right answer at the outset if you generate the point-clouds yourself.
github_jupyter
(regression)= # Regression ## Introduction In this chapter, you'll learn how to run linear regressions with code. If you're running this code (either by copying and pasting it, or by downloading it using the icons at the top of the page), you may need to the packages it uses by, for example, running `pip install packagename` on your computer's command line. (If you're not sure what a command line is, take a quick look at the basics of coding chapter.) Most of this chapter will rely on [statsmodels](https://www.statsmodels.org/stable/index.html) with some use of [**linearmodels**](https://bashtage.github.io/linearmodels/). Some of the material in this chapter follows [Grant McDermott](https://grantmcdermott.com/)'s excellent notes and the [Library of Statistical Translation](https://lost-stats.github.io/). ### Notation and basic definitions Greek letters, like $\beta$, are the truth and represent parameters. Modified Greek letters are an estimate of the truth, for example $\hat{\beta}$. Sometimes Greek letters will stand in for vectors of parameters. Most of the time, upper case Latin characters such as $X$ will represent random variables (which could have more than one dimension). Lower case letters from the Latin alphabet denote realised data, for instance $x$ (which again could be multi-dimensional). Modified Latin alphabet letters denote computations performed on data, for instance $\bar{x} = \frac{1}{n} \displaystyle\sum_{i} x_i$ where $n$ is number of samples. Ordinary least squares (OLS) regression can be used to *estimate* the parameters of certain types of model, most typically models of the form $$ y = \beta_0 + \beta_1 \cdot x_1 + \beta_2 \cdot x_2 $$ This generic model says that the value of an outcome variable $y$ is a linear function of one or more input predictor variables $x_i$, where the $x_i$ could be transforms of original data. But the above equation is a platonic ideal, what we call a data generating process (DGP). 
OLS allows us to recover *estimates* of the parameters of the model , i.e. to find $\hat{\beta_i}$ and to enable us to write an estimated model: $$ y = \hat{\beta_0} + \hat{\beta_1} \cdot x_1 + \hat{\beta_2} \cdot x_2 + \epsilon $$ This equation can also be expressed in matrix form as $$ y = x'\cdot \hat{\beta} + \epsilon $$ where $x' = (1, x_1, \dots, x_{n})'$ and $\hat{\beta} = (\hat{\beta_0}, \hat{\beta_1}, \dots, \hat{\beta_{n}})$. Given data $y_i$ stacked to make a vector $y$ and $x_{i}$ stacked to make a matrix $X$, this can be solved for the coefficients $\hat{\beta}$ according to $$ \hat{\beta} = \left(X'X\right)^{-1} X'y $$ To be sure that the estimates of these parameters are the *best linear unbiased estimate*, a few conditions need to hold: the Gauss-Markov conditions: 1. $y$ is a linear function of the $\beta_i$ 2. $y$ and the $x_i$ are randomly sampled from the population. 3. There is no perfect multi-collinearity of variables. 4. $\mathbb{E}(\epsilon | x_1, \dots, x_n) = 0$ (unconfoundedness) 5. $\text{Var}(\epsilon | x_1, \dots, x_n) = \sigma^2$ (homoskedasticity) (1)-(4) also guarantee that OLS estimates are unbiased and $\mathbb{E}(\hat{\beta}_i) = \beta_i$. The classic linear model requires a 6th assumption; that $\epsilon \thicksim \mathcal{N}(0, \sigma^2)$. The interpretation of regression coefficients depends on what their units are to begin with, but you can always work it out by differentiating both sides of the model equation with respect to the $x_i$. For example, for the first model equation above $$ \frac{\partial y}{\partial x_i} = \beta_i $$ so we get the interpretation that $\beta_i$ is the rate of change of y with respect to $x_i$. If $x_i$ and $y$ are in levels, this means that a unit increase in $x_i$ is associated with a $\beta_i$ units increase in $y$. 
If the right-hand side of the model is $\ln x_i$ then we get $$ \frac{\partial y}{\partial x_i} = \beta_i \frac{1}{x_i} $$ with some abuse of notation, we can rewrite this as $\partial y = \beta_i \partial x_i/x_i$, which says that a percent change in $x_i$ is associated with a $\beta_i$ unit change in $y$. With a logged $y$ variable, it's a percent change in $x_i$ that is associated with a percent change in $y$, or $\partial y/y = \beta_i \partial x_i/x_i$ (note that both sides of this equation are unitless in this case). Finally, another example that is important in practice is that of log differences, eg $y = \beta_i (\ln x_i - \ln x_i')$. Again, we will abuse notation and say that this case may be represented as $\partial y = \beta_i (\partial x_i/x_i - \partial x_i'/x_i')$, i.e. the difference in two percentages, a *percentage point* change, in $x_i$ is associated with a $\beta_i$ unit change in $y$. ### Imports Let's import some of the packages we'll be using: ``` import numpy as np import scipy.stats as st import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import statsmodels.api as sm import statsmodels.formula.api as smf import os from pathlib import Path # Set max rows displayed for readability pd.set_option('display.max_rows', 6) # Plot settings plt.style.use("https://github.com/aeturrell/coding-for-economists/raw/main/plot_style.txt") ``` ## Regression basics There are two ways to run regressions in [**statsmodels**](https://www.statsmodels.org/stable/index.html); passing the data directly as objects, and using formulae. We'll see both but, just to get things started, let's use the formula API. We'll use the starwars dataset to run a regression of mass on height for star wars characters. This example borrows very heavily from notes by [Grant McDermott](https://grantmcdermott.com/). 
First, let's bring the dataset in: ``` df = pd.read_csv("https://github.com/aeturrell/coding-for-economists/raw/main/data/starwars.csv", index_col=0) # Look at first few rows df.head() ``` Okay, now let's do a regression using OLS and a formula that says our y-variable is mass and our regressor is height: ``` results = smf.ols('mass ~ height', data=df).fit() ``` Well, where are the results!? They're stored in the object we created. To peek at them we need to call the summary function (and, for easy reading, I'll print it out too using `print`) ``` print(results.summary()) ``` What we're seeing here are really several tables glued together. To just grab the coefficients in a tidy format, use ``` results.summary().tables[1] ``` You'll have noticed that we got an intercept, even though we didn't specify one in the formula. **statsmodels** adds in an intercept by default because, most of the time, you will want one. To turn it off, add a `-1` at the end of the formula command, eg in this case you would call `smf.ols('mass ~ height -1', data=df).fit()`. The fit we got in the case with the intercept was pretty terrible; a low $R^2$ and both of our confidence intervals are large and contain zero. What's going on? If there's one adage in regression that's always worth paying attention to, it's *always plot your data*. Let's see what's going on here: ``` fig, ax = plt.subplots() sns.scatterplot(data=df, x="height", y="mass", s=200, ax=ax, legend=False, alpha=0.8) ax.annotate('Jabba the Hutt', df.iloc[df['mass'].idxmax()][['height', 'mass']], xytext=(0, -50), textcoords='offset points', arrowprops=dict(arrowstyle="fancy", color='k', connectionstyle="arc3,rad=0.3", ) ) ax.set_ylim(0, None) ax.set_title('Always plot the data', loc='left') plt.show() ``` Oh dear, Jabba's been on the paddy frogs again, and he's a bit of different case. 
When we're estimating statistical relationships, we have all kinds of choices and should be wary about arbitrary decisions of what to include or exclude in case we fool ourselves about the generality of the relationship we are capturing. Let's say we knew that we weren't interested in Hutts though, but only in other species: in that case, it's fair enough to filter out Jabba and run the regression without this obvious outlier. We'll exclude any entry that contains the string 'Jabba' in the `name` column: ``` results_outlier_free = smf.ols('mass ~ height', data=df[~df['name'].str.contains('Jabba')]).fit() print(results_outlier_free.summary()) ``` This looks a lot more healthy. Not only is the model explaining a *lot* more of the data, but the coefficients are now significant. ### Robust regression Filtering out data is one way to deal with outliers, but it's not the only one; an alternative is to use a regression technique that is robust to such outliers. **statsmodels** has a variety of robust linear models that you can read more about [here](https://www.statsmodels.org/stable/examples/notebooks/generated/robust_models_0.html). To demonstrate the general idea, we will run the regression again but using a robust method. ``` results_robust = smf.rlm('mass ~ height', data=df, M=sm.robust.norms.TrimmedMean(.5)).fit() print(results_robust.summary()) ``` There are many different 'M-estimators' available; in this case the TrimmedMean estimator gives a very similar result to the regression with the point excluded. We can visualise this, and, well, the results are not really very different in this case. Note that `abline_plot` just takes an intercept and coefficient from a fitted model and renders the line that they encode. 
``` fig, ax = plt.subplots() ax.scatter(df['height'], df['mass']) sm.graphics.abline_plot(model_results=results_robust, ax=ax, alpha=0.5, label='Robust') sm.graphics.abline_plot(model_results=results, ax=ax, color='red', label='OLS', alpha=0.5, ls='--') ax.legend() ax.set_xlabel('Height') ax.set_ylabel('Mass') ax.set_ylim(0, None) plt.show() ``` ### Standard errors You'll have seen that there's a column for the standard error of the estimates in the regression table and a message saying that the covariance type of these is 'nonrobust'. Let's say that, instead, we want to use Eicker-Huber-White robust standard errors, aka "HC2" standard errors. We can specify to use these up front standard errors up front in the fit method: ``` (smf.ols('mass ~ height', data=df) .fit(cov_type='HC2') .summary() .tables[1]) ``` Or, alternatively, we can go back to our existing results and recompute the results from those: ``` print(results.get_robustcov_results('HC2').summary()) ``` There are several different types of standard errors available in **statsmodels**: - ‘HC0’, ‘HC1’, ‘HC2’, and ‘HC3’ - ‘HAC’, for heteroskedasticity and autocorrelation consistent standard errors, for which you may want to also use some keyword arguments - 'hac-groupsum’, for Driscoll and Kraay heteroscedasticity and autocorrelation robust standard errors in panel data, again for which you may have to specify extra keyword arguments - 'hac-panel’, for heteroscedasticity and autocorrelation robust standard errors in panel data, again with keyword arguments; and - 'cluster' for clustered standard errors. You can find information on all of these [here](https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.OLSResults.get_robustcov_results.html?highlight=get_robustcov_results#statsmodels.regression.linear_model.OLSResults.get_robustcov_results). For more on standard errors in python, [this is a good](http://www.vincentgregoire.com/standard-errors-in-python/) link. 
For now, let's look more closely at those last ones: clustered standard errors. #### Clustered standard errors Often, we know something about the structure of likely errors, namely that they occur in groups. In the below example we use one-way clusters to capture this effect in the errors. Note that in the below example, we grab a subset of the data for which a set of variables we're interested in are defined, otherwise the below example would execute with an error because of missing cluster-group values. ``` xf = df.dropna(subset=['homeworld', 'mass', 'height', 'species']) results_clus = (smf.ols('mass ~ height', data=xf) .fit(cov_type='cluster', cov_kwds={'groups': xf['homeworld']})) print(results_clus.summary()) ``` We can add two-way clustering of standard errors using the following: ``` xf = df.dropna(subset=['homeworld', 'mass', 'height', 'species']) two_way_clusters = np.array(xf[['homeworld', 'species']], dtype=str) results_clus = (smf.ols('mass ~ height', data=xf) .fit(cov_type='cluster', cov_kwds={'groups': two_way_clusters})) print(results_clus.summary()) ``` As you would generally expect, the addition of clustering has increased the standard errors. ## Fixed effects and categorical variables Fixed effects are a way of allowing the intercept of a regression model to vary freely across individuals or groups. It is, for example, used to control for any individual-specific attributes that do not vary across time in panel data. Let's use the 'mtcars' dataset to demonstrate this. We'll read it in and set the datatypes of some of the columns at the same time. ``` mpg = (pd.read_csv('https://raw.githubusercontent.com/LOST-STATS/lost-stats.github.io/source/Data/mtcars.csv', dtype={'model': str, 'mpg':float, 'hp': float, 'disp': float, 'cyl': "category"})) mpg.head() ``` Now we have our data in we want to regress mpg (miles per gallon) on hp (horsepower) with fixed effects for cyl (cylinders). 
Now we *could* just pop in a formula like this `'mpg ~ hp + cyl'` because we took the trouble to declare that `cyl` was of datatype category when reading it in from the csv file. This means that statsmodels will treat it as a category and use it as a fixed effect by default. But when I read that formula I get nervous that `cyl` might not have been processed correctly (ie it could have been read in as a float, which is what it looks like) and it might just be treated as a float (aka a continuous variable) in the regression. Which is not what we want at all. So, to be safe, and make our intentions explicit (even when the data is of type 'category'), it's best to use the syntax `C(cyl)` to ask for a fixed effect. Here's a regression which does that: ``` results_fe = (smf.ols('mpg ~ hp + C(cyl)', data=mpg) .fit()) print(results_fe.summary()) ``` We can see here that two of the three possible values of `cyl`: ``` mpg['cyl'].unique() ``` have been added as fixed effects regressors. The way that `+C(cyl)` has been added makes it so that the coefficients given are relative to the coefficient for the intercept. We can turn the intercept off to get a coefficient per unique `cyl` value: ``` print(smf.ols('mpg ~ hp + C(cyl) -1', data=mpg) .fit() .summary() .tables[1]) ``` When there is an intercept, the coefficients of fixed effect variables can be interpreted as being the average of $y$ for that class *compared* to the excluded classes holding all other categories and variables fixed. ### High dimensional fixed effects, aka absorbing regression Sometimes, you just have a LOT of fixed effects (and perhaps you don't particularly care about them individually). A common example is having a large number of firms as part of a panel. Fortunately, there are ways to make regression with high dimensional fixed effects be both fast and concise. (In Stata, this is provided by the `reghdfe` package.) 
Here, we will use the [**linearmodels**](https://bashtage.github.io/linearmodels/index.html) package, which is built on top of **statsmodels**. Let's say we have a regression of the form $$ y_i = x_i\cdot \beta + z_i\cdot \gamma +\epsilon_i $$ where $y_i$ are observations indexed by $i$, $x_i$ are vectors of exogenous variables we care about the coefficients ($\beta$), $z_i$ are vectors of fixed effects we don't care too much about the coefficients (\gamma) for, and the $\epsilon_i$ are errors. Then we can use an *absorbing regression* to solve for the $\beta$ while ignoring the $\gamma$. Here's an example using simulated data on workers taken from the **linearmodels** docs. Let's simulate some data first, with two fixed effects (state and firm) alongside the two exogenous variables we're interested in. ``` from numpy.random import default_rng rng = default_rng() # Random number generator # Create synthetic input data nobs = 1_000_000 # No. observations state_id = rng.integers(50, size=nobs) # State identifier firm_id = rng.integers(nobs // 5, size=nobs) # Firm identifier (mean of 5 workers/firm) x = rng.standard_normal((nobs, 2)) # Exogenous variables sim = pd.DataFrame( {"state_id": pd.Categorical(state_id), "firm_id": pd.Categorical(firm_id), "exog_0": x[:, 0], "exog_1": x[:, 1]} ) # Create synthetic relationship beta = [1, 3] # coefficients of interest state_effects = rng.standard_normal(state_id.max() + 1) state_effects = state_effects[state_id] # Generate state fixed effects firm_effects = rng.standard_normal(firm_id.max() + 1) firm_effects = firm_effects[firm_id] # Generate firm fixed effects eps = rng.standard_normal(nobs) # Generate errors # Generate endogeneous outcome variable sim['y'] = sim['exog_0']*beta[0] + sim['exog_1']*beta[1] + firm_effects + state_effects + eps sim.head() ``` Now we pass this to **linearmodels** and with the `state_id` and `firm_id` variables entered via the `absorb` keyword argument: ``` from linearmodels.iv.absorbing import 
AbsorbingLS mod = AbsorbingLS(sim['y'], sim[['exog_0', 'exog_1']], absorb=sim[['state_id', 'firm_id']]) print(mod.fit()) ``` So, from our 1,000,000 observations, we have roughly 200,000 fixed effects that have been scooped up and packed away, leaving us with just the coefficients, $\beta$, on the exogenous variables of interest. ## Transformations of regressors This chapter is showcasing *linear* regression. What that means is that the model is linear in the regressors: but it doesn't mean that those regressors can't be some kind of (potentially non-linear) transform of the original features $x_i$. ### Logs and arcsinh You have two options for adding in logs: do them before, or do them in the formula. Doing them before just makes use of standard dataframe operations to declare a new column: ``` mpg['lnhp'] = np.log(mpg['hp']) print(smf.ols('mpg ~ lnhp', data=mpg) .fit() .summary() .tables[1]) ``` Alternatively, you can specify the log directly in the formula: ``` results_ln = smf.ols('mpg ~ np.log(hp)', data=mpg).fit() print(results_ln.summary().tables[1]) ``` Clearly, the first method will work for `arcsinh(x)` and `log(x+1)`, but you can also pass both of these into the formula directly too. (For more on the pros and cons of arcsinh, see {cite}`bellemare2020elasticities`.) Here it is with arcsinh: ``` print(smf.ols('mpg ~ np.arcsinh(hp)', data=mpg) .fit() .summary() .tables[1]) ``` ### Interaction terms and powers This chapter is showcasing *linear* regression. What that means is that the model is linear in the regressors: but it doesn't mean that those regressors can't be some kind of non-linear transform of the original features $x_i$. Two of the most common transformations that you might want to use are *interaction terms* and *polynomial terms*. An example of an interaction term would be $$ y = \beta_0 + \beta_1 x_1 \cdot x_2 $$ while an example of a polynomial term would be $$ y = \beta_0 + \beta_1 x_1^2 $$ i.e. 
the last term enters only after it is multiplied by itself. One note of warning: the interpretation of the effect of a variable is no longer as simple as was set out at the start of this chapter. To work out *what* the new interpretation is, the procedure is the same though: just take the derivative. In the case of the interaction model above, the effect of a unit change in $x_1$ on $y$ is now going to be a function of $x_2$. In the case of the polynomial model above, the effect of a unit change in $x_1$ on $y$ will be $2\beta_1 \cdot x_1$. For more on interaction terms, see {cite}`balli2013interaction`. Alright, with all of that preamble out of the way, let's see how we actual do some of this! Let's try including a linear and squared term in the regression of `mpg` on `hp` making use of the numpy power function: ``` res_poly = smf.ols('mpg ~ hp + np.power(hp, 2)', data=mpg).fit() print(res_poly.summary().tables[1]) ``` Now let's include the original term in hp, a term in disp, and the interaction between them, which is represented by hp:disp in the table. ``` res_inter = smf.ols('mpg ~ hp * disp', data=mpg).fit() print(res_inter.summary().tables[1]) ``` In the unusual case that you want *only* the interaction term, you write it as it appears in the table above: ``` print(smf.ols('mpg ~ hp : disp', data=mpg).fit().summary().tables[1]) ``` ## The formula API explained As you will have seen `~` separates the left- and right-hand sides of the regression. `+` computes a set union, which will also be familiar from the examples above (ie it inludes two terms as long as they are distinct). `-` computes a set difference; it adds the set of terms to the left of it while removing any that appear on the right of it. As we've seen, `a*b` is a short-hand for `a + b + a:b`, with the last term representing the interaction. `/` is short hand for `a + a:b`, which is useful if, for example `b` is nested within `a`, so it doesn't make sense to control for `b` on its own. 
Actually, the `:` character can interact multiple terms so that `(a + b):(d + c)` is the same as `a:c + a:d + b:c + b:d`. `C(a)` tells statsmodels to treat `a` as a categorical variable that will be included as a fixed effect. Finally, as we saw above with powers, you can also pass in vectorised functions, such as `np.log` and `np.power`, directly into the formulae. One gotcha with the formula API is ensuring that you have sensible variable names in your dataframe, i.e. ones that do *not* include whitespace or, to take a really pathological example, have the name 'a + b' for one of the columns that you want to regress on. You can dodge this kind of problem by passing in the variable name as, for example, `Q("a + b")` to be clear that the *column name* is anything within the `Q("...")`. ## Multiple regression models As is so often the case, you're likely to want to run more than one model at once with different specifications. Although there is a base version of this in **statsmodels**, called `summary_col`, which you can find an example of [here](http://aeturrell.com//2018/05/05/running-many-regressions-alongside-pandas/), instead we'll be using the [**stargazer**](https://github.com/mwburke/stargazer) package to assemble the regressions together in a table. In the above examples, we've collected a few different regression results. Let's put them together: ``` from stargazer.stargazer import Stargazer stargazer_tab = Stargazer([results_ln, res_poly, res_inter]) stargazer_tab ``` There are lots of customisation options, including ones that add a title, rename variables, add notes, and so on. What is most useful is that as well as the HTML friendly output that you can see above, the package also exports to latex: ``` print(stargazer_tab.render_latex()) ``` And of course this can be written to a file using `open('regression.tex', 'w').write(stargazer.render_latex())` where you can get your main latex compilation to scoop it up and use it. 
## Specifying regressions without formulae, using the array API As noted, there are two ways to run regressions in [**statsmodels**](https://www.statsmodels.org/stable/index.html); passing the data directly as objects, and using formulae. We've seen the formula API, now let's see how to specify regressions using arrays with the format `sm.OLS(y, X)`. We will first need to take the data out of the **pandas** dataframe and put it into a couple of arrays. When we're not using the formula API, the default is to treat the array X as the design matrix for the regression-so, if it doesn't have a column of constants in, there will be no intercept in the regression. Therefore, we need to add a constant vector to the matrix `X` if we *do* want an intercept. Use `sm.add_constant(X)` for this. ``` X = np.array(xf['height']) y = np.array(xf['mass']) X = sm.add_constant(X) results = sm.OLS(y, X).fit() print(results.summary()) ``` This approach seems a lot less convenient, not to mention less clear, so you may be wondering when it is useful. It's useful when you want to do many regressions in a systematic way or when you don't know what the columns of a dataset will be called ahead of time. It can actually be a little bit simpler to specify for more complex regressions too. ### Fixed effects in the array API If you're using the formula API, it's easy to turn a regressor `x` into a fixed effect by putting `C(x)` into the model formula, as you'll see in the next section. For the array API, things are not that simple and you need to use dummy variables. 
Let's say we have some data like this: ``` from numpy.random import Generator, PCG64 # Set seed for random numbers seed_for_prng = 78557 prng = Generator(PCG64(seed_for_prng)) no_obs = 200 X = pd.DataFrame(prng.normal(size=no_obs)) X[1] = prng.choice(['a', 'b'], size=no_obs) # Get this a numpy array X = X.values # Create the y data, adding in a bit of noise y = X[:, 0]*2 + 0.5 + prng.normal(scale=0.1, size=no_obs) y = [el_y + 1.5 if el_x == 'a' else el_y + 3.4 for el_y, el_x in zip(y, X[:, 1])] X[:5, :] ``` The first feature (column) is of numbers and it's clear how we include it. The second, however, is a grouping that we'd like to include as a fixed effect. But if we just throw this matrix into `sm.OLS(y, X)`, we're going to get trouble because **statsmodels** isn't sure what to do with a vector of strings. So, instead, we need to create some dummy variables out of our second column of data Astonishingly, there are several popular ways to create dummy variables in Python: **scikit-learn**'s `OneHotEncoder` and **pandas**' `get_dummies` being my favourites. Let's use the latter here. ``` pd.get_dummies(X[:, 1]) ``` We just need to pop this into our matrix $X$: ``` X = np.column_stack([X[:, 0], pd.get_dummies(X[:, 1])]) X = np.array(X, dtype=float) X[:5, :] ``` Okay, so now we're ready to do our regression: ``` print(sm.OLS(y, X).fit().summary()) ``` Perhaps you can see why I generally prefer the formula API... ## Instrumental variables Rather than use **statsmodels** for IV, we'll use the [**linearmodels**](https://bashtage.github.io/linearmodels/doc/index.html) package, which has very clean documentation (indeed, this sub-section is indebted to that documentation). Recall that a good instrumental variable $z$ has zero covariance with the error from the regression (which is untestable) and non-zero covariance with the variable of interest (which is). 
Recall that in IV regression, we have a model of the form $$ \begin{split}y_i & = x_{1i}\hat{\beta_1} + x_{2i}\hat{\beta_2} + \epsilon_i \\ x_{2i} & = z_{1i}\hat{\delta} + z_{2i}\hat{\gamma} + \nu_i\end{split} $$ where $x_{1i}$ is a set of $k_1$ exogenous regressors and $x_{2i}$ is a set of $k_2$ endogenous regressors such that $\text{Cov}(x_{2i}, \epsilon_i)\neq 0$. This is a problem for the usual OLS assumptions (the right-hand side should be exogenous). To get around this, in 2-stage least squares IV, we first regress $x_{2i}$ on instruments that explain $x_{2i}$ *but not* $y_i$, and then regress $y_i$ only on the predicted/estimated left-hand side from the first regression, ie on $\hat{x_{2i}}$. There are other estimators than IV2SLS, but I think that one has the most intuitive explanation of what's going. As well as a 2-stage least squares estimator called `IV2SLS`, **linearmodels** has a Limited Information Maximum Likelihood (LIML) estimator `IVLIML`, a Generalized Method of Moments (GMM) estimator `IVGMM`, and a Generalized Method of Moments using the Continuously Updating Estimator (CUE) `IVGMMCUE`. Just as with OLS via **statsmodels**, there's an option to use an array API for the **linearmodels** IV methods. It's always easiest to see an example, so let's estimate what might cause (realised) cigarette demand for the 48 continental US states in 1995 with `IV2SLS`. First we need to import the estimator, `IV2SLS`, and the data: ``` from linearmodels.iv import IV2SLS df = (pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/AER/CigarettesSW.csv', dtype={'state': 'category', 'year': 'category'}) .assign(rprice = lambda x: x['price']/x['cpi'], rincome = lambda x: x['income']/x['population']/x['cpi']) ) df.head() ``` Now we'll specify the model. It's going to be in the form `dep ~ exog + [endog ~ instruments]`, where endog will be regressed on instruments and dep will be regressed on both exog and the predicted values of endog. 
In this case, the model will be $$ \text{Price}_i = \hat{\pi_0} + \hat{\pi_1} \text{SalesTax}_i + v_i $$ in the first stage regression and $$ \text{Packs}_i = \hat{\beta_0} + \hat{\beta_2}\widehat{\text{Price}_i} + \hat{\beta_1} \text{RealIncome}_i + u_i $$ in the second stage. ``` results_iv2sls = (IV2SLS.from_formula('np.log(packs) ~ 1 + np.log(rincome) + C(year) + C(state) + [np.log(rprice) ~ taxs]', df) .fit(cov_type='clustered', clusters=df['year'])) print(results_iv2sls.summary) ``` We sort of skipped a step here and did everything all in one go. If we *did* want to know how our first stage regression went, we can just pass a formula to `IV2SLS` without the part in square brackets, `[...]`, and it will run regular OLS. But, in this case, there's an easier way: we can print out a set of handy 1st stage statistics from running the full model. ``` print(results_iv2sls.first_stage) ``` There are more tests and checks available. For example, Wooldridge’s regression test of exogeneity uses regression residuals from the endogenous variables regressed on the exogenous variables and the instrument to test for endogenity and is available to run on fitted model results. Let's check that: ``` results_iv2sls.wooldridge_regression ``` We can compare the IV results against (naive) OLS. First, run the OLS equivalent: ``` res_cig_ols = (IV2SLS.from_formula('np.log(packs) ~ 1 + np.log(rincome) + C(year) + C(state) + np.log(rprice)', df) .fit(cov_type='clustered', clusters=df['year'])) ``` Now select these two models to compare: ``` from collections import OrderedDict from linearmodels.iv.results import compare res = OrderedDict() res['OLS'] = res_cig_ols res['2SLS'] = results_iv2sls print(compare(res)) ``` Once we take into account the fact that the real price is endogeneous to (realised) demand, we find that its coefficient is more negative; i.e. an increase in the real price of cigarettes creates a bigger fall in number of packs bought. 
## Logit, probit, and generalised linear models ### Logit A logistical regression, aka a logit, is a statistical method for a best-fit line between a regressors $X$ and an outcome varibale $y$ that takes on values in $(0, 1)$. The function that we're assuming links the regressors and the outcome has a few different names but the most common is the sigmoid function or the logistic function. The data generating process is assumed to be $$ {\displaystyle \mathbb{P}(Y=1\mid X) = \frac{1}{1 + e^{-X'\beta}}} $$ we can also write this as $\ln\left(\frac{p}{p-1}\right) = \beta_0 + \sum_i \beta_i x_i$ to get a 'log-odds' relationship. The coefficients from a logit model do not have the same interpration as in an OLS estimation, and you can see this from the fact that $\partial y/\partial x_i \neq \beta_i$ for logit. Of course, you can work out what the partial derivative is for yourself but most packages offer a convenient way to quickly recover the marginal effects. Logit models are available in **scikit-learn** and **statsmodels** but bear in mind that the **scikit-learn** logit model is, ermm, extremely courageous in that regularisation is applied by default. If you don't know what that means, don't worry, but it's probably best to stick with **statsmodels** as we will do in this example. We will predict a target `GRADE`, representing whether a grade improved or not, based on some regressors including participation in a programme. ``` # Load the data from Spector and Mazzeo (1980) df = sm.datasets.spector.load_pandas().data # Look at info on data print(sm.datasets.spector.NOTE) res_logit = smf.logit('GRADE ~ GPA + TUCE + PSI', data=df).fit() print(res_logit.summary()) ``` So, did participation (`PSI`) help increase a grade? Yes. But we need to check the marginal effect to say exactly how much. We'll use `get_margeff` to do this, we'd like the $dy/dx$ effect, and we'll take it at the mean of each regressor. 
``` marg_effect = res_logit.get_margeff(at='mean', method='dydx') marg_effect.summary() ``` So participation gives almost half a grade increase. ### Probit Probit is very similar to logit: it's a statistical method for a best-fit line between regressors $X$ and an outcome varibale $y$ that takes on values in $(0, 1)$. And, just like with logit, the function that we're assuming links the regressors and the outcome has a few different names! The data generating process is assumed to be $$ {\displaystyle \mathbb{P}(Y=1\mid X)=\Phi (X^{T}\beta )} $$ where $$ {\displaystyle \Phi (x)={\frac {1}{\sqrt {2\pi }}}\int _{-\infty }^{x}e^{-{\frac {y^{2}}{2}}}dy.} $$ is the cumulative standard normal (aka Gaussian) distribution. The coefficients from a probit model do not have the same interpration as in an OLS estimation, and you can see this from the fact that $\partial y/\partial x_i \neq \beta_i$ for probit. And, just as with logit, although you can derive the marginal effects, most packages offer a convenient way to quickly recover them. We can re-use our previous example of predicting a target `GRADE`, representing whether a grade improved or not, based on some regressors including participation (PSI) in a programme. ``` res_probit = smf.probit('GRADE ~ GPA + TUCE + PSI', data=df).fit() print(res_probit.summary()) p_marg_effect = res_probit.get_margeff(at='mean', method='dydx') p_marg_effect.summary() ``` It's no coincidence that we find very similar results here because the two functions we're using don't actually look all that different: ``` import scipy.stats as st fig, ax = plt.subplots() support = np.linspace(-6, 6, 1000) ax.plot(support, st.logistic.cdf(support), 'r-', ls='--', label='Logistic') ax.plot(support, st.norm.cdf(support), label='Probit') ax.legend() ax.set_ylim(0, None) ax.set_ylim(0, None) plt.show() ``` What difference there is, is that logistic regression puts more weight into the tails of the distribution. Arguably, logit is easier to interpret too. 
With logistic regression, a one unit change in $x_i$ is associated with a $\beta_i$ change in the log odds of a 1 outcome or, alternatively, an $e^{\beta_i}$-fold change in the odds, all else being equal. With a probit, this is a change of $\beta_i z$ for $z$ a normalised variable that you'd have to convert into a predicted probability using the normal CDF. ### Generalised linear models Logit and probit (and OLS for that matter) as special cases of a class of models such that $g$ is a 'link' function connects a function of regressors to the output, and $\mu$ is the mean of a conditional response distribution at a given point in the space of regressors. When $g(\mu) = X'\beta$, we just get regular OLS. When it's logit, we have $$ {\displaystyle \mu= \mathbb{E}(Y\mid X=x) =g^{-1}(X'\beta)= \frac{1}{1 + e^{-X'\beta}}.} $$ But as well as the ones we've seen, there are many possible link functions one can use via the catch-all `glm` function. These come in different 'families' of distributions, with the default for the binomial family being logit. So, running `smf.glm('GRADE ~ GPA + TUCE + PSI', data=df, family=sm.families.Binomial()).fit()` will produce exactly the same as we got both using the `logit` function. For more on the families of distributions and possible link functions, see the [relevant part](https://www.statsmodels.org/stable/glm.html#) of the **statsmodels** documentation. ## Linear probability model When $y$ takes values in $\{0, 1\}$ but the model looks like $$ y = x' \cdot \beta $$ and is estimated by OLS then you have a linear probability model. In this case, the interpretion of a unit change in $x_i$ is that it induces a $\beta_i$ *change in probability* of $y$. Note that homoskedasticity does not hold for the linear probability model. ## Violations of the classical linear model (CLM) ### Heteroskedasticity If an estimated model is homoskedastic then its random variables have equal (finite) variance. This is also known as homogeneity of variance. 
Another way of putting it is that, for all *observations* $i$ in an estimated model $y_i = X_i\hat{\beta} + \epsilon_i$ then $$ \mathbb{E}(\epsilon_i \epsilon_i) = \sigma^2 $$ When this relationship does not hold, an estimated model is said to be heteroskedastic. To test for heteroskedasticity, you can use **statsmodels**' versions of the [Breusch-Pagan](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.het_breuschpagan.html#statsmodels.stats.diagnostic.het_breuschpagan) or [White](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.het_white.html#statsmodels.stats.diagnostic.het_white) tests with the null hypothesis that the estimated model is homoskedastic. If the null hypothesis is rejected, then standard errors, t-statistics, and F-statistics are invalidated. In this case, you will need HAC (heteroskedasticity and auto-correlation consistent) standard errors, t- and F-statistics. To obtain HAC standard errors from existing regression results in a variable `results`, you can use (for 1 lag): ```python results.get_robustcov_results('HAC', maxlags=1).summary() ``` ## Quantile regression Quantile regression estimates the conditional quantiles of a response variable. In some cases, it can be more robust to outliers and, in the case of the $q=0.5$ quantile it is equivalent LAD (Least Absolute Deviation) regression. Let's look at an example of quantile regression in action, lifted direct from the **statsmodels** [documentation](https://www.statsmodels.org/dev/examples/notebooks/generated/quantile_regression.html) and based on a Journal of Economic Perspectives paper by Koenker and Hallock. ``` df = sm.datasets.engel.load_pandas().data df.head() ``` What we have here are two sets of related data. 
Let's perform several quantile regressions from 0.1 to 0.9 in steps of 0.1 ``` mod = smf.quantreg('foodexp ~ income', df) quantiles = np.arange(0.1, 1., 0.1) q_results = [mod.fit(q=x) for x in quantiles] ``` The $q=0.5$ entry will be at the `4` index; let's take a look at it: ``` print(q_results[4].summary()) ``` Let's take a look at the results for all of the regressions *and* let's add in OLS for comparison: ``` ols_res = smf.ols('foodexp ~ income', df).fit() get_y = lambda a, b: a + b * x x = np.arange(df.income.min(), df.income.max(), 50) # Just to make the plot clearer x_max = 3000 x = x[x<x_max] fig, ax = plt.subplots() df.plot.scatter(ax=ax, x='income', y='foodexp', alpha=0.7, s=10, zorder=2, edgecolor=None) for i, res in enumerate(q_results): y = get_y(res.params['Intercept'], res.params['income']) ax.plot(x, y, color='grey', lw=0.5, zorder=0, linestyle=(0, (5, 10))) ax.annotate(f'$q={quantiles[i]:1.1f}$', xy=(x.max(), y.max())) y = get_y(ols_res.params['Intercept'], ols_res.params['income']) ax.plot(x, y, color='red', label='OLS', zorder=0) ax.legend() ax.set_xlim(0, x_max) plt.show() ``` This chart shows very clearly how quantile regression differs from OLS. The line fitted by OLS is trying to be all things to all points whereas the line fitted by quantile regression is focused only on its quantile. You can also see how points far from the median (not all shown) may be having a large influence on the OLS line. ## Rolling and recursive regressions Rolling ordinary least squares applies OLS (ordinary least squares) across a fixed window of observations and then rolls (moves or slides) that window across the data set. They key parameter is `window`, which determines the number of observations used in each OLS regression. Recursive regression is equivalent to rolling regression but with a window that expands over time. 
Let's first create some synthetic data to perform estimation on: ``` from statsmodels.regression.rolling import RollingOLS import statsmodels.api as sm from sklearn.datasets import make_regression X, y = make_regression(n_samples=200, n_features=2, random_state=0, noise=4.0, bias=0) df = pd.DataFrame(X).rename(columns={0: 'feature0', 1: 'feature1'}) df['target'] = y df.head() ``` Now let's fit the model using a formula and a `window` of 25 steps. ``` roll_reg = RollingOLS.from_formula('target ~ feature0 + feature1 -1', window=25, data=df) model = roll_reg.fit() ``` Note that -1 in the formala suppresses the intercept. We can see the parameters using `model.params`. Here are the params for time steps between 20 and 30: ``` model.params[20:30] ``` Note that there aren't parameters for entries between 0 and 23 because our window is 25 steps wide. We can easily look at how any of the coefficients are changing over time. Here's an example for 'feature0'. ``` fig = model.plot_recursive_coefficient(variables=['feature0']) plt.xlabel('Time step') plt.ylabel('Coefficient value') plt.show() ``` A rolling regression with an *expanding* rather than *moving* window is effectively a recursive least squares model. We can do this instead using the `RecursiveLS` function from **statsmodels**. Let's fit this to the whole dataset: ``` reg_rls = sm.RecursiveLS.from_formula( 'target ~ feature0 + feature1 -1', df) model_rls = reg_rls.fit() print(model_rls.summary()) ``` But now we can look back at how the values of the coefficients changed over time too: ``` fig = model_rls.plot_recursive_coefficient(range(reg_rls.k_exog), legend_loc='upper right') ax_list = fig.axes for ax in ax_list: ax.set_xlim(0, None) ax_list[-1].set_xlabel('Time step') ax_list[0].set_title('Coefficient value'); ``` ## Regression plots **statsmodels** has a number of built-in plotting methods to help you understand how well your regression is capturing the relationships you're looking for. 
Let's see a few examples of these using **statsmodels** built-in Statewide Crime data set: ``` crime_data = sm.datasets.statecrime.load_pandas() print(sm.datasets.statecrime.NOTE) ``` First, let's look at a Q-Q plot to get a sense of how the variables are distributed. This uses **scipy**'s stats module. The default distribution is normal but you can use any that **scipy** supports. ``` st.probplot(crime_data.data['murder'], dist='norm', plot=plt); ``` Clearly, this is not quite normal and there are some serious outliers in the tails. Let's run take a look at the unconditional relationship we're interested in: how murder depends on high school graduation. We'll use [**plotnine**](https://plotnine.readthedocs.io/en/stable/index.html)'s `geom_smooth` to do this but bear in mind it will only run a linear model of `'murder ~ hs_grad'` and ignore the other covariates. ``` from plotnine import * ( ggplot(crime_data.data, aes(y='murder', x='hs_grad')) + geom_point() + geom_smooth(method='lm') ) ``` We can take into account those other factors by using a partial regression plot that asks what does $\mathbb{E}(y|X)$ look like as a function of $\mathbb{E}(x_i|X)$? (Use `obs_labels=False` to remove data point labels.) ``` with plt.rc_context({'font.size': 5}): sm.graphics.plot_partregress(endog='murder', exog_i='hs_grad', exog_others=['urban', 'poverty', 'single'], data=crime_data.data, obs_labels=True) plt.show() ``` At this point, the results of the regression are useful context. ``` results_crime = smf.ols('murder ~ hs_grad + urban + poverty + single', data=crime_data.data).fit() print(results_crime.summary()) ``` Putting the multicollinearity problems to one side, we see that the relationship shown in the partial regression plot is also implied by the coefficient on `hs_grad` in the regression table. We can also look at an in-depth summary of one exogenous regressor and its relationship to the outcome variable. 
Each of these types of regression diagnostic is available individually
``` # Put the results into a dataframe with Name, Coefficient, Error res_df = (pd.concat([results_crime.params[1:], results_crime.bse[1:]], axis=1) .reset_index() .rename(columns={'index': 'Name', 0: 'Coefficient', 1: 'Error'})) # Plot the coefficient values and their errors ( ggplot(res_df) + geom_point(aes("Name", "Coefficient")) + geom_errorbar(aes(x="Name", ymin="Coefficient-Error", ymax="Coefficient+Error")) ) ``` ## Specification curve analysis When specifying a model, modellers have many options. These can be informed by field intelligence, priors, and even misguided attempts to find a significant result. Even with the best of intentions, research teams can reach entirely different conclusions using the same, or similar, data because of different choices made in preparing data or in modelling it. There’s formal evidence that researchers really do make different decisions; one study {cite}`silberzahn2018many` gave the same research question - whether soccer referees are more likely to give red cards to dark-skin-toned players than to light-skin-toned players - to 29 different teams. From the abstract of that paper: > Analytic approaches varied widely across the teams, and the estimated effect sizes ranged from 0.89 to 2.93 (Mdn = 1.31) in odds-ratio units. Twenty teams (69%) found a statistically significant positive effect, and 9 teams (31%) did not observe a significant relationship. Overall, the 29 different analyses used 21 unique combinations of covariates. Neither analysts’ prior beliefs about the effect of interest nor their level of expertise readily explained the variation in the outcomes of the analyses. Peer ratings of the quality of the analyses also did not account for the variability. So not only were different decisions made, there seems to be no clearly identifiable reason for them. There is usually scope for reasonable alternative model specifications when estimating coefficients, and those coefficients will vary with those specifications. 
Specification curve analysis {cite}`simonsohn2020specification` looks for a more exhaustive way of trying out alternative specifications. The three steps of specification curve analysis are: 1. identifying the set of theoretically justified, statistically valid, and non-redundant analytic specifications; 2. displaying alternative results graphically, allowing the identification of decisions producing different results; and 3. conducting statistical tests to determine whether as a whole results are inconsistent with the null hypothesis. For a good example of specification curve analysis in action, see this recent Nature Human Behaviour paper {cite}`orben2019association` on the association between adolescent well-being and the use of digital technology. We'll use the [**specification curve analysis**](https://specification-curve.readthedocs.io/en/latest/readme.html) package to do the first two, which you can install with `pip install specification_curve` (full disclosure: I wrote this package). To demonstrate the full functionality, we'll create a second, alternative 'hp' that is a transformed version of the original. ``` mpg['hp_boxcox'], _ = st.boxcox(mpg['hp']) ``` Now let's create a specification curve. We need to specify the data, the different outcome variables we'd like to try, `y_endog`; the different possible versions of the main regressor of interest, `x_exog`; the possible controls, `controls`; any controls that should always be included, `always_include`; and any categorical variables to include class-by-class, `cat_expand`. Some of these accept lists of variables as well as single reggressors. The point estimates that have confidence intervals which include zero are coloured in grey, instead of blue. There is also an `exclu_grps` option to exclude certain combinations of regressors, and you can pass alternative estimators to fit, for example `fit(estimator=sm.Logit)`. 
``` from specification_curve import specification_curve as specy sc = specy.SpecificationCurve(mpg, y_endog='mpg', x_exog=['lnhp', 'hp_boxcox'], controls=['drat', 'qsec', 'cyl', 'gear'], always_include=['gear'], cat_expand='cyl') sc.fit() sc.plot() ``` ## Review In this very short introduction to regression with code, you should have learned how to: - ✅ perform linear OLS regressions with code; - ✅ add fixed effects/categorical variables to regressions; - ✅ use different standard errors; - ✅ use models with transformed regressors; - ✅ use the formula or array APIs for **statsmodels** and **linearmodels**; - ✅ show the results from multiple models; - ✅ perform IV regressions; - ✅ perform GLM regressions; and - ✅ use plots as a way to interrogate regression results.
github_jupyter
# FloPy ### MODFLOW-2005 based model checker demonstration ``` import os import sys # run installed version of flopy or add local path try: import flopy except: fpth = os.path.abspath(os.path.join("..", "..")) sys.path.append(fpth) import flopy print(sys.version) print("flopy version: {}".format(flopy.__version__)) ``` #### Set the working directory ``` path = os.path.join("..", "data", "mf2005_test") ``` #### Load example dataset and change the model work space ``` m = flopy.modflow.Modflow.load("test1ss.nam", model_ws=path) m.change_model_ws("data") ``` By default, the checker performs a model-level check when a set of model files are loaded, unless load is called with `check=False`. The load check only produces screen output if load is called with `verbose=True`. Checks are also performed at the package level when an individual package is loaded #### The `check()` method Each model and each package has a `check()` method. The check method has three arguments: ``` help(m.check) ``` #### The check class By default, check is called at the model level without a summary output file, but with `verbose=True` and `level=1`. The check methods return an instance of the **check** class, which is housed with the flopy utilities. ``` chk = m.check() ``` #### Summary array Most of the attributes and methods in **check** are intended to be used by the ``check()`` methods. The central attribute of **check** is the summary array: ``` chk.summary_array ``` This is a numpy record array that summarizes errors and warnings found by the checker. The package, layer-row-column location of the error, the offending value, and a description of the error are provided. In the checker, errors and warnings are loosely defined as follows: #### Errors: either input that would cause MODFLOW to crash, or inputs that almost certainly mis-represent the intended conceptual model. #### Warnings: inputs that are potentially problematic, but may be intentional. 
each package-level check produces a **check** instance with a summary array. The model level checks combine the summary arrays from the packages into a master summary array. At the model and the package levels, the summary array is used to generate the screen output shown above. At either level, the summary array can be written to a csv file by supply a filename to the `f` argument. Specifying `level=2` prints the summary array to the screen. ``` m.check(level=2) ``` #### example of package level check and summary file ``` m.rch.check() ``` #### example of summary output file ``` m.check(f="checksummary.csv") try: import pandas as pd df = pd.read_csv("data/checksummary.csv") except: df = open("data/checksummary.csv").readlines() df ``` #### checking on `write_input()` checking is also performed by default when `write_input()` is called at the package or model level. Checking on write is performed with the same `verbose` setting as specified for the model. However, if errors or warnings are encountered and `level=1` (default) or higher, a screen message notifies the user of the errors. By default, the checks performed on `load()` and `write_input()` save results to a summary file, which is named after the packge or the model. ``` m.write_input() ```
github_jupyter
# Masakhane - Machine Translation for African Languages (Using JoeyNMT) ## Note before beginning: ### - The idea is that you should be able to make minimal changes to this in order to get SOME result for your own translation corpus. ### - The tl;dr: Go to the **"TODO"** comments which will tell you what to update to get up and running ### - If you actually want to have a clue what you're doing, read the text and peek at the links ### - With 100 epochs, it should take around 7 hours to run in Google Colab ### - Once you've gotten a result for your language, please attach and email your notebook that generated it to masakhanetranslation@gmail.com ### - If you care enough and get a chance, doing a brief background on your language would be amazing. See examples in [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) ## Retrieve your data & make a parallel corpus If you are wanting to use the JW300 data referenced on the Masakhane website or in our GitHub repo, you can use `opus-tools` to convert the data into a convenient format. `opus_read` from that package provides a convenient tool for reading the native aligned XML files and to convert them to TMX format. The tool can also be used to fetch relevant files from OPUS on the fly and to filter the data as necessary. [Read the documentation](https://pypi.org/project/opustools-pkg/) for more details. Once you have your corpus files in TMX format (an xml structure which will include the sentences in your target language and your source language in a single file), we recommend reading them into a pandas dataframe. Thankfully, Jade wrote a silly `tmx2dataframe` package which converts your tmx file to a pandas dataframe. ``` from google.colab import drive drive.mount('/content/drive') # TODO: Set your source and target languages. 
Keep in mind, these traditionally use language codes as found here: # These will also become the suffix's of all vocab and corpus files used throughout import os source_language = "en" target_language = "fon" lc = False # If True, lowercase the data. seed = 42 # Random seed for shuffling. tag = "baseline" # Give a unique name to your folder - this is to ensure you don't rewrite any models you've already submitted os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts os.environ["tgt"] = target_language os.environ["tag"] = tag # This will save it to a folder in our gdrive instead! !mkdir -p "/content/drive/My Drive/masakhane/$src-$tgt-$tag" os.environ["gdrive_path"] = "/content/drive/My Drive/masakhane/%s-%s-%s" % (source_language, target_language, tag) !echo $gdrive_path # Install opus-tools ! pip install opustools-pkg # Downloading our corpus ! opus_read -d JW300 -s $src -t $tgt -wm moses -w jw300.$src jw300.$tgt -q # extract the corpus file ! gunzip JW300_latest_xml_$src-$tgt.xml.gz # Download the global test set. ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-any.en # And the specific test set for this language pair. os.environ["trg"] = target_language os.environ["src"] = source_language ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.en ! mv test.en-$trg.en test.en ! wget https://raw.githubusercontent.com/juliakreutzer/masakhane/master/jw300_utils/test/test.en-$trg.$trg ! mv test.en-$trg.$trg test.$trg # Read the test data to filter from train and dev splits. # Store english portion in set for quick filtering checks. 
en_test_sents = set() filter_test_sents = "test.en-any.en" j = 0 with open(filter_test_sents) as f: for line in f: en_test_sents.add(line.strip()) j += 1 print('Loaded {} global test sentences to filter from the training/dev data.'.format(j)) import pandas as pd # TMX file to dataframe source_file = 'jw300.' + source_language target_file = 'jw300.' + target_language source = [] target = [] skip_lines = [] # Collect the line numbers of the source portion to skip the same lines for the target portion. with open(source_file) as f: for i, line in enumerate(f): # Skip sentences that are contained in the test set. if line.strip() not in en_test_sents: source.append(line.strip()) else: skip_lines.append(i) with open(target_file) as f: for j, line in enumerate(f): # Only add to corpus if corresponding source was not skipped. if j not in skip_lines: target.append(line.strip()) print('Loaded data and skipped {}/{} lines since contained in test set.'.format(len(skip_lines), i)) df = pd.DataFrame(zip(source, target), columns=['source_sentence', 'target_sentence']) # if you get TypeError: data argument can't be an iterator is because of your zip version run this below #df = pd.DataFrame(list(zip(source, target)), columns=['source_sentence', 'target_sentence']) df.head(3) df.source_sentence[10] df.target_sentence[10] len(df) ``` ## Pre-processing and export It is generally a good idea to remove duplicate translations and conflicting translations from the corpus. In practice, these public corpora include some number of these that need to be cleaned. In addition we will split our data into dev/test/train and export to the filesystem. 
``` # drop duplicate translations df_pp = df.drop_duplicates() # drop conflicting translations # (this is optional and something that you might want to comment out # depending on the size of your corpus) df_pp.drop_duplicates(subset='source_sentence', inplace=True) df_pp.drop_duplicates(subset='target_sentence', inplace=True) # Shuffle the data to remove bias in dev set selection. df_pp = df_pp.sample(frac=1, random_state=seed).reset_index(drop=True) # Install fuzzy wuzzy to remove "almost duplicate" sentences in the # test and training sets. ! pip install fuzzywuzzy ! pip install python-Levenshtein import time from fuzzywuzzy import process import numpy as np # reset the index of the training set after previous filtering df_pp.reset_index(drop=False, inplace=True) # Remove samples from the training data set if they "almost overlap" with the # samples in the test set. # Filtering function. Adjust pad to narrow down the candidate matches to # within a certain length of characters of the given sample. def fuzzfilter(sample, candidates, pad): candidates = [x for x in candidates if len(x) <= len(sample)+pad and len(x) >= len(sample)-pad] if len(candidates) > 0: return process.extractOne(sample, candidates)[1] else: return np.nan # NOTE - This might run slow depending on the size of your training set. We are # printing some information to help you track how long it would take. 
scores = [] start_time = time.time() for idx, row in df_pp.iterrows(): scores.append(fuzzfilter(row['source_sentence'], list(en_test_sents), 5)) if idx % 1000 == 0: hours, rem = divmod(time.time() - start_time, 3600) minutes, seconds = divmod(rem, 60) print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds), "%0.2f percent complete" % (100.0*float(idx)/float(len(df_pp)))) # Filter out "almost overlapping samples" df_pp['scores'] = scores df_pp = df_pp[df_pp['scores'] < 95] # This section does the split between train/dev for the parallel corpora then saves them as separate files # We use 1000 dev test and the given test set. import csv # Do the split between dev/train and create parallel corpora num_dev_patterns = 1000 # Optional: lower case the corpora - this will make it easier to generalize, but without proper casing. if lc: # Julia: making lowercasing optional df_pp["source_sentence"] = df_pp["source_sentence"].str.lower() df_pp["target_sentence"] = df_pp["target_sentence"].str.lower() # Julia: test sets are already generated dev = df_pp.tail(num_dev_patterns) # Herman: Error in original stripped = df_pp.drop(df_pp.tail(num_dev_patterns).index) with open("train."+source_language, "w") as src_file, open("train."+target_language, "w") as trg_file: for index, row in stripped.iterrows(): src_file.write(row["source_sentence"]+"\n") trg_file.write(row["target_sentence"]+"\n") with open("dev."+source_language, "w") as src_file, open("dev."+target_language, "w") as trg_file: for index, row in dev.iterrows(): src_file.write(row["source_sentence"]+"\n") trg_file.write(row["target_sentence"]+"\n") #stripped[["source_sentence"]].to_csv("train."+source_language, header=False, index=False) # Herman: Added `header=False` everywhere #stripped[["target_sentence"]].to_csv("train."+target_language, header=False, index=False) # Julia: Problematic handling of quotation marks. 
#dev[["source_sentence"]].to_csv("dev."+source_language, header=False, index=False) #dev[["target_sentence"]].to_csv("dev."+target_language, header=False, index=False) # Doublecheck the format below. There should be no extra quotation marks or weird characters. ! head train.* ! head dev.* ``` --- ## Installation of JoeyNMT JoeyNMT is a simple, minimalist NMT package which is useful for learning and teaching. Check out the documentation for JoeyNMT [here](https://joeynmt.readthedocs.io) ``` # Install JoeyNMT ! git clone https://github.com/joeynmt/joeynmt.git ! cd joeynmt; pip3 install . ``` # Preprocessing the Data into Subword BPE Tokens - One of the most powerful improvements for agglutinative languages (a feature of most Bantu languages) is using BPE tokenization [ (Sennrich, 2015) ](https://arxiv.org/abs/1508.07909). - It was also shown that by optimizing the umber of BPE codes we significantly improve results for low-resourced languages [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021) [(Martinus, 2019)](https://arxiv.org/abs/1906.05685) - Below we have the scripts for doing BPE tokenization of our data. We use 4000 tokens as recommended by [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021). You do not need to change anything. Simply running the below will be suitable. ``` # One of the huge boosts in NMT performance was to use a different method of tokenizing. # Usually, NMT would tokenize by words. However, using a method called BPE gave amazing boosts to performance # Do subword NMT from os import path os.environ["src"] = source_language # Sets them in bash as well, since we often use bash scripts os.environ["tgt"] = target_language # Learn BPEs on the training data. os.environ["data_path"] = path.join("joeynmt", "data", source_language + target_language) # Herman! ! 
subword-nmt learn-joint-bpe-and-vocab --input train.$src train.$tgt -s 4000 -o bpe.codes.4000 --write-vocabulary vocab.$src vocab.$tgt # Apply BPE splits to the development and test data. ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < train.$src > train.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < train.$tgt > train.bpe.$tgt ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < dev.$src > dev.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < dev.$tgt > dev.bpe.$tgt ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$src < test.$src > test.bpe.$src ! subword-nmt apply-bpe -c bpe.codes.4000 --vocabulary vocab.$tgt < test.$tgt > test.bpe.$tgt # Create directory, move everyone we care about to the correct location ! mkdir -p $data_path ! cp train.* $data_path ! cp test.* $data_path ! cp dev.* $data_path ! cp bpe.codes.4000 $data_path ! ls $data_path # Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path ! cp train.* "$gdrive_path" ! cp test.* "$gdrive_path" ! cp dev.* "$gdrive_path" ! cp bpe.codes.4000 "$gdrive_path" ! ls "$gdrive_path" # Create that vocab using build_vocab ! sudo chmod 777 joeynmt/scripts/build_vocab.py ! joeynmt/scripts/build_vocab.py joeynmt/data/$src$tgt/train.bpe.$src joeynmt/data/$src$tgt/train.bpe.$tgt --output_path joeynmt/data/$src$tgt/vocab.txt # Some output ! echo "BPE Xhosa Sentences" ! tail -n 5 test.bpe.$tgt ! echo "Combined BPE Vocab" ! tail -n 10 joeynmt/data/$src$tgt/vocab.txt # Herman # Also move everything we care about to a mounted location in google drive (relevant if running in colab) at gdrive_path ! cp train.* "$gdrive_path" ! cp test.* "$gdrive_path" ! cp dev.* "$gdrive_path" ! cp bpe.codes.4000 "$gdrive_path" ! ls "$gdrive_path" ``` # Creating the JoeyNMT Config JoeyNMT requires a yaml config. We provide a template below. 
We've also set a number of defaults with it, that you may play with! - We used Transformer architecture - We set our dropout to reasonably high: 0.3 (recommended in [(Sennrich, 2019)](https://www.aclweb.org/anthology/P19-1021)) Things worth playing with: - The batch size (also recommended to change for low-resourced languages) - The number of epochs (we've set it at 30 just so it runs in about an hour, for testing purposes) - The decoder options (beam_size, alpha) - Evaluation metrics (BLEU versus Crhf4) ``` # This creates the config file for our JoeyNMT system. It might seem overwhelming so we've provided a couple of useful parameters you'll need to update # (You can of course play with all the parameters if you'd like!) name = '%s%s' % (source_language, target_language) gdrive_path = os.environ["gdrive_path"] # Create the config config = """ name: "{name}_transformer" data: src: "{source_language}" trg: "{target_language}" train: "data/{name}/train.bpe" dev: "data/{name}/dev.bpe" test: "data/{name}/test.bpe" level: "bpe" lowercase: False max_sent_length: 100 src_vocab: "data/{name}/vocab.txt" trg_vocab: "data/{name}/vocab.txt" testing: beam_size: 5 alpha: 1.0 training: #load_model: "{gdrive_path}/models/{name}_transformer/1.ckpt" # if uncommented, load a pre-trained model from this checkpoint random_seed: 42 optimizer: "adam" normalization: "tokens" adam_betas: [0.9, 0.999] scheduling: "plateau" # TODO: try switching from plateau to Noam scheduling patience: 5 # For plateau: decrease learning rate by decrease_factor if validation score has not improved for this many validation rounds. 
learning_rate_factor: 0.5 # factor for Noam scheduler (used with Transformer) learning_rate_warmup: 1000 # warmup steps for Noam scheduler (used with Transformer) decrease_factor: 0.7 loss: "crossentropy" learning_rate: 0.0003 learning_rate_min: 0.00000001 weight_decay: 0.0 label_smoothing: 0.1 batch_size: 4096 batch_type: "token" eval_batch_size: 3600 eval_batch_type: "token" batch_multiplier: 1 early_stopping_metric: "ppl" epochs: 30 # TODO: Decrease for when playing around and checking of working. Around 30 is sufficient to check if its working at all validation_freq: 1000 # TODO: Set to at least once per epoch. logging_freq: 100 eval_metric: "bleu" model_dir: "models/{name}_transformer" overwrite: False # TODO: Set to True if you want to overwrite possibly existing models. shuffle: True use_cuda: True max_output_length: 100 print_valid_sents: [0, 1, 2, 3] keep_last_ckpts: 3 model: initializer: "xavier" bias_initializer: "zeros" init_gain: 1.0 embed_initializer: "xavier" embed_init_gain: 1.0 tied_embeddings: True tied_softmax: True encoder: type: "transformer" num_layers: 6 num_heads: 4 # TODO: Increase to 8 for larger data. embeddings: embedding_dim: 256 # TODO: Increase to 512 for larger data. scale: True dropout: 0.2 # typically ff_size = 4 x hidden_size hidden_size: 256 # TODO: Increase to 512 for larger data. ff_size: 1024 # TODO: Increase to 2048 for larger data. dropout: 0.3 decoder: type: "transformer" num_layers: 6 num_heads: 4 # TODO: Increase to 8 for larger data. embeddings: embedding_dim: 256 # TODO: Increase to 512 for larger data. scale: True dropout: 0.2 # typically ff_size = 4 x hidden_size hidden_size: 256 # TODO: Increase to 512 for larger data. ff_size: 1024 # TODO: Increase to 2048 for larger data. 
dropout: 0.3 """.format(name=name, gdrive_path=os.environ["gdrive_path"], source_language=source_language, target_language=target_language) with open("joeynmt/configs/transformer_{name}.yaml".format(name=name),'w') as f: f.write(config) ``` # Train the Model This single line of joeynmt runs the training using the config we made above ``` # Train the model # You can press Ctrl-C to stop. And then run the next cell to save your checkpoints! !cd joeynmt; python3 -m joeynmt train configs/transformer_$src$tgt.yaml # Copy the created models from the notebook storage to google drive for persistant storage !cp -r joeynmt/models/${src}${tgt}_transformer/* "$gdrive_path/models/${src}${tgt}_transformer/" # Copy the created models from the notebook storage to google drive for persistant storage !cp joeynmt/models/${src}${tgt}_transformer/best.ckpt "$gdrive_path/models/${src}${tgt}_transformer/" !ls joeynmt/models/${src}${tgt}_transformer # Output our validation accuracy ! cat "$gdrive_path/models/${src}${tgt}_transformer/validations.txt" # Test our model ! cd joeynmt; python3 -m joeynmt test "$gdrive_path/models/${src}${tgt}_transformer/config.yaml" ```
github_jupyter
``` # !pip install recogym ``` # Likelihood-based models This notebook will outline the likelihood-based approach to training on Bandit feedback. Although before proceeding, we will study the output of the simulator in a little more detail. ``` from numpy.random.mtrand import RandomState from recogym import Configuration from recogym.agents import Agent from sklearn.linear_model import LogisticRegression from recogym import verify_agents from recogym.agents import OrganicUserEventCounterAgent, organic_user_count_args from recogym.agents import RandomAgent, random_args from recogym.evaluate_agent import verify_agents, plot_verify_agents import gym, recogym from copy import deepcopy from recogym import env_1_args import matplotlib.pyplot as plt import numpy as np %matplotlib inline %config InlineBackend.figure_format = 'retina' plt.rcParams['figure.figsize'] = [6, 3] num_users = 16000 env_1_args['number_of_flips'] = 0 env_1_args['sigma_mu_organic'] = 0.0 env_1_args['sigma_omega'] = 1 env_1_args['random_seed'] = 42 env_1_args['num_products'] = 40 env_1_args['K'] = 5 env_1_args['number_of_flips'] = 5 env = gym.make('reco-gym-v1') env.init_gym(env_1_args) data = deepcopy(env).generate_logs(num_users) ``` # Logistic Regression Model ## Turn Data into Features Now we are going to build a _Logistic Regression_ model. The model will predict _the probability of the click_ for the following data: * _`Views`_ is a total amount of views of a particular _`Product`_ shown during _Organic_ _`Events`_ **before** a _Bandit_ _`Event`_. * _`Action`_ is a proposed _`Product`_ at a _Bandit_ _`Event`_. For example, assume that we have _`10`_ products. 
In _Organic_ _`Events`_ , these products were shown to a user as follows: | Product Id | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | |------------|---|---|---|---|---|---|---|---|----|---| | Views | 0 | 0 | 0 | 7 | 0 | 0 | 0 | 8 | 11 | 0 | When we want to know the probability of the click for _`Product`_ = _`8`_ with available amounts of _`Views`_ , the input data for the model will be: $v = $_`0 0 0 7 0 0 0 8 11 0`_ and _**`8`**_ The first `10` numbers are _`Views`_ of _`Products`_ (see above), the latest one is the _`Action`_. We will try to predict: $\mathbb{P}(C|P=p, V)$ that is the probability of the click for a _`Product`_ $p$, provided that we have _`Views`_ $V$. We will encode _`Action`_ using a one-hot encoding. In our current example, the _`Action`_ is _`8`_. Thus, it is encoded as: _$a = $`0 0 0 0 0 0 0 0`_ _**`1`**_ _`0`_ Here, * Vector of _`Actions`_ has a size that is equal to the _*number of `Products`*_ i.e., _`10`_. * _`Action`_ _`8`_ is marked as _`1`_ (_`Action`_ starts with _`0`_ ). Numerically, to fully describe the context $P=p, V$ that mixes the evaluated product and the products seen by the user, we do a Kronecker product of the two vectors $a$ and $v$. 
Namely, the vector used as features is the flattened version of the following $P \times P$ matrix

$$
\begin{pmatrix}
\cdots & 0 & \cdots \\
 & \vdots & \\
\cdots & v & \cdots \\
 & \vdots & \\
\cdots & 0 & \cdots
\end{pmatrix}
\leftarrow \text{ only the line corresponding the the action $p$ is non zero}
$$

```
from recogym.agents import FeatureProvider


class CountFeatureProvider(FeatureProvider):
    """Count-based user state: one view counter per product, accumulated
    over the Organic events observed so far for the current user."""

    def __init__(self, config):
        super(CountFeatureProvider, self).__init__(config)
        # feature_data[p] = number of organic views of product p so far
        self.feature_data = np.zeros((self.config.num_products))

    def observe(self, observation):
        """Consider an Organic Event for a particular user"""
        for session in observation.sessions():
            # session['v'] is the id of the organically viewed product
            self.feature_data[int(session['v'])] += 1

    def features(self, observation):
        """Provide feature values adjusted to a particular feature set"""
        return self.feature_data

    def reset(self):
        # Forget the accumulated counts (a new user is starting).
        self.feature_data = np.zeros((self.config.num_products))


import math
import numpy as np
from recogym import Configuration, DefaultContext, Observation
from recogym.envs.session import OrganicSessions


def build_train_data(logs, feature_provider):
    # Walk the simulator logs in chronological order and, at every bandit
    # event, snapshot the user state built from the organic events seen so
    # far. Returns (user_states, actions, rewards, proba_actions) arrays;
    # one row per bandit event.
    user_states, actions, rewards, proba_actions = [], [], [], []
    current_user = None

    for _, row in logs.iterrows():
        if current_user != row['u']:
            # User has changed: start a new session and reset user state.
            current_user = row['u']
            sessions = OrganicSessions()
            feature_provider.reset()

        context = DefaultContext(row['u'], row['t'])

        if row['z'] == 'organic':
            # Organic event: just accumulate it into the pending session.
            sessions.next(context, row['v'])
        else:
            # For each bandit event, generate one observation for the user state,
            # the taken action the obtained reward and the used probabilities.
            feature_provider.observe(Observation(context, sessions))
            user_states.append(feature_provider.features(None).copy())  # copy: provider reuses its buffer
            actions.append(row['a'])
            rewards.append(row['c'])
            proba_actions.append(row['ps'])

            # Start a new organic session.
            sessions = OrganicSessions()

    return np.array(user_states), np.array(actions).astype(int), np.array(rewards), np.array(proba_actions)


# You can now see data that will be provided to our agents based on logistic regressions.
config = Configuration(env_1_args)
count_feature_provider = CountFeatureProvider(config=config)

user_states, actions, rewards, proba_actions = build_train_data(data, count_feature_provider)

# Preview a few consecutive training records.
preview_start, preview_size = 500, 3
print('User product views count at action time:')
print(user_states[preview_start:preview_start + preview_size])
print('Taken actions: ', actions[preview_start:preview_start + preview_size])
print('Obtained rewards: ', rewards[preview_start:preview_start + preview_size])
print('Probablities of the taken actions: ', proba_actions[preview_start:preview_start + preview_size])
```

Look at the data and see how it maps into the features - which is the combination of the history and the actions and the label, which is clicks. Note that only the bandit events correspond to records in the training data. To make a personalization, it is necessary to cross the action and history features. _Why_ ? We do the simplest possible to cross an element-wise Kronecker product.
``` class LikelihoodAgent(Agent): def __init__(self, feature_provider, seed=43): self.feature_provider = feature_provider self.random_state = RandomState(seed) self.model = None @property def num_products(self): return self.feature_provider.config.num_products def _create_features(self, user_state, action): """Create the features that are used to estimate the expected reward from the user state""" features = np.zeros(len(user_state) * self.num_products) features[action * len(user_state): (action + 1) * len(user_state)] = user_state return features def train(self, logs): print('LikelihoodAgent train()') user_states, actions, rewards, proba_actions = build_train_data(logs, self.feature_provider) features = np.vstack([ self._create_features(user_state, action) for user_state, action in zip(user_states, actions) ]) self.model = LogisticRegression(solver='lbfgs', max_iter=5000) self.model.fit(features, rewards) def _score_products(self, user_state): all_action_features = np.array([ self._create_features(user_state, action) for action in range(self.num_products) ]) return self.model.predict_proba(all_action_features)[:, 1] def act(self, observation, reward, done): """Act method returns an action based on current observation and past history""" self.feature_provider.observe(observation) user_state = self.feature_provider.features(observation) prob = self._score_products(user_state) action = np.argmax(prob) ps = 1.0 all_ps = np.zeros(self.num_products) all_ps[action] = 1.0 return { **super().act(observation, reward, done), **{ 'a': action, 'ps': ps, 'ps-a': all_ps, } } def reset(self): self.feature_provider.reset() # Have a look at the feature vector used by the Likelihood agent. 
picked_sample = 2000
count_product_views_feature_provider = CountFeatureProvider(config)
likelihood_logreg = LikelihoodAgent(count_product_views_feature_provider)

# Inspect one training record: the raw user state, the taken action and
# the crossed feature vector the agent builds from them.
print('User state: ', user_states[picked_sample])
print('Action: ', actions[picked_sample])
print('Created cross features:')
print(likelihood_logreg._create_features(user_states[picked_sample], actions[picked_sample]))

# Train the likelihood agent on the simulator logs (wall time reported by %%time).
%%time
likelihood_logreg = LikelihoodAgent(count_product_views_feature_provider)
likelihood_logreg.train(data)

# Baseline agents to compare against: a popularity-style organic counter
# and (commented out in the dict below) a uniformly random agent.
organic_counter_agent = OrganicUserEventCounterAgent(
    Configuration({
        **organic_user_count_args,
        **env_1_args,
        'select_randomly': True,
    })
)
random_agent = RandomAgent(Configuration(random_args))

# A/B-test the agents on fresh simulated users and plot the comparison.
result = verify_agents(
    env,
    number_of_users=200,
    agents={
        # 'random agent': random_agent,
        'Organic Count': organic_counter_agent,
        'Likelihood LogReg': likelihood_logreg,
    }
)

fig = plot_verify_agents(result)
plt.show()
```
github_jupyter
``` import matplotlib.pyplot as plt %matplotlib inline oneX = [] oneY = [] meanX = [1,10,20,50] meanY = [1,13.1247,20.5588,46.3882] for i in range(0,34): oneX.append(1) oneY.append(1) for i in range(0,34): oneX.append(0) oneY.append(0) for i in range(0,34): oneX.append(10) oneY.extend([10.5,14.2,18.8,14.8,18.9,16.3,10.2,10.2,10,17.2,18.6,12.2,10,19,12,11.93,16.5,16.3,19.5,19.2,15.4,9.68,10.7,13.1,11.1,10.9,11.1,13.7,12.5,13.7,10.4,1.45,4.98,11.2]) for i in range(0,34): oneX.append(20) oneY.extend([38.4,24.5,0,0,31.1,21.4,23.3,21.6,31.4,22.6,25.8,22.2,25.1,31,59,35.1,21.9,23,20.8,23.6,24.1,23,20.9,25,0,0,0,0,0,0,29.9,24.9,26.8,22.6]) for i in range(1, 35): oneX.append(50) oneY.extend([48.1,48.1,48.1,48.1,48.1,48.1,47.6,48.1,48.1,45.9,37.5,48.1,39.4,37.5,48.1,48.1,48.1,48.1,48.1,48.1,39.4,48.1,48.1,48.1,48.1,48.1,48.1,48.1,48.1,48.1,48.1,48.1,48.1,31.2]) fig= plt.figure(figsize=(5,6)) axes= fig.add_subplot(111) #axes.set_xticks([0,50, 20, 10, 1]) axes.plot(oneX, oneY,'x') plt.xlabel('Actual Distance (m)') plt.ylabel('Calculated Distance (m)') axes.plot(meanX, meanY) #axes.set_yticks([0,5,10,15,20,25,30,35,40,45,50]) #plt.show() plt.savefig('graph04percentage.png',dpi=900) import matplotlib.pyplot as plt %matplotlib inline oneX = [] oneY = [] meanX = [1,10,20,50] meanY = [1,12.4118,15.2941,50] for i in range(0,34): oneX.append(0) oneY.append(0) for i in range(0,34): oneX.append(1) oneY.append(1) for i in range(0,34): oneX.append(50) oneY.append(50) for i in range(1,35): oneX.append(10) oneY.extend([10,10,20,10,20,20,10,10,10,20,20,10,10,20,10,10,20,20,20,20]) for i in range(0,11): oneY.append(10) oneY.extend([1,1,10]) for i in range(1,35): oneX.append(20) oneY.extend([20,20,0,0]) for i in range(0,20): oneY.append(20) for i in range(0,6): oneY.append(0) for i in range(0,4): oneY.append(20) fig= plt.figure(figsize=(5,6)) axes= fig.add_subplot(111) #axes.set_xticks([0,50, 20, 10, 1]) axes.plot(oneX, oneY,'x') plt.xlabel('Actual Distance (m)') plt.ylabel('Calculated 
Distance (m)') axes.plot(meanX, meanY) axes.set_yticks([0,10,20,30,40,50,60]) #plt.show() plt.savefig('graph04output.png',dpi=900) import matplotlib.pyplot as plt %matplotlib inline oneX = [] oneY = [] meanX = [1,10,20,50] meanY = [1,17.1875,27.5,50] for i in range(0,8): oneX.append(0) oneY.append(0) for i in range(0,8): oneX.append(1) oneY.append(1) for i in range(0,8): oneX.append(50) oneY.append(50) for i in range(0,8): oneX.append(10) oneY.extend([12.5,15,15,17.5,10,20,20,27.5]) for i in range(1,9): oneX.append(20) oneY.extend([60,20,20,20,20,20,0,60]) fig= plt.figure(figsize=(5,6)) axes= fig.add_subplot(111) axes.plot(oneX, oneY,'x') plt.xlabel('Actual Distance (m)') plt.ylabel('Calculated Distance (m)') axes.plot(meanX, meanY) axes.set_yticks([0,10,20,30,40,50,60]) #plt.show() plt.savefig('graph1.png',dpi=900) import matplotlib.pyplot as plt %matplotlib inline oneX = [] oneY = [] meanX = [1,10,20,50] meanY = [1,17.1875,20,50] for i in range(0,4): oneX.append(0) oneY.append(0) for i in range(0,4): oneX.append(50) oneY.append(50) for i in range(0,4): oneX.append(1) oneY.append(1) for i in range(0,4): oneX.append(10) oneY.extend([13.75,16.25,15,23.75]) for i in range(0,4): oneX.append(20) oneY.extend([40,20,20,0]) fig= plt.figure(figsize=(5,6)) axes= fig.add_subplot(111) axes.plot(oneX, oneY,'x') plt.xlabel('Actual Distance (m)') plt.ylabel('Calculated Distance (m)') axes.plot(meanX, meanY) axes.set_yticks([0,10,20,30,40,50,60]) #plt.show() plt.savefig('graph2.png',dpi=900) ```
github_jupyter
![image.png](attachment:image.png) # Introduction Microsoft Studios has recently ventured into the space of film production, but wants to know what are some key insights and recommendations that can receive to become a profitable film studio. Microsoft can increase their profitability if they: A) Release films in during months that have the lowest number of releases, B) Release films primarily on Fridays, in addition to other days that show a high RIO when released on given day, and C) Look into partnering with a highly profitable film studio or invest market research in the studios that have shown the highest domestic gross on films. # Imports ``` import pandas as pd import matplotlib import matplotlib.pyplot as plt ``` # First Data Set From The-Numbers.com that shows the production budgets and gross of a film. Working with this data, I provide an insightful analysis of the Return on Investment(RIO) for further recommendations. ``` tf = pd.read_csv('tn.movie_budgets.csv') tf ``` Cleaning the data set to work with the values. ``` tf['production_budget'] = tf['production_budget'].str.replace('$', '').str.replace(',', '').astype(int) tf['worldwide_gross'] = tf['worldwide_gross'].str.replace('$', '').str.replace(',', '').astype(int) ``` Here, I create the Return On Investment (RIO) by dividing the worldwide_gross by the production_budget and rounded the percentage to the nearest decimal place. ``` ROI = tf['worldwide_gross']/tf['production_budget'] tf['ROI'] = ROI.round() tf ``` Here I changed created to datetimes, one for the month a movie was release and one for the day of the week a movie was released, and adding a new column representing the day of the week. ``` tf['release_Date'] = pd.to_datetime(tf['release_date']).dt.day_name() tf['release_Date'] tf['release_date'] = pd.to_datetime(tf['release_date']).dt.month_name() tf['release_date'] tf ``` Then, I used a groupby to look at release date by month and day of the week in relation to the highest ROI. 
I grouped each of them in to different lists to later visualize. ``` list(tf.groupby(['release_date', 'ROI']).size().index) month_release_dates = ['April', 'August', 'December', 'February', 'January', 'July', 'June', 'March', 'May', 'November', 'October', 'September'] month_release_date_highest_ROI = [90, 312, 124, 292, 122, 417, 115, 499, 342, 225, 264, 432] list(tf.groupby(['release_Date', 'ROI']).size().index) roi_day = ['Friday','Monday','Saturday','Sunday','Thursday', 'Tuesday','Wednesday'] roi_release_date_highest_ROI = [499.0,100.0,180.0,225.0,312.0,264.0,414.0] ``` Here, I show the value_counts of the ROI to see what is the most common ROI on a film to provide an insight to the most common ROI that Microsoft could receive on a film. ``` tf['ROI'].value_counts() roi_percentage = list(tf['ROI'].value_counts(ascending=False).index) roi_number = list(tf['ROI'].value_counts()) ROI.describe() ROI.median() ``` # Second Data Set Here, I explore a data set from The Movie Database that offers a host of release dates that I would use to have a greater insight into how many movies are released in a month or in a day. ``` movie_dates = pd.read_csv('tmdb.movies.csv') movie_dates ``` Here, I work with this data set to see how many releases occur by a given day and month and see what months have the highest and lowest release dates. 
``` movie_dates['release_date'] = pd.to_datetime(movie_dates['release_date']) movie_dates['release_date'] movie_dates['release_date'].dt.day_name().value_counts() days = list(movie_dates['release_date'].dt.day_name().value_counts().index) days_ranked = list(movie_dates['release_date'].dt.day_name().value_counts()) movie_dates['release_date'].dt.month_name().value_counts() movie_months= list(movie_dates['release_date'].dt.month_name().value_counts().index) movie_months movie_month_values = list(movie_dates['release_date'].dt.month_name().value_counts()) movie_month_values ``` # Third Data Set This data set is from Box Office Mojo where I explore the data to see what film studios have the highest domestic growth that Microsoft could potentially emulate or partner with in the future. ``` sf= pd.read_csv('bom.movie_gross.csv', index_col=False) sf ``` Cleaning the Dataset to work with the values ``` sf['foreign_gross'] = sf['foreign_gross'].fillna(0) sf ``` Here, I look at the studios with the highest domestic gross instead of foreign gross because many films are missing foreign gross. Also, instead of RIO I am looking at domestic gross becasue this particular data set didn't have a production budget section. ``` sf_rank = sf.nlargest(100, ['domestic_gross']) sf_rank ``` # Fourth Data Set Extra Data Set I received from an outside source from The Numbers that Microsoft could consider as an important insight of what movie genres have been profitable since 1995. ``` df = pd.read_csv('genre.csv') df ``` The reason an error appears here is because .str is used to change a string and (float). Thus, running this after the first initial time wouldn't work because the .str works for strings and this was changed from a string to float. 
``` df['Total Box Office'] = df['Total Box Office'].str.replace('$', '').str.replace(',', '').astype(float) df['Total Box Office'] ``` # Visualizations Most months that show a high number of releases also show a lower RIO compared to months that have a low number of releases have a higher RIO. ``` index= ['April', 'August', 'December', 'February', 'January', 'July', 'June', 'March', 'May', 'November', 'October', 'September'] n_of_releases = [2566, 1698, 1929, 1614, 3132, 1504, 2166, 2406, 1865, 2338, 3035, 2264] highest_ROI = [90, 312, 124, 292, 122, 417, 115, 499, 342, 225, 264, 432] mf = pd.DataFrame({'Number of Releases': n_of_releases, 'Highest ROI': highest_ROI}, index=index) ax = mf.plot.barh(figsize=(10,10)) ax.set(title = 'Months Releases and Highest ROI', xlabel = 'Count', ylabel = 'Months') ``` Friday has the highest number of film releases, and the highest RIO, but all days that follow have a correlation of high number of releases and low RIO and a low number of releases with a high RIO. ``` day = ['Friday','Monday','Saturday','Sunday','Thursday', 'Tuesday','Wednesday'] day_highest_ROI = [499.0,100.0,180.0,225.0,312.0,264.0,414.0] day_n_release= [9171.0, 1965.0, 3568.0, 2544.0, 2651.0, 4380, 2238] ddf = pd.DataFrame({'Number of Releases': day_n_release, 'Highest ROI': day_highest_ROI}, index=day) ax = ddf.plot.barh(figsize=(10,10)) ax.set(title = 'Days Releases and Highest ROI', xlabel = '', ylabel = 'Days') ``` Film Studios that have made the highest domestic gross between the years of 2010-2018. ``` fig, ax = plt.subplots(figsize=(10,10)) ax.bar(x = sf_rank['studio'], height = sf_rank['domestic_gross']) ax.set(title = 'Studios with Highest Domestic Gross', xlabel = 'Studios', ylabel = 'Gross by Billion') plt.savefig('Studios with Highest Domestic Gross') ``` # Extra Visualizations This is a general spread of RIOS and what is the most common RIO received on a film. 
``` fig, ax = plt.subplots(figsize=(10,5)) ax.bar(x = roi_percentage, height = roi_number) ax.set(title = 'ROI', xlabel = 'Percentage', ylabel = 'Count') ax.set_xlim(0, 50) ax.set_ylim(0, 1470) plt.savefig('ROI') ``` This is an extra data set I receieved from The-Numbers that shows what movies have made the most revenue from 1995-2021 ``` fig, ax = plt.subplots(figsize=(30,10)) ax.bar(x = df['Genre'], height = df['Total Box Office']) ax.set(title = 'Revenue By Genre', xlabel = 'Genre', ylabel = 'Total Box Office by 10 Billion') plt.savefig('Revenue By Genre') ``` # Conclusion Conclusion & Future Steps The mains months that Microsoft Studios should release their films based on ROI follow in this respective order: March, September, July, May, August, February, October, November, December, January, June, and April The mains months that Microsoft Studios should release their films based on ROI follow in this respective order: Friday, Wednesday, Thursday, Tuesday, Sunday, Saturday, and Monday Microsoft Studios should partner with a studio that has shown high level of profitability. If they don’t partner with any studio, they should at least put focus their main market research on the studios with the highest RIOs to have greater profitability in the future.
github_jupyter
# 2D Kriging in Python for Engineers and Geoscientists
## with GSLIB's KB2D Program Converted to Python
### Michael Pyrcz, Associate Professor, University of Texas at Austin
#### Contacts: [Twitter/@GeostatsGuy](https://twitter.com/geostatsguy) | [GitHub/GeostatsGuy](https://github.com/GeostatsGuy) | [www.michaelpyrcz.com](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446)

This is a tutorial for / demonstration of **Kriging in Python with GSLIB's KB2D program translated to Python, wrappers and reimplementations of other GSLIB: Geostatistical Library methods** (Deutsch and Journel, 1997).

This exercise demonstrates the kriging method in Python with wrappers and reimplementations of GSLIB methods. The steps include:

1. generate a 2D model with sequential Gaussian simulation
2. sample from the simulation
3. calculate and visualize the kriged map

To accomplish this I have provided wrappers or reimplementations in Python for the following GSLIB methods:

1. sgsim - sequential Gaussian simulation, limited to 2D and unconditional
2. hist - histogram plots reimplemented with GSLIB parameters using Python methods
3. locmap - location maps reimplemented with GSLIB parameters using Python methods
4. pixelplt - pixel plots reimplemented with GSLIB parameters using Python methods
5. locpix - my modification of GSLIB to superimpose a location map on a pixel plot, reimplemented with GSLIB parameters using Python methods
6. affine - affine correction to adjust the mean and standard deviation of a feature, reimplemented with GSLIB parameters using Python methods

I have also started to translate the GSLIB support subfunctions to Python. Stay tuned. The GSLIB source and executables are available at http://www.statios.com/Quick/gslib.html.
For the reference on using GSLIB check out the User Guide, GSLIB: Geostatistical Software Library and User's Guide by Clayton V. Deutsch and Andre G. Journel. Over time, more of the GSLIB programs will be translated to Python and there will be no need to have the executables. For this workflow you will need sgsim.exe from GSLIB.com for Windows, and Mac OS executables from https://github.com/GeostatsGuy/GSLIB_MacOS. I did this to allow people to use these GSLIB functions, which are extremely robust, in Python. Also this should be a bridge to allow the many people familiar with GSLIB to work in Python, as I kept the parameterization and displays consistent with GSLIB. The wrappers are simple functions declared below that write the parameter files, run the GSLIB executable in the working directory and load and visualize the output in Python. This will be included on GitHub for anyone to try it out https://github.com/GeostatsGuy/.

This was my first effort to translate the GSLIB Fortran to Python. It was pretty easy, so I'll start translating other critical GSLIB functions. I've completed NSCORE, DECLUS, GAM, GAMV and now KB2D as of now.

#### Load the required libraries

The following code loads the required libraries.

```
import os                                 # to set current working directory
import numpy as np                        # arrays and matrix math
import pandas as pd                       # DataFrames
import matplotlib.pyplot as plt           # plotting
```

If you get a package import error, you may have to first install some of these packages. This can usually be accomplished by opening up a command window on Windows and then typing 'python -m pip install [package-name]'. More assistance is available with the respective package docs.

#### Declare functions

Here are the wrappers and reimplementations of GSLIB methods along with two utilities to load GSLIB's Geo-EAS formatted data files into DataFrames and 2D Numpy arrays. These are used in the testing workflow.

```
# Some GeostatsPy Functions - by Michael Pyrcz, maintained at https://git.io/fNgR7.
# A set of functions to provide access to GSLIB in Python. # GSLIB executables: nscore.exe, declus.exe, gam.exe, gamv.exe, vmodel.exe, kb2d.exe & sgsim.exe must be in the working directory # note, since I have now rewritten nscore, gam, gamv and kb2d one can just use these in Python # available in the geostatspy package. import pandas as pd import os import numpy as np import matplotlib.pyplot as plt import random as rand image_type = 'tif'; dpi = 600 # utility to convert GSLIB Geo-EAS files to a 1D or 2D numpy ndarray for use with Python methods def GSLIB2ndarray(data_file,kcol,nx,ny): colArray = [] if ny > 1: array = np.ndarray(shape=(ny,nx),dtype=float,order='F') else: array = np.zeros(nx) with open(data_file) as myfile: # read first two lines head = [next(myfile) for x in range(2)] line2 = head[1].split() ncol = int(line2[0]) # get the number of columns for icol in range(0, ncol): # read over the column names head = [next(myfile) for x in range(1)] if icol == kcol: col_name = head[0].split()[0] if ny > 1: for iy in range(0,ny): for ix in range(0,nx): head = [next(myfile) for x in range(1)] array[ny-1-iy][ix] = head[0].split()[kcol] else: for ix in range(0,nx): head = [next(myfile) for x in range(1)] array[ix] = head[0].split()[kcol] return array,col_name # utility to convert GSLIB Geo-EAS files to a pandas DataFrame for use with Python methods def GSLIB2Dataframe(data_file): colArray = [] with open(data_file) as myfile: # read first two lines head = [next(myfile) for x in range(2)] line2 = head[1].split() ncol = int(line2[0]) for icol in range(0, ncol): head = [next(myfile) for x in range(1)] colArray.append(head[0].split()[0]) data = np.loadtxt(myfile, skiprows = 0) df = pd.DataFrame(data) df.columns = colArray return df # histogram, reimplemented in Python of GSLIB hist with MatPlotLib methods, displayed and as image file def hist(array,xmin,xmax,log,cumul,bins,weights,xlabel,title,fig_name): plt.figure(figsize=(8,6)) cs = plt.hist(array, alpha = 0.2, color = 
'red', edgecolor = 'black', bins=bins, range = [xmin,xmax], weights = weights, log = log, cumulative = cumul) plt.title(title) plt.xlabel(xlabel); plt.ylabel('Frequency') plt.savefig(fig_name + '.' + image_type,dpi=dpi) plt.show() return # histogram, reimplemented in Python of GSLIB hist with MatPlotLib methods (version for subplots) def hist_st(array,xmin,xmax,log,cumul,bins,weights,xlabel,title): cs = plt.hist(array, alpha = 0.2, color = 'red', edgecolor = 'black', bins=bins, range = [xmin,xmax], weights = weights, log = log, cumulative = cumul) plt.title(title) plt.xlabel(xlabel); plt.ylabel('Frequency') return # location map, reimplemention in Python of GSLIB locmap with MatPlotLib methods def locmap(df,xcol,ycol,vcol,xmin,xmax,ymin,ymax,vmin,vmax,title,xlabel,ylabel,vlabel,cmap,fig_name): ixy = 0 plt.figure(figsize=(8,6)) im = plt.scatter(df[xcol],df[ycol],s=None, c=df[vcol], marker=None, cmap=cmap, norm=None, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, verts=None, edgecolors="black") plt.title(title) plt.xlim(xmin,xmax) plt.ylim(ymin,ymax) plt.xlabel(xlabel) plt.ylabel(ylabel) cbar = plt.colorbar(im, orientation = 'vertical',ticks=np.linspace(vmin,vmax,10)) cbar.set_label(vlabel, rotation=270, labelpad=20) plt.savefig(fig_name + '.' 
+ image_type,dpi=dpi) plt.show() return im # location map, reimplemention in Python of GSLIB locmap with MatPlotLib methods (version for subplots) def locmap_st(df,xcol,ycol,vcol,xmin,xmax,ymin,ymax,vmin,vmax,title,xlabel,ylabel,vlabel,cmap): ixy = 0 im = plt.scatter(df[xcol],df[ycol],s=None, c=df[vcol], marker=None, cmap=cmap, norm=None, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, verts=None, edgecolors="black") plt.title(title) plt.xlim(xmin,xmax) plt.ylim(ymin,ymax) plt.xlabel(xlabel) plt.ylabel(ylabel) cbar = plt.colorbar(im, orientation = 'vertical',ticks=np.linspace(vmin,vmax,10)) cbar.set_label(vlabel, rotation=270, labelpad=20) return im # pixel plot, reimplemention in Python of GSLIB pixelplt with MatPlotLib methods def pixelplt(array,xmin,xmax,ymin,ymax,step,vmin,vmax,title,xlabel,ylabel,vlabel,cmap,fig_name): xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) plt.figure(figsize=(8,6)) im = plt.contourf(xx,yy,array,cmap=cmap,vmin=vmin,vmax=vmax,levels=np.linspace(vmin,vmax,100)) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) cbar = plt.colorbar(im,orientation = 'vertical',ticks=np.linspace(vmin,vmax,10)) cbar.set_label(vlabel, rotation=270, labelpad=20) plt.savefig(fig_name + '.' + image_type,dpi=dpi) plt.show() return im # pixel plot, reimplemention in Python of GSLIB pixelplt with MatPlotLib methods(version for subplots) def pixelplt_st(array,xmin,xmax,ymin,ymax,step,vmin,vmax,title,xlabel,ylabel,vlabel,cmap): xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) ixy = 0 x = [];y = []; v = [] # use dummy since scatter plot controls legend min and max appropriately and contour does not! 
cs = plt.contourf(xx,yy,array,cmap=cmap,vmin=vmin,vmax=vmax,levels = np.linspace(vmin,vmax,100)) im = plt.scatter(x,y,s=None, c=v, marker=None,cmap=cmap, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, verts=None, edgecolors="black") plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.clim(vmin,vmax) cbar = plt.colorbar(im, orientation = 'vertical') cbar.set_label(vlabel, rotation=270, labelpad=20) return cs # pixel plot and location map, reimplementation in Python of a GSLIB MOD with MatPlotLib methods def locpix(array,xmin,xmax,ymin,ymax,step,vmin,vmax,df,xcol,ycol,vcol,title,xlabel,ylabel,vlabel,cmap,fig_name): xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step)) ixy = 0 plt.figure(figsize=(8,6)) cs = plt.contourf(xx, yy, array, cmap=cmap,vmin=vmin, vmax=vmax,levels = np.linspace(vmin,vmax,100)) im = plt.scatter(df[xcol],df[ycol],s=None, c=df[vcol], marker=None, cmap=cmap, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, verts=None, edgecolors="black") plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.xlim(xmin,xmax) plt.ylim(ymin,ymax) cbar = plt.colorbar(orientation = 'vertical') cbar.set_label(vlabel, rotation=270, labelpad=20) plt.savefig(fig_name + '.' 
+ image_type,dpi=dpi) plt.show() return cs # (tail of a plotting function that begins above this chunk -- left untouched)

# pixel plot and location map, reimplementation in Python of a GSLIB MOD with MatPlotLib methods(version for subplots)
def locpix_st(array,xmin,xmax,ymin,ymax,step,vmin,vmax,df,xcol,ycol,vcol,title,xlabel,ylabel,vlabel,cmap):
    """Draw a 2D gridded model as filled contours with the data locations
    overlaid, into the CURRENT matplotlib axes (no figure/show), so it can
    be used inside plt.subplot() layouts.

    array : 2D ndarray of the gridded values
    xmin..ymax, step : grid extent and cell size used to build the mesh
    vmin, vmax : color scale limits shared by contours and scatter
    df, xcol, ycol, vcol : DataFrame with sample x, y and value columns
    title/xlabel/ylabel/vlabel : plot annotation strings
    cmap : matplotlib colormap
    """
    xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step))
    ixy = 0  # unused local (leftover from a loop-based version)
    cs = plt.contourf(xx, yy, array, cmap=cmap,vmin=vmin, vmax=vmax,levels = np.linspace(vmin,vmax,100))
    # NOTE(review): `verts=None` is a deprecated scatter keyword (removed in
    # newer matplotlib) -- confirm the pinned matplotlib version accepts it.
    im = plt.scatter(df[xcol],df[ycol],s=None, c=df[vcol], marker=None, cmap=cmap, vmin=vmin, vmax=vmax, alpha=0.8, linewidths=0.8, verts=None, edgecolors="black")
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xlim(xmin,xmax)
    plt.ylim(ymin,ymax)
    cbar = plt.colorbar(orientation = 'vertical')
    cbar.set_label(vlabel, rotation=270, labelpad=20)

# affine distribution correction reimplemented in Python with numpy methods
def affine(array,tmean,tstdev):
    """Affine correction: linearly rescale `array` so its distribution has
    target mean `tmean` and target standard deviation `tstdev` (shape of the
    distribution is preserved)."""
    mean = np.average(array)
    stdev = np.std(array)
    array = (tstdev/stdev)*(array - mean) + tmean
    return(array)

def make_variogram(nug,nst,it1,cc1,azi1,hmaj1,hmin1,it2=1,cc2=0,azi2=0,hmaj2=0,hmin2=0):
    """Assemble a 2-structure 2D variogram model as a dict (GSLIB-style).

    nug : nugget effect contribution
    nst : number of nested structures (forced to 1 when cc2 == 0)
    itN/ccN/aziN/hmajN/hminN : type, contribution, azimuth and major/minor
        ranges of structure N
    Prints (but does not raise on) three sanity warnings: sill != 1.0,
    negative contributions/ranges, and major range < minor range.
    """
    if cc2 == 0:
        nst = 1
    var = dict([('nug', nug), ('nst', nst), ('it1', it1),('cc1', cc1),('azi1', azi1),('hmaj1', hmaj1), ('hmin1', hmin1), ('it2', it2),('cc2', cc2),('azi2', azi2),('hmaj2', hmaj2), ('hmin2', hmin2)])
    if nug + cc1 + cc2 != 1:
        print('\x1b[0;30;41m make_variogram Warning: sill does not sum to 1.0, do not use in simulation \x1b[0m')
    if cc1 < 0 or cc2 < 0 or nug < 0 or hmaj1 < 0 or hmaj2 < 0 or hmin1 < 0 or hmin2 < 0:
        print('\x1b[0;30;41m make_variogram Warning: contributions and ranges must be all positive \x1b[0m')
    if hmaj1 < hmin1 or hmaj2 < hmin2:
        print('\x1b[0;30;41m make_variogram Warning: major range should be greater than minor range \x1b[0m')
    return var

# sequential Gaussian simulation, 2D unconditional wrapper for sgsim from GSLIB (.exe must be in working directory)
def GSLIB_sgsim_2d_uncond(nreal,nx,ny,hsiz,seed,var,output_file):
    """Run GSLIB's sgsim.exe for an unconditional 2D simulation.

    Writes a "sgsim.par" parameter file from the variogram dict `var`
    (see make_variogram), shells out to sgsim.exe, then reads the result
    back with GSLIB2ndarray (defined elsewhere in this notebook).
    Returns the simulated 2D ndarray (first element of GSLIB2ndarray's
    return tuple).
    NOTE(review): requires sgsim.exe on the working directory / PATH;
    the "nonw.dbg" file name below looks like a typo for "none.dbg" but is
    part of the emitted parameter file, so it is left untouched.
    """
    import os
    import numpy as np
    # unpack the variogram model
    nug = var['nug']
    nst = var['nst']; it1 = var['it1']; cc1 = var['cc1']; azi1 = var['azi1']; hmaj1 = var['hmaj1']; hmin1 = var['hmin1']
    it2 = var['it2']; cc2 = var['cc2']; azi2 = var['azi2']; hmaj2 = var['hmaj2']; hmin2 = var['hmin2']
    max_range = max(hmaj1,hmaj2)
    hmn = hsiz * 0.5  # grid origin = half a cell in from the edge
    hctab = int(max_range/hsiz)*2 + 1  # covariance lookup table size
    sim_array = np.random.rand(nx,ny)  # placeholder, overwritten below
    # write the GSLIB sgsim parameter file (fixed-format, order matters)
    file = open("sgsim.par", "w")
    file.write(" Parameters for SGSIM \n")
    file.write(" ******************** \n")
    file.write(" \n")
    file.write("START OF PARAMETER: \n")
    file.write("none -file with data \n")
    file.write("1 2 0 3 5 0 - columns for X,Y,Z,vr,wt,sec.var. \n")
    file.write("-1.0e21 1.0e21 - trimming limits \n")
    file.write("0 -transform the data (0=no, 1=yes) \n")
    file.write("none.trn - file for output trans table \n")
    file.write("1 - consider ref. dist (0=no, 1=yes) \n")
    file.write("none.dat - file with ref. dist distribution \n")
    file.write("1 0 - columns for vr and wt \n")
    file.write("-4.0 4.0 - zmin,zmax(tail extrapolation) \n")
    file.write("1 -4.0 - lower tail option, parameter \n")
    file.write("1 4.0 - upper tail option, parameter \n")
    file.write("0 -debugging level: 0,1,2,3 \n")
    file.write("nonw.dbg -file for debugging output \n")
    file.write(str(output_file) + " -file for simulation output \n")
    file.write(str(nreal) + " -number of realizations to generate \n")
    file.write(str(nx) + " " + str(hmn) + " " + str(hsiz) + " \n")
    file.write(str(ny) + " " + str(hmn) + " " + str(hsiz) + " \n")
    file.write("1 0.0 1.0 - nz zmn zsiz \n")
    file.write(str(seed) + " -random number seed \n")
    file.write("0 8 -min and max original data for sim \n")
    file.write("12 -number of simulated nodes to use \n")
    file.write("0 -assign data to nodes (0=no, 1=yes) \n")
    file.write("1 3 -multiple grid search (0=no, 1=yes),num \n")
    file.write("0 -maximum data per octant (0=not used) \n")
    file.write(str(max_range) + " " + str(max_range) + " 1.0 -maximum search (hmax,hmin,vert) \n")
    file.write(str(azi1) + " 0.0 0.0 -angles for search ellipsoid \n")
    file.write(str(hctab) + " " + str(hctab) + " 1 -size of covariance lookup table \n")
    file.write("0 0.60 1.0 -ktype: 0=SK,1=OK,2=LVM,3=EXDR,4=COLC \n")
    file.write("none.dat - file with LVM, EXDR, or COLC variable \n")
    file.write("4 - column for secondary variable \n")
    file.write(str(nst) + " " + str(nug) + " -nst, nugget effect \n")
    file.write(str(it1) + " " + str(cc1) + " " +str(azi1) + " 0.0 0.0 -it,cc,ang1,ang2,ang3\n")
    file.write(" " + str(hmaj1) + " " + str(hmin1) + " 1.0 - a_hmax, a_hmin, a_vert \n")
    file.write(str(it2) + " " + str(cc2) + " " +str(azi2) + " 0.0 0.0 -it,cc,ang1,ang2,ang3\n")
    file.write(" " + str(hmaj2) + " " + str(hmin2) + " 1.0 - a_hmax, a_hmin, a_vert \n")
    file.close()
    os.system('"sgsim.exe sgsim.par"')  # run GSLIB executable
    sim_array = GSLIB2ndarray(output_file,0,nx,ny)  # read the GEO-EAS output back
    return(sim_array[0])

# extract regular spaced samples from a model
def regular_sample(array,xmin,xmax,ymin,ymax,step,mx,my,name):
    """Sample the gridded model `array` on a regular mx-by-my cell spacing
    and return the samples as a DataFrame with columns X, Y, `name`.

    NOTE(review): `nx` and `ny` are NOT defined in this function -- it
    silently relies on notebook-global variables of those names; likewise
    `pd` must already be imported. Also the `xx[ix,iy]` / `array[ix,iy]`
    indexing looks transposed relative to meshgrid's (row=y, col=x)
    layout -- confirm before reusing on non-square grids.
    """
    x = []; y = []; v = []; iix = 0; iiy = 0;
    xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax, ymin, -1*step))
    iiy = 0
    for iy in range(0,ny):
        if iiy >= my:
            iix = 0
            for ix in range(0,nx):
                if iix >= mx:
                    x.append(xx[ix,iy]);y.append(yy[ix,iy]); v.append(array[ix,iy])
                    iix = 0; iiy = 0
                iix = iix + 1
        iiy = iiy + 1
    df = pd.DataFrame(np.c_[x,y,v],columns=['X', 'Y', name])
    return(df)

def random_sample(array,xmin,xmax,ymin,ymax,step,nsamp,name):
    """Draw `nsamp` samples (without replacement) at random grid nodes of
    `array` and return them as a DataFrame with columns X, Y, `name`."""
    import random as rand
    x = []; y = []; v = []; iix = 0; iiy = 0;  # iix/iiy are unused here
    xx, yy = np.meshgrid(np.arange(xmin, xmax, step),np.arange(ymax-1, ymin-1, -1*step))
    ny = xx.shape[0]
    nx = xx.shape[1]
    sample_index = rand.sample(range((nx)*(ny)), nsamp)  # unique flat indices
    for isamp in range(0,nsamp):
        # convert flat index back to (row, col)
        # NOTE(review): the row is derived with /ny but the column with *nx;
        # this is only consistent when nx == ny -- verify for non-square grids.
        iy = int(sample_index[isamp]/ny)
        ix = sample_index[isamp] - iy*nx
        x.append(xx[iy,ix])
        y.append(yy[iy,ix])
        v.append(array[iy,ix])
    df = pd.DataFrame(np.c_[x,y,v],columns=['X', 'Y', name])
    return(df)
```

These are a variety of GSLIB subfunctions required by the GSLIB functions that I have converted to Python.
I will continue to convert the subfunctions and include them as needed by the GSLIB functions that I convert. All are available with geostatspy.

```
import math
import numpy as np
import numpy.linalg as linalg
from numba import jit

def dlocate(xx,iis,iie,x):
    """Bisection search: index where x would insert into the sorted slice
    xx[iis:iie-1] (Python port of GSLIB's dlocate).

    NOTE(review): when iie <= iis the code assigns a NEW name `ie` instead
    of `iie`, so the upper bound is never actually reset -- confirm intent
    against the Fortran original.
    """
    from bisect import bisect
    n = len(xx)
    if iie <= iis:
        iis = 0; ie = n-1
    array = xx[iis:iie-1]  # this is accounting for switch to 0,...,n-1 index
    j = bisect(array,x)
    return j

def dsortem(ib,ie,a,iperm,b=0,c=0,d=0,e=0,f=0,g=0,h=0):
    """Sort a[ib:ie] ascending and co-sort up to seven companion arrays
    (GSLIB dsortem). `iperm` selects how many sorted arrays are returned.

    NOTE(review): the iperm == 7 branch returns `h` (still the scalar
    default 0 at that point) instead of the just-sorted `g`, and the final
    return also omits `g` -- looks like a copy/paste slip; verify before
    relying on iperm >= 7.
    """
    a = a[ib:ie]
    inds = a.argsort()
    a = np.copy(a[inds]) # deepcopy forces pass to outside scope
    if(iperm == 1):
        return a
    b_slice = b[ib:ie]
    b = b_slice[inds]
    if iperm == 2:
        return a,b
    c_slice = c[ib:ie]
    c = c_slice[inds]
    if iperm == 3:
        return a, b, c
    d_slice = d[ib:ie]
    d = d_slice[inds]
    if iperm == 4:
        return a, b, c, d
    e_slice = e[ib:ie]
    e = e_slice[inds]
    if iperm == 5:
        return a, b, c, d, e
    f_slice = f[ib:ie]
    f = f_slice[inds]
    if iperm == 6:
        return a, b, c, d, e, f
    g_slice = g[ib:ie]
    g = g_slice[inds]
    if iperm == 7:
        return a, b, c, d, e, f, h
    h_slice = h[ib:ie]
    h = h_slice[inds]
    return a, b, c, d, e, f, h

def gauinv(p):
    """Inverse standard-normal CDF by rational approximation (GSLIB gauinv).
    Clamps to +/-1.0e10 outside [lim, 1-lim]."""
    lim = 1.0e-10; p0 = -0.322232431088; p1 = -1.0; p2 = -0.342242088547
    p3 = -0.0204231210245; p4 = -0.0000453642210148; q0 = 0.0993484626060
    q1 = 0.588581570495; q2 = 0.531103462366; q3 = 0.103537752850; q4 = 0.0038560700634
    # Check for an error situation:
    if p < lim:
        xp = -1.0e10
        return xp
    if p > (1.0-lim):
        xp = 1.0e10
        return xp
    # Get k for an error situation:
    pp = p
    if p > 0.5:
        pp = 1 - pp
    xp = 0.0
    if p == 0.5:
        return xp
    # Approximate the function:
    y = np.sqrt(np.log(1.0/(pp*pp)))
    xp = float(y + ((((y*p4+p3)*y+p2)*y+p1)*y+p0) / ((((y*q4+q3)*y+q2)*y+q1)*y+q0) )
    # mirror for the lower half of the distribution
    if float(p) == float(pp):
        xp = -xp
    return xp

def gcum(x):
    """Standard-normal CDF by Abramowitz/Stegun-style polynomial
    approximation (GSLIB gcum)."""
    z = x
    if z < 0:
        z = -z
    t= 1./(1.+ 0.2316419*z)
    gcum = t*(0.31938153 + t*(-0.356563782 + t*(1.781477937 + t*(-1.821255978 + t*1.330274429))))
    e2= 0.0
    # standard deviations out gets treated as infinity:
    if z <= 6:
        e2 = np.exp(-z*z/2.0)*0.3989422803
    gcum = 1.0- e2 * gcum
    if x >= 0.0:
        return gcum
    # negative x: use symmetry of the normal distribution
    gcum = 1.0 - gcum
    return gcum

def dpowint(xlow,xhigh,ylow,yhigh,xval,pwr):
    """Power-law interpolation of yval at xval between (xlow,ylow) and
    (xhigh,yhigh); falls back to the midpoint when the x interval is
    degenerate (GSLIB dpowint)."""
    EPSLON = 1.0e-20
    if (xhigh-xlow) < EPSLON:
        dpowint = (yhigh+ylow)/2.0
    else:
        dpowint = ylow + (yhigh-ylow)*(((xval-xlow)/(xhigh-xlow))**pwr)
    return dpowint

@jit(nopython=True) # all NumPy array operations included in this function for precompile with NumBa
def setup_rotmat(c0,nst,it,cc,ang,PMX):
    """Build the 2D rotation matrix (4 x nst, flattened cos/sin terms) for
    each variogram structure and the maximum covariance (sill, with PMX
    substituted for power-model structures). Returns (rotmat, maxcov)."""
    DTOR=3.14159265/180.0; EPSLON=0.000000; PI=3.141593
    # The first time around, re-initialize the cosine matrix for the
    # variogram structures:
    rotmat = np.zeros((4,nst))
    maxcov = c0
    for js in range(0,nst):
        azmuth = (90.0-ang[js])*DTOR  # azimuth (from north) -> math angle
        rotmat[0,js] = math.cos(azmuth)
        rotmat[1,js] = math.sin(azmuth)
        rotmat[2,js] = -1*math.sin(azmuth)
        rotmat[3,js] = math.cos(azmuth)
        if it[js] == 4:
            maxcov = maxcov + PMX  # power model has no finite sill
        else:
            maxcov = maxcov + cc[js]
    return rotmat, maxcov

@jit(nopython=True) # all NumPy array operations included in this function for precompile with NumBa
def cova2(x1,y1,x2,y2,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov):
    """2D covariance between points (x1,y1) and (x2,y2) given the nested
    variogram structures (GSLIB cova2). Structure types: 1=spherical,
    2=exponential, 3=Gaussian, 4=power."""
    DTOR=3.14159265/180.0; EPSLON=0.000000; PI=3.141593
    # Check for very small distance:
    dx = x2-x1
    dy = y2-y1
    # print(dx,dy)
    if (dx*dx+dy*dy) < EPSLON:
        cova2 = maxcov  # coincident points -> full sill
        return cova2
    # Non-zero distance, loop over all the structures:
    cova2 = 0.0
    for js in range(0,nst):
        # print(js)
        # print(rotmat)
        # Compute the appropriate structural distance:
        dx1 = (dx*rotmat[0,js] + dy*rotmat[1,js])
        dy1 = (dx*rotmat[2,js] + dy*rotmat[3,js])/anis[js]
        h = math.sqrt(max((dx1*dx1+dy1*dy1),0.0))
        if it[js] == 1:
            # Spherical model:
            hr = h/aa[js]
            if hr < 1.0:
                cova2 = cova2 + cc[js]*(1.-hr*(1.5-.5*hr*hr))
        elif it[js] == 2:
            # Exponential model:
            cova2 = cova2 + cc[js]*np.exp(-3.0*h/aa[js])
        elif it[js] == 3:
            # Gaussian model:
            hh=-3.0*(h*h)/(aa[js]*aa[js])
            cova2 = cova2 +cc[js]*np.exp(hh)
        elif it[js] == 4:
            # Power model:
            cov1 = PMX - cc[js]*(h**aa[js])
            cova2 = cova2 + cov1
    return cova2

def ksol_numpy(neq,a,r):
    """Solve the neq x neq kriging system: trims the flat LHS array `a` and
    RHS `r` to size, then solves via explicit matrix inverse.
    # using Numpy methods"""
    a = a[0:neq*neq] # trim the array
    a = np.reshape(a,(neq,neq)) # reshape to 2D
    ainv = linalg.inv(a) # invert matrix
    r = r[0:neq] # trim the array
    s = np.matmul(ainv,r) # matrix multiplication
    return s
```

Here's the KB2D program translated to Python. I have applied Numba to speedup the covariance calculaiton. I will look for additional speedups over time. 100 x 100 grid mesh will krige in about 10 seconds with 50 max data per estimate of 200 data in total on a modern desktop.

```
import math # for trig and constants
import scipy.spatial as sp # for fast nearest nearbour search
#from numba import jit # for precompile speed up

# GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the original Fortran to Python
# translated by Michael Pyrcz, the University of Texas at Austin (Jan, 2019)
def kb2d(df,xcol,ycol,vcol,tmin,tmax,nx,xmn,xsiz,ny,ymn,ysiz,nxdis,nydis,
         ndmin,ndmax,radius,ktype,skmean,vario):
    """2D kriging over a regular ny x nx grid (GSLIB KB2D port).

    df, xcol, ycol, vcol : data DataFrame and its x/y/value column names
    tmin, tmax : values outside [tmin, tmax] are trimmed from the data
    nx, xmn, xsiz / ny, ymn, ysiz : grid definition (count, origin, cell size)
    nxdis, nydis : number of block-discretization points per cell (1,1 = point kriging)
    ndmin, ndmax : min/max number of neighboring data used per estimate
    radius : search radius (only rad2 is computed from it here; the actual
        neighbor search below uses a plain ndmax-nearest KDTree query)
    ktype : 0 = simple kriging with mean `skmean`; 1 = ordinary kriging
        (adds the unbiasedness constraint)
    vario : variogram dict from make_variogram
    Returns (kmap, vmap): grids of estimates and kriging variances, with
    row 0 at the TOP (y flipped via kmap[ny-iy-1,ix]).

    NOTE(review): several Fortran leftovers survive in rarely-exercised
    branches and will raise NameError/TypeError if hit:
      * `real(...)` (Fortran intrinsic) is used in the ndb > 1 block-kriging
        paths and in the ktype == 1 variance line;
      * `xdb(i)` / `ydb(i)` use call parentheses instead of [i] indexing in
        the single-sample block branch;
      * for ktype == 1 the flat LHS array `a` is filled with (na+1)^2 + na + 2
        entries but ksol_numpy reshapes the first (na+2)^2 -- the unbiasedness
        column is missing, so the ordinary-kriging system layout looks wrong.
    The demos in this notebook use nxdis = nydis = 1 and ktype = 0, which
    avoid all of these paths.
    """
    # Constants
    UNEST = -999.
    EPSLON = 1.0e-10
    VERSION = 2.907  # kept from the Fortran source; unused
    first = True     # unused after the Fortran translation
    PMX = 9999.0
    MAXSAM = ndmax + 1
    MAXDIS = nxdis * nydis
    MAXKD = MAXSAM + 1
    MAXKRG = MAXKD * MAXKD

    # load the variogram
    nst = vario['nst']
    cc = np.zeros(nst); aa = np.zeros(nst); it = np.zeros(nst)
    ang = np.zeros(nst); anis = np.zeros(nst)
    c0 = vario['nug']; cc[0] = vario['cc1']; it[0] = vario['it1']; ang[0] = vario['azi1'];
    aa[0] = vario['hmaj1']; anis[0] = vario['hmin1']/vario['hmaj1'];
    if nst == 2:
        cc[1] = vario['cc2']; it[1] = vario['it2']; ang[1] = vario['azi2'];
        aa[1] = vario['hmaj2']; anis[1] = vario['hmin2']/vario['hmaj2'];

    # Allocate the needed memory:
    xdb = np.zeros(MAXDIS)
    ydb = np.zeros(MAXDIS)
    xa = np.zeros(MAXSAM)
    ya = np.zeros(MAXSAM)
    vra = np.zeros(MAXSAM)
    dist = np.zeros(MAXSAM)
    nums = np.zeros(MAXSAM)
    r = np.zeros(MAXKD)
    rr = np.zeros(MAXKD)
    s = np.zeros(MAXKD)
    a = np.zeros(MAXKRG)  # flat LHS kriging matrix, reshaped in ksol_numpy
    kmap = np.zeros((nx,ny))
    vmap = np.zeros((nx,ny))

    # Load the data
    df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]    # trim values outside tmin and tmax
    nd = len(df_extract)
    ndmax = min(ndmax,nd)  # cannot use more neighbors than data
    x = df_extract[xcol].values
    y = df_extract[ycol].values
    vr = df_extract[vcol].values

    # Make a KDTree for fast search of nearest neighbours
    dp = list((y[i], x[i]) for i in range(0,nd))  # unused; superseded by data_locs
    data_locs = np.column_stack((y,x))
    tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True, copy_data=False, balanced_tree=True)

    # Summary statistics for the data after trimming
    avg = vr.mean()
    stdev = vr.std()
    ss = stdev**2.0
    vrmin = vr.min()
    vrmax = vr.max()

    # Set up the discretization points per block.  Figure out how many
    # are needed, the spacing, and fill the xdb and ydb arrays with the
    # offsets relative to the block center (this only gets done once):
    ndb = nxdis * nydis
    if ndb > MAXDIS:
        print('ERROR KB2D: Too many discretization points ')
        print(' Increase MAXDIS or lower n[xy]dis')
        return kmap
    xdis = xsiz / max(float(nxdis),1.0)
    ydis = ysiz / max(float(nydis),1.0)
    xloc = -0.5*(xsiz+xdis)
    i = -1   # accounting for 0 as lowest index
    for ix in range(0,nxdis):
        xloc = xloc + xdis
        yloc = -0.5*(ysiz+ydis)
        for iy in range(0,nydis):
            yloc = yloc + ydis
            i = i+1
            xdb[i] = xloc
            ydb[i] = yloc

    # Initialize accumulators:
    cbb = 0.0
    rad2 = radius*radius  # NOTE(review): computed but never used below

    # Calculate Block Covariance. Check for point kriging.
    rotmat, maxcov = setup_rotmat(c0,nst,it,cc,ang,PMX)
    cov = cova2(xdb[0],ydb[0],xdb[0],ydb[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)

    # Keep this value to use for the unbiasedness constraint:
    unbias = cov
    first = False
    if ndb <= 1:
        cbb = cov  # point kriging: block covariance = point covariance
    else:
        # average covariance over all pairs of discretization points
        for i in range(0,ndb):
            for j in range(0,ndb):
                cov = cova2(xdb[i],ydb[i],xdb[j],ydb[j],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
                if i == j:
                    cov = cov - c0
                cbb = cbb + cov
        cbb = cbb/real(ndb*ndb)  # NOTE(review): `real` undefined in Python (see docstring)

    # MAIN LOOP OVER ALL THE BLOCKS IN THE GRID:
    nk = 0
    ak = 0.0
    vk = 0.0
    for iy in range(0,ny):
        yloc = ymn + (iy-0)*ysiz
        for ix in range(0,nx):
            xloc = xmn + (ix-0)*xsiz
            current_node = (yloc,xloc)  # KDTree was built on (y, x) order

            # Find the nearest samples within each octant: First initialize
            # the counter arrays:
            na = -1   # accounting for 0 as first index
            dist.fill(1.0e+20)
            nums.fill(-1)
            dist, nums = tree.query(current_node,ndmax) # use kd tree for fast nearest data search
            na = len(dist) - 1

            # Is there enough samples?
            if na + 1 < ndmin:   # accounting for min index of 0
                est  = UNEST
                estv = UNEST
                print('UNEST at ' + str(ix) + ',' + str(iy))
            else:
                # Put coordinates and values of neighborhood samples into xa,ya,vra:
                for ia in range(0,na+1):
                    jj = int(nums[ia])
                    xa[ia]  = x[jj]
                    ya[ia]  = y[jj]
                    vra[ia] = vr[jj]

                # Handle the situation of only one sample:
                if na == 0:   # accounting for min index of 0 - one sample case
                    na = 0
                    cb1 = cova2(xa[0],ya[0],xa[0],ya[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
                    xx  = xa[0] - xloc
                    yy  = ya[0] - yloc

                    # Establish Right Hand Side Covariance:
                    if ndb <= 1:
                        cb = cova2(xx,yy,xdb[0],ydb[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
                    else:
                        cb  = 0.0
                        for i in range(0,ndb):
                            cb = cb + cova2(xx,yy,xdb[i],ydb[i],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
                            dx = xx - xdb(i)  # NOTE(review): call syntax, should be xdb[i]
                            dy = yy - ydb(i)  # NOTE(review): call syntax, should be ydb[i]
                            if (dx*dx+dy*dy) < EPSLON:
                                cb = cb - c0
                        cb = cb / real(ndb)  # NOTE(review): `real` undefined in Python
                    if ktype == 0:
                        # simple kriging with a single datum
                        s[0] = cb/cbb
                        est  = s[0]*vra[0] + (1.0-s[0])*skmean
                        estv = cbb - s[0] * cb
                    else:
                        # ordinary kriging degenerates to the datum value
                        est  = vra[0]
                        estv = cbb - 2.0*cb + cb1
                else:
                    # Solve the Kriging System with more than one sample:
                    neq = na + 1 + ktype   # accounting for first index of 0
                    nn  = (neq + 1)*neq/2  # Fortran leftover (triangular storage size); unused

                    # Set up kriging matrices:
                    iin=-1   # accounting for first index of 0
                    for j in range(0,na+1):

                        # Establish Left Hand Side Covariance Matrix:
                        for i in range(0,na+1):   # was j - want full matrix
                            iin = iin + 1
                            a[iin] = cova2(xa[i],ya[i],xa[j],ya[j],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
                        xx = xa[j] - xloc
                        yy = ya[j] - yloc

                        # Establish Right Hand Side Covariance:
                        if ndb <= 1:
                            cb = cova2(xx,yy,xdb[0],ydb[0],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
                        else:
                            cb  = 0.0
                            for j1 in range(0,ndb):
                                cb = cb + cova2(xx,yy,xdb[j1],ydb[j1],nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)
                                dx = xx - xdb[j1]
                                dy = yy - ydb[j1]
                                if (dx*dx+dy*dy) < EPSLON:
                                    cb = cb - c0
                            cb = cb / real(ndb)  # NOTE(review): `real` undefined in Python
                        r[j]  = cb
                        rr[j] = r[j]

                    # Set the unbiasedness constraint:
                    if ktype == 1:
                        for i in range(0,na+1):
                            iin    = iin + 1
                            a[iin] = unbias
                        iin      = iin + 1
                        a[iin]   = 0.0
                        r[neq]  = unbias
                        rr[neq] = r[neq]

                    # Solve the Kriging System:
                    s = ksol_numpy(neq,a,r)
                    ising = 0 # need to figure this out
                    # Write a warning if the matrix is singular:
                    if ising != 0:
                        print('WARNING KB2D: singular matrix')
                        print(' for block' + str(ix) + ',' + str(iy)+ ' ')
                        est  = UNEST
                        estv = UNEST
                    else:
                        # Compute the estimate and the kriging variance:
                        est  = 0.0
                        estv = cbb
                        sumw = 0.0
                        if ktype == 1:
                            estv = estv - real(s[na+1])*unbias  # NOTE(review): `real` undefined in Python
                        for i in range(0,na+1):
                            sumw = sumw + s[i]
                            est  = est  + s[i]*vra[i]
                            estv = estv - s[i]*rr[i]
                        if ktype == 0:
                            est = est + (1.0-sumw)*skmean
            kmap[ny-iy-1,ix] = est   # flip y so row 0 is at the top of the map
            vmap[ny-iy-1,ix] = estv
            if est > UNEST:
                nk = nk + 1
                ak = ak + est
                vk = vk + est*est

    # END OF MAIN LOOP OVER ALL THE BLOCKS:
    if nk >= 1:
        ak = ak / float(nk)
        vk = vk/float(nk) - ak*ak
        print(' Estimated ' + str(nk) + ' blocks ')
        print(' average ' + str(ak) + ' variance ' + str(vk))

    return kmap, vmap
```

Here's a simple test of the KB2D code with visualizations to check the results including the data, estimates and kriging variance plotted.
#### Set the working directory I always like to do this so I don't lose files and to simplify subsequent read and writes (avoid including the full address each time). Also, in this case make sure to place the required (see above) GSLIB executables in this directory or a location identified in the environmental variable *Path*. ``` os.chdir("c:/PGE337") # set the working directory ``` You will have to update the part in quotes with your own working directory and the format is different on a Mac (e.g. "~/PGE"). ##### Make a Simple, Small Example The following are the basic parameters for the demonstration. This includes the number of cells in the 2D regular grid, the cell size (step) and the x and y min and max along with the color scheme. Then simply assume some data, and a variogram model. This will allow for very fast iteration with the kB2D program. ``` import pandas as pd nx = 100; ny = 100; xsiz = .05; ysiz = .05; xmn = 0.5; ymn = 0.5; nxdis = 1; nydis = 1 ndmin = 0; ndmax = 10; radius = 3; ktype = 0; skmean = 2 vario = make_variogram(nug=0.0,nst=1,it1=1,cc1=1.0,azi1=45,hmaj1=0.6,hmin1=.2) tmin = -999; tmax = 999 x = [5,4,2,1,1,3,5,2]; y = [1,2,2,3,4,4,2,3]; vr = [1,3,0,1,2.5,1.0,1.3,1.2] df = pd.DataFrame({'x':x,'y':y,'vr':vr}) kmap, vmap = kb2d(df,'x','y','vr',tmin,tmax,nx,xmn,xsiz,ny,ymn,ysiz,nxdis,nydis, ndmin,ndmax,radius,ktype,skmean,vario) ``` Let's visualize the resulting kriging estimates with the data plotted and the kriging variance map. 
``` cmap = plt.cm.plasma # color min and max and using the plasma color map xmin = xmn - xsiz/2; ymin = ymn - ysiz/2; xmax = xmin + xsiz * nx; ymax = ymin + ysiz * ny plt.subplot(121) locpix_st(kmap,xmin,xmax,ymin,ymax,xsiz,0.0,3.0,df,'x','y','vr','Kriging Estimate','X(m)','Y(m)','Porosity (%)',cmap) plt.subplot(122) pixelplt_st(vmap,xmin,xmax,ymin,ymax,xsiz,0.0,1.0,'Kriging Variance','X(m)','Y(m)','Porosity (%^2)',cmap) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.0, wspace=0.3, hspace=0.3) plt.show() ``` ##### Make a 2D Spatial Model to Sample Spaital Data The following are the parameters for the next demonstration. This includes the number of cells in the 2D regular grid, the cell size (step) and the x and y min and max along with the color scheme. Then we make a single realization of a Gausian distributed feature over the specified 2D grid and then apply affine correction to ensure we have a reasonable mean and spread for our feature's distribution, assumed to be Porosity (e.g. no negative values) while retaining the Gaussian distribution. Any transform could be applied at this point. We are keeping this workflow simple. *This is our truth model that we will sample*. The parameters of *GSLIB_sgsim_2d_uncond* are (nreal,nx,ny,hsiz,seed,hrange1,hrange2,azi,output_file). nreal is the number of realizations, nx and ny are the number of cells in x and y, hsiz is the cell siz, seed is the random number seed, hrange and hrange2 are the variogram ranges in major and minor directions respectively, azi is the azimuth of the primary direction of continuity (0 is aligned with Y axis) and output_file is a GEO_DAS file with the simulated realization. The ouput is the 2D numpy array of the simulation along with the name of the property. 
``` nx = 100; ny = 100; cell_size = 10 # grid number of cells and cell size xmin = 0.0; ymin = 0.0; # grid origin xmax = xmin + nx * cell_size; ymax = ymin + ny * cell_size # calculate the extent of model seed = 74073 # random number seed for stochastic simulation vario = make_variogram(0.0,nst=1,it1=1,cc1=1.0,azi1=0,hmaj1=500,hmin1=500) mean = 10.0; stdev = 2.0 # Porosity mean and standard deviation #cmap = plt.cm.RdYlBu vmin = 0; vmax = 16; cmap = plt.cm.plasma # color min and max and using the plasma color map # calculate a stochastic realization with standard normal distribution sim = GSLIB_sgsim_2d_uncond(1,nx,ny,cell_size,seed,vario,"Por") sim = affine(sim,mean,stdev) # correct the distribution to a target mean and standard deviation. sampling_ncell = 10 # sample every 10th node from the model #samples = regular_sample(sim,xmin,xmax,ymin,ymax,sampling_ncell,30,30,'Realization') #samples_cluster = samples.drop([80,79,78,73,72,71,70,65,64,63,61,57,56,54,53,47,45,42]) # this removes specific rows (samples) #samples_cluster = samples_cluster.reset_index(drop=True) # we reset and remove the index (it is not sequential anymore) samples = random_sample(sim,xmin,xmax,ymin,ymax,cell_size,100,"Por") locpix(sim,xmin,xmax,ymin,ymax,cell_size,vmin,vmax,samples,'X','Y','Por','Porosity Realization and Regular Samples','X(m)','Y(m)','Porosity (%)',cmap,"Por_Samples") ``` Below we assume a variogram and calculate a kriged map with the same grid as the simulation and conditional to the sampled values. Note: we are not concerned about the variogram of the kriging being consistent with the variogram of the data. We are exploring! This specific run took about 15 seconds on my desktop. 
``` import pandas as pd import timeit xsiz = cell_size; ysiz = cell_size; xmn = cell_size*0.5; ymn = cell_size*0.5; nxdis = 1; nydis = 1 ndmin = 0; ndmax = 30; radius = 1000; ktype = 0; skmean = 10.0 vario = make_variogram(nug=0.0,nst=1,it1=1,cc1=1.0,azi1=45,hmaj1=100,hmin1=100) tmin = -999; tmax = 999 kmap, vmap = kb2d(samples,'X','Y','Por',tmin,tmax,nx,xmn,xsiz,ny,ymn,ysiz,nxdis,nydis, ndmin,ndmax,radius,ktype,skmean,vario) ``` Let's visualize the resulting kriging estimates with the data plotted and the kriging variance map. ``` plt.subplot(121) locpix_st(kmap,xmin,xmax,ymin,ymax,xsiz,vmin,vmax,samples,'X','Y','Por','Kriged Porosity Map','X(m)','Y(m)','Porosity (%)',cmap) plt.subplot(122) pixelplt_st(vmap,xmin,xmax,ymin,ymax,xsiz,0.0,1.1,'Kriging Variance','X(m)','Y(m)','Porosity (%^2)',cmap) plt.subplots_adjust(left=0.0, bottom=0.0, right=2.0, top=1.0, wspace=0.3, hspace=0.3) plt.show() ``` I hope you find this code and demonstration useful. I'm always happy to discuss geostatistics, statistical modeling, uncertainty modeling and machine learning, *Michael* **Michael Pyrcz**, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin On Twitter I'm the **GeostatsGuy** and on YouTube my lectures are on the channel, **GeostatsGuy Lectures**.
github_jupyter
# Python for Science Python is free, it is open source, and it has a huge community. Python is one of the most popular and loved programming languages in the world! Many blogs come out every year listing the most popular programming languages. Python has been among the top choices for at least 5 years now. For example: [The 7 Most In-Demand Programming Languages of 2018](https://www.codingdojo.com/blog/7-most-in-demand-programming-languages-of-2018/), by CodingDojo; or the post [The Most In-Demand Programming Languages of 2018](https://www.thirdrepublic.com/blog/most-indemand-programming-languages-2018), on Third Republic. Python can be used for many things: managing databases, creating graphical user interfaces, making websites, and much more… including science. Because of the many uses, the world of Python includes many, many **Libraries** (you load the parts that you need, when you need them). In science, the two libraries that are king and queen of the world are: [NumPy](http://www.numpy.org), and [Matplotlib](https://matplotlib.org). ## Numpy NumPy is for working with data in the form of arrays (vectors, matrices). It has a myriad built-in functions or methods that work on arrays directly. To load the library into your current session of interactive Python, into a saved Python script, or into a Jupyter notebook, you use: ```python import numpy ``` Tips: * a one-dimensional array (vector) has the form: `[1.0, 0.5, 2.5]` * a two-dimensional array (matrix) has the form: `[[ 1.0, 0.5, 2.5], [ 0.5, 1.1, 2.0]]` * the elements in an array are numbered with an index that **starts at 0** * the colon notation: in any index position, a `:` means "all elements in this dimension" * once `numpy` is loaded, its built-in functions are called like this: `numpy.function(arg)` (where `arg` is the function argument: arrays to operats on, and parameters) _Try it!_ ``` import numpy # By the way: comments in code cells start with a hash. 
# here are two arrays, saved as variables x and y: x = numpy.array([1.0, 0.5, 2.5]) y = numpy.array([[ 1.0, 0.5, 2.5], [ 0.5, 1.1, 2.0]]) # The print function works on arrays: print(x) print(y) numpy.shape(y) numpy.shape(x) ``` Let's review what happened there. We first loaded `numpy`, giving us the full power to use arrays. We created two arrays: `x` and `y`… then we print `x` and we print `y`. They look nice. Numpy has a built-in function to find out the "shape" of an array, which means: _how many elements does this array have in each dimension?_ We find that `y` is a two-by-three array (it has two dimensions). What is the first element of `x`? We can use square brackets and the zero-index to find out: ``` x[0] ``` **Exercise**: Now, try it yourself. What is the first element of `y`? Right. The first element of `y` is a 3-wide array of numbers. If we want to access the first element of _this_ now, we use: ``` y[0][0] ``` **Exercise**: Try picking out different elements of the array `y`… ### We learned that: * The square brackets allow us to pick out the elements of an array using an index: `x[i]` * For a two-dimensional array, we can use two indices: `y[i][j]` * All indices start at zero. This is super powerful! ## Matplotlib Matplotlib is for making all kinds of plots. To get an idea of the great variety of plots possibe, have a look at the online [Gallery](https://matplotlib.org/gallery.html). You can see that Matplotlib itself is a pretty big library. We can load a portion of the library (called a module) that has the basic plotting funtions with: ```python from matplotlib import pyplot ``` Once the `pyplot` module is loaded, its built-in functions are called like this: `pyplot.function(arg)` (where arg is the function argument). ## An example: size of households in the US Did you know that the size of households—that is, the number of people living in each household—has been steadily decreasing in the US and many other countries? 
This has perhaps surprising consequences. Even if population growth slows down, or stops altogether, the number of households keeps increasing at a fast rate. More households means more $CO_2$ emissions! This is bad for the planet. ### Get the data Here, we're assuming that you have all the files from this tutorial, or are working on the lesson after launching Binder. In that case you have a dataset in the `data` folder. To load the data into two arrays, named `year` and `av-size`, execute the following cell: ``` #Load the data from local disk year, av_size = numpy.loadtxt(fname='data/statistic_id183648.csv', delimiter=',', skiprows=1, unpack=True) print(year) ``` **Exercise**: Now print the variable `av_size`, correspondig to the average size of households (in numbers of people) for each year: Great! The next thing we want to do is make a plot of the changing size of households over the years. To do that, we need to load the `Matplotlib` module called `pyplot`: ``` from matplotlib import pyplot %matplotlib inline ``` What's this `from` business about? `matplotlib` is a pretty big (and awesome!) library. All that we need is a subset of the library for creating 2D plots, so we ask for the `pyplot` module of the `matplotlib` library. Plotting the data is as easy as calling the function [`plot()`](http://matplotlib.org/1.5.1/api/pyplot_api.html#matplotlib.pyplot.plot) from the module `pyplot`. ``` pyplot.plot(year, av_size) ``` But what if we'd like to get a title on this plot, or add labels to the axes? (We should always have labelled axes!). Also, we notice a long jump from the year 1960 to 1970: let's add markers to the plot and change the line style to a dotted line. 
``` pyplot.plot(year, av_size, linestyle=':', marker='o') pyplot.title("Household size in the US, 1960–2016 \n", fontsize=16) pyplot.ylabel("Average number of people per household") ``` **Exercise**: In the same cell above, now add a label on the x-axis, using the `pyplot.xlabel()` function, and re-execute it. ## Python for science, so far You learned about: * loading the Python libraries for science * using data in the form of arrays with the `numpy` library * accessing elements of an array * loading data from a file * plotting data with the `Matplotlib` library * adding title, labels to a plot, and changing the style # Data source * Data from the US Census Bureau of Labor Statistics downloaded from [https://www.statista.com/statistics/183648/average-size-of-households-in-the-us/](https://www.statista.com/statistics/183648/average-size-of-households-in-the-us/) # Next * [Earth temperature over time](http://nbviewer.jupyter.org/github/barbagroup/Caminos/blob/master/3--Earth_temperature_over_time.ipynb) --- <p style="font-size:smaller">(c) 2017 Lorena A. Barba, Natalia Clementi. Free to use under the Creative Commons Attribution <a href="https://creativecommons.org/licenses/by/4.0/"> CC-BY 4.0 License</a>. Written for the tutorial <a href="https://github.com/barbagroup/Caminos">"Data Science for a Better World"</a>, at the GW <a href="https://summer.gwu.edu/caminos">_Caminos al Futuro_</a> Summer program. </p>
github_jupyter
```
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from xgboost import XGBClassifier
import numpy as np
import pandas as pd
import math
import sys
import random
import matplotlib.pyplot as plt
import itertools

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    cm : 2D confusion-matrix array (e.g. from sklearn.metrics.confusion_matrix)
    classes : tick labels, in the same order as the rows/columns of cm
    NOTE(review): when normalize=True the normalization happens AFTER
    plt.imshow, so the image colors reflect raw counts while the printed
    matrix and cell text are normalized -- confirm this is intended.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    thresh = cm.max() / 2.
    # annotate each cell, switching text color for contrast against the colormap
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('Classe verdadeira')
    plt.xlabel('Classe prevista')

def thermometer(word_vec, min_=-1.0, max_=1.0, n=20):
    """Thermometer-encode each component of word_vec: emit 1s for every
    n-th-of-range threshold the value exceeds, 0s after.

    NOTE(review): the padding `[0] * (n-len(rang)-1)` yields n-1 (not n)
    bits per component -- looks like an off-by-one; this function appears
    unused below (thermometer_one_hot is used instead).
    """
    vec = []
    for v in word_vec:
        t = (max_ - min_)/n
        rang = []
        for i in np.arange(min_ + t, max_, t):
            if v > i:
                rang.append(1)
            else:
                break
        rang.extend([0] * (n-len(rang)-1))
        vec.extend(rang)
    return np.array(vec)

def thermometer_one_hot(real_vec, min_=-1.0, max_=1.0, n=20):
    """One-hot bucket encoding: each component of real_vec is mapped to one
    of n equal-width bins over [min_, max_], producing n bits per component
    with exactly one bit set. Values equal to max_ go to the last bin."""
    vec = []
    for v in real_vec:
        if v == max_:
            rang = [0] * n
            rang[-1] = 1
            vec.extend(rang)
        else:
            rang = [0] * n
            t = (max_ - min_)
            p = v - min_
            s = int((p / t) * n)  # bin index; assumes v is in [min_, max_)
            rang[s] = 1
            vec.extend(rang)
    return np.array(vec)

# pre-trained 50-d GloVe embeddings converted to word2vec format (local file)
model = KeyedVectors.load_word2vec_format('../glove.6B.50d.word2vec.txt')

# one CSV of StackExchange posts per topic; topic name doubles as the class label
df = {
    "cooking": pd.read_csv('../dataset/processed/cooking.csv'),
    "crypto": pd.read_csv('../dataset/processed/crypto.csv'),
    "robotics": pd.read_csv('../dataset/processed/robotics.csv'),
    "biology": pd.read_csv('../dataset/processed/biology.csv'),
    "travel": pd.read_csv('../dataset/processed/travel.csv'),
    "diy": pd.read_csv('../dataset/processed/diy.csv'),
    #"physics": pd.read_csv('physics.csv'),
}

# Training set: for each topic take the first 2000 posts, average the GloVe
# vectors of their words, L2-normalize, then one-hot thermometer encode.
X = []
y = []
for file in df:
    for i in range(2000):#range(df[file].shape[0]):
        #data.append(df[file].iloc[i]['title'])
        #data.append(df[file].iloc[i]['content'])
        if type(df[file].iloc[i]['content']) is str:
            doc = df[file].iloc[i]['content']
            v = np.array([0] * 50)
            w = doc.split(' ')
            for j in w:
                if j in model:
                    v = np.add(v, model[j])
            v = v / np.linalg.norm(v) # normalized
            X.append(thermometer_one_hot(v, n=10))
            y.append(file)

# Test set: posts 2001-2500 of each topic, same featurization as above.
X_test = []
y_test = []
for file in df:
    for i in range(2001,2501):#range(df[file].shape[0]):
        #data.append(df[file].iloc[i]['title'])
        #data.append(df[file].iloc[i]['content'])
        if type(df[file].iloc[i]['content']) is str:
            doc = df[file].iloc[i]['content']
            v = np.array([0] * 50)
            w = doc.split(' ')
            for j in w:
                if j in model:
                    v = np.add(v, model[j])
            v = v / np.linalg.norm(v) # normalized
            X_test.append(thermometer_one_hot(v, n=10))
            y_test.append(file)

len(y_test)

#from sklearn.model_selection import KFold
#from sklearn.utils import shuffle
#Xnp = np.array(X)
#ynp = np.array(y)
#Xnp, ynp = shuffle(Xnp, ynp)
#kf = KFold(n_splits=5, shuffle=True)
#for train_index, test_index in kf.split(X):
#    print("TRAIN:", train_index, "TEST:", test_index)
#    X_train, X_test = Xnp[train_index], Xnp[test_index]
#    y_train, y_test = ynp[train_index], ynp[test_index]

# Train an XGBoost classifier on the encoded vectors.
xg = XGBClassifier()
#for i in range(len(X)):
#    w.fit([X[i]], [y[i]])
xg.fit(X, y)

result = xg.predict(X_test)

# import confusion matrix function
from sklearn.metrics import confusion_matrix

# Making the Confusion Matrix
cm = confusion_matrix(y_test, result, labels=['cooking', 'crypto', 'robotics', 'biology', 'travel', 'diy'])

#class_names = ['A','B','C','D','E','F','G','H']
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cm, classes=['cooking', 'crypto', 'robotics', 'biology', 'travel', 'diy'],
                      title='Matriz de Confusao')
plt.show()

from sklearn.metrics import accuracy_score
accuracy_score(y_test, result)
```
github_jupyter
## Demo notebook for accessing TartanAir data on Azure <p style="color:red"> <b> !! NOTE: This sample file should only be used on Azure. To download the data to your local machine, please refer to the download instructions <a href=https://github.com/castacks/tartanair_tools#download-data>here</a> or the <a href=http://theairlab.org/tartanair-dataset>dataset website</a> for the sample data. </b> </p> This notebook provides an example of accessing TartanAir data from blobl storage on Azure, including: 1) navigate the directories of different environments and trajectories. 2) load the data into memory, and 3) visualize the data. ## Data directory structure ``` ROOT | --- ENV_NAME_0 # environment folder | | | ---- Easy # difficulty level | | | | | ---- P000 # trajectory folder | | | | | | | +--- depth_left # 000000_left_depth.npy - 000xxx_left_depth.npy | | | +--- depth_right # 000000_right_depth.npy - 000xxx_right_depth.npy | | | +--- flow # 000000_000001_flow/mask.npy - 000xxx_000xxx_flow/mask.npy | | | +--- image_left # 000000_left.png - 000xxx_left.png | | | +--- image_right # 000000_right.png - 000xxx_right.png | | | +--- seg_left # 000000_left_seg.npy - 000xxx_left_seg.npy | | | +--- seg_right # 000000_right_seg.npy - 000xxx_right_seg.npy | | | ---- pose_left.txt | | | ---- pose_right.txt | | | | | +--- P001 | | . | | . | | | | | +--- P00K | | | +--- Hard | +-- ENV_NAME_1 . . 
| +-- ENV_NAME_N
```

## Notebook dependencies

`pip install numpy`

`pip install azure-storage-blob`

`pip install opencv-python`

## Imports and contrainer_client

```
from azure.storage.blob import ContainerClient
import numpy as np
import io
import cv2
import time
import matplotlib.pyplot as plt
%matplotlib inline

# Dataset website: http://theairlab.org/tartanair-dataset/
# anonymous (credential=None) read-only client for the public container
account_url = 'https://tartanair.blob.core.windows.net/'
container_name = 'tartanair-release1'
container_client = ContainerClient(account_url=account_url,
                                   container_name=container_name,
                                   credential=None)
```

## List the environments and trajectories

```
def get_environment_list():
    '''
    List all the environments shown in the root directory
    '''
    env_gen = container_client.walk_blobs()
    envlist = []
    for env in env_gen:
        envlist.append(env.name)
    return envlist

def get_trajectory_list(envname, easy_hard = 'Easy'):
    '''
    List all the trajectory folders, which is named as 'P0XX'
    '''
    assert(easy_hard=='Easy' or easy_hard=='Hard')
    traj_gen = container_client.walk_blobs(name_starts_with=envname + '/' + easy_hard+'/')
    trajlist = []
    for traj in traj_gen:
        trajname = traj.name
        trajname_split = trajname.split('/')
        trajname_split = [tt for tt in trajname_split if len(tt)>0]
        # keep only the trajectory folders (names starting with 'P')
        if trajname_split[-1][0] == 'P':
            trajlist.append(trajname)
    return trajlist

def _list_blobs_in_folder(folder_name):
    """
    List all blobs in a virtual folder in an Azure blob container
    """
    files = []
    generator = container_client.list_blobs(name_starts_with=folder_name)
    for blob in generator:
        files.append(blob.name)
    return files

def get_image_list(trajdir, left_right = 'left'):
    # PNG frames of the selected camera for one trajectory
    assert(left_right == 'left' or left_right == 'right')
    files = _list_blobs_in_folder(trajdir + '/image_' + left_right + '/')
    files = [fn for fn in files if fn.endswith('.png')]
    return files

def get_depth_list(trajdir, left_right = 'left'):
    # per-frame depth maps (.npy) of the selected camera
    assert(left_right == 'left' or left_right == 'right')
    files = _list_blobs_in_folder(trajdir + '/depth_' + left_right + '/')
    files = [fn for fn in files if fn.endswith('.npy')]
    return files

def get_flow_list(trajdir, ):
    # optical-flow files (xxx_flow.npy) between consecutive frames
    files = _list_blobs_in_folder(trajdir + '/flow/')
    files = [fn for fn in files if fn.endswith('flow.npy')]
    return files

def get_flow_mask_list(trajdir, ):
    # occlusion/validity masks (xxx_mask.npy) matching the flow files
    files = _list_blobs_in_folder(trajdir + '/flow/')
    files = [fn for fn in files if fn.endswith('mask.npy')]
    return files

def get_posefile(trajdir, left_right = 'left'):
    # path of the per-trajectory pose text file for the selected camera
    assert(left_right == 'left' or left_right == 'right')
    return trajdir + '/pose_' + left_right + '.txt'

def get_seg_list(trajdir, left_right = 'left'):
    # per-frame semantic segmentation files (.npy) of the selected camera
    assert(left_right == 'left' or left_right == 'right')
    files = _list_blobs_in_folder(trajdir + '/seg_' + left_right + '/')
    files = [fn for fn in files if fn.endswith('.npy')]
    return files
```

### List all the environments

```
envlist = get_environment_list()
print('Find {} environments..'.format(len(envlist)))
print(envlist)
```

### List all the 'Easy' trajectories in the first environment

```
diff_level = 'Easy'
env_ind = 0
trajlist = get_trajectory_list(envlist[env_ind], easy_hard = diff_level)
print('Find {} trajectories in {}'.format(len(trajlist), envlist[env_ind]+diff_level))
print(trajlist)
```

### List all the data files in one trajectory

```
traj_ind = 1
traj_dir = trajlist[traj_ind]

left_img_list = get_image_list(traj_dir, left_right = 'left')
print('Find {} left images in {}'.format(len(left_img_list), traj_dir))
right_img_list = get_image_list(traj_dir, left_right = 'right')
print('Find {} right images in {}'.format(len(right_img_list), traj_dir))

left_depth_list = get_depth_list(traj_dir, left_right = 'left')
print('Find {} left depth files in {}'.format(len(left_depth_list), traj_dir))
right_depth_list = get_depth_list(traj_dir, left_right = 'right')
print('Find {} right depth files in {}'.format(len(right_depth_list), traj_dir))

left_seg_list = get_seg_list(traj_dir, left_right = 'left')
print('Find {} left segmentation files in {}'.format(len(left_seg_list), traj_dir))
# NOTE(review): the next call passes left_right='left' but the result is
# assigned to right_seg_list and reported as "right segmentation" --
# likely a copy/paste slip (should presumably be left_right='right').
right_seg_list = get_seg_list(traj_dir, left_right = 'left')
print('Find {} right segmentation files in {}'.format(len(right_seg_list), traj_dir))

flow_list = get_flow_list(traj_dir)
print('Find {} flow files in {}'.format(len(flow_list), traj_dir))
flow_mask_list = get_flow_mask_list(traj_dir)
print('Find {} flow mask files in {}'.format(len(flow_mask_list), traj_dir))

left_pose_file = get_posefile(traj_dir, left_right = 'left')
print('Left pose file: {}'.format(left_pose_file))
right_pose_file = get_posefile(traj_dir, left_right = 'right')
print('Right pose file: {}'.format(right_pose_file))
```

## Functions for data downloading

```
def read_numpy_file(numpy_file,):
    '''
    return a numpy array given the file path
    '''
    bc = container_client.get_blob_client(blob=numpy_file)
    data = bc.download_blob()
    ee = io.BytesIO(data.content_as_bytes())
    ff = np.load(ee)
    return ff

def read_image_file(image_file,):
    '''
    return a uint8 numpy array given the file path
    '''
    bc = container_client.get_blob_client(blob=image_file)
    data = bc.download_blob()
    ee = io.BytesIO(data.content_as_bytes())
    img=cv2.imdecode(np.asarray(bytearray(ee.read()),dtype=np.uint8), cv2.IMREAD_COLOR)
    im_rgb = img[:, :, [2, 1, 0]] # BGR2RGB
    return im_rgb
```

## Functions for data visualization

```
def depth2vis(depth, maxthresh = 50):
    # Map a depth array to a uint8 RGB image: clip at maxthresh, scale to
    # 0-255, and replicate across 3 channels.
    depthvis = np.clip(depth,0,maxthresh)
    depthvis = depthvis/maxthresh*255
    depthvis = depthvis.astype(np.uint8)
    depthvis = np.tile(depthvis.reshape(depthvis.shape+(1,)), (1,1,3))
    return depthvis

def seg2vis(segnp):
    # Map segmentation class indices (0-255) to RGB colors using the local
    # lookup table 'seg_rgbs.txt' (must be present in the working directory).
    colors = np.loadtxt('seg_rgbs.txt')
    segvis = np.zeros(segnp.shape+(3,), dtype=np.uint8)

    for k in range(256):
        mask = segnp==k
        colorind = k % len(colors)  # wrap if more classes than colors
        if np.sum(mask)>0:
            segvis[mask,:] = colors[colorind]

    return segvis

# Convert flow components (du, dv) to angle and magnitude; continues
# beyond this chunk (the return statement is completed below).
def _calculate_angle_distance_from_du_dv(du, dv, flagDegree=False):
    a = np.arctan2( dv, du )

    angleShift = np.pi

    if ( True == flagDegree ):
        a = a / np.pi * 180
        angleShift = 180
        # print("Convert angle from radian to degree as demanded by the input file.")

    d = np.sqrt( du * du + dv * dv )

    return a, d,
angleShift def flow2vis(flownp, maxF=500.0, n=8, mask=None, hueMax=179, angShift=0.0): """ Show a optical flow field as the KITTI dataset does. Some parts of this function is the transform of the original MATLAB code flow_to_color.m. """ ang, mag, _ = _calculate_angle_distance_from_du_dv( flownp[:, :, 0], flownp[:, :, 1], flagDegree=False ) # Use Hue, Saturation, Value colour model hsv = np.zeros( ( ang.shape[0], ang.shape[1], 3 ) , dtype=np.float32) am = ang < 0 ang[am] = ang[am] + np.pi * 2 hsv[ :, :, 0 ] = np.remainder( ( ang + angShift ) / (2*np.pi), 1 ) hsv[ :, :, 1 ] = mag / maxF * n hsv[ :, :, 2 ] = (n - hsv[:, :, 1])/n hsv[:, :, 0] = np.clip( hsv[:, :, 0], 0, 1 ) * hueMax hsv[:, :, 1:3] = np.clip( hsv[:, :, 1:3], 0, 1 ) * 255 hsv = hsv.astype(np.uint8) rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) if ( mask is not None ): mask = mask > 0 rgb[mask] = np.array([0, 0 ,0], dtype=np.uint8) return rgb ``` ### Download and visualize the data ``` data_ind = 173 # randomly select one frame (data_ind < TRAJ_LEN) ``` #### Visualize the left and right RGB images ``` left_img = read_image_file(left_img_list[data_ind]) right_img = read_image_file(right_img_list[data_ind]) plt.figure(figsize=(12, 5)) plt.subplot(121) plt.imshow(left_img) plt.title('Left Image') plt.subplot(122) plt.imshow(right_img) plt.title('Right Image') plt.show() ``` #### Visualize the left and right depth files ``` left_depth = read_numpy_file(left_depth_list[data_ind]) left_depth_vis = depth2vis(left_depth) right_depth = read_numpy_file(right_depth_list[data_ind]) right_depth_vis = depth2vis(right_depth) plt.figure(figsize=(12, 5)) plt.subplot(121) plt.imshow(left_depth_vis) plt.title('Left Depth') plt.subplot(122) plt.imshow(right_depth_vis) plt.title('Right Depth') plt.show() ``` #### Visualize the left and right segmentation files ``` left_seg = read_numpy_file(left_seg_list[data_ind]) left_seg_vis = seg2vis(left_seg) right_seg = read_numpy_file(right_seg_list[data_ind]) right_seg_vis = 
seg2vis(right_seg) plt.figure(figsize=(12, 5)) plt.subplot(121) plt.imshow(left_seg_vis) plt.title('Left Segmentation') plt.subplot(122) plt.imshow(right_seg_vis) plt.title('Right Segmentation') plt.show() ``` #### Visualize the flow and mask files ``` flow = read_numpy_file(flow_list[data_ind]) flow_vis = flow2vis(flow) flow_mask = read_numpy_file(flow_mask_list[data_ind]) flow_vis_w_mask = flow2vis(flow, mask = flow_mask) plt.figure(figsize=(12, 5)) plt.subplot(121) plt.imshow(flow_vis) plt.title('Optical Flow') plt.subplot(122) plt.imshow(flow_vis_w_mask) plt.title('Optical Flow w/ Mask') plt.show() ```
github_jupyter
``` import os import re import zipfile import subprocess import pandas as pd import numpy as np from datetime import datetime DATASET = 'ml-1m' # only support "ml-100k" and "ml-1m" now RAW_PATH = os.path.join('./', DATASET) RANDOM_SEED = 0 NEG_ITEMS = 99 ``` # Load Data 1. Load interaction data and item metadata 2. Filter out items with less than 5 interactions 3. Calculate basic statistics ``` # download data if not exists if not os.path.exists(RAW_PATH): subprocess.call('mkdir ' + RAW_PATH, shell=True) if not os.path.exists(os.path.join(RAW_PATH, DATASET + '.zip')): print('Downloading data into ' + RAW_PATH) subprocess.call( 'cd {} && curl -O http://files.grouplens.org/datasets/movielens/{}.zip' .format(RAW_PATH, DATASET), shell=True) with zipfile.ZipFile(os.path.join(RAW_PATH, DATASET + '.zip')) as z: if DATASET == 'ml-100k': with z.open(os.path.join(DATASET, 'u.data')) as f: data_df = pd.read_csv(f, sep="\t", header=None) with z.open(os.path.join(DATASET, 'u.item')) as f: meta_df = pd.read_csv(f, sep='|', header=None, encoding='ISO-8859-1') elif DATASET == 'ml-1m': with z.open(os.path.join(DATASET, 'ratings.dat')) as f: data_df = pd.read_csv(f, sep=b'::', header=None, engine='python') with z.open(os.path.join(DATASET, 'movies.dat')) as f: meta_df = pd.read_csv(f, sep=b'::', header=None, engine='python') data_df.columns = ['user_id', 'item_id', 'label', 'time'] data_df.head() genres = [ 'i_Action', 'i_Adventure', 'i_Animation', "i_Children's", 'i_Comedy', 'i_Crime', 'i_Documentary', 'i_Drama', 'i_Fantasy', 'i_Film-Noir', 'i_Horror', 'i_Musical', 'i_Mystery', 'i_Romance', 'i_Sci-Fi', 'i_Thriller', 'i_War', 'i_Western', 'i_Other' ] if DATASET == 'ml-100k': item_df = meta_df.drop([1, 3, 4], axis=1) item_df.columns = ['item_id', 'i_year'] + genres elif DATASET == 'ml-1m': item_df = meta_df.copy() item_df.columns = ['item_id', 'title', 'genre'] item_df['title'] = item_df['title'].apply(lambda x: x.decode('ISO-8859-1')) item_df['genre'] = item_df['genre'].apply(lambda 
x: x.decode('ISO-8859-1')) genre_dict = dict() for g in genres: genre_dict[g] = np.zeros(len(item_df), dtype=np.int32) item_genre = item_df['genre'].apply(lambda x: x.split('|')).values for idx, genre_lst in enumerate(item_genre): for g in genre_lst: genre_dict['i_' + g][idx] = 1 for g in genres: item_df[g] = genre_dict[g] item_df = item_df.drop(columns=['genre']) item_df.head() # Only retain users and items with at least 5 associated interactions # print('Filter before:', len(data_df)) # filter_before = -1 # while filter_before != len(data_df): # filter_before = len(data_df) # for stage in ['user_id', 'item_id']: # val_cnt = data_df[stage].value_counts() # cnt_df = pd.DataFrame({stage: val_cnt.index, 'cnt': val_cnt.values}) # data_df = pd.merge(data_df, cnt_df, on=stage, how='left') # data_df = data_df[data_df['cnt'] >= 5].drop(columns=['cnt']) # print('Filter after:', len(data_df)) item_df = item_df[item_df['item_id'].isin(data_df['item_id'])] # remove unuseful metadata ``` ### Statistics ``` n_users = data_df['user_id'].value_counts().size n_items = data_df['item_id'].value_counts().size n_clicks = len(data_df) min_time = data_df['time'].min() max_time = data_df['time'].max() time_format = '%Y-%m-%d' print('# Users:', n_users) print('# Items:', n_items) print('# Interactions:', n_clicks) print('Time Span: {}/{}'.format( datetime.utcfromtimestamp(min_time).strftime(time_format), datetime.utcfromtimestamp(max_time).strftime(time_format)) ) ``` # Build Dataset ### Interaction data ``` np.random.seed(RANDOM_SEED) out_df = data_df[['user_id', 'item_id', 'time']] out_df = out_df.drop_duplicates(['user_id', 'item_id', 'time']) out_df.sort_values(by=['time', 'user_id'], kind='mergesort', inplace=True) out_df = out_df.reset_index(drop=True) out_df.head() # reindex (start from 1) uids = sorted(out_df['user_id'].unique()) user2id = dict(zip(uids, range(1, len(uids) + 1))) iids = sorted(out_df['item_id'].unique()) item2id = dict(zip(iids, range(1, len(iids) + 1))) 
out_df['user_id'] = out_df['user_id'].apply(lambda x: user2id[x]) out_df['item_id'] = out_df['item_id'].apply(lambda x: item2id[x]) out_df.head() # leave one out spliting clicked_item_set = dict() for user_id, seq_df in out_df.groupby('user_id'): clicked_item_set[user_id] = set(seq_df['item_id'].values.tolist()) def generate_dev_test(data_df): result_dfs = [] n_items = data_df['item_id'].value_counts().size for idx in range(2): result_df = data_df.groupby('user_id').tail(1).copy() data_df = data_df.drop(result_df.index) neg_items = np.random.randint(1, n_items + 1, (len(result_df), NEG_ITEMS)) for i, uid in enumerate(result_df['user_id'].values): user_clicked = clicked_item_set[uid] for j in range(len(neg_items[i])): while neg_items[i][j] in user_clicked: neg_items[i][j] = np.random.randint(1, n_items + 1) result_df['neg_items'] = neg_items.tolist() result_dfs.append(result_df) return result_dfs, data_df leave_df = out_df.groupby('user_id').head(1) data_df = out_df.drop(leave_df.index) [test_df, dev_df], data_df = generate_dev_test(data_df) train_df = pd.concat([leave_df, data_df]).sort_index() len(train_df), len(dev_df), len(test_df) train_df.head() test_df.head() # save results train_df.to_csv(os.path.join(RAW_PATH, 'train.csv'), sep='\t', index=False) dev_df.to_csv(os.path.join(RAW_PATH, 'dev.csv'), sep='\t', index=False) test_df.to_csv(os.path.join(RAW_PATH, 'test.csv'), sep='\t', index=False) ``` ### Item Metadata ``` item_df['item_id'] = item_df['item_id'].apply(lambda x: item2id[x]) if DATASET == 'ml-1m': item_df['i_year'] = item_df['title'].apply(lambda x: int(re.match('.+\((\d{4})\)$', x).group(1))) item_df = item_df.drop(columns=['title']) elif DATASET == 'ml-100k': item_df['i_year'] = item_df['i_year'].apply(lambda x: int(str(x).split('-')[-1]) if pd.notnull(x) else 0) seps = [1900, 1940, 1950, 1960, 1970, 1980, 1985] + list(range(1990, int(item_df['i_year'].max() + 2))) year_dict = {} for i, sep in enumerate(seps[:-1]): for j in range(seps[i], seps[i + 
1]): year_dict[j] = i + 1 item_df['i_year'] = item_df['i_year'].apply(lambda x: year_dict[x] if x > 0 else 0) item_df.head() # save results item_df.to_csv(os.path.join(RAW_PATH, 'item_meta.csv'), sep='\t', index=False) ```
github_jupyter
# Lab: TfTransform # **Learning Objectives** 1. Preprocess data and engineer new features using TfTransform 1. Create and deploy Apache Beam pipeline 1. Use processed data to train taxifare model locally then serve a prediction ## Introduction While Pandas is fine for experimenting, for operationalization of your workflow it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam allows for streaming. In this lab we will pull data from BigQuery then use Apache Beam TfTransform to process the data. Only specific combinations of TensorFlow/Beam are supported by tf.transform so make sure to get a combo that works. In this lab we will be using: * TFT 0.15.0 * TF 2.0 * Apache Beam [GCP] 2.16.0 ``` !pip install --user apache-beam[gcp]==2.16.0 !pip install --user tensorflow-transform==0.15.0 ``` Download .whl file for tensorflow-transform. We will pass this file to Beam Pipeline Options so it is installed on the DataFlow workers ``` !pip download tensorflow-transform==0.15.0 --no-deps ``` <b>Restart the kernel</b> (click on the reload button above). ``` %%bash pip freeze | grep -e 'flow\|beam' import tensorflow as tf import tensorflow_transform as tft import shutil print(tf.__version__) # change these to try this notebook out BUCKET = 'cloud-example-labs' PROJECT = 'project-id' REGION = 'us-central1' import os os.environ['BUCKET'] = BUCKET os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION %%bash gcloud config set project $PROJECT gcloud config set compute/region $REGION %%bash if ! gsutil ls | grep -q gs://${BUCKET}/; then gsutil mb -l ${REGION} gs://${BUCKET} fi ``` ## Input source: BigQuery Get data from BigQuery but defer the majority of filtering etc. to Beam. Note that the dayofweek column is now strings. ``` from google.cloud import bigquery def create_query(phase, EVERY_N): """Creates a query with the proper splits. Args: phase: int, 1=train, 2=valid. 
EVERY_N: int, take an example EVERY_N rows. Returns: Query string with the proper splits. """ base_query = """ WITH daynames AS (SELECT ['Sun', 'Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat'] AS daysofweek) SELECT (tolls_amount + fare_amount) AS fare_amount, daysofweek[ORDINAL(EXTRACT(DAYOFWEEK FROM pickup_datetime))] AS dayofweek, EXTRACT(HOUR FROM pickup_datetime) AS hourofday, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count AS passengers, 'notneeded' AS key FROM `nyc-tlc.yellow.trips`, daynames WHERE trip_distance > 0 AND fare_amount > 0 """ if EVERY_N is None: if phase < 2: # training query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST (pickup_datetime AS STRING), 4)) < 2""".format(base_query) else: query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST( pickup_datetime AS STRING), 4)) = {1}""".format(base_query, phase) else: query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST( pickup_datetime AS STRING)), {1})) = {2}""".format( base_query, EVERY_N, phase) return query query = create_query(2, 100000) ``` Let's pull this query down into a Pandas DataFrame and take a look at some of the statistics. ``` df_valid = bigquery.Client().query(query).to_dataframe() display(df_valid.head()) df_valid.describe() ``` ## Create ML dataset using tf.transform and Dataflow Let's use Cloud Dataflow to read in the BigQuery data and write it out as TFRecord files. Along the way, let's use tf.transform to do scaling and transforming. Using tf.transform allows us to save the metadata to ensure that the appropriate transformations get carried out during prediction as well. `transformed_data` is type `pcollection`. ``` import datetime import tensorflow as tf import apache_beam as beam import tensorflow_transform as tft import tensorflow_metadata as tfmd from tensorflow_transform.beam import impl as beam_impl def is_valid(inputs): """Check to make sure the inputs are valid. 
Args: inputs: dict, dictionary of TableRow data from BigQuery. Returns: True if the inputs are valid and False if they are not. """ try: pickup_longitude = inputs['pickuplon'] dropoff_longitude = inputs['dropofflon'] pickup_latitude = inputs['pickuplat'] dropoff_latitude = inputs['dropofflat'] hourofday = inputs['hourofday'] dayofweek = inputs['dayofweek'] passenger_count = inputs['passengers'] fare_amount = inputs['fare_amount'] return fare_amount >= 2.5 and pickup_longitude > -78 \ and pickup_longitude < -70 and dropoff_longitude > -78 \ and dropoff_longitude < -70 and pickup_latitude > 37 \ and pickup_latitude < 45 and dropoff_latitude > 37 \ and dropoff_latitude < 45 and passenger_count > 0 except: return False def preprocess_tft(inputs): """Preproccess the features and add engineered features with tf transform. Args: dict, dictionary of TableRow data from BigQuery. Returns: Dictionary of preprocessed data after scaling and feature engineering. """ import datetime print(inputs) result = {} result['fare_amount'] = tf.identity(inputs['fare_amount']) # build a vocabulary result['dayofweek'] = tft.string_to_int(inputs['dayofweek']) result['hourofday'] = tf.identity(inputs['hourofday']) # pass through # scaling numeric values result['pickuplon'] = (tft.scale_to_0_1(inputs['pickuplon'])) result['pickuplat'] = (tft.scale_to_0_1(inputs['pickuplat'])) result['dropofflon'] = (tft.scale_to_0_1(inputs['dropofflon'])) result['dropofflat'] = (tft.scale_to_0_1(inputs['dropofflat'])) result['passengers'] = tf.cast(inputs['passengers'], tf.float32) # a cast # arbitrary TF func result['key'] = tf.as_string(tf.ones_like(inputs['passengers'])) # engineered features latdiff = inputs['pickuplat'] - inputs['dropofflat'] londiff = inputs['pickuplon'] - inputs['dropofflon'] result['latdiff'] = tft.scale_to_0_1(latdiff) result['londiff'] = tft.scale_to_0_1(londiff) dist = tf.sqrt(latdiff * latdiff + londiff * londiff) result['euclidean'] = tft.scale_to_0_1(dist) return result def 
preprocess(in_test_mode): """Sets up preprocess pipeline. Args: in_test_mode: bool, False to launch DataFlow job, True to run locally. """ import os import os.path import tempfile from apache_beam.io import tfrecordio from tensorflow_transform.coders import example_proto_coder from tensorflow_transform.tf_metadata import dataset_metadata from tensorflow_transform.tf_metadata import dataset_schema from tensorflow_transform.beam import tft_beam_io from tensorflow_transform.beam.tft_beam_io import transform_fn_io job_name = 'preprocess-taxi-features' + '-' job_name += datetime.datetime.now().strftime('%y%m%d-%H%M%S') if in_test_mode: import shutil print('Launching local job ... hang on') OUTPUT_DIR = './preproc_tft' shutil.rmtree(OUTPUT_DIR, ignore_errors=True) EVERY_N = 100000 else: print('Launching Dataflow job {} ... hang on'.format(job_name)) OUTPUT_DIR = 'gs://{0}/taxifare/preproc_tft/'.format(BUCKET) import subprocess subprocess.call('gsutil rm -r {}'.format(OUTPUT_DIR).split()) EVERY_N = 10000 options = { 'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'), 'temp_location': os.path.join(OUTPUT_DIR, 'tmp'), 'job_name': job_name, 'project': PROJECT, 'num_workers': 1, 'max_num_workers': 1, 'teardown_policy': 'TEARDOWN_ALWAYS', 'no_save_main_session': True, 'direct_num_workers': 1, 'extra_packages': ['tensorflow-transform-0.15.0.tar.gz'] } opts = beam.pipeline.PipelineOptions(flags=[], **options) if in_test_mode: RUNNER = 'DirectRunner' else: RUNNER = 'DataflowRunner' # Set up raw data metadata raw_data_schema = { colname: dataset_schema.ColumnSchema( tf.string, [], dataset_schema.FixedColumnRepresentation()) for colname in 'dayofweek,key'.split(',') } raw_data_schema.update({ colname: dataset_schema.ColumnSchema( tf.float32, [], dataset_schema.FixedColumnRepresentation()) for colname in 'fare_amount,pickuplon,pickuplat,dropofflon,dropofflat'.split(',') }) raw_data_schema.update({ colname: dataset_schema.ColumnSchema( tf.int64, [], 
dataset_schema.FixedColumnRepresentation()) for colname in 'hourofday,passengers'.split(',') }) raw_data_metadata = dataset_metadata.DatasetMetadata( dataset_schema.Schema(raw_data_schema)) # Run Beam with beam.Pipeline(RUNNER, options=opts) as p: with beam_impl.Context(temp_dir=os.path.join(OUTPUT_DIR, 'tmp')): # Save the raw data metadata (raw_data_metadata | 'WriteInputMetadata' >> tft_beam_io.WriteMetadata( os.path.join( OUTPUT_DIR, 'metadata/rawdata_metadata'), pipeline=p)) # Read training data from bigquery and filter rows raw_data = (p | 'train_read' >> beam.io.Read( beam.io.BigQuerySource( query=create_query(1, EVERY_N), use_standard_sql=True)) | 'train_filter' >> beam.Filter(is_valid)) raw_dataset = (raw_data, raw_data_metadata) # Analyze and transform training data transformed_dataset, transform_fn = ( raw_dataset | beam_impl.AnalyzeAndTransformDataset( preprocess_tft)) transformed_data, transformed_metadata = transformed_dataset # Save transformed train data to disk in efficient tfrecord format transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord( os.path.join(OUTPUT_DIR, 'train'), file_name_suffix='.gz', coder=example_proto_coder.ExampleProtoCoder( transformed_metadata.schema)) # Read eval data from bigquery and filter rows raw_test_data = (p | 'eval_read' >> beam.io.Read( beam.io.BigQuerySource( query=create_query(2, EVERY_N), use_standard_sql=True)) | 'eval_filter' >> beam.Filter( is_valid)) raw_test_dataset = (raw_test_data, raw_data_metadata) # Transform eval data transformed_test_dataset = ( (raw_test_dataset, transform_fn) | beam_impl.TransformDataset() ) transformed_test_data, _ = transformed_test_dataset # Save transformed train data to disk in efficient tfrecord format (transformed_test_data | 'WriteTestData' >> tfrecordio.WriteToTFRecord( os.path.join(OUTPUT_DIR, 'eval'), file_name_suffix='.gz', coder=example_proto_coder.ExampleProtoCoder( transformed_metadata.schema))) # Save transformation function to disk for use at serving 
time (transform_fn | 'WriteTransformFn' >> transform_fn_io.WriteTransformFn( os.path.join(OUTPUT_DIR, 'metadata'))) # Change to True to run locally preprocess(in_test_mode=False) ``` This will take __10-15 minutes__. You cannot go on in this lab until your DataFlow job has succesfully completed. ``` %%bash # ls preproc_tft gsutil ls gs://${BUCKET}/taxifare/preproc_tft/ ``` ## Train off preprocessed data ## Now that we have our data ready and verified it is in the correct location we can train our taxifare model locally. ``` %%bash rm -r ./taxi_trained export PYTHONPATH=${PYTHONPATH}:$PWD python3 -m tft_trainer.task \ --train_data_path="gs://${BUCKET}/taxifare/preproc_tft/train*" \ --eval_data_path="gs://${BUCKET}/taxifare/preproc_tft/eval*" \ --output_dir=./taxi_trained \ !ls $PWD/taxi_trained/export/exporter ``` Now let's create fake data in JSON format and use it to serve a prediction with gcloud ai-platform local predict ``` %%writefile /tmp/test.json {"dayofweek":0, "hourofday":17, "pickuplon": -73.885262, "pickuplat": 40.773008, "dropofflon": -73.987232, "dropofflat": 40.732403, "passengers": 2.0} %%bash model_dir=$(ls $PWD/taxi_trained/export/exporter/) gcloud ai-platform local predict \ --model-dir=./taxi_trained/export/exporter/${model_dir} \ --json-instances=/tmp/test.json ``` Copyright 2016-2018 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
github_jupyter
# Conditional WGAN-GP ``` import torch import torch.nn as nn import torch.optim as optim import torchvision import torchvision.datasets as datasets from torch.utils.data import DataLoader import torchvision.transforms as transforms from torch.utils.tensorboard import SummaryWriter import numpy as np import matplotlib.pyplot as plt device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class Discriminator(nn.Module): def __init__(self, channels_img, features_d, num_classes, img_size): super(Discriminator, self).__init__() self.img_size = img_size self.embed = nn.Embedding(num_classes, img_size*img_size) self.disc = nn.Sequential( # input: N x channels_img x 64 x 64 nn.Conv2d( channels_img+1, features_d, kernel_size=4, stride=2, padding=1 ), nn.LeakyReLU(0.2), # _block(in_channels, out_channels, kernel_size, stride, padding) self._block(features_d, features_d * 2, 4, 2, 1), self._block(features_d * 2, features_d * 4, 4, 2, 1), self._block(features_d * 4, features_d * 8, 4, 2, 1), # After all _block img output is 4x4 (Conv2d below makes into 1x1) nn.Conv2d(features_d * 8, 1, kernel_size=4, stride=2, padding=0), ) def _block(self, in_channels, out_channels, kernel_size, stride, padding): return nn.Sequential( nn.Conv2d( in_channels, out_channels, kernel_size, stride, padding, bias=False, ), nn.InstanceNorm2d(out_channels, affine=True), nn.LeakyReLU(0.2), ) def forward(self, x, labels): embedding = self.embed(labels).view(labels.shape[0], 1, self.img_size, self.img_size) x = torch.cat([x, embedding], dim=1) return self.disc(x) class Generator(nn.Module): def __init__(self, channels_noise, channels_img, features_g, num_classes, img_size, embed_size): super(Generator, self).__init__() self.img_size = img_size self.embed = nn.Embedding(num_classes, embed_size) self.net = nn.Sequential( # Input: N x channels_noise x 1 x 1 self._block(channels_noise + embed_size, features_g * 16, 4, 1, 0), # img: 4x4 self._block(features_g * 16, features_g * 8, 4, 2, 1), # 
img: 8x8 self._block(features_g * 8, features_g * 4, 4, 2, 1), # img: 16x16 self._block(features_g * 4, features_g * 2, 4, 2, 1), # img: 32x32 nn.ConvTranspose2d( features_g * 2, channels_img, kernel_size=4, stride=2, padding=1 ), # Output: N x channels_img x 64 x 64 nn.Tanh(), ) def _block(self, in_channels, out_channels, kernel_size, stride, padding): return nn.Sequential( nn.ConvTranspose2d( in_channels, out_channels, kernel_size, stride, padding, bias=False, ), nn.BatchNorm2d(out_channels), nn.ReLU(), ) def forward(self, x, labels): embedding = self.embed(labels).unsqueeze(2).unsqueeze(3) x = torch.cat([x, embedding], dim = 1) x = self.net(x) # print(x.shape) return x def initialize_weights(model): # Initializes weights according to the DCGAN paper for m in model.modules(): if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d)): nn.init.normal_(m.weight.data, 0.0, 0.02) def gradient_penalty(critic, labels, real, fake, device='cpu'): BATCH_SIZE, C, H, W = real.shape epsilon = torch.rand((BATCH_SIZE, 1, 1, 1)).repeat(1,C,H,W).to(device) interpolated_images = real*epsilon + fake*(1-epsilon) mixed_scores = critic(interpolated_images, labels) gradient = torch.autograd.grad( inputs=interpolated_images, outputs=mixed_scores, grad_outputs = torch.ones_like(mixed_scores), create_graph=True, retain_graph = True)[0] gradient = gradient.view(gradient.shape[0], -1) gradient_norm = gradient.norm(2, dim=1) gradient_penalty = torch.mean((gradient_norm - 1)**2) return gradient_penalty def plot_sample(noise, target, gen, labels, k=1): with torch.no_grad(): preds = gen(noise, labels).reshape(-1, 1, 64, 64).detach().cpu().numpy() for i in range(k): # lr = cond[i, 0].detach().cpu().numpy() hr = target[i, 0].detach().cpu().numpy() pred = preds[i, 0] # print(pred) mn = np.min([np.min(hr), np.min(pred)]) mx = np.max([np.max(hr), np.max(pred)]) fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5)) # im = ax1.imshow(lr, vmin=mn, vmax=mx, cmap='gist_ncar_r') # 
plt.colorbar(im, ax=ax1, shrink=0.7) im = ax2.imshow(pred, vmin=mn, vmax=mx, cmap='gist_ncar_r') plt.colorbar(im, ax=ax2, shrink=0.7) im = ax3.imshow(hr, vmin=mn, vmax=mx, cmap='gist_ncar_r') plt.colorbar(im, ax=ax3, shrink=0.7) plt.show() """ Training of DCGAN network with WGAN loss """ import torch import torch.nn as nn import torch.optim as optim import torchvision import torchvision.datasets as datasets import torchvision.transforms as transforms from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter # Hyperparameters etc device = "cuda" if torch.cuda.is_available() else "cpu" LEARNING_RATE = 1e-4 BATCH_SIZE = 64 IMG_SIZE = 64 CHANNELS_IMG = 1 Z_DIM = 100 NUM_EPOCHS = 30 FEATURES_CRITIC = 32 #64 FEATURES_GEN = 32 # 64 CRITIC_ITERATIONS = 5 LAMBDA_GP = 10 NUM_CLASSES = 10 GEN_EMBEDDING = 100 transforms = transforms.Compose( [ transforms.Resize(IMG_SIZE), transforms.ToTensor(), transforms.Normalize( [0.5 for _ in range(CHANNELS_IMG)], [0.5 for _ in range(CHANNELS_IMG)] ), ] ) dataset = datasets.MNIST(root="dataset/", transform=transforms, download=True) #comment mnist and uncomment below if you want to train on CelebA dataset # dataset = datasets.CelebA(root="dataset/", transform=transforms, download=True) loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True) # initialize gen and disc/critic gen = Generator(Z_DIM, CHANNELS_IMG, FEATURES_GEN, NUM_CLASSES, IMG_SIZE, GEN_EMBEDDING).to(device) critic = Discriminator(CHANNELS_IMG, FEATURES_CRITIC, NUM_CLASSES, IMG_SIZE,).to(device) initialize_weights(gen) initialize_weights(critic) # initializate optimizer opt_gen = optim.Adam(gen.parameters(), lr=LEARNING_RATE, betas=(0.0, 0.9)) opt_critic = optim.Adam(critic.parameters(), lr=LEARNING_RATE, betas=(0.0, 0.9)) # for tensorboard plotting fixed_noise = torch.randn(32, Z_DIM, 1, 1).to(device) writer_real = SummaryWriter(f"logs/real") writer_fake = SummaryWriter(f"logs/fake") step = 0 gen.train() critic.train() for epoch in 
range(NUM_EPOCHS): for batch_idx, (real, labels) in enumerate(loader): real = real.to(device) labels = labels.to(device) cur_batch_size = real.shape[0] # Train Critic: max E[critic(real)] - E[critic(fake)] for _ in range(CRITIC_ITERATIONS): noise = torch.randn(cur_batch_size, Z_DIM, 1, 1).to(device) fake = gen(noise, labels) critic_real = critic(real, labels).reshape(-1) critic_fake = critic(fake, labels).reshape(-1) gp = gradient_penalty(critic, labels, real, fake, device=device) loss_critic = -(torch.mean(critic_real) - torch.mean(critic_fake)) + LAMBDA_GP*gp critic.zero_grad() loss_critic.backward(retain_graph=True) opt_critic.step() # Train Generator: max E[critic(gen_fake)] <-> min -E[critic(gen_fake)] gen_fake = critic(fake, labels).reshape(-1) loss_gen = -torch.mean(gen_fake) gen.zero_grad() loss_gen.backward() opt_gen.step() # Print losses occasionally and print to tensorboard if batch_idx % 100 == 0 and batch_idx >= 0: gen.eval() critic.eval() print( f"Epoch [{epoch}/{NUM_EPOCHS}] Batch {batch_idx}/{len(loader)} \ Loss D: {loss_critic:.4f}, loss G: {loss_gen:.4f}" ) noise = torch.randn(cur_batch_size, Z_DIM, 1, 1).to(device) plot_sample(noise, real, gen, labels, k=5) step += 1 gen.train() critic.train() ```
github_jupyter
``` %load_ext autoreload %autoreload 2 from ctapipe.io import EventSource import sys from matplotlib import pyplot as plt import numpy as np %matplotlib inline import sys from scipy.stats import norm from traitlets.config.loader import Config from ctapipe import utils # ctapipe modules from ctapipe.visualization import CameraDisplay from ctapipe.plotting.camera import CameraPlotter from ctapipe.image.extractor import * from ctapipe.containers import PedestalContainer from ctapipe.io.hdf5tableio import HDF5TableWriter, HDF5TableReader from lstchain.calib.camera.r0 import LSTR0Corrections r0calib = LSTR0Corrections( pedestal_path="../../cta-lstchain-extra/calib/camera/pedestal_file_run446_0000.fits", r1_sample_start=2,r1_sample_end=38) # flat field run with interleaved flatfield and pedestal events (for the moment to big for cta-lstchain-extra) run = 472 #datadir = '/ctadata/franca/LST' datadir = '/fefs/onsite/data/20190527' file = f'{datadir}/LST-1.1.Run00{run}.0000.fits.fz' reader = EventSource(file, max_events=None) print(f"\n Read {len(reader.multi_file)} total events in files\n") print(f"{reader.file_list} ") channel=['HG','LG'] # use the tool to write calibration coefficients from lstchain.tools.calc_camera_calibration import CalibrationHDF5Writer tel_id=0 # LST1 for the moment # read first flatfield event for i, event in enumerate(reader): # calibrate r0 --> r1 r0calib.calibrate(event) # select only flatfield events if event.r0.tel[0].trigger_type == 32: continue break print(f"read event id: {event.r0.event_id}, trigger {event.r0.tel[0].trigger_type}") # plot R1 waveform of module [module] def view_waveform(chan=0, pix_id=6,i=0): waveform = event.r1.tel[tel_id].waveform plt.plot(waveform[chan, pix_id], label=f'pixel {pix_id}') plt.title(f"module {module}, pixel {pix_id}, channel {channel[chan]}",) max_now=waveform[chan, pix_id].max() min_now=waveform[chan, pix_id].min() plt.legend() plt.ylabel('DC',fontsize=15) plt.xlabel('ns',fontsize=15) # module number 
module=0 # channel chan=0 # ids of pixel in module pixels_mod=event.lst.tel[0].svc.pixel_ids[module*7:module*7+7] fig = plt.figure(num=0,figsize=(12,12)) for i,pix in enumerate(pixels_mod): view_waveform(chan=chan, pix_id=pix,i=i) #plt.savefig(f"Run{run}_low_level_correction_{channel[chan]}_mod{modu}.png") # plot effect of low-level calibration on module def view_waveform(chan=0, pix_id=6,i=0): plot_id=i*2+1 plt.subplot(7,2,plot_id) plt.plot(event.r0.tel[tel_id].waveform[chan, pix_id,2:38], label='not corrected') plt.plot(event.r1.tel[tel_id].waveform[chan, pix_id], label='corrected') plt.title(f"pixel {pix_id}, channel {channel[chan]}",) mymax=max(newwaveform[chan, pix_id].max(),oldwaveform[chan, pix_id].max()) + 50 mymin=min(newwaveform[chan, pix_id].min(),oldwaveform[chan, pix_id].min()) - 50 plt.ylim(mymin,mymax) plt.legend() plot_id=(i*2)+2 plt.subplot(7,2,plot_id) plt.plot(newwaveform[chan, pix_id]-oldwaveform[chan, pix_id]) plt.ylabel('corrections',fontsize=10) # module number module=0 # ids of pixel in module pixels_mod=event.lst.tel[0].svc.pixel_ids[module*7:module*7+7] # r0 newwaveform = event.r1.tel[tel_id].waveform # R1 oldwaveform = event.r0.tel[tel_id].waveform[:,:,2:38] for i,pix in enumerate(pixels_mod): for chan in(np.arange(2)): plt.figure(num=chan,figsize=(12,24)) # plot waveform of selected channel view_waveform(chan=chan, pix_id=pix,i=i) #plt.savefig(f"Run{run}_low_level_correction_{channel[chan]}_mod{modu}.png") # integrate the charge on 12 ns around the peak value config = Config({ "LocalPeakWindowSum": { "window_shift": 5, "window_width": 11 } }) integrator = LocalPeakWindowSum(config=config) waveform=event.r1.tel[0].waveform image, peakpos = integrator(waveform) fig = plt.figure(figsize=(16, 16)) for chan in(np.arange(2)): ax = plt.subplot(2, 2, chan+1) disp = CameraDisplay(event.inst.subarray.tels[0].camera) disp.image = image[chan] #disp.set_limits_minmax(2000,4000) disp.cmap = plt.cm.coolwarm disp.axes.text(2.0, 0, f'{channel[chan]} 
charge (DC)', rotation=90) disp.add_colorbar() ax = plt.subplot(2, 2, chan+3) disp = CameraDisplay(event.inst.subarray.tels[0].camera) disp.image = peakpos[chan] disp.cmap = plt.cm.coolwarm disp.set_limits_minmax(0,35) disp.axes.text(2.0, 0, f'{channel[chan]} time (ns)', rotation=90) disp.add_colorbar() disp.update() #plt.savefig(f"Run{run}_event_{event.lst.tel[0].evt.event_id}_charge_time.png") # Plot the part of the waveform that is integrated # (this work only after the line above) fig = plt.figure(0,figsize=(12,12)) # consider only 36 samples samples=np.arange(0,36) # chose the module module=0 module_rank=np.where(event.lst.tel[0].svc.module_ids==module) # find pixel index in module pix_in_mod=event.lst.tel[0].svc.pixel_ids[module_rank[0][0]*7:module_rank[0][0]*7+7] for chan in(np.arange(2)): plt.subplot(1,2,chan+1) for i,pix in enumerate(pix_in_mod): # samples used to calculate the charge start=int(peakpos[chan,pix]-integrator.window_shift) stop=int(start+integrator.window_width) used_samples=np.arange(start,stop) used=waveform[chan,pix,start:stop] plt.plot(waveform[chan,pix,], color='b', label='all samples') plt.plot(used_samples,used, color='r', label='integrated samples') if i==0: plt.legend() plt.ylabel("[DC]") plt.xlabel(f"{channel[chan]} waveforms in module {module}") plt.ylim(-100,2500) fig.savefig(f"Run{run}_waverforms_module_{module}.png") # flat field calculations from ctapipe.calib.camera.pedestals import PedestalIntegrator from ctapipe.calib.camera.flatfield import FlasherFlatFieldCalculator # configuration for the pedestal charge integrator ped_config = Config({ "FixedWindowSum": { "window_start": 11, "window_width": 11, } }) # configuration for the flatfield charge integrator ff_config = Config({ "LocalPeakWindowSum": { "window_shift": 4, "window_width": 11, } }) ped_calculator = PedestalIntegrator(tel_id=0, sample_size=100, charge_median_cut_outliers = [-4,4], charge_std_cut_outliers = [-4,4], charge_product="FixedWindowSum", config=ped_config) 
ff_calculator = FlasherFlatFieldCalculator(tel_id = 0, sample_size=100, sample_duration = 1000, charge_cut_outliers = [-0.4,0.4], time_cut_outliers = [0,30], charge_product = "LocalPeakWindowSum", config=ff_config) calib_event=0 ped_event = False ped_initialized = False initialized = False for i, event in enumerate(reader): # create r1 r0calib.calibrate(event) # get link to monitoring containers if not initialized: ped_data = event.mon.tel[tel_id].pedestal ff_data = event.mon.tel[tel_id].flatfield status_data = event.mon.tel[tel_id].pixel_status calib_data = event.mon.tel[tel_id].calibration # if new pedestal calculation if event.lst.tel[0].evt.tib_masked_trigger == 32: if ped_calculator.calculate_pedestals(event): ped_event = True print(f"new pedestal at event n. {event.r0.event_id} ({i+1})") # consider flat field events only after first pedestal event (for pedestal mask initalization) elif event.lst.tel[0].evt.tib_masked_trigger == 1 and event.r1.tel[tel_id].waveform.max()>1000: if ff_calculator.calculate_relative_gain(event): calib_event+=1 print(f"new flatfield at event n. 
{event.r0.event_id} ({i+1})") # consider values only after first flat field event (for flat field mask initialitation) if calib_event > 1: # mask from pedestal and flat-fleid data monitoring_unusable_pixels= np.logical_or(status_data.pedestal_failing_pixels, status_data.flatfield_failing_pixels) # calibration unusable pixels are an OR of all maskes calib_data.unusable_pixels = np.logical_or(monitoring_unusable_pixels,status_data.hardware_failing_pixels) # Extract calibraiton coefficients with F-factor method # Assume fix F2 factor, F2=1+Var(gain)/Mean(Gain)**2 must be known from elsewhere F2 =1.2 # calculate photon-electrons pe = F2*(ff_data.charge_median - ped_data.charge_median)**2/(ff_data.charge_std**2 - ped_data.charge_std**2) masked_pe = np.ma.array(pe, mask=calib_data.unusable_pixels) break # plot results mask = calib_data.unusable_pixels # charge fig = plt.figure(10,figsize=(16, 5)) image = ff_data.charge_median for chan in(np.arange(2)): ax = plt.subplot(1, 2, chan+1) disp = CameraDisplay(event.inst.subarray.tels[tel_id].camera) disp.highlight_pixels(mask[chan]) disp.image = image[chan] disp.cmap = plt.cm.coolwarm disp.axes.text(2.4, 0, 'charge median', rotation=90) disp.add_colorbar() # time fig = plt.figure(11,figsize=(16, 5)) image = ff_data.time_median for chan in(np.arange(2)): ax = plt.subplot(1, 2, chan+1) disp = CameraDisplay(event.inst.subarray.tels[tel_id].camera) disp.highlight_pixels(mask[chan]) disp.image = image[chan] disp.cmap = plt.cm.coolwarm disp.axes.text(2.4, 0, 'time', rotation=90) disp.add_colorbar() #pe fig = plt.figure(12,figsize=(16, 5)) image = pe for chan in(np.arange(2)): ax = plt.subplot(1, 2, chan+1) disp = CameraDisplay(event.inst.subarray.tels[tel_id].camera) disp.highlight_pixels(mask[chan]) disp.set_limits_minmax(0,150) disp.image = image[chan] disp.cmap = plt.cm.coolwarm disp.axes.text(2.4, 0, 'pe', rotation=90) disp.add_colorbar() # plot some histograms for chan in np.arange(2): n_pe = pe[chan] gain_median = 
ff_data.relative_gain_median[chan] charge_median = ff_data.charge_median[chan] charge_std = ff_data.charge_std[chan] median_ped = ped_data.charge_median[chan] ped_std = ped_data.charge_std[chan] # select good pixels select = np.logical_not(mask[chan]) #select = mask[chan] fig = plt.figure(chan,figsize=(12,18)) fig.suptitle(f"channel: {channel[chan]}", fontsize=25) # charge plt.subplot(321) median= int(np.median(charge_median[select])) rms= np.std(charge_median[select]) plt.title(f"Median {median:3.2f}, std {rms:5.0f}") plt.xlabel('charge (ADC)',fontsize=20) plt.ylabel('pixels',fontsize=20) plt.hist(charge_median[select]) # signal std plt.subplot(322) plt.ylabel('pixels',fontsize=20) plt.xlabel('charge std',fontsize=20) median= np.median(charge_std[select]) rms= np.std(charge_std[select]) plt.title(f"Median {median:3.2f}, std {rms:3.2f}") plt.hist(charge_std[select]) # pedestal charge plt.subplot(323) plt.ylabel('pixels',fontsize=20) plt.xlabel('pedestal',fontsize=20) median= np.median(median_ped[select]) rms= np.std(median_ped[select]) plt.title(f"Median {median:3.2f}, std {rms:3.2f}") plt.hist(median_ped[select]) # pedestal std plt.subplot(324) plt.ylabel('pixels',fontsize=20) plt.xlabel('pedestal std',fontsize=20) median= np.median(ped_std[select]) rms= np.std(ped_std[select]) plt.title(f"Median {median:3.2f}, std {rms:3.2f}") plt.hist(ped_std[select]) # relative gain plt.subplot(325) plt.ylabel('pixels',fontsize=20) plt.xlabel('relative gain',fontsize=20) plt.hist(gain_median[select]) median= np.median(gain_median[select]) rms= np.std(gain_median[select]) plt.title(f"Relative gain {median:3.2f}, std {rms:5.2f}") # photon electrons plt.subplot(326) plt.ylabel('pixels',fontsize=20) plt.xlabel('pe',fontsize=20) median= np.median(n_pe[select]) rms= np.std(n_pe[select]) plt.title(f"Median {median:3.2f}, std {rms:3.2f}") plt.hist(n_pe[select],range=(0,200)) # use the tool to write calibration coefficients from lstchain.tools.calc_camera_calibration import 
CalibrationHDF5Writer calibration_tool= CalibrationHDF5Writer() calibration_tool.print_help() # calibration_tool.run(argv=['--config','/astro/users/cassol/soft/python/cta-lstchain/lstchain/tools/camera_calibration_param.json']) # read back the monitoring containers written with the tool calc_camera_calibration.py from ctapipe.containers import FlatFieldContainer, WaveformCalibrationContainer from ctapipe.io.hdf5tableio import HDF5TableWriter, HDF5TableReader ff_data = FlatFieldContainer() cal_data = WaveformCalibrationContainer() #with HDF5TableReader('/astro/users/cassol/soft/python/cta-lstchain/lstchain/tools/calibration.hdf5') as h5_table: with HDF5TableReader('/astro/users/cassol/soft/python/lstchain-test/calibration.hdf5') as h5_table: assert h5_table._h5file.isopen == True for cont in h5_table.read('/tel_0/flatfield', ff_data): print(cont.as_dict()) for calib in h5_table.read('/tel_0/calibration', cal_data): print(calib.as_dict()) #plt.hist(1/calib.dc_to_pe[0], color='r', histtype='step', bins = 50, stacked=True, fill=False) h5_table.close() # Perform some plots fig = plt.figure(13,figsize=(16, 5)) disp = CameraDisplay(event.inst.subarray.tels[0].camera) disp.image = calib.unusable_pixels[chan] disp.set_limits_minmax(0,1) disp.cmap = plt.cm.coolwarm disp.axes.text(2.4, 0, 'failing pixels', rotation=90) disp.add_colorbar() # select=np.logical_not(calib.unusable_pixels[0]) values=1/calib.dc_to_pe[0] fig = plt.figure(12,figsize=(16, 5)) plt.hist(values[select], color='r', histtype='step', bins = 50, stacked=True, fill=False) plt.title(f"ADC per photon-electrons, mean={np.mean(values[select]):5.0f} ADC") ```
github_jupyter
# RadarCOVID-Report ## Data Extraction ``` import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import pycountry import retry import seaborn as sns %matplotlib inline current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 ``` ### Constants ``` from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 ``` ### Parameters ``` environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if 
environment_invalid_shared_diagnoses_dates: invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",") else: invalid_shared_diagnoses_dates = [] invalid_shared_diagnoses_dates ``` ### COVID-19 Cases ``` report_backend_client = \ exposure_notification_io.get_backend_client_with_identifier( backend_identifier=report_backend_identifier) @retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10)) def download_cases_dataframe(): return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv") confirmed_df_ = download_cases_dataframe() confirmed_df_.iloc[0] confirmed_df = confirmed_df_.copy() confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]] confirmed_df.rename( columns={ "date": "sample_date", "iso_code": "country_code", }, inplace=True) def convert_iso_alpha_3_to_alpha_2(x): try: return pycountry.countries.get(alpha_3=x).alpha_2 except Exception as e: logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}") return None confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2) confirmed_df.dropna(inplace=True) confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True) confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_df.sort_values("sample_date", inplace=True) confirmed_df.tail() confirmed_days = pd.date_range( start=confirmed_df.iloc[0].sample_date, end=extraction_datetime) confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"]) confirmed_days_df["sample_date_string"] = \ confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_days_df.tail() def sort_source_regions_for_display(source_regions: list) -> list: if report_backend_identifier in source_regions: source_regions = [report_backend_identifier] + \ list(sorted(set(source_regions).difference([report_backend_identifier]))) else: source_regions = list(sorted(source_regions)) 
return source_regions report_source_regions = report_backend_client.source_regions_for_date( date=extraction_datetime.date()) report_source_regions = sort_source_regions_for_display( source_regions=report_source_regions) report_source_regions def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None): source_regions_at_date_df = confirmed_days_df.copy() source_regions_at_date_df["source_regions_at_date"] = \ source_regions_at_date_df.sample_date.apply( lambda x: source_regions_for_date_function(date=x)) source_regions_at_date_df.sort_values("sample_date", inplace=True) source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \ source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x))) source_regions_at_date_df.tail() #%% source_regions_for_summary_df_ = \ source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy() source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True) source_regions_for_summary_df_.tail() #%% confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"] confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns) for source_regions_group, source_regions_group_series in \ source_regions_at_date_df.groupby("_source_regions_group"): source_regions_set = set(source_regions_group.split(",")) confirmed_source_regions_set_df = \ confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy() confirmed_source_regions_group_df = \ confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \ .reset_index().sort_values("sample_date") confirmed_source_regions_group_df = \ confirmed_source_regions_group_df.merge( confirmed_days_df[["sample_date_string"]].rename( columns={"sample_date_string": "sample_date"}), how="right") confirmed_source_regions_group_df["new_cases"] = \ confirmed_source_regions_group_df["new_cases"].clip(lower=0) confirmed_source_regions_group_df["covid_cases"] = \ 
confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round() confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[confirmed_output_columns] confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan) confirmed_source_regions_group_df.fillna(method="ffill", inplace=True) confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[ confirmed_source_regions_group_df.sample_date.isin( source_regions_group_series.sample_date_string)] confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df) result_df = confirmed_output_df.copy() result_df.tail() #%% result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True) result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left") result_df.sort_values("sample_date_string", inplace=True) result_df.fillna(method="ffill", inplace=True) result_df.tail() #%% result_df[["new_cases", "covid_cases"]].plot() if columns_suffix: result_df.rename( columns={ "new_cases": "new_cases_" + columns_suffix, "covid_cases": "covid_cases_" + columns_suffix}, inplace=True) return result_df, source_regions_for_summary_df_ confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe( report_backend_client.source_regions_for_date) confirmed_es_df, _ = get_cases_dataframe( lambda date: [spain_region_country_code], columns_suffix=spain_region_country_code.lower()) ``` ### Extract API TEKs ``` raw_zip_path_prefix = "Data/TEKs/Raw/" base_backend_identifiers = [report_backend_identifier] multi_backend_exposure_keys_df = \ exposure_notification_io.download_exposure_keys_from_backends( backend_identifiers=report_backend_identifiers, generation_days=backend_generation_days, fail_on_error_backend_identifiers=base_backend_identifiers, save_raw_zip_path_prefix=raw_zip_path_prefix) multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"] multi_backend_exposure_keys_df.rename( 
columns={ "generation_datetime": "sample_datetime", "generation_date_string": "sample_date_string", }, inplace=True) multi_backend_exposure_keys_df.head() early_teks_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.rolling_period < 144].copy() early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6 early_teks_df[early_teks_df.sample_date_string != extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) early_teks_df[early_teks_df.sample_date_string == extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[ "sample_date_string", "region", "key_data"]] multi_backend_exposure_keys_df.head() active_regions = \ multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() active_regions multi_backend_summary_df = multi_backend_exposure_keys_df.groupby( ["sample_date_string", "region"]).key_data.nunique().reset_index() \ .pivot(index="sample_date_string", columns="region") \ .sort_index(ascending=False) multi_backend_summary_df.rename( columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) multi_backend_summary_df.rename_axis("sample_date", inplace=True) multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int) multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days) multi_backend_summary_df.head() def compute_keys_cross_sharing(x): teks_x = x.key_data_x.item() common_teks = set(teks_x).intersection(x.key_data_y.item()) common_teks_fraction = len(common_teks) / len(teks_x) return pd.Series(dict( common_teks=common_teks, common_teks_fraction=common_teks_fraction, )) multi_backend_exposure_keys_by_region_df = \ multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index() multi_backend_exposure_keys_by_region_df["_merge"] = True multi_backend_exposure_keys_by_region_combination_df = \ 
multi_backend_exposure_keys_by_region_df.merge( multi_backend_exposure_keys_by_region_df, on="_merge") multi_backend_exposure_keys_by_region_combination_df.drop( columns=["_merge"], inplace=True) if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1: multi_backend_exposure_keys_by_region_combination_df = \ multi_backend_exposure_keys_by_region_combination_df[ multi_backend_exposure_keys_by_region_combination_df.region_x != multi_backend_exposure_keys_by_region_combination_df.region_y] multi_backend_exposure_keys_cross_sharing_df = \ multi_backend_exposure_keys_by_region_combination_df \ .groupby(["region_x", "region_y"]) \ .apply(compute_keys_cross_sharing) \ .reset_index() multi_backend_cross_sharing_summary_df = \ multi_backend_exposure_keys_cross_sharing_df.pivot_table( values=["common_teks_fraction"], columns="region_x", index="region_y", aggfunc=lambda x: x.item()) multi_backend_cross_sharing_summary_df multi_backend_without_active_region_exposure_keys_df = \ multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier] multi_backend_without_active_region = \ multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() multi_backend_without_active_region exposure_keys_summary_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.region == report_backend_identifier] exposure_keys_summary_df.drop(columns=["region"], inplace=True) exposure_keys_summary_df = \ exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame() exposure_keys_summary_df = \ exposure_keys_summary_df.reset_index().set_index("sample_date_string") exposure_keys_summary_df.sort_index(ascending=False, inplace=True) exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) exposure_keys_summary_df.head() ``` ### Dump API TEKs ``` tek_list_df = multi_backend_exposure_keys_df[ 
["sample_date_string", "region", "key_data"]].copy() tek_list_df["key_data"] = tek_list_df["key_data"].apply(str) tek_list_df.rename(columns={ "sample_date_string": "sample_date", "key_data": "tek_list"}, inplace=True) tek_list_df = tek_list_df.groupby( ["sample_date", "region"]).tek_list.unique().reset_index() tek_list_df["extraction_date"] = extraction_date tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour tek_list_path_prefix = "Data/TEKs/" tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json" tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json" tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json" for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]: os.makedirs(os.path.dirname(path), exist_ok=True) tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier] tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json( tek_list_current_path, lines=True, orient="records") tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json( tek_list_daily_path, lines=True, orient="records") tek_list_base_df.to_json( tek_list_hourly_path, lines=True, orient="records") tek_list_base_df.head() ``` ### Load TEK Dumps ``` import glob def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame: extracted_teks_df = pd.DataFrame(columns=["region"]) file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json")))) if limit: file_paths = file_paths[:limit] for file_path in file_paths: logging.info(f"Loading TEKs from '{file_path}'...") iteration_extracted_teks_df = pd.read_json(file_path, lines=True) extracted_teks_df = extracted_teks_df.append( iteration_extracted_teks_df, sort=False) extracted_teks_df["region"] = \ extracted_teks_df.region.fillna(spain_region_country_code).copy() if region: extracted_teks_df = \ 
extracted_teks_df[extracted_teks_df.region == region] return extracted_teks_df daily_extracted_teks_df = load_extracted_teks( mode="Daily", region=report_backend_identifier, limit=tek_dumps_load_limit) daily_extracted_teks_df.head() exposure_keys_summary_df_ = daily_extracted_teks_df \ .sort_values("extraction_date", ascending=False) \ .groupby("sample_date").tek_list.first() \ .to_frame() exposure_keys_summary_df_.index.name = "sample_date_string" exposure_keys_summary_df_["tek_list"] = \ exposure_keys_summary_df_.tek_list.apply(len) exposure_keys_summary_df_ = exposure_keys_summary_df_ \ .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \ .sort_index(ascending=False) exposure_keys_summary_df = exposure_keys_summary_df_ exposure_keys_summary_df.head() ``` ### Daily New TEKs ``` tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply( lambda x: set(sum(x, []))).reset_index() tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True) tek_list_df.head() def compute_teks_by_generation_and_upload_date(date): day_new_teks_set_df = tek_list_df.copy().diff() try: day_new_teks_set = day_new_teks_set_df[ day_new_teks_set_df.index == date].tek_list.item() except ValueError: day_new_teks_set = None if pd.isna(day_new_teks_set): day_new_teks_set = set() day_new_teks_df = daily_extracted_teks_df[ daily_extracted_teks_df.extraction_date == date].copy() day_new_teks_df["shared_teks"] = \ day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set)) day_new_teks_df["shared_teks"] = \ day_new_teks_df.shared_teks.apply(len) day_new_teks_df["upload_date"] = date day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True) day_new_teks_df = day_new_teks_df[ ["upload_date", "generation_date", "shared_teks"]] day_new_teks_df["generation_to_upload_days"] = \ (pd.to_datetime(day_new_teks_df.upload_date) - pd.to_datetime(day_new_teks_df.generation_date)).dt.days day_new_teks_df = 
day_new_teks_df[day_new_teks_df.shared_teks > 0] return day_new_teks_df shared_teks_generation_to_upload_df = pd.DataFrame() for upload_date in daily_extracted_teks_df.extraction_date.unique(): shared_teks_generation_to_upload_df = \ shared_teks_generation_to_upload_df.append( compute_teks_by_generation_and_upload_date(date=upload_date)) shared_teks_generation_to_upload_df \ .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True) shared_teks_generation_to_upload_df.tail() today_new_teks_df = \ shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.upload_date == extraction_date].copy() today_new_teks_df.tail() if not today_new_teks_df.empty: today_new_teks_df.set_index("generation_to_upload_days") \ .sort_index().shared_teks.plot.bar() generation_to_upload_period_pivot_df = \ shared_teks_generation_to_upload_df[ ["upload_date", "generation_to_upload_days", "shared_teks"]] \ .pivot(index="upload_date", columns="generation_to_upload_days") \ .sort_index(ascending=False).fillna(0).astype(int) \ .droplevel(level=0, axis=1) generation_to_upload_period_pivot_df.head() new_tek_df = tek_list_df.diff().tek_list.apply( lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index() new_tek_df.rename(columns={ "tek_list": "shared_teks_by_upload_date", "extraction_date": "sample_date_string",}, inplace=True) new_tek_df.tail() shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[ shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \ [["upload_date", "shared_teks"]].rename( columns={ "upload_date": "sample_date_string", "shared_teks": "shared_teks_uploaded_on_generation_date", }) shared_teks_uploaded_on_generation_date_df.head() estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \ .groupby(["upload_date"]).shared_teks.max().reset_index() \ .sort_values(["upload_date"], ascending=False) \ .rename(columns={ "upload_date": "sample_date_string", "shared_teks": 
"shared_diagnoses", }) invalid_shared_diagnoses_dates_mask = \ estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates) estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0 estimated_shared_diagnoses_df.head() ``` ### Hourly New TEKs ``` hourly_extracted_teks_df = load_extracted_teks( mode="Hourly", region=report_backend_identifier, limit=25) hourly_extracted_teks_df.head() hourly_new_tek_count_df = hourly_extracted_teks_df \ .groupby("extraction_date_with_hour").tek_list. \ apply(lambda x: set(sum(x, []))).reset_index().copy() hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \ .sort_index(ascending=True) hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff() hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply( lambda x: len(x) if not pd.isna(x) else 0) hourly_new_tek_count_df.rename(columns={ "new_tek_count": "shared_teks_by_upload_date"}, inplace=True) hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[ "extraction_date_with_hour", "shared_teks_by_upload_date"]] hourly_new_tek_count_df.head() hourly_summary_df = hourly_new_tek_count_df.copy() hourly_summary_df.set_index("extraction_date_with_hour", inplace=True) hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index() hourly_summary_df["datetime_utc"] = pd.to_datetime( hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H") hourly_summary_df.set_index("datetime_utc", inplace=True) hourly_summary_df = hourly_summary_df.tail(-1) hourly_summary_df.head() ``` ### Official Statistics ``` import requests import pandas.io.json official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics") official_stats_response.raise_for_status() official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json()) official_stats_df = official_stats_df_.copy() official_stats_df["date"] = 
pd.to_datetime(official_stats_df["date"], dayfirst=True) official_stats_df.head() official_stats_column_map = { "date": "sample_date", "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated", "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated", } accumulated_suffix = "_accumulated" accumulated_values_columns = \ list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values())) interpolated_values_columns = \ list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns)) official_stats_df = \ official_stats_df[official_stats_column_map.keys()] \ .rename(columns=official_stats_column_map) official_stats_df["extraction_date"] = extraction_date official_stats_df.head() official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json" previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True) previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True) official_stats_df = official_stats_df.append(previous_official_stats_df) official_stats_df.head() official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)] official_stats_df.sort_values("extraction_date", ascending=False, inplace=True) official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True) official_stats_df.head() official_stats_stored_df = official_stats_df.copy() official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d") official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True) official_stats_df.drop(columns=["extraction_date"], inplace=True) official_stats_df = confirmed_days_df.merge(official_stats_df, how="left") official_stats_df.sort_values("sample_date", ascending=False, inplace=True) official_stats_df.head() official_stats_df[accumulated_values_columns] = \ 
official_stats_df[accumulated_values_columns] \ .astype(float).interpolate(limit_area="inside") official_stats_df[interpolated_values_columns] = \ official_stats_df[accumulated_values_columns].diff(periods=-1) official_stats_df.drop(columns="sample_date", inplace=True) official_stats_df.head() ``` ### Data Merge ``` result_summary_df = exposure_keys_summary_df.merge( new_tek_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = result_summary_df.merge( official_stats_df, on=["sample_date_string"], how="outer") result_summary_df.head() result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df = confirmed_es_df.tail(daily_summary_days).merge( result_summary_df, on=["sample_date_string"], how="left") result_summary_df.head() result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string) result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left") result_summary_df.set_index(["sample_date", "source_regions"], inplace=True) result_summary_df.drop(columns=["sample_date_string"], inplace=True) result_summary_df.sort_index(ascending=False, inplace=True) result_summary_df.head() with pd.option_context("mode.use_inf_as_na", True): result_summary_df = result_summary_df.fillna(0).astype(int) result_summary_df["teks_per_shared_diagnosis"] = \ (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0) result_summary_df["shared_diagnoses_per_covid_case"] = \ (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0) 
def compute_aggregated_results_summary(days) -> pd.DataFrame:
    """Aggregate the daily summary over a trailing rolling window.

    Sums the per-day counters over `days`-day windows and recomputes the
    derived ratio columns on the aggregated totals. Reads the module-level
    `result_summary_df` (most-recent-first); returns a frame sorted the
    same way.
    """
    aggregated_result_summary_df = result_summary_df.copy()
    # Zero out COVID cases on days with no shared diagnoses so the ratio
    # denominators only count days that actually contributed uploads.
    aggregated_result_summary_df["covid_cases_for_ratio"] = \
        aggregated_result_summary_df.covid_cases.mask(
            aggregated_result_summary_df.shared_diagnoses == 0, 0)
    aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
        aggregated_result_summary_df.covid_cases_es.mask(
            aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
    # rolling() needs ascending dates; restore descending order afterwards.
    aggregated_result_summary_df = aggregated_result_summary_df \
        .sort_index(ascending=True).fillna(0).rolling(days).agg({
        "covid_cases": "sum",
        "covid_cases_es": "sum",
        "covid_cases_for_ratio": "sum",
        "covid_cases_for_ratio_es": "sum",
        "shared_teks_by_generation_date": "sum",
        "shared_teks_by_upload_date": "sum",
        "shared_diagnoses": "sum",
        "shared_diagnoses_es": "sum",
    }).sort_index(ascending=False)

    # Treat inf (division artifacts) as NA so fillna(0) catches both.
    with pd.option_context("mode.use_inf_as_na", True):
        aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)

    # NOTE(review): the daily table divides TEKs by shared_diagnoses, but
    # here the denominator is covid_cases_for_ratio — confirm this is
    # intentional and not a copy-paste slip.
    aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
        (aggregated_result_summary_df.shared_teks_by_upload_date /
         aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
    aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
        (aggregated_result_summary_df.shared_diagnoses /
         aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
    aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
        (aggregated_result_summary_df.shared_diagnoses_es /
         aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)

    return aggregated_result_summary_df
# Display the 7-day aggregate computed in the previous cell.
last_7_days_summary
# NOTE(review): this is labelled as the 14-day window but passes days=13 —
# looks like an off-by-one; confirm whether 13 is intentional.
aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13)
# Record [1] skips the most recent (potentially partial) day, taking the
# last fully-reported window instead.
last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1]
last_14_days_summary
= result_summary_df.copy() result_summary_df = result_summary_df[summary_columns] result_summary_with_display_names_df = result_summary_df \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) result_summary_with_display_names_df ``` ### Daily Summary Plots ``` result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \ .droplevel(level=["source_regions"]) \ .rename_axis(index=display_column_name_mapping) \ .rename(columns=display_column_name_mapping) summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar( title=f"Daily Summary", rot=45, subplots=True, figsize=(15, 30), legend=False) ax_ = summary_ax_list[0] ax_.get_figure().tight_layout() ax_.get_figure().subplots_adjust(top=0.95) _ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist())) for percentage_column in summary_percentage_columns: percentage_column_index = summary_columns.index(percentage_column) summary_ax_list[percentage_column_index].yaxis \ .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0)) ``` ### Daily Generation to Upload Period Table ``` display_generation_to_upload_period_pivot_df = \ generation_to_upload_period_pivot_df \ .head(backend_generation_days) display_generation_to_upload_period_pivot_df \ .head(backend_generation_days) \ .rename_axis(columns=display_column_name_mapping) \ .rename_axis(index=display_column_name_mapping) fig, generation_to_upload_period_pivot_table_ax = plt.subplots( figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df))) generation_to_upload_period_pivot_table_ax.set_title( "Shared TEKs Generation to Upload Period Table") sns.heatmap( data=display_generation_to_upload_period_pivot_df .rename_axis(columns=display_column_name_mapping) .rename_axis(index=display_column_name_mapping), fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax) generation_to_upload_period_pivot_table_ax.get_figure().tight_layout() 
def format_multi_backend_cross_sharing_fraction(x):
    """Render a cross-backend sharing fraction for the HTML table.

    Missing values (NaN/None) render as "-", fractions that round to
    0.0% render as an empty cell, and everything else as a percentage
    with one decimal place.
    """
    if pd.isna(x):
        return "-"
    rounded_percentage = round(x * 100, 1)
    return "" if rounded_percentage == 0 else f"{x:.1%}"
def get_temporary_image_path() -> str:
    """Return a fresh, collision-free PNG path in the system temp directory."""
    file_name = str(uuid.uuid4()) + ".png"
    return os.path.join(tempfile.gettempdir(), file_name)

def save_temporary_plot_image(ax):
    """Save the figure behind *ax* (or the first axis of an axis array) to a
    temporary PNG and return its path."""
    target_ax = ax[0] if isinstance(ax, np.ndarray) else ax
    image_path = get_temporary_image_path()
    target_ax.get_figure().savefig(image_path)
    return image_path

def save_temporary_dataframe_image(df):
    """Render *df*, styled with the module-level display_formatters, to a
    temporary PNG and return its path."""
    import dataframe_image as dfi  # imported lazily: only needed for image export
    styled_df = df.copy().style.format(display_formatters)
    image_path = get_temporary_image_path()
    dfi.export(styled_df, image_path)
    return image_path

def generate_summary_api_results(df: pd.DataFrame) -> list:
    """Convert the summary frame into JSON-friendly records.

    Dates become "YYYY-MM-DD" strings and the comma-separated
    source-regions field becomes a list.
    """
    records_df = df.reset_index().copy()
    records_df["sample_date_string"] = \
        records_df["sample_date"].dt.strftime("%Y-%m-%d")
    records_df["source_regions"] = \
        records_df["source_regions"].apply(lambda regions: regions.split(","))
    return records_df.to_dict(orient="records")
def format_shared_diagnoses_per_covid_case(value) -> str:
    """Format a usage ratio for the tweet body.

    Zero (no data) renders as an en dash; any other ratio is shown as an
    upper bound with two decimal places, e.g. "≤5.00%".
    """
    return "–" if value == 0 else f"≤{value:.2%}"
github_jupyter
def reduce_mem_usage(df):
    """Downcast numeric columns of *df* to the smallest dtype that holds them.

    Mutates *df* in place (and also returns it). Integer-valued columns are
    stored as the narrowest (u)int type that fits their min/max; everything
    else numeric becomes float32. NaNs are filled with (column min - 1) so
    integer dtypes become possible.

    Returns:
        (df, NAlist) — the downcast frame and the list of column names
        whose missing values were filled. Callers must treat NAlist
        columns accordingly, since the fill changes data values.
    """
    start_mem_usg = df.memory_usage().sum() / 1024**2
    print("Memory usage of properties dataframe is :",start_mem_usg," MB")
    NAlist = [] # Keeps track of columns that have missing values filled in.
    for col in df.columns:
        if df[col].dtype != object: # Exclude strings
            # Print current column type
            print("******************************")
            print("Column: ",col)
            print("dtype before: ",df[col].dtype)
            # make variables for Int, max and min
            IsInt = False
            mx = df[col].max()
            mn = df[col].min()
            # Integer does not support NA, therefore, NA needs to be filled
            # (isfinite also catches +/-inf, which are filled the same way)
            if not np.isfinite(df[col]).all():
                NAlist.append(col)
                df[col].fillna(mn-1,inplace=True)
            # test if column can be converted to an integer
            asint = df[col].fillna(0).astype(np.int64)
            result = (df[col] - asint)
            result = result.sum()
            # NOTE(review): a summed-residual tolerance of ±0.01 can let
            # opposite-signed fractional parts cancel — confirm acceptable.
            if result > -0.01 and result < 0.01:
                IsInt = True
            # Make Integer/unsigned Integer datatypes
            if IsInt:
                if mn >= 0:
                    # Bounds are exclusive, so a max of exactly 255 falls
                    # through to uint16 — conservative but safe.
                    if mx < 255:
                        df[col] = df[col].astype(np.uint8)
                    elif mx < 65535:
                        df[col] = df[col].astype(np.uint16)
                    elif mx < 4294967295:
                        df[col] = df[col].astype(np.uint32)
                    else:
                        df[col] = df[col].astype(np.uint64)
                else:
                    if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:
                        df[col] = df[col].astype(np.int8)
                    elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:
                        df[col] = df[col].astype(np.int16)
                    elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:
                        df[col] = df[col].astype(np.int32)
                    elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:
                        df[col] = df[col].astype(np.int64)
            # Make float datatypes 32 bit
            else:
                df[col] = df[col].astype(np.float32)
            # Print new column type
            print("dtype after: ",df[col].dtype)
            print("******************************")
    # Print final result
    print("___MEMORY USAGE AFTER COMPLETION:___")
    mem_usg = df.memory_usage().sum() / 1024**2
    print("Memory usage is: ",mem_usg," MB")
    print("This is ",100*mem_usg/start_mem_usg,"% of the initial size")
    return df, NAlist
def _add_pairwise_change_features(df, n_features=13):
    """Add f{i}_change = f{i}_x - f{i}_y difference columns in place.

    Replaces the 13 hand-written assignments that were duplicated for each
    frame. `n_features` generalizes the hard-coded feature count. Returns
    the same frame for convenience.
    """
    for i in range(1, n_features + 1):
        df[f"f{i}_change"] = df[f"f{i}_x"] - df[f"f{i}_y"]
    return df

x_tr = _add_pairwise_change_features(x_tr)
x_val = _add_pairwise_change_features(x_val)

# Re-shrink dtypes after adding the new columns.
x_tr, NAlist = reduce_mem_usage(x_tr)
x_val, NAlist = reduce_mem_usage(x_val)
test['f6_change'] = test['f6_x'] - test['f6_y'] test['f7_change'] = test['f7_x'] - test['f7_y'] test['f8_change'] = test['f8_x'] - test['f8_y'] test['f9_change'] = test['f9_x'] - test['f9_y'] test['f10_change'] = test['f10_x'] - test['f10_y'] test['f11_change'] = test['f11_x'] - test['f11_y'] test['f12_change'] = test['f12_x'] - test['f12_y'] test['f13_change'] = test['f13_x'] - test['f13_y'] del features test, NAlist = reduce_mem_usage(test) test_id = test.pop('id') predictions = clf.predict(test, num_iteration=clf.best_iteration) sub["id"] = test_id sub["is_chat"] = predictions sub.to_csv("submission.csv", index=False) sub.head() ```
github_jupyter
interest = 0
PATH = "./model/"+str(different_models[interest])+"/model.ckpt"
root = "CLUSTERING_BIGGEST_SQUARE/train"
# FIX: the original passed bare open(...) handles straight into
# pickle.load, leaking the file descriptors; context managers close them
# deterministically.
with open("./model/"+str(different_models[interest])+"/pca.pkl", 'rb') as pca_file:
    pca = pickle.load(pca_file)
with open("./model/"+str(different_models[interest])+"/kmeans.pkl", 'rb') as kmeans_file:
    kmeans = pickle.load(kmeans_file)
model = torch.load(PATH)
model.eval()
sns.heatmap(cfs_mat[:4,:], annot=True, fmt='', ax=ax, linewidths=.9, yticklabels=associations) fig, ax = plt.subplots(figsize=(20,10)) print(cfs_mat[:4,:].shape) print( np.sum( cfs_mat[:4,:], axis=1 ).shape ) print( ( cfs_mat[:4,:].T/np.sum(cfs_mat[:4,:], axis=1) ).shape ) plt.title("Confusion Matrix Clusters DeepClustering model % SCALE ") sns.heatmap( (cfs_mat[:4,:].T/np.sum(cfs_mat[:4,:], axis=1)).T, annot=True, fmt='.2%', cmap='Blues' , yticklabels=associations) ```
github_jupyter
def ndmatmul(A, B):
    """Multiply a rank-3 tensor A (batch x L x K) by a matrix B (K x M),
    returning a rank-3 result, by flattening A to 2-D for tf.matmul."""
    # -1 stands in for any dimension unknown at graph-construction time.
    get_shape = lambda T, i: T.get_shape()[i] if T.get_shape()[i].value is not None else -1
    X = tf.reshape(A, shape=[-1, get_shape(A, -1)])
    return tf.reshape(tf.matmul(X, B), shape=[get_shape(A, 0), get_shape(A, 1), get_shape(B, 1)])

def partition(l, k):
    """Split list *l* into consecutive chunks of size *k* (last may be shorter)."""
    return [l[i:i + k] for i in range(0, len(l), k)]

class RNN:
    """TF1 recurrent net trained to read a binary string left-to-right and
    emit, at each step, the integer value of the prefix seen so far."""

    def __init__(self):
        tf.set_random_seed(1)
        N = 10          # hidden-state width
        L = self.L = 5  # sequence length per training window
        self.input = tf.placeholder(dtype=tf.float32, shape=[None, L, 1])
        self.desired = tf.placeholder(dtype=tf.float32, shape=[None, L, 1])
        self.prev_state = tf.placeholder(dtype=tf.float32, shape=[None, N])
        xv = xavier_initializer()
        ones = tf.initializers.ones()
        # Randomized name avoids variable-scope collisions when the cell is
        # re-instantiated in the same graph/session (notebook re-runs).
        self.rnn_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=N, activation=tf.nn.leaky_relu,
                                                    name="" + str(
                                                        random.random()))  # ,kernel_initializer=xv,bias_initializer=ones)
        self.wy = tf.Variable(xv(shape=[N, 1]), dtype=tf.float32)
        self.by = tf.Variable(ones(shape=[1, 1]), dtype=tf.float32)
        self.hidden_acts, self.states = tf.nn.dynamic_rnn(self.rnn_cell, inputs=self.input,
                                                          initial_state=self.prev_state)  # None x L x N
        self.out = tf.nn.leaky_relu(ndmatmul(self.hidden_acts, self.wy) + self.by)
        self.loss = tf.reduce_sum((self.out - self.desired) ** 2)
        opt = tf.train.AdamOptimizer()
        grads = opt.compute_gradients(self.loss)
        # Idiom fix: `is not None` instead of `!= None` for the None checks.
        clipped = zip(tf.clip_by_global_norm([grad for grad, var in grads if grad is not None], 10)[0],
                      [var for grad, var in grads if grad is not None])
        self.train_ = opt.apply_gradients(clipped)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def train(self):
        """Train on every length-L binary string; targets are running prefix values."""
        L = self.L
        sess = self.sess
        X = []
        Y = []
        for i in range(2 ** L):
            x = bin(i)[2:]
            x = "0" * (L - len(x)) + x
            x = list(map(int, x))
            y = [x[0]]
            for i in x[1:]:
                y.append(y[-1] * 2 + i)
            X.append(x)
            Y.append(y)
        # NOTE(review): range starts at 1, so sample 0 (all-zero string) is
        # never trained on — confirm whether that exclusion is intended.
        indices = list(range(1, 2 ** L))
        random.shuffle(indices)
        X = [X[i] for i in indices]
        Y = [Y[i] for i in indices]
        Xs = partition(X, 50)
        Ys = partition(Y, 50)
        # Pre-compute zero states for the full-size and the remainder batch.
        zero_state_0 = self.rnn_cell.zero_state(batch_size=len(Xs[0]), dtype=tf.float32).eval(session=sess)
        zero_state_1 = self.rnn_cell.zero_state(batch_size=len(Xs[-1]), dtype=tf.float32).eval(session=sess)
        for epoch in range(1000):
            total_loss = 0
            for X, Y in zip(Xs, Ys):
                zero_state = zero_state_0 if len(X) == len(Xs[0]) else zero_state_1
                _, loss, curr_states = sess.run([self.train_, self.loss, self.states],
                                                feed_dict={self.input: np.array(X).reshape([-1, L, 1]),
                                                           self.desired: np.array(Y).reshape([-1, L, 1]),
                                                           self.prev_state: zero_state})
                total_loss += loss
            if epoch % 100 == 0:
                mean_loss = total_loss / 2 ** L
                # FIX: original used Python-2 `print mean_loss`, which is a
                # SyntaxError on Python 3 (the duplicate cell below already
                # uses the function form).
                print(mean_loss)

    def run(self, seq):
        """Feed the binary string *seq* through the net in L-sized windows,
        carrying the hidden state across windows; returns per-step outputs."""
        sess = self.sess
        L = self.L
        outputs = []
        prev_state = self.rnn_cell.zero_state(batch_size=1, dtype=tf.float32).eval(session=sess)
        while len(seq) >= L:
            s = seq[:L]
            inp = list(map(int, s))
            out, prev_state = self.sess.run([self.out, self.states],
                                            feed_dict={self.input: np.array(inp).reshape([1, -1, 1]),
                                                       self.prev_state: prev_state})
            outputs.extend(out.tolist()[0][:L])
            seq = seq[L:]
        if len(seq) > 0:
            # Pad the final partial window with zeros; keep only the real steps.
            s = seq + "0" * (L - len(seq))
            inp = list(map(int, s))
            out = self.sess.run([self.out], feed_dict={self.input: np.array(inp).reshape([1, -1, 1]),
                                                       self.prev_state: prev_state})[0]
            outputs.extend(out.tolist()[0][:len(seq)])
        return outputs

rnn = RNN()
rnn.train()
in range(0, len(l), k)] class RNN: def __init__(self): tf.set_random_seed(1) N = 10 L = self.L = 5 self.input = tf.placeholder(dtype=tf.float32, shape=[None, L, 1]) self.desired = tf.placeholder(dtype=tf.float32, shape=[None, L, 1]) self.prev_state = tf.placeholder(dtype=tf.float32, shape=[None, N]) xv = xavier_initializer() ones = tf.initializers.ones() self.rnn_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=N, activation=tf.nn.leaky_relu, name="" + str( random.random())) # ,kernel_initializer=xv,bias_initializer=ones) self.wy = tf.Variable(xv(shape=[N, 1]), dtype=tf.float32) self.by = tf.Variable(ones(shape=[1, 1]), dtype=tf.float32) self.hidden_acts, self.states = tf.nn.dynamic_rnn(self.rnn_cell, inputs=self.input, initial_state=self.prev_state) # None x L x N self.out = tf.nn.leaky_relu(ndmatmul(self.hidden_acts, self.wy) + self.by) self.loss = tf.reduce_sum((self.out - self.desired) ** 2) opt = tf.train.AdamOptimizer() grads = opt.compute_gradients(self.loss) clipped = zip(tf.clip_by_global_norm([grad for grad, var in grads if grad != None], 10)[0], [var for grad, var in grads if grad != None]) self.train_ = opt.apply_gradients(clipped) self.sess = tf.Session() self.sess.run(tf.global_variables_initializer()) def train(self): L = self.L sess = self.sess X = [] Y = [] for i in range(2 ** L): x = bin(i)[2:] x = "0" * (L - len(x)) + x x = list(map(int, x)) y = [x[0]] for i in x[1:]: y.append(y[-1] * 2 + i) X.append(x) Y.append(y) indices = list(range(1, 2 ** L)) random.shuffle(indices) X = [X[i] for i in indices] Y = [Y[i] for i in indices] Xs = partition(X, 50) Ys = partition(Y, 50) zero_state_0 = self.rnn_cell.zero_state(batch_size=len(Xs[0]), dtype=tf.float32).eval(session=sess) zero_state_1 = self.rnn_cell.zero_state(batch_size=len(Xs[-1]), dtype=tf.float32).eval(session=sess) for epoch in range(1000): total_loss = 0 for X, Y in zip(Xs, Ys): zero_state = zero_state_0 if len(X) == len(Xs[0]) else zero_state_1 _, loss, curr_states = sess.run([self.train_, 
self.loss, self.states], feed_dict={self.input: np.array(X).reshape([-1, L, 1]), self.desired: np.array(Y).reshape([-1, L, 1]), self.prev_state: zero_state}) total_loss += loss if epoch % 100 == 0: mean_loss = total_loss / 2 ** L print(mean_loss) def run(self, seq): sess = self.sess L = self.L outputs = [] prev_state = self.rnn_cell.zero_state(batch_size=1, dtype=tf.float32).eval(session=sess) while len(seq) >= L: s = seq[:L] inp = list(map(int, s)) out, prev_state = self.sess.run([self.out, self.states], feed_dict={self.input: np.array(inp).reshape([1, -1, 1]), self.prev_state: prev_state}) outputs.extend(out.tolist()[0][:L]) seq = seq[L:] if len(seq) > 0: s = seq + "0" * (L - len(seq)) inp = list(map(int, s)) out = self.sess.run([self.out], feed_dict={self.input: np.array(inp).reshape([1, -1, 1]), self.prev_state: prev_state})[ 0] outputs.extend(out.tolist()[0][:len(seq)]) return outputs rnn = RNN() rnn.train() rnn.run("1000") ```
github_jupyter
``` import os import numpy as np import pandas as pd import scipy.io dataset_name = 'citeseer' data_dir = os.path.join('../dataset/raw', dataset_name) fn = os.path.join(data_dir, 'ind.{}.mat'.format(dataset_name)) data = scipy.io.loadmat(fn) n_train = data['all_x'].shape[0] n_test = data['tx'].shape[0] test_indices = np.squeeze(data['test_idx']) print('num train: {} num test: {}'.format(n_train, n_test)) testIds = set(test_indices) #Remove any test node that only links to the test nodes def load_graph(fn): graph = {} with open(fn) as in_csv: for line in in_csv: tokens = line.strip().split(',') nodeIDs = [int(t) for t in tokens] key = nodeIDs[0] neighbors = nodeIDs[1:] graph[key] = neighbors return graph graph_fn = os.path.join(data_dir, 'ind.{}.graph.csv'.format(dataset_name)) gp = load_graph(graph_fn) # Graph should contain the same number of train data as all_x train_indices = [key for key in gp if key not in testIds and key < n_train] assert(len(train_indices) == n_train) # get valid train indices train_indices = [key for key in gp if key not in testIds] extra_train_indices = [idx for idx in train_indices if idx >= n_train] #print(len(train_indices)) #print(len(extra_train_indices)) train_indices = [idx for idx in train_indices if idx not in extra_train_indices] #print(len(train_indices)) ##################################################################################### valid_train_nodes = [] for nodeId in train_indices: vertices = gp[nodeId] num_nodes = len(vertices) num_test_nodes = len([v for v in vertices if v in testIds or v in extra_train_indices]) if num_nodes > num_test_nodes: valid_train_nodes.append(nodeId) print('total train: {}'.format(n_train)) print('total valid train: {}'.format(len(valid_train_nodes))) # figure out the trainId to keep (starting from 0) valid_train_ids = [idx for idx, trainId in enumerate(train_indices) if trainId in valid_train_nodes] assert(len(valid_train_nodes) == len(valid_train_ids)) # filter the test data and labels 
train_data = data['all_x'][valid_train_ids, :] train_labels = data['all_y'][valid_train_ids, :] assert(train_data.shape[0] == train_labels.shape[0] == len(valid_train_ids)) ##################################################################################### valid_test_nodes = [] for testId in test_indices: vertices = gp[testId] num_nodes = len(vertices) num_valid_train_nodes = len([v for v in vertices if v in set(valid_train_ids)]) if num_valid_train_nodes > 0: valid_test_nodes.append(testId) print('total test: {}'.format(n_test)) print('total valid test: {}'.format(len(valid_test_nodes))) # make sure there is no duplication assert(len(valid_test_nodes) == len(set(valid_test_nodes))) # figure out the testId to keep (starting from 0) valid_test_ids = [idx for idx, testId in enumerate(test_indices) if testId in valid_test_nodes] assert(len(valid_test_nodes) == len(valid_test_ids)) # filter the test data and labels test_data = data['tx'][valid_test_ids, :] test_labels = data['ty'][valid_test_ids, :] assert(test_data.shape[0] == test_labels.shape[0] == len(valid_test_ids)) from tqdm import tqdm # create a conversion from global id to trainId globalId2TrainID = {} for trainId, globalId in enumerate(valid_train_nodes): globalId2TrainID[globalId] = trainId # create train graph train_graph = {} for nodeId in tqdm(valid_train_nodes): assert(nodeId in gp) vertices = [globalId2TrainID[v] for v in gp[nodeId] if v in set(valid_train_nodes)] assert(len(vertices) > 0) train_graph[globalId2TrainID[nodeId]] = vertices # create test graph test_graph = {} for testId, nodeId in enumerate(valid_test_nodes): assert(nodeId in gp) vertices = [globalId2TrainID[v] for v in gp[nodeId] if v in set(valid_train_nodes)] assert(len(vertices) > 0) test_graph[testId] = vertices # use index starting from 0 assert(len(train_graph) == train_data.shape[0]) assert(len(test_graph) == test_data.shape[0]) print('train: {} test: {}'.format(train_data.shape[0], test_data.shape[0])) # remove self-references 
for nodeId in train_graph: connections = train_graph[nodeId] if nodeId in connections: if len(connections) == 1: print("nodeId: {} only points to itself.".format(nodeId)) else: print("nodeId: {} has a self-references. Total connections: {}".format(nodeId, len(connections))) # check if the graph is symmetric for nodeId in train_graph: connections = train_graph[nodeId] for nn_nodeId in connections: if nodeId not in train_graph[nn_nodeId]: print("there is connection from {} to {}.".format(nn_nodeId, nodeId)) train_graph[0] train_graph[544] # convert labels to a sparse matrix format import sklearn.preprocessing from scipy import sparse train_labels = np.argmax(train_labels, axis=1) test_labels = np.argmax(test_labels, axis=1) n_classes = np.max(train_labels) - np.min(train_labels) + 1 label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(range(n_classes)) gnd_train = label_binarizer.transform(train_labels) gnd_test = label_binarizer.transform(test_labels) gnd_train = sparse.csr_matrix(gnd_train) gnd_test = sparse.csr_matrix(gnd_test) print(gnd_train.shape) print(gnd_test.shape) # create a connection matrix n_train = train_data.shape[0] train_connections = np.zeros((n_train, n_train), dtype=int) for doc_id in train_graph: train_connections[doc_id][train_graph[doc_id]] = 1 train_connections = sparse.csr_matrix(train_connections) n_test = test_data.shape[0] test_connections = np.zeros((n_test, n_train), dtype=int) for doc_id in test_graph: test_connections[doc_id][test_graph[doc_id]] = 1 test_connections = sparse.csr_matrix(test_connections) save_dir = os.path.join('../datasets/clean', dataset_name) ########################################################################################## train = [] for doc_id in train_graph: doc = {'doc_id': doc_id, 'bow': train_data[doc_id], 'label': gnd_train[doc_id], 'neighbors': train_connections[doc_id]} train.append(doc) train_df = pd.DataFrame.from_dict(train) train_df.set_index('doc_id', inplace=True) fn = 
os.path.join(save_dir, '{}.train.pkl'.format(dataset_name)) train_df.to_pickle(fn) ########################################################################################## test = [] for doc_id in test_graph: doc = {'doc_id': doc_id, 'bow': test_data[doc_id], 'label': gnd_test[doc_id], 'neighbors': test_connections[doc_id]} test.append(doc) test_df = pd.DataFrame.from_dict(test) test_df.set_index('doc_id', inplace=True) fn = os.path.join(save_dir, '{}.test.pkl'.format(dataset_name)) test_df.to_pickle(fn) ```
github_jupyter
# Sensitivity Analysis with the OpenCL RAMP model ### Import opencl modules ``` import numpy as np import yaml # pyyaml library for reading the parameters.yml file import os import itertools import matplotlib.pyplot as plt from microsim.opencl.ramp.run import run_headless from microsim.opencl.ramp.snapshot_convertor import SnapshotConvertor from microsim.opencl.ramp.snapshot import Snapshot from microsim.opencl.ramp.params import Params, IndividualHazardMultipliers, LocationHazardMultipliers from microsim.opencl.ramp.simulator import Simulator from microsim.opencl.ramp.disease_statuses import DiseaseStatus # Useful for connecting to this kernel %connect_info ``` ### Setup params for all runs Read the parameters file ``` PARAMETERS_FILENAME = "default.yml" with open(os.path.join("..","model_parameters", PARAMETERS_FILENAME)) as f: parameters = yaml.load(f, Loader=yaml.SafeLoader) sim_params = parameters["microsim"] # Parameters for the dynamic microsim (python) calibration_params = parameters["microsim_calibration"] disease_params = parameters["disease"] # Parameters for the disease model (r) ``` Prepare the parameters for the OpenCL model. (See [main.py](https://github.com/Urban-Analytics/RAMP-UA/blob/052861cc51be5bc1827c85bb827209f0df73c685/microsim/main.py#L262) for an example of how this is done in the code). 
``` current_risk_beta = disease_params['current_risk_beta'] # The OpenCL model incorporates the current risk beta by pre-multiplying the hazard multipliers with it location_hazard_multipliers = LocationHazardMultipliers( retail=calibration_params["hazard_location_multipliers"]["Retail"] * current_risk_beta, primary_school=calibration_params["hazard_location_multipliers"]["PrimarySchool"] * current_risk_beta, secondary_school=calibration_params["hazard_location_multipliers"]["SecondarySchool"] * current_risk_beta, home=calibration_params["hazard_location_multipliers"]["Home"] * current_risk_beta, work=calibration_params["hazard_location_multipliers"]["Work"] * current_risk_beta, ) # Individual hazard multipliers can be passed straight through individual_hazard_multipliers = IndividualHazardMultipliers( presymptomatic=calibration_params["hazard_individual_multipliers"]["presymptomatic"], asymptomatic=calibration_params["hazard_individual_multipliers"]["asymptomatic"], symptomatic=calibration_params["hazard_individual_multipliers"]["symptomatic"] ) proportion_asymptomatic = disease_params["asymp_rate"] params = Params( location_hazard_multipliers=location_hazard_multipliers, individual_hazard_multipliers=individual_hazard_multipliers, proportion_asymptomatic=proportion_asymptomatic ) ``` ### Get snapshot path **NB** this is the path to the OpenCL snapshot file generated by running `microsim/main.py`. You need to initilaise the model at least once to create the snapshot. 
The following says 'run in opencl mode and stop once initialisation has finished': ``` python microsim/main.py -ocl -init ``` ``` opencl_dir = "../microsim/opencl" snapshot_filepath = os.path.join(opencl_dir, "snapshots", "cache.npz") ``` ## Run OpenCL simulation for multiple repetitions Code to run the simulation ``` def run_opencl_model(i, iterations, snapshot_filepath, params, opencl_dir, num_seed_days, use_gpu, store_detailed_counts=True): # load snapshot snapshot = Snapshot.load_full_snapshot(path=snapshot_filepath) # set params snapshot.update_params(params) # set the random seed of the model for each repetition, otherwise it is completely deterministic snapshot.seed_prngs(i) # seed initial infections using GAM initial cases data_dir = os.path.join(opencl_dir, "data") # Create a simulator and upload the snapshot data to the OpenCL device simulator = Simulator(snapshot, opencl_dir=opencl_dir, gpu=use_gpu) simulator.upload_all(snapshot.buffers) print(f"Running simulation {i+1}.") summary, final_state = run_headless(simulator, snapshot, iterations, quiet=True, store_detailed_counts=store_detailed_counts) return summary, final_state ``` Run it: ``` iterations = 120 repetitions = 10 num_seed_days = 10 use_gpu=False summaries = [] final_results = [] # Prepare the function arguments as lists for starmap l_i = [i for i in range(repetitions)] l_iterations = [iterations] * repetitions l_snapshot_filepath = [snapshot_filepath] * repetitions l_params = [params] * repetitions l_opencl_dir = [opencl_dir] * repetitions l_num_seed_days = [num_seed_days] * repetitions l_use_gpu = [use_gpu] * repetitions results = itertools.starmap( run_opencl_model, zip( l_i, l_iterations, l_snapshot_filepath, l_params, l_opencl_dir, l_num_seed_days, l_use_gpu )) summaries = [x[0] for x in results] final_results = [x[1] for x in results] ``` ## Plot output summary data ### Total counts of disease status ``` def plot_summaries(summaries, plot_type="error_bars"): #fig, ax = plt.subplots(1, 
len(DiseaseStatus), sharey=True) fig, ax = plt.subplots(1, 1, figsize=(10,7)) # Work out the number of repetitions and iterations iters, reps = _get_iters_and_reps(summaries) x = range(iters) for d, disease_status in enumerate(DiseaseStatus): if disease_status==DiseaseStatus.Susceptible or disease_status==DiseaseStatus.Recovered: continue # Calculate the mean and standard deviation matrix = np.zeros(shape=(reps,iters)) for rep in range(reps): matrix[rep] = summaries[rep].total_counts[d] mean = np.mean(matrix, axis=0) sd = np.std(matrix, axis=0) if plot_type == "error_bars": ax.errorbar(x, mean, sd, label=f"{disease_status}" ) elif plot_type == "lines": for rep in range(reps): ax.plot(x, matrix[rep], label=f"{disease_status} {rep}", color=plt.cm.get_cmap("hsv", len(DiseaseStatus))(d) ) ax.legend() ax.set_title("Disease Status") ax.set_xlabel("Iteration") ax.set_ylabel("Number of cases") def _get_iters_and_reps(summaries): reps = len(summaries) iters = len(summaries[0].total_counts[0]) return (iters, reps) plot_summaries(summaries=summaries, plot_type="error_bars") #plot_summaries(summaries=summaries, plot_type="lines") ``` ### Disease statuses by age ``` def plot_disease_status_by_age(summaries): #fig, ax = plt.subplots(1, len(DiseaseStatus), sharey=True) fig, ax = plt.subplots(int(len(DiseaseStatus)/2), int(len(DiseaseStatus)/2), figsize=(15,11), tight_layout=True) iters, reps = _get_iters_and_reps(summaries) x = range(iters) age_thresholds = summaries[0].age_thresholds for d, disease_status in enumerate(DiseaseStatus): lower_age_bound = 0 for age_idx in range(len(age_thresholds)): matrix = np.zeros(shape=(reps, iters)) for rep in range(reps): #matrix[age_idx][rep][it] = summaries[rep].age_counts[str(disease_status)][age_idx][it] matrix[rep] = summaries[rep].age_counts[str(disease_status)][age_idx] mean = np.mean(matrix, axis=0) sd = np.std(matrix, axis=0) ax.flat[d].errorbar(x, mean, sd, label=f"{lower_age_bound} - {age_thresholds[age_idx]}" ) lower_age_bound = 
age_thresholds[age_idx] ax.flat[d].legend() ax.flat[d].set_title(f"{str(disease_status)}") ax.flat[d].set_xlabel("Iteration") ax.flat[d].set_ylabel("Number of cases") #fig.set_title(f"Num {disease_status} people by age group") plot_disease_status_by_age(summaries) ``` ### Plot MSOA geodata #### Load MSOA shapes ``` from microsim.load_msoa_locations import load_osm_shapefile, load_msoa_shapes import pandas as pd data_dir = ("../devon_data") osm_buildings = load_osm_shapefile(data_dir) devon_msoa_shapes = load_msoa_shapes(data_dir, visualize=False) devon_msoa_shapes.plot() plt.show() import pandas as pd def plot_msoa_choropleth(msoa_shapes, summary, disease_status, timestep): # get dataframes for all statuses msoa_data = summary.get_area_dataframes() msoa_data_for_status = msoa_data[disease_status] # add "Code" column so dataframes can be merged msoa_data_for_status["Code"] = msoa_data_for_status.index msoa_shapes = pd.merge(msoa_shapes, msoa_data_for_status, on="Code") msoa_shapes.plot(column=f"Day{timestep}", legend=True) plt.show() ``` ### Plot disease status by MSOA for a given timestep and status ``` disease_status = "exposed" plot_msoa_choropleth(devon_msoa_shapes, summaries[0], disease_status, 99) ```
github_jupyter
``` ################################################################### # Script: # trainHoliday.py # Usage: # python trainHoliday.py <input_file> <pass1_file> <output_file> # Description: # Build the prediction model based on training data # Pass 2: prediction based on holiday info # Authors: # Jasmin Nakic, jnakic@salesforce.com # Samir Pilipovic, spilipovic@salesforce.com ################################################################### import sys import numpy as np from sklearn import linear_model from sklearn.externals import joblib # Imports required for visualization (plotly) import plotly.graph_objs as go from plotly import __version__ from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot # Script debugging flag debugFlag = False # Feature list for holiday hours hourHolidayCols = ["isHoliday", "isHour0", "isHour1", "isHour2", "isHour3", "isHour4", "isHour5", "isHour6", "isHour7", "isHour8", "isHour9", "isHour10", "isHour11", "isHour12", "isHour13", "isHour14", "isHour15", "isHour16", "isHour17", "isHour18", "isHour19", "isHour20", "isHour21", "isHour22", "isHour23"] # Add columns to the existing array and populate with data def addColumns(dest, src, colNames): # Initialize temporary array tmpArr = np.empty(src.shape[0]) cols = 0 # Copy column content for name in colNames: if cols == 0: # first column tmpArr = np.copy(src[name]) tmpArr = np.reshape(tmpArr,(-1,1)) else: tmpCol = np.copy(src[name]) tmpCol = np.reshape(tmpCol,(-1,1)) tmpArr = np.append(tmpArr,tmpCol,1) cols = cols + 1 return np.append(dest,tmpArr,1) #end addColumns # Generate linear regression model def genModel(rawData,calcData,modelName): # Initialize array X = np.zeros(rawData.shape[0]) X = np.reshape(X,(-1,1)) # Add columns for holidays by hour X = addColumns(X,rawData,hourHolidayCols) X[:, 2] = rawData["isHoliday"]*rawData["isHour0"] X[:, 3] = rawData["isHoliday"]*rawData["isHour1"] X[:, 4] = rawData["isHoliday"]*rawData["isHour2"] X[:, 5] = 
rawData["isHoliday"]*rawData["isHour3"] X[:, 6] = rawData["isHoliday"]*rawData["isHour4"] X[:, 7] = rawData["isHoliday"]*rawData["isHour5"] X[:, 8] = rawData["isHoliday"]*rawData["isHour6"] X[:, 9] = rawData["isHoliday"]*rawData["isHour7"] X[:,10] = rawData["isHoliday"]*rawData["isHour8"] X[:,11] = rawData["isHoliday"]*rawData["isHour9"] X[:,12] = rawData["isHoliday"]*rawData["isHour10"] X[:,13] = rawData["isHoliday"]*rawData["isHour11"] X[:,14] = rawData["isHoliday"]*rawData["isHour12"] X[:,15] = rawData["isHoliday"]*rawData["isHour13"] X[:,16] = rawData["isHoliday"]*rawData["isHour14"] X[:,17] = rawData["isHoliday"]*rawData["isHour15"] X[:,18] = rawData["isHoliday"]*rawData["isHour16"] X[:,19] = rawData["isHoliday"]*rawData["isHour17"] X[:,20] = rawData["isHoliday"]*rawData["isHour18"] X[:,21] = rawData["isHoliday"]*rawData["isHour19"] X[:,22] = rawData["isHoliday"]*rawData["isHour20"] X[:,23] = rawData["isHoliday"]*rawData["isHour21"] X[:,24] = rawData["isHoliday"]*rawData["isHour22"] X[:,25] = rawData["isHoliday"]*rawData["isHour23"] Xnoholiday = np.zeros(rawData.shape[0]) Xnoholiday = (1-rawData["isHoliday"])*calcData["predHourWeek"] Xnoholiday = np.reshape(Xnoholiday,(-1,1)) X = np.append(X,Xnoholiday,1) if debugFlag: print("X 0: ", X[0:5]) Y = np.copy(rawData["cnt"]) if debugFlag: print("Y 0: ", Y[0:5]) model = linear_model.LinearRegression() print(model.fit(X, Y)) print("INTERCEPT: ", model.intercept_) print("COEFFICIENT shape: ", model.coef_.shape) print("COEFFICIENT values: ", model.coef_) print("SCORE values: ", model.score(X,Y)) P = model.predict(X) if debugFlag: print("P 0-5: ", P[0:5]) joblib.dump(model,modelName) return P #end genModel # Write predictions to the output file def writeResult(output,rawData,calcData,p5): # generate result file result = np.array( np.empty(rawData.shape[0]), dtype=[ ("timeStamp","|U19"), ("dateFrac",float), ("isHoliday",int), ("isSunday",int), ("cnt",int), ("predSimple",int), ("predTrig",int), ("predHourDay",int), 
("predHourWeek",int), ("predHoliday",int) ] ) result["timeStamp"] = rawData["timeStamp"] result["dateFrac"] = rawData["dateFrac"] result["isHoliday"] = rawData["isHoliday"] result["isSunday"] = rawData["isSunday"] result["cnt"] = rawData["cnt"] result["predSimple"] = calcData["predSimple"] result["predTrig"] = calcData["predTrig"] result["predHourDay"] = calcData["predHourDay"] result["predHourWeek"] = calcData["predHourWeek"] result["predHoliday"] = p5 if debugFlag: print("R 0-5: ", result[0:5]) hdr = "timeStamp\tdateFrac\tisHoliday\tisSunday\tcnt\tpredSimple\tpredTrig\tpredHourDay\tpredHourWeek\tpredHoliday" np.savetxt(output,result,fmt="%s",delimiter="\t",header=hdr,comments="") #end writeResult # Start inputFileName = "train_data.txt" hourlyFileName = "train_hourly.txt" outputFileName = "train_holiday.txt" # All input columns - data types are strings, float and int inputData = np.genfromtxt( inputFileName, delimiter='\t', names=True, dtype=("|U19","|U10",int,float,int,float,float,int,float,float, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int, int,int,int,int,int,int,int,int,int,int ) ) # timeStamp dateFrac isHoliday isSunday cnt predSimple predTrig predHourDay predHourWeek hourlyData = np.genfromtxt( hourlyFileName, delimiter='\t', names=True, 
dtype=("|U19",float,int,int,int,int,int,int,int) ) PH = genModel(inputData,hourlyData,"modelHoliday") writeResult(outputFileName,inputData,hourlyData,PH) # Load results from file generated above using correct data types results = np.genfromtxt( outputFileName, dtype=("|U19",float,int,int,int,int,int,int,int,int), delimiter='\t', names=True ) # Examine result data print("Shape:", results.shape) print("Columns:", len(results.dtype.names)) print(results[1:5]) # Generate chart with predicitons based on training data (using plotly) print("Plotly version", __version__) # requires plotly version >= 1.9.0 init_notebook_mode(connected=True) set1 = go.Bar( x=results["dateFrac"], y=results["predHourWeek"], # marker=dict(color='blue'), name='HourWeek' ) set2 = go.Bar( x=results["dateFrac"], y=results["predHoliday"], # marker=dict(color='crimson'), opacity=0.6, name='Holiday' ) barData = [set1, set2] barLayout = go.Layout(barmode='group', title="Prediction vs. Actual") fig = go.Figure(data=barData, layout=barLayout) iplot(fig) ```
github_jupyter
Tensorflow Chessbot - Predicting chess pieces from images by training a single-layer classifier --- **[Link to Github source code](https://github.com/Elucidation/tensorflow_chessbot)** Other IPython Notebooks for Tensorflow Chessbot: 1. [Computer Vision to turn a Chessboard image into chess tiles](tensorflow_compvision.html) - [Blog post #1](http://www.samansari.info/2016/02/learning-tensorflow-1-using-computer.html) 1. [Programmatically generating training datasets](tensorflow_generate_training_data.html) 1. [Predicting chess pieces from images by training a single-layer classifier](index.html) (*This notebook*) - [Blog post #2](http://www.samansari.info/2016/02/learning-tensorflow-2-training.html) 1. [Chessboard Convolutional Neural Network classifier](tensorflow_learn_cnn.html) --- In this notebook we'll train a tensorflow neural network to tell what piece is on a chess square. In the previous notebook we wrote scripts that parsed input images which contained a chessboard into 32x32 grayscale chess squares. 
``` # Init and helper functions import tensorflow as tf import numpy as np import PIL import urllib, cStringIO import glob from IPython.core.display import Markdown from IPython.display import Image, display import helper_functions as hf import tensorflow_chessbot np.set_printoptions(precision=2, suppress=True) ``` Let's load the tiles in for the training and test dataset, and then split them in a 90/10 ratio ``` # All tiles with pieces in random organizations all_paths = np.array(glob.glob("tiles/train_tiles_C/*/*.png")) # TODO : (set labels correctly) # Shuffle order of paths so when we split the train/test sets the order of files doesn't affect it np.random.shuffle(all_paths) ratio = 0.9 # training / testing ratio divider = int(len(all_paths) * ratio) train_paths = all_paths[:divider] test_paths = all_paths[divider:] # Training dataset # Generated by programmatic screenshots of lichess.org/editor/<FEN-string> print "Loading %d Training tiles" % train_paths.size train_images, train_labels = hf.loadFENtiles(train_paths) # Load from generated set # Test dataset, taken from screenshots of the starting position print "Loading %d Training tiles" % test_paths.size test_images, test_labels = hf.loadFENtiles(test_paths) # Load from generated set train_dataset = hf.DataSet(train_images, train_labels, dtype=tf.float32) test_dataset = hf.DataSet(test_images, test_labels, dtype=tf.float32) ``` Cool, lets look at a few images in the training set ``` # Visualize a couple tiles for i in np.random.choice(train_dataset.num_examples, 5, replace=False): #for i in range(train_dataset.num_examples): #if hf.label2Name(train_dataset.labels[i]) == 'P': #print "%d: Piece(%s) : Label vector: %s" % (i, hf.label2Name(train_dataset.labels[i]), train_dataset.labels[i]) print "%d: Piece(%s)" % (i, hf.label2Name(train_dataset.labels[i])) hf.display_array(np.reshape(train_dataset.images[i,:],[32,32])) ``` Looks good. 
Now that we've loaded the data, let's build up a simple softmax regression classifier based off of [this beginner tutorial](https://www.tensorflow.org/versions/v0.6.0/tutorials/mnist/beginners/index.html) on tensorflow. ``` x = tf.placeholder(tf.float32, [None, 32*32]) W = tf.Variable(tf.zeros([32*32, 13])) b = tf.Variable(tf.zeros([13])) y = tf.nn.softmax(tf.matmul(x, W) + b) y_ = tf.placeholder(tf.float32, [None, 13]) cross_entropy = -tf.reduce_sum(y_*tf.log(y)) train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy) # train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy) init = tf.initialize_all_variables() sess = tf.Session() sess.run(init) N = 6000 print "Training for %d steps..." % N for i in range(N): batch_xs, batch_ys = train_dataset.next_batch(100) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) if ((i+1) % 500) == 0: print "\t%d/%d" % (i+1, N) print "Finished training." correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print "Accuracy: %g\n" % sess.run(accuracy, feed_dict={x: test_dataset.images, y_: test_dataset.labels}) ``` Looks like it memorized everything from the datasets we collected, let's look at the weights to get an idea of what it sees for each piece. # Weights ``` print "Visualization of Weights as negative(Red) to positive(Blue)" for i in range(13): print "Piece: %s" % hf.labelIndex2Name(i) piece_weight = np.reshape(sess.run(W)[:,i], [32,32]) hf.display_weight(piece_weight,rng=[-0.2,0.2]) ``` Cool, you can see the shapes show up within the weights. Let's have a look at the failure cases to get a sense of what went wrong. 
``` mistakes = tf.where(~correct_prediction) mistake_indices = sess.run(mistakes, feed_dict={x: test_dataset.images, y_: test_dataset.labels}).flatten() guess_prob, guessed = sess.run([y, tf.argmax(y,1)], feed_dict={x: test_dataset.images}) print "%d mistakes:" % mistake_indices.size for idx in np.random.choice(mistake_indices, 5, replace=False): a,b = test_dataset.labels[idx], guessed[idx] print "---" print "\t#%d | Actual: '%s', Guessed: '%s'" % (idx, hf.label2Name(a),hf.labelIndex2Name(b)) print "Actual:",a print " Guess:",guess_prob[idx,:] hf.display_array(np.reshape(test_dataset.images[idx,:],[32,32])) ``` It looks like it's been learning that pieces have black borders, and since this pieceSet didn't, and it was a small part of the training set, it just fails and thinks we're looking at blank squares, *more training data!* From the label probabilities, it did a reasonable job of thinking the pieces were white, and their second best guesses tended to be close to the right answer, the blank spaces just won out. Also, lets look at several random selections, including successes. 
``` for idx in np.random.choice(test_dataset.num_examples,5,replace=False): a,b = test_dataset.labels[idx], guessed[idx] print "#%d | Actual: '%s', Guessed: '%s'" % (idx, hf.label2Name(a),hf.labelIndex2Name(b)) hf.display_array(np.reshape(test_dataset.images[idx,:],[32,32])) ``` # Manual validation via screenshots on reddit We'll eventually build a training/test/validation dataset of different proportions in one go, but for now, lets build a wrapper that given an image, returns a predicted FEN ``` validate_img_path = 'chessboards/reddit/aL64q8w.png' img_arr = tensorflow_chessbot.loadImage(validate_img_path) tiles = tensorflow_chessbot.getTiles(img_arr) # See the screenshot display(Image(validate_img_path)) # see one of the tiles print "Let's see the 5th tile, corresponding to F1" hf.display_array(tiles[:,:,5]) validation_set = np.swapaxes(np.reshape(tiles, [32*32, 64]),0,1) guess_prob, guessed = sess.run([y, tf.argmax(y,1)], feed_dict={x: validation_set}) print "First 5 tiles" for idx in range(5): guess = guessed[idx] print "#%d | Actual: '?', Guessed: '%s'" % (idx, hf.labelIndex2Name(guess)) hf.display_array(np.reshape(validation_set[idx,:],[32,32])) ``` Oh my, that looks correct, let's generate a FEN string from the `guessed` results, and view that side by side with the screenshot! ``` # guessed is tiles A1-H8 rank-order, so to make a FEN we just need to flip the files from 1-8 to 8-1 pieceNames = map(lambda k: '1' if k == 0 else hf.labelIndex2Name(k), guessed) # exchange ' ' for '1' for FEN fen = '/'.join([''.join(pieceNames[i*8:(i+1)*8]) for i in reversed(range(8))]) print "FEN:",fen # See our prediction as a chessboard display(Markdown("Prediction: [Lichess analysis](https://lichess.org/analysis/%s)" % fen)) display(Image(url='http://www.fen-to-image.com/image/%s' % fen)) # See the original screenshot we took from reddit print "Actual" display(Image(validate_img_path)) ``` A perfect match! 
Awesome, at this point even though we have enough to make predictions from several lichess boards (not all of them yet) and return a result. We can build our reddit chatbot now. # Predict from image url Let's wrap up predictions into a single function call from a URL, and test it on a few reddit posts. ``` def getPrediction(img): """Run trained neural network on tiles generated from image""" # Convert to grayscale numpy array img_arr = np.asarray(img.convert("L"), dtype=np.float32) # Use computer vision to get the tiles tiles = tensorflow_chessbot.getTiles(img_arr) if tiles is []: print "Couldn't parse chessboard" return "" # Reshape into Nx1024 rows of input data, format used by neural network validation_set = np.swapaxes(np.reshape(tiles, [32*32, 64]),0,1) # Run neural network on data guess_prob, guessed = sess.run([y, tf.argmax(y,1)], feed_dict={x: validation_set}) # Convert guess into FEN string # guessed is tiles A1-H8 rank-order, so to make a FEN we just need to flip the files from 1-8 to 8-1 pieceNames = map(lambda k: '1' if k == 0 else hf.labelIndex2Name(k), guessed) # exchange ' ' for '1' for FEN fen = '/'.join([''.join(pieceNames[i*8:(i+1)*8]) for i in reversed(range(8))]) return fen def makePrediction(image_url): """Given image url to a chessboard image, return a visualization of FEN and link to a lichess analysis""" # Load image from url and display img = PIL.Image.open(cStringIO.StringIO(urllib.urlopen(image_url).read())) print "Image on which to make prediction: %s" % image_url hf.display_image(img.resize([200,200], PIL.Image.ADAPTIVE)) # Make prediction fen = getPrediction(img) display(Markdown("Prediction: [Lichess analysis](https://lichess.org/analysis/%s)" % fen)) display(Image(url='http://www.fen-to-image.com/image/%s' % fen)) print "FEN: %s" % fen ``` ## Make Predictions All the boilerplate is done, the model is trained, it's time. 
I chose the first post I saw on reddit.com/chess with a chessboard (something our CV algorithm can do also): https://www.reddit.com/r/chess/comments/45inab/moderate_black_to_play_and_win/ with an image url of http://i.imgur.com/x6lLQQK.png And awaayyy we gooo... ``` makePrediction('http://i.imgur.com/x6lLQQK.png') ``` Fantastic, a perfect match! It was able to handle the highlighting on the pawn movement from G2 to F3 also. Now just for fun, let's try an image that is from a chessboard we've never seen before! Here's another on reddit: https://www.reddit.com/r/chess/comments/45c8ty/is_this_position_starting_move_36_a_win_for_white/ ``` makePrediction('http://i.imgur.com/r2r43xA.png') ``` Hah, it thought the black pawns (on A3, B2, C4, and F2) were black bishops. Same for the white pawns. This would be a pretty bad situation for white. But amazingly it predicted all the other pieces and empty squares correctly! This is pretty great, let's look at a few more screenshots taken lichess. Here's https://www.reddit.com/r/chess/comments/44q2n6/tactic_from_a_game_i_just_played_white_to_move/ ``` makePrediction('http://i.imgur.com/gSFbM1d.png') ``` Perfect match, as expected, when the validation images are based off of what the model trains, it'll do great, but if we use images from chess boards we haven't trained on, we'll see lots of mistakes. Mistakes are fun, lets see some. ## Trying with non-lichess images ``` makePrediction('http://imgur.com/oXpMSQI.png') ``` Ouch, it missed most of them there, the training data didn't contain images from this site, which looks somewhat like chess.com, need more DATA! 
``` makePrediction('http://imgur.com/qk5xa6q.png') makePrediction('http://imgur.com/u4zF5Hj.png') makePrediction('http://imgur.com/CW675pw.png') makePrediction('https://i.ytimg.com/vi/pG1Uhw3pO8o/hqdefault.jpg') makePrediction('http://www.caissa.com/chess-openings/img/siciliandefense1.gif') makePrediction('http://www.jinchess.com/chessboard/?p=rnbqkbnrpPpppppp----------P----------------R----PP-PPPPPRNBQKBNR') ``` Ouch, tons of failures, interesting replacements, sometimes it's a missing piece, sometimes it's a white rook instead of a black king, or a bishop instead of a pawn, how interesting.
github_jupyter
``` # import necessary packages import os import matplotlib.pyplot as plt import geopandas as gpd from descartes import PolygonPatch import pandas as pd import numpy as np import seaborn as sns import fiona import cbsodata import pyproj import geoplot as gplt import geoplot.crs as gcrs buurten = gpd.read_file("output/buurten.shp") fig = plt.figure(1, figsize=(25,15)) ax = fig.add_subplot() buurten.apply(lambda x: ax.annotate(s=x.BUURTCODE, xy=x.geometry.centroid.coords[0], ha='center', fontsize=14),axis=1); buurten.boundary.plot(ax=ax, color='Black', linewidth=.4) buurten.plot(ax=ax, cmap='YlGn', column='c_total', figsize=(12, 12),legend=True, legend_kwds={'label': "Population by neighborhood"}).axis('off') fig.tight_layout() buurt_data_19 = pd.DataFrame(cbsodata.get_data("84583NED")) buurt_name_list = pd.read_csv("data/buurten.csv",sep=";") buurt_code = buurt_name_list[["BUURTCODE","WIJKCODE","BUURTNAAM"]] dh_buurt_data_19 = buurt_data_19[(buurt_data_19["Gemeentenaam_1"]=="'s-Gravenhage ") & (buurt_data_19["SoortRegio_2"]=="Buurt ")] total_dh_19 = 
dh_buurt_data_19[['WijkenEnBuurten','AantalInwoners_5','WestersTotaal_17','Marokko_19','NederlandseAntillenEnAruba_20','Suriname_21','Turkije_22','OverigNietWesters_23','GemiddeldeHuishoudensgrootte_32','GemiddeldeWoningwaarde_35','Koopwoningen_40','HuurwoningenTotaal_41','GIHandelEnHoreca_84','RUCultuurRecreatieOverigeDiensten_88','AfstandTotHuisartsenpraktijk_95','AfstandTotGroteSupermarkt_96','AfstandTotKinderdagverblijf_97','AfstandTotSchool_98']].rename(columns={'WijkenEnBuurten':'buurtname','AantalInwoners_5':'total_citizens','WestersTotaal_17':'total_western','Marokko_19':'morocco','NederlandseAntillenEnAruba_20':'antilles','Suriname_21':'suriname','Turkije_22':'turkey','OverigNietWesters_23':'other_non_western','GemiddeldeHuishoudensgrootte_32':'avg_household_size','GemiddeldeWoningwaarde_35':'avg_housevalue','Koopwoningen_40':'owned_houses','HuurwoningenTotaal_41':'rent_houses','GIHandelEnHoreca_84':'horeca_biz','RUCultuurRecreatieOverigeDiensten_88':'recreation_biz','AfstandTotHuisartsenpraktijk_95':'dist_GP','AfstandTotGroteSupermarkt_96':'dist_super','AfstandTotKinderdagverblijf_97':'dist_nursery','AfstandTotSchool_98':'dist_school'}, errors="raise").merge(buurt_code, left_on='buurtname', right_on='BUURTNAAM').set_index("BUURTCODE").drop("BUURTNAAM",axis=1) total_dh_19[["WIJKCODE","buurtname","total_citizens"]] total_dh_19.loc[44] print(total_dh_19[["WIJKCODE","buurtname","total_citizens"]].sort_index().to_latex(longtable=True)) ```
github_jupyter
# Chapter 7 Moving Beyond Linearity The truth is never linear! Or almost never! But often the linearity assumption is good enough. When it's not: - polynomials, - step functions, - splines, - local regression, and - generalized additive models offer a lot of flexibility, without losing the ease and interpretability of linear models. ``` import pandas as pd from pandas.api.types import CategoricalDtype import numpy as np import matplotlib.pyplot as plt import seaborn as sns import sklearn.linear_model as skl_lm from sklearn import neighbors from sklearn.preprocessing import PolynomialFeatures, StandardScaler, LabelEncoder from sklearn.pipeline import Pipeline, make_pipeline from sklearn.model_selection import cross_val_score from sklearn.metrics import mean_squared_error from patsy import dmatrix import re %matplotlib inline plt.style.use('seaborn-white') df_wage = pd.read_csv('data/Wage.csv') df_wage.rename(columns={'Unnamed: 0': 'ID'}, inplace=True) for col_name in df_wage.columns: if df_wage[col_name].dtype == 'object': # get list of categories cat_list = np.sort(df_wage[col_name].unique()) # create a categorical dtype that is ordered for the columns that makes sense cat_dtype = CategoricalDtype(cat_list, ordered=True if col_name in ['education', 'health'] else False) df_wage[col_name] = df_wage[col_name].astype(cat_dtype) # strip '#. 
' from the categories names df_wage[col_name].cat.categories = [re.sub(r"\d+.\s+", "", cat) for cat in cat_list] print(f'{col_name}: {df_wage[col_name].cat.categories.values}') df_wage.head(3) df_wage.describe(include='category') ``` ### Polynomial Regression $y_{i}=\beta_{0}+\beta_{1} x_{i}+\beta_{2} x_{i}^{2}+\beta_{3} x_{i}^{3}+\ldots+\beta_{d} x_{i}^{d}+\epsilon_{i}$ ``` # polynomial regression linear = skl_lm.LinearRegression() poly = PolynomialFeatures(4) X = df_wage['age'].values.reshape(-1, 1) y = df_wage['wage'] X_poly = poly.fit_transform(X) linear.fit(X_poly, y) # logistic regression df_logistic_wage = pd.DataFrame({'age': df_wage['age'].values, 'large_wage': df_wage['wage'].values > 250}, columns=['age', 'large_wage']) df_logistic_wage.large_wage = df_logistic_wage.large_wage.astype(int) X = df_logistic_wage['age'].values.reshape(-1, 1).astype(float) y = df_logistic_wage['large_wage'] pipe = Pipeline(steps=[('scaler', StandardScaler()), ('polynomial', PolynomialFeatures(4)), ('logistic', skl_lm.LogisticRegression(C=1e10))]) # Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator. 
pipe.fit(X, y) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,4)) ax1.scatter(df_wage.age, df_wage.wage, facecolors='None', edgecolors='grey', alpha=.5) # seaborn uses np.polyfit to fit a polynomial of order 4 to the data, basically the same we did with sklearn sns.regplot(df_wage.age, df_wage.wage, ci=100, label='Degree 4', order=4, scatter=False, color='blue', ax=ax1) age_range = np.linspace(df_logistic_wage.age.min(), df_logistic_wage.age.max(), 1000).reshape((-1, 1)) # Apply transforms to the data, and predict with the final estimator predictions = pipe.predict_proba(age_range)[:, 1] # probability of wage > 250 given age ax2.plot(age_range, predictions) ax2.set_ylim(ymin=-0.005, ymax=0.2) # rug plot ax2.plot(X, y*0.19, '|', color='k') ax2.set_xlabel('age') ax2.set_ylabel('P(wage>250|age)'); ``` Details - Create new variables $X_{1}=X, X_{2}=X^{2},$ etc and then treat as multiple linear regression. - Not really interested in the coefficients; more interested in the fitted function values at any value $x_{0}:$ $$ \hat{f}\left(x_{0}\right)=\hat{\beta}_{0}+\hat{\beta}_{1} x_{0}+\hat{\beta}_{2} x_{0}^{2}+\hat{\beta}_{3} x_{0}^{3}+\hat{\beta}_{4} x_{0}^{4} $$ - Since $\hat{f}\left(x_{0}\right)$ is a linear function of the $\hat{\beta}_{\ell},$ can get a simple expression for pointwise-variances $\operatorname{Var}\left[\hat{f}\left(x_{0}\right)\right]$ at any value $x_{0} .$ In the figure we have computed the fit and pointwise standard errors on a grid of values for $x_{0} .$ We show $\hat{f}\left(x_{0}\right) \pm 2 \cdot \operatorname{se}\left[\hat{f}\left(x_{0}\right)\right]$ - We either fix the degree $d$ at some reasonably low value, else use cross-validation to choose $d$ - Logistic regression follows naturally. 
For example, in figure we model $$ \operatorname{Pr}\left(y_{i}>250 | x_{i}\right)=\frac{\exp \left(\beta_{0}+\beta_{1} x_{i}+\beta_{2} x_{i}^{2}+\ldots+\beta_{d} x_{i}^{d}\right)}{1+\exp \left(\beta_{0}+\beta_{1} x_{i}+\beta_{2} x_{i}^{2}+\ldots+\beta_{d} x_{i}^{d}\right)} $$ - To get confidence intervals, compute upper and lower bounds on on the logit scale, and then invert to get on probability scale. - Can do separately on several variables- just stack the variables into one matrix, and separate out the pieces afterwards (see GAMs later). - Caveat: polynomials have notorious tail behavior - very bad for extrapolation. - Can fit using $\mathrm{y} \sim \operatorname{poly}(\mathrm{x}, \text { degree }=3)$ in formula. ### Step Function Another way of creating transformations of a variable - cut the variable into distinct regions. $C_{1}(X)=I(X<35), \quad C_{2}(X)=I(35 \leq X<50), \ldots, C_{3}(X)=I(X \geq 65)$ ``` # fit a stepwise function to wage data. Use four bins like in Figure 7.2 num_bins = 4 df_step = pd.DataFrame(pd.cut(df_wage.age, num_bins)) df_step = pd.get_dummies(df_step) df_step['wage'] = df_wage.wage df_step['age'] = df_wage.age df_step.head(3) # linear regression linear = skl_lm.LinearRegression() X = df_step[df_step.columns.difference(['wage', 'age'])] y = df_step['wage'] linear.fit(X, y) # logistic regression df_logistic_step = df_step.copy() df_logistic_step['large_wage'] = (df_logistic_step['wage'].values > 250).astype(int) X = df_logistic_step[df_step.columns.difference(['wage', 'age', 'large_wage'])] y = df_logistic_step['large_wage'] pipe = Pipeline(steps=[('scaler', StandardScaler()), ('logistic', skl_lm.LogisticRegression(C=1e10))]) # Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator. 
pipe.fit(X, y) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,4)) ax1.scatter(df_step.age, df_step.wage, facecolors='None', edgecolors='grey', alpha=.5) age_range = np.linspace(df_step.age.min(), df_step.age.max(), 1000) age_range_dummies = pd.get_dummies(pd.cut(age_range, num_bins)) predictions = linear.predict(age_range_dummies) ax1.plot(age_range, predictions) ax1.set_xlabel('age') ax1.set_ylabel('wage') # Apply transforms to the data, and predict with the final estimator predictions = pipe.predict_proba(age_range_dummies)[:, 1] # probability of wage > 250 given age ax2.plot(age_range, predictions) ax2.set_ylim(ymin=-0.005, ymax=0.2) # rug plot ax2.plot(df_step.age, y*0.19, '|', color='k') ax2.set_xlabel('age') ax2.set_ylabel('P(wage>250|age)'); ``` - Easy to work with. Creates a series of dummy variables representing each group. - Useful way of creating interactions that are easy to interpret. For example, interaction effect of Year and Age: I(Year < 2005)*Age; I(Year >= 2005)*Age would allow for different linear functions in each age category. - In R: I(year < 2005) or cut(age, c(18, 25, 40, 65, 90)) - Choice of cutpoints or knots can be problematic. For creating nonlinearities, smoother alternatives such as splines are available. ### Piecewise Polynomials - Instead of a single polynomial in $X$ over its whole domain, we can rather use different polynomials in regions defined by knots. E.g. (see figure) $$ y_{i}=\left\{\begin{array}{ll} \beta_{01}+\beta_{11} x_{i}+\beta_{21} x_{i}^{2}+\beta_{31} x_{i}^{3}+\epsilon_{i} & \text { if } x_{i}<c \\ \beta_{02}+\beta_{12} x_{i}+\beta_{22} x_{i}^{2}+\beta_{32} x_{i}^{3}+\epsilon_{i} & \text { if } x_{i} \geq c \end{array}\right. $$ - Better to add constraints to the polynomials, e.g. continuity. - Splines have the "maximum" amount of continuity. 
``` X = df_wage['age'].values.reshape(-1, 1) y = df_wage['wage'] knots = np.percentile(X, [25, 50, 75]) spline_basis_plot = dmatrix("cr(x, knots=knots) -1", {"x": np.sort(X, axis=0), 'knots': knots}, return_type='dataframe') plt.plot(np.sort(X, axis=0), spline_basis_plot.values, '-', linewidth=0.5) for knot in knots: plt.axvline(knot, ls='--', color='k') plt.title('Natural spline basis functions'); ``` ### Linear Splines A linear spline with knots at $\xi_{k}, k=1, \ldots, K$ is a piecewise linear polynomial continuous at each knot. We can represent this model as $$ y_{i}=\beta_{0}+\beta_{1} b_{1}\left(x_{i}\right)+\beta_{2} b_{2}\left(x_{i}\right)+\cdots+\beta_{K+3} b_{K+3}\left(x_{i}\right)+\epsilon_{i} $$ where the $b_{k}$ are basis functions. $$ \begin{aligned} b_{1}\left(x_{i}\right) &=x_{i} \\ b_{k+1}\left(x_{i}\right) &=\left(x_{i}-\xi_{k}\right)_{+}, \quad k=1, \ldots, K \end{aligned} $$ Here the () $_{+}$ means positive part; i.e. $$ \left(x_{i}-\xi_{k}\right)_{+}=\left\{\begin{aligned} x_{i}-\xi_{k} & \text { if } x_{i}>\xi_{k} \\ 0 & \text { otherwise } \end{aligned}\right. $$ ### Cubic Splines A cubic spline with knots at $\xi_{k}, k=1, \ldots, K$ is a piecewise cubic polynomial with continuous derivatives up to order 2 at each knot. Again we can represent this model with truncated power basis functions $$ \begin{aligned} y_{i}=\beta_{0}+\beta_{1} b_{1}\left(x_{i}\right) &+\beta_{2} b_{2}\left(x_{i}\right)+\cdots+\beta_{K+3} b_{K+3}\left(x_{i}\right)+\epsilon_{i} \\ b_{1}\left(x_{i}\right) &=x_{i} \\ b_{2}\left(x_{i}\right) &=x_{i}^{2} \\ b_{3}\left(x_{i}\right) &=x_{i}^{3} \\ b_{k+3}\left(x_{i}\right) &=\left(x_{i}-\xi_{k}\right)_{+}^{3}, \quad k=1, \ldots, K \end{aligned} $$ where $$ \left(x_{i}-\xi_{k}\right)_{+}^{3}=\left\{\begin{aligned} \left(x_{i}-\xi_{k}\right)^{3} & \text { if } x_{i}>\xi_{k} \\ 0 & \text { otherwise } \end{aligned}\right. $$ ### Natural Cubic Splines A natural cubic spline extrapolates linearly beyond the boundary knots. 
This adds 4 = 2 * 2 extra constraints, and allows us to put more internal knots for the same degrees of freedom as a regular cubic spline. ### Generalized Additive Models - Allows for flexible nonlinearities in several variables, but retains - The additive structure of linear models. $$ y_{i}=\beta_{0}+f_{1}\left(x_{i 1}\right)+f_{2}\left(x_{i 2}\right)+\cdots+f_{p}\left(x_{i p}\right)+\epsilon_{i} $$ - Can fit a GAM simply using, e.g. natural splines: lm(wage * ns(year; df = 5) + ns(age; df = 5) + education) - Coefficients not that interesting; fitted functions are. The previous plot was produced using plot.gam. - Can mix terms | some linear, some nonlinear | and use anova() to compare models. - Can use smoothing splines or local regression as well: gam(wage * s(year; df = 5) + lo(age; span = :5) + education) - GAMs are additive, although low-order interactions can be included in a natural way using, e.g. bivariate smoothers or interactions of the form ns(age,df=5):ns(year,df=5). ``` from pygam import LinearGAM, s, f from pygam.datasets import wage X, y = wage(return_X_y=True) ## model gam = LinearGAM(s(0) + s(1) + f(2)) gam.gridsearch(X, y) ## plotting plt.figure(); fig, axs = plt.subplots(1,3); titles = ['year', 'age', 'education'] for i, ax in enumerate(axs): XX = gam.generate_X_grid(term=i) ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX)) ax.plot(XX[:, i], gam.partial_dependence(term=i, X=XX, width=.95)[1], c='r', ls='--') if i == 0: ax.set_ylim(-30,30) ax.set_title(titles[i]); ``` ### GAMs for classification $\log \left(\frac{p(X)}{1-p(X)}\right)=\beta_{0}+f_{1}\left(X_{1}\right)+f_{2}\left(X_{2}\right)+\cdots+f_{p}\left(X_{p}\right)$ ``` from pygam import LogisticGAM, s, f from pygam.datasets import default X, y = default(return_X_y=True) gam = LogisticGAM(f(0) + s(1) + s(2)).gridsearch(X, y) fig, axs = plt.subplots(1, 3) titles = ['student', 'balance', 'income'] for i, ax in enumerate(axs): XX = gam.generate_X_grid(term=i) pdep, confi = 
gam.partial_dependence(term=i, width=.95) ax.plot(XX[:, i], pdep) ax.plot(XX[:, i], confi, c='r', ls='--') ax.set_title(titles[i]) ``` ### End of Chapter 7
github_jupyter
# Analyzing the number of viruses associated with each host ``` import numpy as np import matplotlib.pyplot as plt import ete3 as ete import os import Bio import random import scipy.stats as stats from functools import * from Bio import Entrez from Bio import SeqIO import matplotlib.patches as mpatches import seaborn as sns sns.set_context("paper") %matplotlib inline #as part of quality control the following viruses had to be omitted from our analysis #they contain misannotations or mislabeled hosts, etc. badFolders = ['Acyrthosiphon_pisum_secondary_endosymbiont_phage_1_uid14047', 'Aureococcus_anophagefferens_virus_MM_2014_uid258005', 'Dragonfly_associated_microphage_1_uid177547', 'Enterobacter_phage_PG7_uid240014', 'Escherichia_phage_vB_EcoM_JS09_uid248321', 'Lactobacillus_phage_LL_H_uid19803', 'Providencia_phage_Redjac_uid177540', 'Pseudoalteromonas_phage_RIO_1_uid206039', 'Sputnik_virophage_2_uid243067', 'Sputnik_virophage_3_uid243065', 'Staphylococcus_phage_6ec_uid253318', 'Streptococcus_phage_DCC1738_uid253219', 'Streptococcus_phage_IC1_uid253220', 'Streptococcus_phage_K13_uid253223', 'Streptococcus_phage_SMP_uid18529', 'Vibrio_phage_CP_T1_uid181062', 'Vibrio_phage_vpms1_uid212709', 'Vibrio_phage_VPUSM_8_uid227006', 'Abaca_bunchy_top_virus_uid28697','Shallot_yellow_stripe_virus_uid15745','Equine_rhinitis_A_virus_uid15205','African_pouched_rat_arterivirus_uid274797','Spilanthes_yellow_vein_virus_uid19779', 'Velvet_bean_severe_mosaic_virus_uid41175','Paramecium_bursaria_Chlorella_virus_FR483_uid18305','Human_endogenous_retrovirus_K113_uid222261','Acholeplasma_phage_MV_L1_uid14573'] #viruses whose lineage is improperly annotated (they don't belong to any Baltimore group) unknowns= ['Bacillus_phage_phBC6A51_uid15021', 'Bacillus_phage_phBC6A52_uid15022', 'Badger_feces_associated_gemycircularvirus_uid281125', 'Chaetoceros_DNA_virus_7_uid237558', 'Chaetoceros_lorenzianus_DNA_Virus_uid63565', 'Chaetoceros_tenuissimus_DNA_virus_uid60753', 
'Cladosporium_cladosporioides_virus_1_uid258308', 'Enterobacteria_phage_YYZ_2008_uid32231', 'European_mountain_ash_ringspot_associated_virus_uid39973', 'Faecal_associated_gemycircularvirus_3_uid268557', 'Faecal_associated_gemycircularvirus_4_uid268556', 'Faecal_associated_gemycircularvirus_5_uid268555', 'Faecal_associated_gemycircularvirus_6_uid268554', 'Faecal_associated_gemycircularvirus_7_uid268553', 'Gentian_ovary_ring_spot_virus_uid256090', 'Geobacillus_virus_E2_uid19797', 'Glossina_pallidipes_salivary_gland_hypertrophy_virus_uid28839', 'Haloarcula_hispanica_icosahedral_virus_2_uid109269', 'Haloarcula_phage_SH1_uid15535', 'Halovirus_PH1_uid196975', 'Halovirus_VNH_1_uid262927', 'Human_genital_associated_circular_DNA_virus_1_uid281399', 'Lactobacillus_johnsonii_prophage_Lj771_uid28145', 'Magnaporthe_oryzae_RNA_virus_uid272442', 'Mongoose_feces_associated_gemycircularvirus_a_uid281407', 'Mongoose_feces_associated_gemycircularvirus_b_uid281406', 'Mongoose_feces_associated_gemycircularvirus_c_uid281404', 'Mongoose_feces_associated_gemycircularvirus_d_uid281405', 'Mycoplasma_phage_phiMFV1_uid14387', 'Penicillium_roqueforti_dsRNA_mycovirus_1_uid258162', 'Phage_Gifsy_1_uid32269', 'Phage_Gifsy_2_uid32271', 'Pigeonpea_cryptic_virus_uid244664', 'Planaria_asexual_strain_specific_virus_like_element_type_1_uid14140', 'Pseudomonas_phage_phi_Pto_bp6g_uid240724', 'Rhizoctonia_fumigata_mycovirus_uid283068', 'Rhodococcus_phage_REQ2_uid81171', 'Rhodococcus_phage_REQ3_uid81175', 'Rose_rosette_virus_uid64937', 'Rosellinia_necatrix_fusarivirus_1_uid255787', 'Rosellinia_necatrix_megabirnavirus_1_W779_uid41609', 'Salisaeta_icosahedral_phage_1_uid167575', 'Salmonella_phage_Fels_1_uid29267', 'Sodalis_phage_phiSG1_uid16583', 'Staphylococcus_phage_phi2958PVL_uid32173', 'Staphylococcus_phage_tp310_1_uid20659', 'Staphylococcus_phage_tp310_3_uid20663', 'Stenotrophomonas_phage_phiSMA9_uid15493', 'Streptococcus_phage_20617_uid239271', 'Streptococcus_phage_phiBHN167_uid227353', 
'Streptococcus_pyogenes_phage_315_1_uid14533', 'Streptococcus_pyogenes_phage_315_2_uid14528', 'Streptococcus_pyogenes_phage_315_3_uid14529', 'Streptococcus_pyogenes_phage_315_4_uid14530', 'Streptococcus_pyogenes_phage_315_5_uid14531', 'Streptococcus_pyogenes_phage_315_6_uid14532', 'Tanay_virus_uid246129', 'Thermococcus_prieurii_virus_1_uid84407', 'Thermus_phage_IN93_uid14235', 'Thermus_phage_P23_77_uid40235'] badFolders=badFolders+unknowns #these are satellite viruses sats = ['Ageratum_conyzoides_associated_symptomless_alphasatellite_uid259293', 'Ageratum_enation_alphasatellite_uid181994', 'Ageratum_leaf_curl_betasatellite_uid195929', 'Ageratum_leaf_curl_Cameroon_betasatellite_uid36669', 'Ageratum_yellow_leaf_curl_betasatellite_uid14439', 'Ageratum_yellow_vein_China_alphasatellite_uid237561', 'Ageratum_yellow_vein_China_virus_associated_DNA_beta_uid15515', 'Ageratum_yellow_vein_Singapore_alphasatellite_uid14232', 'Ageratum_yellow_vein_virus_satellite_DNA_beta_uid14444', 'Alternanthera_yellow_vein_virus_satellite_DNA_beta_uid19833', 'Andrographis_yellow_vein_leaf_curl_betasatellite_uid243492', 'Bhendi_yellow_vein_India_betasatellite__India_Aurangabad_OY164_2006__uid61557', 'Bhendi_yellow_vein_mosaic_betasatellite__India_Coimbator_OYCO1_2005__uid61777', 'Bhendi_yellow_vein_mosaic_virus_associated_alphasatellite_uid174781', 'Bhendi_yellow_vein_mosaic_virus_satellite_DNA_beta_uid14445', 'Black_medic_leafroll_alphasatellite_1_uid243500', 'Cardamom_bushy_dwarf_virus_satellite_uid230064', 'Cassava_mosaic_Madagascar_alphasatellite_uid175666', 'Chili_leaf_curl_Bhatinda_betasatellite_uid206467', 'Chilli_leaf_curl_alphasatellite_uid253224', 'Chilli_leaf_curl_Multan_alphasatellite_uid39933', 'Chilli_leaf_curl_virus_satellite_DNA_beta_uid14441', 'Cleome_leaf_crumple_virus_associated_DNA_1_uid60045', 'Corchorus_yellow_vein_mosaic_betasatellite_uid192608', 'Cotton_leaf_curl_Burewala_alphasatellite_uid45935', 'Cotton_leaf_curl_Burewala_betasatellite_uid45933', 
'Cotton_leaf_curl_Gezira_alphasatellite_uid42507', 'Cotton_leaf_curl_Gezira_virus_satellite_DNA_beta_uid15166', 'Cotton_leaf_curl_Multan_betasatellite_uid15780', 'Cotton_leaf_curl_virus_associated_DNA_1_isolate_Lucknow_uid65305', 'Cotton_leaf_curl_virus_associated_DNA_beta_uid14438', 'Cotton_leaf_curl_virus_betasatellite_uid162497', 'Cowpea_severe_leaf_curl_associated_DNA_beta_uid15157', 'Croton_yellow_vein_mosaic_alphasatellite_uid45931', 'Croton_yellow_vein_mosaic_betasatellite_uid18249', 'Cuban_alphasatellite_1_uid210798', 'Dragonfly_associated_alphasatellite_uid181244', 'Emilia_yellow_vein_virus_associated_DNA_beta_uid37893', 'Erectites_yellow_mosaic_virus_satellite_DNA_beta_uid19827', 'Eupatorium_yellow_vein_virus_satellite_DNA_beta_uid14447', 'Faba_bean_necrotic_stunt_alphasatellite_1_uid243499', 'Faba_bean_necrotic_stunt_alphasatellite_2_uid243498', 'French_bean_leaf_curl_betasatellite_Kanpur_uid169556', 'Gossypium_darwinii_symptomless_alphasatellite_uid39593', 'Gossypium_davidsonii_symptomless_alphasatellite_uid39589', 'Gossypium_mustilinum_symptomless_alphasatellite_uid39591', 'Grapevine_satellite_virus_uid208539', 'Guar_leaf_curl_alphasatellite_uid193981', 'Hedyotis_uncinella_yellow_mosaic_betasatellite_uid230991', 'Honeysuckle_yellow_vein_mosaic_disease_associated_satellite_DNA_beta_uid19863', 'Honeysuckle_yellow_vein_mosaic_virus_satellite_DNA_beta_uid14620', 'Leucas_zeylanica_yellow_vein_virus_satellite_DNA_beta_uid41305', 'Ludwigia_leaf_distortion_betasatellite__India_Amadalavalasa_Hibiscus_2007__uid29233', 'Ludwigia_yellow_vein_virus_associated_DNA_beta_uid15561', 'Luffa_puckering_and_leaf_distortion_associated_betasatellite__India_Gurdaspur_Okra_2013__uid253218', 'Luffa_puckering_and_leaf_distortion_associated_DNA_beta_uid15779', 'Malachra_yellow_vein_mosaic_virus_associated_satellite_DNA_beta_uid28727', 'Malvastrum_leaf_curl_betasatellite_uid16301', 'Malvastrum_leaf_curl_Guangdong_betasatellite_uid243827', 
'Malvastrum_yellow_vein_Yunnan_virus_satellite_DNA_beta_uid14567', 'Melon_chlorotic_mosaic_virus_associated_alphasatellite_uid51413', 'Mesta_yellow_vein_mosaic_virus_alphasatellite__India_Hoshiarpur_Okra_2013__uid253217', 'Mesta_yellow_vein_mosaic_virus_associated_alphasatellite_uid174780', 'Mesta_yellow_vein_mosaic_virus_associated_DNA_beta_uid21015', 'Milk_vetch_dwarf_C10_alphasatellite_uid240682', 'Mirabilis_leaf_curl_India_virus_associated_betasatellite_uid255800', 'Mungbean_yellow_mosaic_India_virus_associated_betasatellite__India__Faizabad__Cow_Pea_2012__uid177773', 'Nanovirus_like_particle_uid14386', 'Okra_enation_leaf_curl_betasatellite__India_Sonipat_EL10_2006__uid61781', 'Okra_leaf_curl_Mali_virus_satellite_DNA_beta_uid20323', 'Okra_yellow_vein_disease_associated_sequence_uid14443', 'Olive_viral_satellite_RNA_uid230268', 'Papaya_leaf_curl_alphasatellite_uid233414', 'Papaya_leaf_curl_China_virus_satellite_DNA_beta_uid19819', 'Pepper_leaf_curl_virus_satellite_DNA_beta_uid28283', 'Pepper_leaf_curl_Yunnan_virus_satellite_DNA_beta_uid29415', 'Potato_apical_leaf_curl_disease_associated_satellite_DNA_beta_uid18323', 'Radish_leaf_curl_virus_betasatellite_uid28281', 'Sida_leaf_curl_virus_satellite_DNA_beta_uid19823', 'Sida_yellow_mosaic_virus_China_associated_DNA_beta_uid15514', 'Sida_yellow_vein_China_alphasatellite_uid238950', 'Siegesbeckia_yellow_vein_virus_associated_DNA_beta_uid17269', 'Subterranean_clover_stunt_C6_alphasatellite_uid14180', 'Synedrella_leaf_curl_alphasatellite_uid263034', 'Tobacco_leaf_chlorosis_betasatellite_uid178075', 'Tobacco_leaf_curl_disease_associated_sequence_uid14442', 'Tobacco_leaf_curl_PUSA_alphasatellite_uid56023', 'Tobacco_leaf_curl_virus_associated_DNA_beta_uid45925', 'Tomato_leaf_curl_Bangladesh_betasatellite_uid56017', 'Tomato_leaf_curl_Gandhinagar_betasatellite_uid231683', 'Tomato_leaf_curl_Hajipur_betasatellite_uid175587', 'Tomato_leaf_curl_Joydebpur_virus_satellite_DNA_beta_uid28273', 
'Tomato_leaf_curl_Karnataka_alphasatellite_uid181995', 'Tomato_leaf_curl_Pakistan_virus_associated_DNA_1_uid38463', 'Tomato_leaf_curl_Patna_betasatellite_uid36541', 'Tomato_leaf_curl_Togo_betasatellite_Togo_2006__uid60629', 'Tomato_leaf_curl_Yemen_betasatellite_uid177643', 'Tomato_yellow_leaf_curl_China_betasatellite_uid181248', 'Tomato_yellow_leaf_curl_Vietnam_virus_satellite_DNA_beta_uid19829', 'TYLCCNV_Y322__satellite_DNA_beta_uid16338', 'Vernonia_yellow_vein_betasatellite_uid41303', 'Vernonia_yellow_vein_Fujian_virus_alphasatellite_uid72145', 'Vernonia_yellow_vein_Fujian_virus_betasatellite_uid72143', 'Zinnia_leaf_curl_disease_associated_sequence_uid14440', 'Zinnia_leaf_curl_virus_associated_DNA_beta_uid14538'] def getTaxidFromGenomeFolder(genome_folder): ''' Takes an rpt file for a genome and returns the taxid in it. ''' if folder not in badFolders and folder not in sats: with open(os.path.join('all_rpt', folder, os.listdir(os.path.join('all_rpt', folder))[0])) as rpt_file: for line in rpt_file: if line.startswith('Taxid: '): return int(line[7:-1]) def getHostTaxidFromGenomeFolder(genome_folder): ''' Takes a folder containing an asn file and returns the taxid of the host of the virus described by that asn file. 
''' if folder not in badFolders and folder not in sats : with open('folderToHostTaxid.txt', 'r') as taxid2host: for line in taxid2host: if genome_folder in line: return int(line.split('\t')[1][:-1]) return 0 def getGenomeLengthFromGenomeFolder(genome_folder): total = 0 if folder not in badFolders and folder not in sats : for file in os.listdir(os.path.join('all_ptt', folder)): with open(os.path.join('all_ptt', folder, file)) as ptt_file: line = ptt_file.readline() index = line.find('- 1..') if index == -1: return -1 num = line[index + 5 : -1] total += int(num) return total def getCodingNumberAndOverlapNumberFromGenomeFolder(folder): totalCoding = 0 totalOverlap = 0 for file in os.listdir(os.path.join('all_ptt', folder)): #these folders contain genomes with misannotation of gene overlap,non-coding DNA, or host information from NCBI. if folder not in badFolders and folder not in sats : with open(os.path.join('all_ptt', folder, file)) as ptt_file: line = ptt_file.readline() index = line.find('- 1..') repliconLength = int(line[index + 5 : -1]) indicesThatOverlap = [0] * repliconLength line = ptt_file.readline() line = ptt_file.readline() uniqueIndices = set([]) for line in ptt_file: line = line.split('\t')[0] [start, end] = [int(x) for x in line.split('..')] #this is put in place to distinguish linear from circular genomes. 
Here, we're looking for linear genomes if start < end: for i in range(start, end): if i in uniqueIndices: indicesThatOverlap[i] = 1 else: uniqueIndices.add(i) #here were are looking for circular genomes to be able to properly count noncoding bases elif start > end: for i in range(start, repliconLength): if i in uniqueIndices: indicesThatOverlap[i] = 1 else: uniqueIndices.add(i) for i in range(1, end): if i in uniqueIndices: indicesThatOverlap[i] = 1 else: uniqueIndices.add(i) totalCoding += len(uniqueIndices) #totalOverlap += len(bigList) - len(uniqueIndices) totalOverlap += sum(indicesThatOverlap) return (totalCoding, totalOverlap) genomeLengths = [] lengthData = [] counter = 0 length_lists = [] hostTaxidList = [] taxidList = [] folders = [] dbObject = ete.NCBITaxa() for folder in os.listdir('all_faa'): #these entries had problems with their gene overlap, protein, and noncoding annotation, need to be excluded if folder not in badFolders and folder not in sats : length_lists.append([]) for fileName in os.listdir(os.path.join('all_faa', folder)): with open(os.path.join('all_faa', folder, fileName), "r") as faa_file: proteins = SeqIO.parse(faa_file, 'fasta') for protein in proteins: length_lists[counter].append(float(len(protein.seq))) hostTaxid = getHostTaxidFromGenomeFolder(folder) # viruses with unidentified hosts. Now they are out. 
if hostTaxid and hostTaxid != 11086 and hostTaxid != 10036 and hostTaxid != 37965 and hostTaxid != 10640 and hostTaxid != 32644 and hostTaxid != 1 and hostTaxid != 212035 and hostTaxid != 1051671: taxidList.append(getTaxidFromGenomeFolder(folder)) hostTaxidList.append(hostTaxid) genomeLengths.append(getGenomeLengthFromGenomeFolder(folder)) folders.append(folder) counter += 1 else: length_lists = length_lists[:-1] taxidLineages = [dbObject.get_lineage(t) for t in taxidList] englishLineages = [[str(rank) for rank in dbObject.translate_to_names(lin)] for lin in taxidLineages] hostTaxidLineages = [dbObject.get_lineage(t) for t in hostTaxidList] hostEnglishLineages = [[str(rank) for rank in dbObject.translate_to_names(lin)] for lin in hostTaxidLineages] for i in range(len(length_lists)): lengthData.append({'length_list' : length_lists[i], 'taxid' : taxidList[i], 'hostTaxid' : hostTaxidList[i], 'englishLineage' : englishLineages[i], 'taxidLineage' : taxidLineages[i], 'hostEnglishLineage' : hostEnglishLineages[i], 'hostTaxidLineage' : hostTaxidLineages[i], 'mean' : np.mean(length_lists[i]), 'std' : np.std(length_lists[i]), 'median' : np.median(length_lists[i]), 'numProteins' : len(length_lists[i]), 'genomeLength' : genomeLengths[i], 'folder' : folders[i]}) lengthData = [virus for virus in lengthData if virus['genomeLength'] != -1] ``` ###Virus-host histogram for bacterial viruses ``` #unfortunately, the last entry in the host lineage is a combination of host species, strains, and sometimes genus names. #Hence, I had to clean up the list by hand for bacteria and had to exclude these viruses from this analysis. 
#these are the viruses (badEntries) whose hosts are not identified all the way down to species badEntries=['Streptomyces','Phormidium','Pseudomonas','Spiroplasma','Listeria','Salmonella','Nitrincola', 'Dickeya','Marinomonas','Enterobacteriaceae','Lactobacillus','Roseobacter','Vibrio','Nonlabens', 'Lactococcus','Escherichia','Mycobacterium','Parvimonas','Pseudoalteromonas','Escherichia coli O157'] allViruses= [virus['hostEnglishLineage'][-1] for virus in lengthData if "Bacteria" in virus['hostEnglishLineage'] and virus['hostEnglishLineage'][-1] not in badEntries] new=[[x, allViruses.count(x)] for x in set(allViruses)] outliers = [x for x in new if x[1]>10] print('the outliers are: ', outliers) allcounts = [p[1] for p in new] countsAndFreqs =[[x, allcounts.count(x)] for x in set(allcounts)] counts = [x[0] for x in countsAndFreqs] freqs = [x[1] for x in countsAndFreqs] plt.bar(counts, freqs, color='midnightBlue', linewidth=0) plt.tick_params(axis='both', which='both', labelsize=18, labelcolor = 'dimgrey', color='dimgrey') plt.xlabel('Number of viruses per host species', fontsize = 18, color='dimgrey') plt.ylabel('Frequency (number of host species)', fontsize = 18, color='dimgrey') #number of hosts for which we know the host species information tot=np.sum([x*y for x,y in zip(counts,freqs)]) #calculating the fraction of unique host species associated with a virome, len(new) gives the number of unique species #f=len(new)/tot print('total number of bacterial hosts:', tot) #print('fraction of host species with only one known virus', f) #mean and median of the distribution print('median number of viruses associated with a host species', np.median(np.sort(allcounts))) print('mean number of viruses associated with a host species', np.mean(allcounts)) plt.xlim(0,20) #just added to be able to perserve text when taking the plot image into illustrator version 6 plt.savefig('virusHostHistBact-shortRange.pdf') #plt.savefig('virusHostHistBact-LongRange.pdf') 
plt.rcParams['pdf.fonttype'] = 42 plt.show() ``` ###Exploring the viruses associated biased host species Hosts with >20 known viruses ``` x = [virus['englishLineage'] for virus in lengthData if 'Staphylococcus aureus' in virus['hostEnglishLineage']] #print(x) z = [virus['englishLineage'] for virus in lengthData if 'Solanum lycopersicum' in virus['hostEnglishLineage']] #print(z) y = [virus['englishLineage'] for virus in lengthData if 'Escherichia coli' in virus['hostEnglishLineage']] #print(y) ``` ###Virus-host histogram for archaeal viruses ``` #Similar to the bacterial case, some viral entries had to be excluded due to incomplete host lineage information. badEntries=['Sulfolobus','Acidianus','Halorubrum'] allViruses= [virus['hostEnglishLineage'][-1] for virus in lengthData if "Archaea" in virus['hostEnglishLineage'] and virus['hostEnglishLineage'][-1] not in badEntries] newArch=[[x, allViruses.count(x)] for x in set(allViruses)] allcounts = [p[1] for p in newArch] countsAndFreqs =[[x, allcounts.count(x)] for x in set(allcounts)] counts = [x[0] for x in countsAndFreqs] freqs = [x[1] for x in countsAndFreqs] plt.bar(counts, freqs, color='midnightBlue', linewidth=0) plt.tick_params(axis='both', which='both', labelsize=18, labelcolor = 'dimgrey', color='dimgrey') plt.xlabel('Number of viruses per host species', fontsize = 18, color='dimgrey') plt.ylabel('Frequency (number of host species)', fontsize = 18, color='dimgrey') #number of hosts for which we know the host species information tot=np.sum([x*y for x,y in zip(counts,freqs)]) #print(tot) #calculating the fraction of unique host species associated with a virome, len(new) gives the number of unique species, #excluding those hosts with un-identified species level classification #f=len(newArch)/tot #print('fraction of hosts with one known virus: ', f) #mean and median of the distribution print('median: ', str(np.median(np.sort(allcounts)))) print('mean: ', str(np.mean(allcounts))) #plt.xlim(0,20) #just added to be 
able to perserve text when taking the plot image into illustrator version 6 #plt.savefig('virusHostHistArchs-shortRange.pdf') plt.savefig('virusHostHistArch-LongRange.pdf') plt.rcParams['pdf.fonttype'] = 42 plt.show() ``` ###Virus-host histogram for eukaryotic viruses ``` #unfortunately, the last entry in the host lineage is a combination of host species, strains, and sometimes genus names. #Hence, I had to clean up the list by hand for eukaryotes. badEntries=['Wissadula','Dalechampia','Panthera','Equus','Wisteria','Luffa','Rubus','Narcissus','Avena','Apache','Formicidae','Peristrophe','Calibrachoa','Citrus','Iso','Hea','Delphinium','Embryophyta','Melo','Agapanthus','Abutilon','Centrosema','Hibiscus','Zantedeschia','Ara','Channa','Peridroma','Saccharum','Odo','Fragaria','Coelacanthimorpha','Hero','Diuris','Macroptilium','Solanum','Brugmansia','Psorophora','Amoeba','Rheum','Aedes','Pedunculata','Culex','Verbena','Ochlerotatus',"Ostreococcus 'lucimarinus'",'Cercopithecidae','Lutzomyia','Sida','Hemileuca','Meridion','Epinotia','Apodemus','Uranotaenia','Micrantha','Pera','Cervidae','Culicoides','Hylocereus','Allium','Capsicum','Dichopogon','Lonicera','Furcraea','Amblyomma','Rhynchosia','Ixodida','Jatropha','Desmodium','Chaerephon','Acanthamoeba','Bathycoccus','Cleome','Chrysanthemum','Hylomyscus'] allViruses= [virus['hostEnglishLineage'][-1] for virus in lengthData if "Eukaryota" in virus['hostEnglishLineage'] and virus['hostEnglishLineage'][-1] not in badEntries] newEuk=[[x, allViruses.count(x)] for x in set(allViruses)] outliers = [x for x in newEuk if x[1]>20] #print(outliers) allcounts = [p[1] for p in newEuk] countsAndFreqs =[[x, allcounts.count(x)] for x in set(allcounts)] counts = [x[0] for x in countsAndFreqs] freqs = [x[1] for x in countsAndFreqs] plt.bar(counts, freqs, color='midnightBlue', linewidth=0) plt.tick_params(axis='both', which='both', labelsize=18, labelcolor = 'dimgrey', color='dimgrey') plt.xlabel('Number of viruses per host species', fontsize = 
18, color='dimgrey') plt.ylabel('Frequency (number of host species)', fontsize = 18, color='dimgrey') #number of hosts for which we know the host species information tot=np.sum([x*y for x,y in zip(counts,freqs)]) #calculating the fraction of unique host species associated with a virome, len(new) gives the number of unique species, #excluding those hosts with un-identified species level classification #f=len(newEuk)/tot #print(f) #mean and median of the distribution print('total number of bacterial hosts:', tot) #mean and median of the distribution print('median number of viruses associated with a host species', np.median(np.sort(allcounts))) print('mean number of viruses associated with a host species', np.mean(allcounts)) #plt.xlim(0,20) #just added to be able to perserve text when taking the plot image into illustrator version 6 #plt.savefig('virusHostHistEuks-shortRange.pdf') plt.savefig('virusHostHistEuk-LongRange.pdf') plt.rcParams['pdf.fonttype'] = 42 plt.show() #To display the Opisthokonta sub categories and the number of associated viruses aa = [virus['hostEnglishLineage'][6] for virus in lengthData if 'Metazoa' in virus['hostEnglishLineage']] bb =[[x,aa.count(x)] for x in set(aa)] print(bb) cc = [virus['hostEnglishLineage'][6] for virus in lengthData if 'Fungi' in virus['hostEnglishLineage']] dd =[[x,cc.count(x)] for x in set(cc)] print(dd) #To display the Opisthokonta categories and the number of associated viruses aa = [virus['hostEnglishLineage'][5] for virus in lengthData if 'Opisthokonta' in virus['hostEnglishLineage']] bb =[[x,aa.count(x)] for x in set(aa)] print(bb) ```
github_jupyter
def do_parallel_hirise_scope(obsid):
    """Cluster one HiRISE observation at 'hirise' scope.

    Creates (if needed) the per-obsid output folder under the
    overlap-issue results tree, runs the ClusteringManager over the
    whole image name, and returns the manager for inspection.
    """
    from planet4 import clustering, io

    out_dir = io.p4data() / ('overlap_issue_GH45/hirise_level/' + obsid)
    # Parent folders may not exist yet on a fresh checkout.
    out_dir.mkdir(exist_ok=True, parents=True)
    manager = clustering.ClusteringManager(output_dir=out_dir, scope='hirise')
    manager.cluster_image_name(obsid)
    return manager
class Comparer(object):
    """Count clustering output rows (fans/blotches/fnotches) in one result folder.

    Parameters
    ----------
    path : str
        Sub-folder (usually an obsid) below ``root`` holding the
        ``*_fans.csv``, ``*_blotches.csv`` and ``*_fnotches.csv`` files.
    fnotched : bool
        If False, look inside the 'just_clustering' sub-folder instead of
        the fnotched results.
    """
    markings = ['fans', 'blotches', 'fnotches']
    # NOTE: evaluated at class-definition time, so planet4's `io` module
    # must be importable when this cell runs.
    root = io.p4data() / 'overlap_issue_GH45'

    def __init__(self, path, fnotched=True):
        if fnotched is True:
            self.path = self.root / path
        else:
            self.path = self.root / path / 'just_clustering'

    @property
    def blotches_iter(self):
        return self.path.glob("*_blotches.csv")

    @property
    def fans_iter(self):
        return self.path.glob("*_fans.csv")

    @property
    def fnotches_iter(self):
        return self.path.glob('*_fnotches.csv')

    def get_counts(self, theiter):
        """Total number of CSV rows over all files yielded by `theiter`."""
        return sum(len(pd.read_csv(str(f))) for f in theiter)

    @property
    def n_blotches(self):
        return self.get_counts(self.blotches_iter)

    @property
    def n_fans(self):
        return self.get_counts(self.fans_iter)

    @property
    def n_fnotches(self):
        # BUG FIX: previously called the non-existent self.get_length().
        return self.get_counts(self.fnotches_iter)

    # Backward-compatible aliases: notebook code elsewhere accesses
    # `<marking>_lengths` attributes via getattr(), which previously
    # raised AttributeError.
    @property
    def blotches_lengths(self):
        return self.n_blotches

    @property
    def fans_lengths(self):
        return self.n_fans

    @property
    def fnotches_lengths(self):
        return self.n_fnotches

    @property
    def n_total(self):
        # BUG FIX: previously summed undefined *_lengths attributes.
        return self.n_blotches + self.n_fans + self.n_fnotches

    def __repr__(self):
        s = "Blotches: {}\n".format(self.n_blotches)
        s += "Fan: {}\n".format(self.n_fans)
        s += "Fnotches: {}\n".format(self.n_fnotches)
        s += "Total: {}".format(self.n_total)
        return s

    def __str__(self):
        return self.__repr__()

    def read_dataframes(self, marking, as_df=True):
        """Read all '*_<marking>.csv' files.

        Returns one concatenated DataFrame if `as_df`, else the list of
        per-file DataFrames.
        """
        res = [pd.read_csv(str(p))
               for p in self.path.glob('*_{}.csv'.format(marking))]
        return pd.concat(res) if as_df else res

    def read_combined_df(self):
        """One DataFrame with all fans, blotches and fnotches combined."""
        # BUG FIX: previously called bare read_dataframes() without self.
        combine_all = [self.read_dataframes(marking)
                       for marking in self.markings]
        return pd.concat(combine_all)
P4Comparer(obsid).read_dataframes('blotches') blotches.head() from sklearn.metrics.pairwise import pairwise_distances as pdist arr = np.array([[100,100,5],[101,101,6],[102, 101, 10]]) arr distances = pdist(arr) distances indices = np.triu_indices_from(distances, k=1) distances[indices] indices a = [100, 100, 10, 20, 45] b = [101, 101, 11, 21, 49.5] pdist(np.array([a,b])) res = pdist(blotches[['image_x','image_y', 'radius_1', 'radius_2', 'angle']]) indices = np.triu_indices_from(res, k=1) upper = res[indices] upper for i in range(1,6): print(i, upper[upper<i].shape) import seaborn as sns %matplotlib inline sns.jointplot(x='image_x', y='image_y', kind='hex', data=blotches) all_combined = read_combined_df(path) 840*648 - (640*448) _/(840*648) _/__ all_combined.info() p = io.p4data() / 'overlap_issue_GH45/p4_level_p4_coords/applied_cut_0.5' get_total_survivors(p) ```
github_jupyter
## This notebook describes how to implement distributed tensorflow code. Content of this notebook is shown below. 0. Prepare CIFAR-10 Dataset (TFRecords Format) 1. Define parameters 2. Define data input pipeline 3. Define features 4. Define a model 5. Define serving function 6. Train, evaluate and export a model 7. Evaluate with Estimator 8. Prediction with Exported Model 8. Distributed Training with Cloud ML Engine ## 1. Prepare CIFAR-10 Dataset (TFRecords Format) ``` import cPickle import os import re import shutil import tarfile import tensorflow as tf print(tf.__version__) CIFAR_FILENAME = 'cifar-10-python.tar.gz' CIFAR_DOWNLOAD_URL = 'http://www.cs.toronto.edu/~kriz/' + CIFAR_FILENAME CIFAR_LOCAL_FOLDER = 'cifar-10-batches-py' def _download_and_extract(data_dir): tf.contrib.learn.datasets.base.maybe_download(CIFAR_FILENAME, data_dir, CIFAR_DOWNLOAD_URL) tarfile.open(os.path.join(data_dir, CIFAR_FILENAME), 'r:gz').extractall(data_dir) def _get_file_names(): """Returns the file names expected to exist in the input_dir.""" file_names = {} file_names['train'] = ['data_batch_%d' % i for i in xrange(1, 5)] file_names['validation'] = ['data_batch_5'] file_names['eval'] = ['test_batch'] return file_names def _read_pickle_from_file(filename): with tf.gfile.Open(filename, 'r') as f: data_dict = cPickle.load(f) return data_dict def _convert_to_tfrecord(input_files, output_file): """Converts a file to TFRecords.""" print('Generating %s' % output_file) with tf.python_io.TFRecordWriter(output_file) as record_writer: for input_file in input_files: data_dict = _read_pickle_from_file(input_file) data = data_dict['data'] labels = data_dict['labels'] num_entries_in_batch = len(labels) for i in range(num_entries_in_batch): example = tf.train.Example(features=tf.train.Features( feature={ 'image': _bytes_feature(data[i].tobytes()), 'label': _int64_feature(labels[i]) })) record_writer.write(example.SerializeToString()) def _int64_feature(value): return 
tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)])) def create_tfrecords_files(data_dir='cifar-10'): _download_and_extract(data_dir) file_names = _get_file_names() input_dir = os.path.join(data_dir, CIFAR_LOCAL_FOLDER) for mode, files in file_names.items(): input_files = [os.path.join(input_dir, f) for f in files] output_file = os.path.join(data_dir, mode+'.tfrecords') try: os.remove(output_file) except OSError: pass # Convert to tf.train.Example and write to TFRecords. _convert_to_tfrecord(input_files, output_file) create_tfrecords_files() ``` ## 2. Define parameters ``` class FLAGS(): pass FLAGS.batch_size = 200 FLAGS.max_steps = 1000 FLAGS.eval_steps = 100 FLAGS.save_checkpoints_steps = 100 FLAGS.tf_random_seed = 19851211 FLAGS.model_name = 'cnn-model-02' FLAGS.use_checkpoint = False IMAGE_HEIGHT = 32 IMAGE_WIDTH = 32 IMAGE_DEPTH = 3 NUM_CLASSES = 10 ``` ## 3. Define data input pipeline ``` def parse_record(serialized_example): features = tf.parse_single_example( serialized_example, features={ 'image': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64), }) image = tf.decode_raw(features['image'], tf.uint8) image.set_shape([IMAGE_DEPTH * IMAGE_HEIGHT * IMAGE_WIDTH]) image = tf.reshape(image, [IMAGE_DEPTH, IMAGE_HEIGHT, IMAGE_WIDTH]) image = tf.cast(tf.transpose(image, [1, 2, 0]), tf.float32) label = tf.cast(features['label'], tf.int32) label = tf.one_hot(label, NUM_CLASSES) return image, label def preprocess_image(image, is_training=False): """Preprocess a single image of layout [height, width, depth].""" if is_training: # Resize the image to add four extra pixels on each side. image = tf.image.resize_image_with_crop_or_pad( image, IMAGE_HEIGHT + 8, IMAGE_WIDTH + 8) # Randomly crop a [_HEIGHT, _WIDTH] section of the image. 
image = tf.random_crop(image, [IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH]) # Randomly flip the image horizontally. image = tf.image.random_flip_left_right(image) # Subtract off the mean and divide by the variance of the pixels. image = tf.image.per_image_standardization(image) return image def generate_input_fn(file_names, mode=tf.estimator.ModeKeys.EVAL, batch_size=1): def _input_fn(): dataset = tf.data.TFRecordDataset(filenames=file_names) is_training = (mode == tf.estimator.ModeKeys.TRAIN) if is_training: buffer_size = batch_size * 2 + 1 dataset = dataset.shuffle(buffer_size=buffer_size) # Transformation dataset = dataset.map(parse_record) dataset = dataset.map( lambda image, label: (preprocess_image(image, is_training), label)) dataset = dataset.repeat() dataset = dataset.batch(batch_size) dataset = dataset.prefetch(2 * batch_size) images, labels = dataset.make_one_shot_iterator().get_next() features = {'images': images} return features, labels return _input_fn ``` ## 4. Define features ``` def get_feature_columns(): feature_columns = { 'images': tf.feature_column.numeric_column('images', (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH)), } return feature_columns feature_columns = get_feature_columns() print("Feature Columns: {}".format(feature_columns)) ``` ## 5. 
def inference(images):
    """Build the CIFAR-10 CNN and return per-class logits.

    Architecture: two conv / pool / local-response-norm stages, two
    fully connected layers, then a linear readout.

    Args:
        images: float32 tensor of shape (batch, 32, 32, 3).

    Returns:
        Unscaled logits tensor of shape (batch, NUM_CLASSES).
    """
    # 1st Convolutional Layer
    conv1 = tf.layers.conv2d(
        inputs=images, filters=64, kernel_size=[5, 5], padding='same',
        activation=tf.nn.relu, name='conv1')
    pool1 = tf.layers.max_pooling2d(
        inputs=conv1, pool_size=[3, 3], strides=2, name='pool1')
    norm1 = tf.nn.lrn(
        pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

    # 2nd Convolutional Layer (norm before pool, mirroring the TF CIFAR example)
    conv2 = tf.layers.conv2d(
        inputs=norm1, filters=64, kernel_size=[5, 5], padding='same',
        activation=tf.nn.relu, name='conv2')
    norm2 = tf.nn.lrn(
        conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
    pool2 = tf.layers.max_pooling2d(
        inputs=norm2, pool_size=[3, 3], strides=2, name='pool2')

    # Flatten Layer
    shape = pool2.get_shape()
    pool2_ = tf.reshape(pool2, [-1, shape[1] * shape[2] * shape[3]])

    # 1st Fully Connected Layer
    dense1 = tf.layers.dense(
        inputs=pool2_, units=384, activation=tf.nn.relu, name='dense1')

    # 2nd Fully Connected Layer
    dense2 = tf.layers.dense(
        inputs=dense1, units=192, activation=tf.nn.relu, name='dense2')

    # 3rd Fully Connected Layer (Logits)
    # BUG FIX: the readout previously used activation=tf.nn.relu, which
    # clamps all logits to >= 0.  tf.losses.softmax_cross_entropy expects
    # unscaled (linear) logits, so the final layer must have no activation.
    logits = tf.layers.dense(
        inputs=dense2, units=NUM_CLASSES, activation=None, name='logits')

    return logits
def serving_input_fn():
    """Serving-time input receiver: raw image batches -> model features.

    Accepts a float32 placeholder of raw images and applies the same
    per-image preprocessing used during training/evaluation.
    """
    raw_images = tf.placeholder(
        dtype=tf.float32,
        shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH])
    receiver_tensor = {'images': raw_images}
    # Standardize each image exactly as the input pipeline does.
    standardized = tf.map_fn(preprocess_image, raw_images)
    features = {'images': standardized}
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensor)
Train, evaluate and export a model ``` model_dir = 'trained_models/{}'.format(FLAGS.model_name) train_data_files = ['cifar-10/train.tfrecords'] valid_data_files = ['cifar-10/validation.tfrecords'] test_data_files = ['cifar-10/eval.tfrecords'] run_config = tf.estimator.RunConfig( save_checkpoints_steps=FLAGS.save_checkpoints_steps, tf_random_seed=FLAGS.tf_random_seed, model_dir=model_dir ) estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config) # There is another Exporter named FinalExporter exporter = tf.estimator.LatestExporter( name='Servo', serving_input_receiver_fn=serving_input_fn, assets_extra=None, as_text=False, exports_to_keep=5) train_spec = tf.estimator.TrainSpec( input_fn=generate_input_fn(file_names=train_data_files, mode=tf.estimator.ModeKeys.TRAIN, batch_size=FLAGS.batch_size), max_steps=FLAGS.max_steps) eval_spec = tf.estimator.EvalSpec( input_fn=generate_input_fn(file_names=valid_data_files, mode=tf.estimator.ModeKeys.EVAL, batch_size=FLAGS.batch_size), steps=FLAGS.eval_steps, exporters=exporter) if not FLAGS.use_checkpoint: print("Removing previous artifacts...") shutil.rmtree(model_dir, ignore_errors=True) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) ``` ## 8. Evaluation with Estimator ``` test_input_fn = generate_input_fn(file_names=test_data_files, mode=tf.estimator.ModeKeys.EVAL, batch_size=1000) estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config) print(estimator.evaluate(input_fn=test_input_fn, steps=1)) ``` ## 9. 
Prediction with Exported Model ``` export_dir = model_dir + '/export/Servo/' saved_model_dir = os.path.join(export_dir, os.listdir(export_dir)[-1]) predictor_fn = tf.contrib.predictor.from_saved_model( export_dir = saved_model_dir, signature_def_key='predictions') import numpy data_dict = _read_pickle_from_file('cifar-10/cifar-10-batches-py/test_batch') N = 1000 images = data_dict['data'][:N].reshape([N, 3, 32, 32]).transpose([0, 2, 3, 1]) labels = data_dict['labels'][:N] output = predictor_fn({'images': images}) accuracy = numpy.sum( [ans==ret for ans, ret in zip(labels, output['classes'])]) / float(N) print(accuracy) ``` ## 10. Distributed Training with Cloud ML Engine ### a. Set environments ``` import os PROJECT = 'YOUR-PROJECT-ID' # REPLACE WITH YOUR PROJECT ID BUCKET = 'YOUR-BUCKET-NAME' # REPLACE WITH YOUR BUCKET NAME REGION = 'BUCKET-REGION' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1 os.environ['PROJECT'] = PROJECT os.environ['BUCKET'] = BUCKET os.environ['REGION'] = REGION %%bash gcloud config set project $PROJECT gcloud config set compute/region $REGION ``` ### b. Set permission to BUCKET (NOTE: Create bucket beforehand) ``` %%bash PROJECT_ID=$PROJECT AUTH_TOKEN=$(gcloud auth print-access-token) SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \ -H "Authorization: Bearer $AUTH_TOKEN" \ https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \ | python -c "import json; import sys; response = json.load(sys.stdin); \ print response['serviceAccount']") echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET" gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET ``` ### c. Copy TFRecords files to GCS BUCKET ``` %%bash echo ${BUCKET} gsutil -m rm -rf gs://${BUCKET}/cifar-10 gsutil -m cp cifar-10/*.tfrecords gs://${BUCKET}/cifar-10 ``` ### d. 
Run distributed training with Cloud MLE ``` %%bash OUTDIR=gs://$BUCKET/trained_models_3cpu JOBNAME=sm_$(date -u +%y%m%d_%H%M%S) echo $OUTDIR $REGION $JOBNAME gsutil -m rm -rf $OUTDIR gcloud ml-engine jobs submit training $JOBNAME \ --region=$REGION \ --module-name=cnn-model-02.task \ --package-path="$(pwd)/trainer/cnn-model-02" \ --job-dir=$OUTDIR \ --staging-bucket=gs://$BUCKET \ --config=config_3cpu.yaml \ --runtime-version=1.4 \ -- \ --bucket_name=$BUCKET \ --train_data_pattern=cifar-10/train*.tfrecords \ --eval_data_pattern=cifar-10/eval*.tfrecords \ --output_dir=$OUTDIR \ --max_steps=10000 %%bash OUTDIR=gs://$BUCKET/trained_models_3gpu JOBNAME=sm_$(date -u +%y%m%d_%H%M%S) echo $OUTDIR $REGION $JOBNAME gsutil -m rm -rf $OUTDIR gcloud ml-engine jobs submit training $JOBNAME \ --region=$REGION \ --module-name=cnn-model-02.task \ --package-path="$(pwd)/trainer/cnn-model-02" \ --job-dir=$OUTDIR \ --staging-bucket=gs://$BUCKET \ --config=config_3gpu.yaml \ --runtime-version=1.4 \ -- \ --bucket_name=$BUCKET \ --train_data_pattern=cifar-10/train*.tfrecords \ --eval_data_pattern=cifar-10/eval*.tfrecords \ --output_dir=$OUTDIR \ --max_steps=10000 ```
github_jupyter
def P(L):
    """Perimeter of a rectangle of area 16 m^2 with one side of length L.

    P(L) = (2*L^2 + 32) / L, defined for L != 0.
    """
    numerator = 2 * L**2 + 32
    return numerator / L
def PPrime(L):
    """First derivative of the perimeter P(L) = (2L^2 + 32)/L.

    Quotient rule: (L*(4L) - (2L^2 + 32)*1) / L^2 = (2L^2 - 32) / L^2.

    BUG FIX: the original used '+' where the quotient rule requires '-',
    giving (6L^2 + 32)/L^2 -- an expression that is never zero, even
    though P visibly attains its minimum at L = 4 (the square), where
    the true derivative vanishes.
    """
    return (2 * L**2 - 32) / L**2
def PDoublePrime(L):
    """Second derivative of the perimeter P(L) = (2L^2 + 32)/L.

    Since P(L) = 2L + 32/L, P'(L) = 2 - 32/L^2 and therefore
    P''(L) = 64 / L^3.

    BUG FIX: the original returned (24L^3 + 62L)/L^4, which follows
    from the sign error in the first derivative; the correct second
    derivative is 64/L^3 (positive for L > 0, confirming L = 4 is a
    minimum of P).
    """
    return 64 / L**3
## We must go deeper If we graph the functions over a greater interval, we can see this behaviour more clearly: ``` points = np.arange(-20,20, 0.01) interval = np.arange(0,4, 0.01) axes = plt.gca() axes.set_ylim([-50,50]) plot(points, [P(x) for x in points], c='b', lw=1) plot(points, [PPrime(x) for x in points], c='r', lw=1) plot(points, [PDoublePrime(x) for x in points], c='green', lw=1); plot(interval, np.zeros(len(interval)), c='black', ls='dotted', lw=3) legend(['P(L)', "P'(L)", "P''(L)", '0<L<4']) grid() ``` A cool observation is that $P''(L)$ has a horizontal asymptote on the horizontal axis because the degree of the numerator is $3$ and the degree of the denominator is $4$ (the largest power is in the denominator). Because of that the slope of $P'(L)$ will decrease at an infinitely small rate, and we can see that the horizontal asymptote of $P'(L)$ is at $6$ because we have $6L^2$ in the numerator and $L^2$ in the denominator. This horizontal asymptote means that the value of $P'(L)$ approaches a constant, which affects the slope of $P(L)$ and gives the oblique asymptote of $P(L)$. If we apply long polynomial division to $\dfrac{2L^2+32}{L}$ we will get $2L$ (with remainder $32$), and plotting this with $P(L)$: ``` points = np.arange(-20,20, 0.05) plot(points, [P(x) for x in points], c='b', lw=1) plot(points, [2*x for x in points], c='r', lw=2) axes = plt.gca() axes.set_ylim([-50,50]); ``` which is the oblique asymptote! ## Continuing differentiation The third derivative $\dfrac{d^3P}{dL^3}=\dfrac{168L^6+320L^4}{L^8}$ gives the graph: ``` def PTriplePrime(L): return (168*L**6+320*L**4)/L**8 points = np.arange(-20,20, 0.01) interval = np.arange(0,4, 0.01) axes = plt.gca() axes.set_ylim([-5,100]) plot(points, [PTriplePrime(x) for x in points], c='b') grid() ```
github_jupyter
# 函數與其圖形(Functions and Plotting) ![Creative Commons License](https://i.creativecommons.org/l/by/4.0/88x31.png) This work by Jephian Lin is licensed under a [Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/). _Tested on SageMath version 8.7_ ## 函數 數學中的函數連繫了前因和後果 相同的前因只會有一種後果 但不同的前因可能得到同樣的後果 ### 有限個數的函數與字典(dictionary) 有限個數的函數可以用**字典**(dictionary)結構來記綠 一個字典包含了許多**索引**(key) 許多**值**(value) 以及它們的對應關係 `{key1: value1, key2: value2, ...}` 回顧:列表用的是中括號(brackets)、字典用的是大括號(braces) 若 `d` 是一個字典 可以用 `d[key]` 來得到對應到索引為 `key` 的值 ``` d = {1:'one', 2:'two', 3:'three'} d[1] ``` 若索引有重覆 舊的會被洗掉 ``` d = {1:'one', 1:'first' ,2:'two', 3:'three'} d[1] ``` 但值可以重覆沒關係 ``` d = {1:'number', 2:'number', 3:'number'} d[1] ``` 在函數中 所有可以輸入的索引叫做**定義域**(domain) ``` d = {1:'one', 2:'two', 3:'three'} d.keys() ``` 在函數中 所有可能的輸出值叫做**值域**(range) ``` d = {1:'one', 2:'two', 3:'three'} d.values() ``` ### 實數上的函數與符號表示式 實數上有無窮個點 所以無法用字典來表示實數上的函數 於是我們用一個變數 `x` 來描述輸入和輸出的對應關係 (像是 `x |--> x^2`) Sage 中已經預設 `x` 為一個變數 然而若有須要還是可以重新設定一次 `a = var('b')` 意思是設定一個數學上的變數,稱作 `b` 並將其儲存在電腦裡的 `a` 這個位置 平常沒事兩個名字一樣就好 ``` x = var('x') ``` 利用符號表示式來定義一個函數 ``` f = x^2 f(2) ``` 若使用 `f(x) = x^2` 則可以明確地告訴 Sage 主要變數是哪一個 (`f = x^2` 和 `f(x) = x^2` 兩種用法在單變數時幾乎一樣) ``` f(x) = x^2 f(2) ``` 只有在輸入變數多的時候 有些許的差別 ``` x, y = var('x y') f(x) = x^2 + y^2 f(2) x, y = var('x y') f(y) = x^2 + y^2 f(2) x, y = var('x y') f(x,y) = x^2 + y^2 f(3,4) ``` 數學中的函數 是可以相加、相乘、或是合成 ``` f(x) = x^2 g(x) = x+1 f + g f(x) = x^2 g(x) = x+1 f * g f(x) = x^2 g(x) = x+1 f(g(x)) ``` ### 程式中的函數與 lambda 運算式 程式中的函數(又叫作函式) 只在意輸入和輸出 中間的關係不見得一定要用變數來表示 `lambda` 運算式可以輕鬆地定義一個程式函數 `lambda input: output` 舉例來說, `f = lambda k: k^2` 和 ```Python def f(k): return k^2 ``` 是一樣的 用 `lambda` 運算式較為簡潔 而且不一定要為函數命名 ``` f = lambda k: k^2 f(3) ``` 程式中的函數並沒有常用的加法 ``` f = lambda k: k^2 f + f ``` 函數可以**被呼叫**(callable) 但無法用索引找尋對應值 ``` f = lambda k: k^2 f(3) f = lambda k: k^2 f[3] ``` 字典及列表可以使用索引 但無法被呼叫 ``` d = {1:'one', 2:'two', 3:'three'} d[1] d = {1:'one', 
2:'two', 3:'three'} d(1) ``` 有須要的話 字典和函數可以互相轉換 ``` d = {1:'one', 2:'two', 3:'three'} f = lambda k: d[k] f(1) f = lambda k: k^2 d = {k: k^2 for k in range(1,4)} d[3] ``` ### 三角函數 常用的三角函數有 $\sin$、$\cos$、以及 $\tan$ ~~而且通常人們不記得它們的中文叫什麼~~ Sage 會儘量回傳確切的函數值 ``` angles = [0, 1/6*pi, 1/4*pi, 2/6*pi, 1/2*pi] for theta in angles: print('sin', theta, 'equals', sin(theta)) angles = [0, 1/6*pi, 1/4*pi, 2/6*pi, 1/2*pi] for theta in angles: print('cos', theta, 'equals', cos(theta)) angles = [0, 1/6*pi, 1/4*pi, 2/6*pi] ### tan is not defined on 1/2*pi for theta in angles: print('tan', theta, 'equals', tan(theta)) ``` 用 `N` 函數來得到小數的逼近值 ``` sin(1) N(sin(1)) ``` ### 棣美弗公式(De Moivre's formula) 對任意角度 $\theta$ 及整數 $n$ $(\cos\theta + i\sin\theta)^n = \cos(n\theta) + i\sin(n\theta)$ 在 Sage 中用 `I` 代表虛數 $i$ ``` I I^2 ``` 感覺一下棣美弗公式 令 $\theta = \frac{\pi}{4}$ 且 $n=4$ ``` z = cos(1/4*pi) + I*sin(1/4*pi) z ``` $(\cos\theta + i\sin\theta)^n$ ``` z^4 ``` $\cos(n\theta) + i\sin(n\theta)$ ``` cos(pi) + I*sin(pi) ``` ## 二維座標上的圖形 由於實數上的函數 一次描述了許多點和點之間的關係 通常會在二維座標系上 記錄所有的 $(x,y)$ 其中 $y=f(x)$ ``` f = x^2 f.plot() ``` `plot` 函數有很多參數可以調整 像是 `xmin`、`xmax`、`ymin`、`ymax` 等等 可以參考 [Sage 說明書](http://doc.sagemath.org/html/en/reference/plotting/sage/plot/plot.html#sage.plot.plot.plot) ``` f = x^2 f.plot(xmin=-0.1, xmax=0.1, ymin=-0.1, ymax=0.1) ``` 兩個圖形可以相加 得到的就是將兩個圖形疊在一起 ``` f = x^2 g = x^3 f.plot() + g.plot() ``` 每個圖形可以用 `color` 參數調整顏色 也可以用 `legend_label` 參數來設定圖例的文字 這裡 `$x^2$` 是 $\TeX$ 語言的與法 $\TeX$ 是一套強大的排版系統 近代幾乎所有的數學論文都是由 $\TeX$ 排版完成的 ``` f = x^2 g = x^3 f.plot(color='blue', legend_label='$x^2$') + g.plot(color='red', legend_label='$x^3$') ``` 也可以用 `linestyle` 參數來調整線的格式 ``` f = sin(x) g = cos(x) pic1 = f.plot(xmin=-pi, xmax=pi, color='blue', linestyle='--', legend_label='$\sin(x)$') pic2 = g.plot(xmin=-pi, xmax=pi, color='red', linestyle=':', legend_label='$\cos(x)$') pic1 + pic2 ``` ## 動手試試看 ##### 練習 定義一個字典 `months` 將 `1` 到 `12` 對應到 `'January'` 到 `'December'`。 ``` ### your answer here ``` ##### 練習 上一個練習中 
`months` 的定義域和值域分別是是什麼。 ``` ### your answer here ``` ##### 練習 將 $y$ 設為一個變數, 定義 $f(y) = y^5 + 2y^4 + 3y^3 + 4y^2 + 5y +6$, 並計算 $f(3)$ 的函數值。 ``` ### your answer here ``` ##### 練習 定義 $f(x) = \frac{x^2-1}{x-1}$。 並計算 $f(1.1)$、$f(1.001)$、以及 $f(1.00001)$。 而 $f(1)$ 又是多少。 ``` ### your answer here ``` ##### 練習 定義 $f(x) = \frac{1}{1-x}$ 及 $g(x) = \frac{x-1}{x}$。 計算 $f(g(x))$。 (利用 `simplify(expand(the_function))` 將函數化到最簡。) ``` ### your answer here ``` ##### 練習 用 `lambda` 運算式 定義一個函數其功能為: 輸入 `x`, 輸出 `x` 的絕對值 `abs(x)`。 ``` ### your answer here ``` ##### 練習 若 `a = [3,-5,2,-4,6,1]` 為一列表。 則 `a.sort()` 可以將 `a` 改為由小到大排列。 而 `sort` 函數裡面還有一個參數叫做 `key`。 若 `key` 被設定為一個函數 `f`, 則 `a.sort(key=f)` 會將 `a` 依照 `f(3)`, `f(-5)`, ... 的順序排列。 試著將 `a` 依照元素的絕對值大小排列。 ``` ### your answer here ``` ##### 練習 用 `lambda` 運算式 定義一個函數其功能為: 輸入一個兩個元素的列表, 輸出這個列表的第一位。 ``` ### your answer here ``` ##### 練習 若 `a = [[3,2], [1,4], [2,3], [4,2], [0,5]]`。 將 `a` 依照元素中的第一個元素排列。 ``` ### your answer here ``` ##### 練習 計算 $\sin 15^\circ$。 ``` ### your answer here ``` ##### 練習 計算 $\cos 18^\circ$。 ``` ### your answer here ``` ##### 練習 計算 $\sin 3$。 (注意這裡 $3$ 是徑度不是度數。) ``` ### your answer here ``` ##### 練習 畫一張圖: 上面有 $x$, $x^2$, $x^3$ 的函數圖形, 設定 $0\leq x\leq 1$ 及 $0\leq y\leq 1$, 其中每個函數圖形用不同顏色表示 並有圖例來說明各個圖形。 ``` ### your answer here ``` ##### 練習 若 `n` 為一個正整數, 則 `rainbow(n)` 會回傳一個含有 `n` 個顏色的列表。 例用 `for` 迴圈 將 $x^1$, $x^2$, ..., $x^{10}$ 都畫在同一張圖上 ($0\leq x\leq 1$ 及 $0\leq y\leq 1$), 並用不同顏色表示。 有辦法將圖例也補上嗎? ``` ### your answer here ```
github_jupyter
``` import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds cifar10_builder = tfds.builder("cifar10") cifar10_builder.download_and_prepare() cifar10_train = cifar10_builder.as_dataset(split="train") cifar10_test = cifar10_builder.as_dataset(split="test") cifar10_train.take(1) class VAE(tf.keras.Model): def __init__(self, input_shape, name='variational_autoencoder', latent_dim=10, hidden_dim=10, encoder='GaussianMLP', decoder='BernoulliMLP', iaf_model=None, number_iaf_networks=0, iaf_params={}, num_samples=100, **kwargs): super().__init__(name=name, **kwargs) self._latent_dim = latent_dim self._num_samples = num_samples self._iaf = [] if encoder == 'GaussianMLP': self._encoder = GaussianMLP(input_shape=input_shape, latent_dim=latent_dim, iaf=(iaf_model is not None), hidden_dim=hidden_dim) else: raise ValueError("Unknown encoder type: {}".format(encoder)) if decoder == 'BernoulliMLP': self._decoder = BernoulliMLP(input_shape=(1,latent_dim), latent_dim=input_shape[1], hidden_dim=hidden_dim) elif decoder == 'GaussianMLP': self._encoder = GaussianMLP(input_shape=(1,latent_dim), latent_dim=input_shape[1], iaf=(iaf_model is not None), hidden_dim=hidden_dim) else: raise ValueError("Unknown decoder type: {}".format(decoder)) if iaf_model: self._iaf = [] for t in range(number_iaf_networks): self._iaf.append( iaf_model(input_shape==(1,latent_dim*2), **iaf_params) ) @tf.function def sample(self, eps=None): if eps is None: eps = tf.random.normal(shape=(self._num_samples, self.latent_dim)) return self._decoder.call(eps, apply_sigmoid=False) def encode(self, x): return self._encoder.call(x) def decode(self, z, apply_sigmoid=False): logits, _, _ = self._decoder.call(z) if apply_sigmoid: probs = tf.sigmoid(logits) return probs return logits def reparameterize(self, mean, logvar): eps = tf.random.normal(shape=mean.shape) return eps * tf.exp(logvar * .5) + mean @property def iaf(self): return self._iaf #model = VAE(input_shape=(1,3072)) model = VAE(input_shape=(1,3072), 
hidden_dim=500, latent_dim=500, iaf_model=GaussianMLP, number_iaf_networks=3, iaf_params={'latent_dim': 500, 'hidden_dim': 500, 'iaf': False}) def log_normal_pdf(sample, mean, logvar, raxis=1): log2pi = tf.math.log(2. * np.pi) return tf.reduce_sum( -.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi), axis=raxis) @tf.function def compute_loss(model, x): mean, logvar, h = model.encode(x) z = model.reparameterize(mean, logvar) logqz_x = log_normal_pdf(z, mean, logvar) for iaf_model in model.iaf: mean, logvar, _ = iaf_model.call(tf.concat([z, h], 2)) s = tf.sigmoid(logvar) z = tf.add(tf.math.multiply(z,s), tf.math.multiply(mean,(1-s))) logqz_x -= tf.reduce_sum(tf.math.log(s)) x_logit = model.decode(z) cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x) logpx_z = -tf.reduce_sum(cross_ent, axis=[2]) logpz = log_normal_pdf(z, 0., 0.) return -tf.reduce_mean(logpx_z + logpz - logqz_x) @tf.function def compute_apply_gradients(model, x, optimizer): with tf.GradientTape() as tape: loss = compute_loss(model, x) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) import time as time epochs = 100 optimizer = tf.keras.optimizers.Adam(1e-4) for epoch in range(1, epochs + 1): start_time = time.time() for train_x in cifar10_train.map( lambda x: flatten_image(x, label=False)).batch(32): compute_apply_gradients(model, train_x, optimizer) end_time = time.time() if epoch % 1 == 0: loss = tf.keras.metrics.Mean() for test_x in cifar10_test.map( lambda x: flatten_image(x, label=False)).batch(32): loss(compute_loss(model, test_x)) elbo = -loss.result() print('Epoch: {}, Test set ELBO: {}, ' 'time elapse for current epoch {}'.format(epoch, elbo, end_time - start_time)) for sample in cifar10_train.map(lambda x: flatten_image(x, label=False)).batch(1).take(3): mean, logvar, h = model.encode(sample) z = model.reparameterize(mean, logvar) for iaf_model in model.iaf: mean, logvar, _ 
= iaf_model.call(tf.concat([z, h], 2)) s = tf.sigmoid(logvar) z = tf.add(tf.math.multiply(z,s), tf.math.multiply(mean,(1-s))) plt.figure(0) plt.imshow((sample.numpy().reshape(32,32,3)).astype(np.float32), cmap=plt.get_cmap("gray") ) plt.figure(1) plt.imshow((model.decode(z).numpy().reshape(32,32,3)).astype(np.float32), cmap=plt.get_cmap("gray") ) z = np.random.normal(size=(1,500)) h = np.random.normal(size=(1,500)) for iaf_model in model.iaf: mean, logvar, _ = iaf_model.call(tf.concat([z, h], 1)) s = tf.sigmoid(logvar) z = tf.add(tf.math.multiply(z,s), tf.math.multiply(mean,(1-s))) plt.imshow((model.decode(z).numpy().reshape(32,32,3)).astype(np.float32), cmap=plt.get_cmap("gray") ) import tensorflow_probability as tfp class GaussianMLP(tf.keras.Model): def __init__(self, input_shape, name='GaussianMLP', hidden_dim=10, latent_dim=10, iaf=False, **kwargs): super().__init__(name=name, **kwargs) self._h = tf.keras.layers.Dense(hidden_dim, activation='tanh') self._mean = tf.keras.layers.Dense(latent_dim) self._logvar = tf.keras.layers.Dense(latent_dim) self._iaf_output = None if iaf: self._iaf_output = tf.keras.layers.Dense(latent_dim) def call(self, x): if self._iaf_output: return self._mean(self._h(x)), self._logvar(self._h(x)), self._iaf_output(self._h(x)) else: return self._mean(self._h(x)), self._logvar(self._h(x)), None class BernoulliMLP(tf.keras.Model): def __init__(self, input_shape, name='BernoulliMLP', hidden_dim=10, latent_dim=10, **kwargs): super().__init__(name=name, **kwargs) self._h = tf.keras.layers.Dense(hidden_dim, activation='tanh') self._y = tf.keras.layers.Dense(latent_dim, activation='sigmoid') def call(self, x): return self._y(self._h(x)), None, None g = GaussianMLP(input_shape=(1,3072), hidden_dim=10, latent_dim=10) b = BernoulliMLP(input_shape=(1,3072), hidden_dim=10, latent_dim=10) for image in cifar10_train.map(lambda x: flatten_image(x, label=False)).take(10): print(g.call(image)) !pip3 install pydot !pip3 install graphviz import pydot 
import graphviz tf.keras.utils.plot_model(g) def flatten_image(x, label=False): if label: return (tf.divide( tf.dtypes.cast( tf.reshape(x["image"], (1, 32*32*3)), tf.float32), 256.0), x["label"]) else: return ( tf.divide(tf.dtypes.cast( tf.reshape(x["image"], (1, 32*32*3)), tf.float32), 256.0)) from PIL import Image import numpy as np import matplotlib.pyplot as plt for sample in cifar10_train.map(lambda x: flatten_image(x, label=True)).take(1): plt.imshow(sample[0].numpy().reshape(32,32,3).astype(np.float32), cmap=plt.get_cmap("gray") ) print("Label: %d" % sample[1].numpy()) ```
github_jupyter
ERROR: type should be string, got "https://codekansas.github.io/blog/2016/gensim.html\n\n```\nimport os\nimport numpy as np\nimport pandas as pd\n```\n\n## Import data\n\n```\n# load doc into memory\ndef load_doc(filename):\n # open the file as read only \n file = open(filename, 'r')\n # read all text\n text = file.read()\n text = text.splitlines()\n # close the file file.close()\n return text\ndir_path = os.path.realpath('..')\npath = 'data/raw/train.csv'\n\nfull_path = os.path.join(dir_path, path)\ndf_train = pd.read_csv(full_path, header=0, index_col=0)\nprint(\"Dataset has {} rows, {} columns.\".format(*df_train.shape))\npath = 'data/raw/test.csv'\n\ndir_path = os.path.realpath('..')\nfull_path = os.path.join(dir_path, path)\ndf_test = pd.read_csv(full_path, header=0, index_col=0)\nprint(\"Dataset has {} rows, {} columns.\".format(*df_test.shape))\n```\n\npath = 'data/processed/vocab.txt'\n\ndir_path = os.path.realpath('..')\nfull_path = os.path.join(dir_path, path)\nvocab = load_doc(full_path)\n\n```\n# fill NaN with string \"unknown\"\ndf_train.fillna('unknown',inplace=True)\ndf_test.fillna('unknown',inplace=True)\n```\n\n## Load trained w2v model, embeddings and vocab\n\n```\nfrom gensim.models import Word2Vec\nimport json\ndef load_vocab(vocab_path):\n with open(vocab_path, 'r') as f:\n data = json.loads(f.read())\n word2idx = data\n idx2word = dict([(v, k) for k, v in data.items()])\n return word2idx, idx2word\nfrom keras.layers import Embedding\nfrom keras.engine import Input\n\ndef word2vec_embedding_layer(embeddings_path='embeddings.npz'):\n \"\"\"\n Generate an embedding layer word2vec embeddings\n :param embeddings_path: where the embeddings are saved (as a numpy file)\n :return: the generated embedding layer\n \"\"\"\n \n weights = np.load(open(embeddings_path, 'rb'))\n layer = Embedding(input_dim=weights.shape[0],\n output_dim=weights.shape[1],\n weights=[weights])\n return layer\n# load model\nw2v_model = 
Word2Vec.load('w2v_model.bin')\nprint(w2v_model)\nembeddings_path = 'embeddings.npy'\nweights = np.load(open(embeddings_path, 'rb'))\nembedding = w2v_model.wv.get_keras_embedding()\nw2v_vocab = load_vocab('vocab.json')\npath = 'data/processed/vocab.txt'\n\ndir_path = os.path.realpath('..')\nfull_path = os.path.join(dir_path, path)\nvocab = load_doc(full_path)\n```\n\n## Training\n\n```\nfrom numpy import asarray\nfrom numpy import zeros\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Embedding\n%%time\n# prepare tokenizer\nt = Tokenizer()\nt.fit_on_texts(vocab)\nvocab_size = len(t.word_index) + 1\n%%time\n# integer encode the documents\nXtrain = t.texts_to_sequences(df_train.comment_text)\nXtest = t.texts_to_sequences(df_test.comment_text)\n# create a weight matrix for words in training docs\nembedding_matrix = zeros((vocab_size, 100))\nfor word, i in t.word_index.items():\n\tembedding_vector = w2v_vocab[0].get(word)\n\tif embedding_vector is not None:\n\t\tembedding_matrix[i] = embedding_vector\nembedding_matrix[0]\n# pad documents to a max length of 4 words\nmax_length = 4\npadded_train = pad_sequences(Xtrain, maxlen=max_length, padding='post')\npadded_test = pad_sequences(Xtest, maxlen=max_length, padding='post')\n# define model\nmodel = Sequential()\ne = Embedding(vocab_size, 100, weights=[embedding_matrix], input_length=4, trainable=False)\nmodel.add(e)\nmodel.add(Flatten())\nmodel.add(Dense(1, activation='sigmoid'))\n# compile the model\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])\n# summarize the model\nmodel.summary()\ndef save_model(model, model_name):\n # serialize model to JSON\n model_json = model.to_json()\n with open(model_name + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n 
model.save_weights(model_name + \".h5\")\n print(\"Saved model to disk\")\n%%time\n# fit the model\ntarget = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\n\nfor label in target:\n print('... Processing {}'.format(label))\n ytrain = df_train[label]\n model_name = 'learnt_model_' + label\n \n # train the model\n model.fit(padded_train, ytrain, epochs=1, verbose=2)\n \n # save the model\n save_model(model, model_name)\n```\n\n## Prediction\n\n```\nfrom keras.models import model_from_json\ndef load_model(model_name):\n # load json and create model\n json_file = open(model_name+'.json', 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n loaded_model.load_weights(model_name+\".h5\")\n print(\"Loaded model from disk\")\n return loaded_model\n%%time\ntarget = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\nsubmission = pd.DataFrame(index=df_test.index, columns=target)\n\nfor label in target:\n print('... Processing {}'.format(label))\n model_name = 'learnt_model_' + label\n\n # load the model\n loaded_model = load_model(model_name)\n \n y_pred_proba = loaded_model.predict(padded_test, verbose=0, batch_size=1)\n submission[label] = y_pred_proba.flatten()\npath = 'data/submissions/w2v_keras.csv'\n\ndir_path = os.path.realpath('..')\nfull_path = os.path.join(dir_path, path)\n\nsubmission.to_csv(full_path, header=True, index=True)\n```\n\n## Evaluation\n\n```\n# evaluate the model\nloss, accuracy = model.evaluate(padded_docs, labels, verbose=0)\nprint('Accuracy: %f' % (accuracy*100))\n```\n\n"
github_jupyter
``` %matplotlib inline %config InlineBackend.figure_format = 'retina' %gui qt import time time.sleep(5) ``` # Segmentation -------------- ## Separating an image into one or more regions of interest. Everyone has heard or seen Photoshop or a similar graphics editor take a person from one image and place them into another. The first step of doing this is *identifying where that person is in the source image*. In popular culture, the Terminator's vision segments humans out of the overall scene: <img src="../images/terminator-vision.png" width="700px"/> Segmentation is a fundamental operation in scientific image analysis because we often want to measure properties of real, physical *objects* such as cells embedded in our image. As such, we want to find those objects within our image. Computationally, segmentations are most often represented as images, of the same size as the original image, containing integer *labels*, with one value representing one object. Here is a very simple image and segmentation, taken from [this scikit-image tutorial](https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_watershed.html#sphx-glr-auto-examples-segmentation-plot-watershed-py): ``` import numpy as np from scipy import ndimage as ndi import napari from skimage.segmentation import watershed from skimage.feature import peak_local_max # Generate an initial image with two overlapping circles x, y = np.indices((80, 80)) x1, y1, x2, y2 = 28, 28, 44, 52 r1, r2 = 16, 20 mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2 mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2 image = np.logical_or(mask_circle1, mask_circle2) # Now we want to separate the two objects in image # Generate the markers as local maxima of the distance to the background distance = ndi.distance_transform_edt(image) coords = peak_local_max(distance, footprint=np.ones((3, 3)), labels=image) mask = np.zeros(distance.shape, dtype=bool) mask[tuple(coords.T)] = True markers, _ = ndi.label(mask) labels = 
watershed(-distance, markers, mask=image) viewer = napari.Viewer() image_layer = viewer.add_image(image) labels_layer = viewer.add_labels(labels) labels_as_image_layer = viewer.add_image( labels, name='labels as image' ) ``` Notice that "labels" is just a NumPy array with integer values. We have to be careful to interpret it as labels and not as an image. ## Segmenting nuclei and measuring cell properties In the rest of this notebook, we will segment nuclei from a small sample image provided by the Allen Institute for Cell Science. ``` import tifffile nuclei = tifffile.imread('../images/cells.tif') membranes = tifffile.imread('../images/cells_membrane.tif') print("shape: {}".format(nuclei.shape)) print("dtype: {}".format(nuclei.dtype)) print("range: ({}, {})".format(np.min(nuclei), np.max(nuclei))) ``` The pixel spacing in this dataset is 0.29µm in the z (leading!) axis, and 0.26µm in the x and y axes. ``` spacing = np.array([0.29, 0.26, 0.26]) ``` We can view the 3D image using napari. ``` viewer = napari.view_image( nuclei, contrast_limits=[0, 1], scale=spacing, ndisplay=3, ) from napari.utils.notebook_display import nbscreenshot viewer.dims.ndisplay = 3 viewer.camera.angles = (-30, 25, 120) nbscreenshot(viewer) ``` ## Edge detection We saw the [Sobel operator](https://en.wikipedia.org/wiki/Sobel_operator) in the filters lesson. It is an edge detection algorithm that approximates the gradient of the image intensity, and is fast to compute. The [Scharr filter](https://scikit-image.org/docs/dev/api/skimage.filters.html#skimage.filters.scharr) is a slightly more sophisticated version, with smoothing weights [3, 10, 3]. Both work for n-dimensional images in scikit-image. 
``` from skimage import filters edges = filters.scharr(nuclei) nuclei_layer = viewer.layers['nuclei'] nuclei_layer.blending = 'additive' nuclei_layer.colormap = 'green' viewer.add_image( edges, scale=spacing, blending='additive', colormap='magenta', ) nbscreenshot(viewer) ``` ## Thresholding [Thresholding](https://en.wikipedia.org/wiki/Thresholding_%28image_processing%29) is used to create binary images. A threshold value determines the intensity value separating foreground pixels from background pixels. Foreground pixels are pixels brighter than the threshold value, background pixels are darker. In many cases, images can be adequately segmented by thresholding followed by labelling of *connected components*, which is a fancy way of saying "groups of pixels that touch each other". Different thresholding algorithms produce different results. [Otsu's method](https://en.wikipedia.org/wiki/Otsu%27s_method) and [Li's minimum cross entropy threshold](https://scikit-image.org/docs/dev/auto_examples/developers/plot_threshold_li.html) are two common algorithms. Below, we use Li. You can use `skimage.filters.threshold_<TAB>` to find different thresholding methods. ``` denoised = ndi.median_filter(nuclei, size=3) li_thresholded = denoised > filters.threshold_li(denoised) viewer.add_image( li_thresholded, scale=spacing, opacity=0.3, ) nbscreenshot(viewer) ``` ## Morphological operations [Mathematical morphology](https://en.wikipedia.org/wiki/Mathematical_morphology) operations and structuring elements are defined in `skimage.morphology`. Structuring elements are shapes which define areas over which an operation is applied. The response to the filter indicates how well the neighborhood corresponds to the structuring element's shape. There are a number of two and three dimensional structuring elements defined in `skimage.morphology`. Not all 2D structuring elements have a 3D counterpart.
The simplest and most commonly used structuring elements are the `disk`/`ball` and `square`/`cube`. Functions operating on [connected components](https://en.wikipedia.org/wiki/Connected_space) can remove small undesired elements while preserving larger shapes. `skimage.morphology.remove_small_holes` fills holes and `skimage.morphology.remove_small_objects` removes bright regions. Both functions accept a size parameter, which is the minimum size (in pixels) of accepted holes or objects. It's useful in 3D to think in linear dimensions, then cube them. In this case, we remove holes / objects of the same size as a cube 20 pixels across. ``` from skimage import morphology width = 20 remove_holes = morphology.remove_small_holes( li_thresholded, width ** 3 ) width = 20 remove_objects = morphology.remove_small_objects( remove_holes, width ** 3 ) viewer.add_image( remove_objects, name='cleaned', scale=spacing, opacity=0.3, ); viewer.layers['li_thresholded'].visible = False nbscreenshot(viewer) ``` ## Segmentation Now we are ready to label the connected components of this image. ``` from skimage import measure labels = measure.label(remove_objects) viewer.add_labels( labels, scale=spacing, opacity=0.5, ) viewer.layers['cleaned'].visible = False nbscreenshot(viewer) ``` We can see that tightly packed cells connected in the binary image are assigned the same label. A better segmentation would assign different labels to different nuclei. Typically we use [watershed segmentation](https://en.wikipedia.org/wiki/Watershed_%28image_processing%29) for this purpose. We place *markers* at the centre of each object, and these labels are expanded until they meet an edge or an adjacent marker. The trick, then, is how to find these markers. It can be quite challenging to find markers with the right location. A slight amount of noise in the image can result in very wrong point locations. 
Here is a common approach: find the distance from the object boundaries, then place points at the maximal distance. ``` transformed = ndi.distance_transform_edt(remove_objects, sampling=spacing) maxima = morphology.local_maxima(transformed) viewer.add_points( np.transpose(np.nonzero(maxima)), name='bad points', scale=spacing, size=4, n_dimensional=True, # points have 3D "extent" ) nbscreenshot(viewer) ``` You can see that these points are actually terrible, with many markers found within each nuclei. ### <span style="color:cornflowerblue">Exercise: improve the points</span> Try to improve the segmentation to assign one point for each nucleus. Some ideas: - use a smoothed version of the nuclei image directly - smooth the distance map - use morphological operations to smooth the surface of the nuclei to ensure that they are close to spherical - use peak_local_max with `min_distance` parameter instead of `morphology.local_maxima` - find points on a single plane, then prepend the plane index to the found coordinates As you will have seen from the previous exercise, there are many approaches to find better seed points, but they are often fiddly and tedious, and sensitive to parameters — when you encounter a new image, you might have to start all over again! With napari, in many cases, a little interactivity, combined with the segmentation algorithms in scikit-image and elsewhere, can quickly get us the segmentation we want. Below, you can use full manual annotation, or light editing of the points you found automatically. 
``` viewer.layers['bad points'].visible = False viewer.dims.ndisplay = 2 viewer.dims.set_point(0, 30 * spacing[0]) points = viewer.add_points( name='interactive points', scale=spacing, ndim=3, size=4, n_dimensional=True, ) points.mode = 'add' # now, we annotate the centers of the nuclei in your image viewer.dims.ndisplay = 3 viewer.camera.angles = (-30, 25, 120) nbscreenshot(viewer) # unscaled points.data # this cell is only to simulate interactive annotation, # no need to run it if you already have annotations. points.data = np.array( [[ 30. , 14.2598685 , 27.7741219 ], [ 30. , 30.10416663, 81.36513029], [ 30. , 13.32785096, 144.27631406], [ 30. , 46.8804823 , 191.80920846], [ 30. , 43.15241215, 211.84758551], [ 30. , 94.87938547, 160.12061219], [ 30. , 72.97697335, 112.58771779], [ 30. , 138.21820096, 189.01315585], [ 30. , 144.74232372, 242.60416424], [ 30. , 98.14144685, 251.92433962], [ 30. , 153.59649032, 112.58771779], [ 30. , 134.49013081, 40.35635865], [ 30. , 182.95504275, 48.74451649], [ 30. , 216.04166532, 80.89912152], [ 30. , 235.14802483, 130.296051 ], [ 30. , 196.00328827, 169.44078757], [ 30. , 245.86622651, 202.06140137], [ 30. , 213.71162148, 250.52631331], [ 28. , 87.42324517, 52.00657787]], dtype=float, ) ``` Once you have marked all the points, you can grab the data back, and make a markers image for `skimage.segmentation.watershed`: ``` from skimage import segmentation marker_locations = points.data markers = np.zeros(nuclei.shape, dtype=np.uint32) marker_indices = tuple(np.round(marker_locations).astype(int).T) markers[marker_indices] = np.arange(len(marker_locations)) + 1 markers_big = morphology.dilation(markers, morphology.ball(5)) segmented = segmentation.watershed( edges, markers_big, mask=remove_objects, ) viewer.add_labels( segmented, scale=spacing, ) viewer.layers['labels'].visible = False nbscreenshot(viewer) ``` After watershed, we have better disambiguation between internal cells! 
## Making measurements Once we have defined our objects, we can make measurements on them using `skimage.measure.regionprops` and the new `skimage.measure.regionprops_table`. These measurements include features such as area or volume, bounding boxes, and intensity statistics. Before measuring objects, it helps to clear objects from the image border. Measurements should only be collected for objects entirely contained in the image. Given the layer-like structure of our data, we only want to clear the objects touching the sides of the volume, but not the top and bottom, so we pad and crop the volume along the 0th axis to avoid clearing the mitotic nucleus. ``` segmented_padded = np.pad( segmented, ((1, 1), (0, 0), (0, 0)), mode='constant', constant_values=0, ) interior_labels = segmentation.clear_border(segmented_padded)[1:-1] ``` `skimage.measure.regionprops` automatically measures many labeled image features. Optionally, an `intensity_image` can be supplied and intensity features are extracted per object. It's good practice to make measurements on the original image. Not all properties are supported for 3D data. Below we build a list of supported and unsupported 3D measurements. ``` regionprops = measure.regionprops(interior_labels, intensity_image=nuclei) supported = [] unsupported = [] for prop in regionprops[0]: try: regionprops[0][prop] supported.append(prop) except NotImplementedError: unsupported.append(prop) print("Supported properties:") print(" " + "\n ".join(supported)) print() print("Unsupported properties:") print(" " + "\n ".join(unsupported)) ``` scikit-image 0.18 adds support for [passing custom functions](https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops) for region properties as `extra_properties`. After this tutorial, you might want to try it out to [determine the surface area](https://github.com/scikit-image/scikit-image/issues/3797#issuecomment-471277056) of the nuclei or cells! 
`skimage.measure.regionprops` ignores the 0 label, which represents the background. `regionprops_table` returns a dictionary of columns compatible with creating a pandas dataframe of properties of the data: ``` import pandas as pd info_table = pd.DataFrame( measure.regionprops_table( interior_labels, intensity_image=nuclei, properties=['label', 'slice', 'area', 'mean_intensity', 'solidity'], ) ).set_index('label') info_table.head() ``` We can now use pandas and seaborn for some analysis! ``` import seaborn as sns sns.scatterplot(x='area', y='solidity', data=info_table, hue='mean_intensity'); ``` We can see that the mitotic nucleus is a clear outlier from the others in terms of solidity and intensity. ### <span style="color:cornflowerblue">Exercise: physical measurements</span> The "area" property above is actually the volume of the region, measured in voxels. Add a new column to your dataframe, `'area_um3'`, containing the volume in µm&sup3;. ### <span style="color:cornflowerblue">Exercise: full cell segmentation</span> Above, we loaded the membranes image into memory, but we have yet to use it. - Add the membranes to the image. - Use watershed to segment the full cells, and add the segmentation to the display ### <span style="color:cornflowerblue">Exercise: displaying regionprops (or other values)</span> Now that you have segmented cells, (or even with just the nuclei), use [`skimage.util.map_array`](https://scikit-image.org/docs/dev/api/skimage.util.html#skimage.util.map_array) to display a volume of the value of a regionprop (say, 'solidity' of the cells) on top of the segments.
github_jupyter
# Electron MVA Tutorial

Hello, good to see you again! This is the main part of the tutorial, where we inspect and understand the ntuples a bit, before finally training a few BDTs to separate real prompt electrons from fakes.

## Prerequisites

### Install Python packages

We need to install two additional Python packages to run this notebook:

* [uproot](https://github.com/scikit-hep/uproot) to open ROOT files
* [tqdm](https://github.com/tqdm/tqdm) to draw progress bars

The other packages we rely on, in particular [xgboost](https://xgboost.readthedocs.io/en/latest/) for training BDTs, [pandas](https://pandas.pydata.org/) and [sklearn](https://scikit-learn.org/stable/index.html) are already installed in the SWAN environment.

You can execute bash commands within the notebook by putting an exclamation mark `!` in front of them. You can uncomment the following cell and run it to invoke `pip` to install the missing packages:

```
#!pip install --user uproot tqdm
```

After the installations are finished, you have to restart the notebook kernel with the little circular arrow button in the toolbar above. You may now comment out the pip command again so it's not always run when you execute all cells in this notebook.

### Login with your CERN account to get Ntuples from EOS

In the first part of this tutorial, we showed you how to create the Electron MVA ntuples and you produced them yourself for one ROOT file. One file is unfortunately not enough for training the MVA. We processed the whole Run 3 __DY+Jets__ MC sample for you and put the ROOT files into the EOS user space.
To access them, please login to EOS by running the cells below (don't forget to change the username to yours):

```
username = "rembserj"

import getpass
import os

if os.system('klist | grep Default | grep ' + username + '@CERN.CH'):
    os.system('echo %s' % getpass.getpass() + " | kinit " + username)
```

## Electron MVA Ntuple Data

Let's import __uproot__ to open ROOT files, __glob__ to search for files, __pandas__ for DataFrames and __tqdm__ for progress bars.

```
import uproot
import glob
import pandas as pd
from tqdm import tqdm
```

We need a lot of events to train the Electron MVA well, but using the full dataset would take too long in this tutorial. Let's just get the names of __20 files__.

```
n_files = 20

root_files = glob.glob("/eos/user/r/rembserj/ntuples/electron_mva_run3/*.root")[:n_files]
```

You can open a file with `uproot.open` and get the ROOT file directory structure by calling `allkeys`. Like in C++ ROOT, ignore the semicolon and the numbers after it in the names.

```
f = uproot.open(root_files[0])
f.allkeys()
```

There we go, the TTree with the electron data we seek must be `ntuplizer/tree`! With that knowledge we can write a function that takes the path to an Electron MVA Ntuple file and returns a DataFrame with all the data except for some columns we won't use.
``` unneccesary_columns = ['ele_index', 'Spring16GPV1Vals', 'Spring16GPV1RawVals', 'Spring16HZZV1Vals', 'Spring16HZZV1RawVals', 'Fall17NoIsoV2Vals', 'Fall17IsoV2Vals', 'Fall17IsoV1Vals', 'Fall17NoIsoV1Vals', 'Spring16GPV1wp80', 'Spring16GPV1wp90', 'Spring16HZZV1wpLoose', 'Fall17noIsoV2wp80', 'Fall17noIsoV2wpLoose', 'Fall17noIsoV2wp90', 'Fall17isoV2wpHZZ', 'Fall17isoV2wp80', 'Fall17isoV2wpLoose', 'Fall17isoV2wp90', 'Fall17noIsoV1wp90', 'Fall17noIsoV1wp80', 'Fall17noIsoV1wpLoose', 'Fall17isoV1wp90', 'Fall17isoV1wp80', 'Fall17isoV1wpLoose', 'nEvent', 'nRun', 'nLumi'] def get_df(root_file_name): f = uproot.open(root_file_name) if len(f.allkeys()) == 0: return pd.DataFrame() df = uproot.open(root_file_name)["ntuplizer/tree"].pandas.df() return df.drop(unneccesary_columns, axis=1) ``` Let's use this function in a loop to load all files and combine the DataFrames with `pandas.concat`: ``` df = pd.concat((get_df(f) for f in tqdm(root_files)), ignore_index=True) ``` We can check what columns we have in this data (or "branches" if you come from ROOT): ``` df.columns ``` That's a lot of information! Let's not try to understand everything at one, but explain when needed. ## Categorizing Electrons as Signal or Background To say it once more: signal will be reconstructed electrons that match to a true prompt electron, while the background will be all unmatched and non-prompt electrons. Electrons from tau decays should be ignored. The electron ntuplizer stores the category to which the electron belongs in the `matchedToGenEle` branch, and you can check the [ntuplizer source code](https://github.com/cms-sw/cmssw/blob/master/RecoEgamma/ElectronIdentification/plugins/ElectronMVANtuplizer.cc#L139) to learn which number corresponds to which category (starting from `UNMATCHED = 0`). The electrons from taus correspond to `matchedToGenEle = 2`, so let's drop them from the data frame. 
You can use the `query` methods of dataframes to select a subset of the dataframe based on a selection query: ``` df = df.query("matchedToGenEle != 2") ``` Also we don't care about the distinction between non-prompt and unmatched electrons, so let's set both categories to the value zeros. ``` df.loc[df["matchedToGenEle"] != 1, "matchedToGenEle"] = 0 ``` Very good, now `matchedToGenEle = 1` corresponds to signal and `matchedToGenEle = 0` to background. Finally, we drop the electrons that fall outside the detector acceptance beyond $|\eta|$ = 2.5 and throw out the electrons below 10 GeV as they are usually not used. ``` df = df.query("abs(scl_eta) < 2.5") df = df.query("ele_pt >= 10") ``` Here is a little helper function to plot histograms with distributions for signal and background electrons. ``` import matplotlib.pyplot as plt import numpy as np def get_label(name): if name == 0: return "background" else: return "signal" def plot_electrons(df, column, bins, logscale=False, ax=None, title=None): if ax is None: ax = plt.gca() for name, group in df.groupby("matchedToGenEle"): group[column].hist(bins=bins, histtype="step", label=get_label(name), ax=ax, density=True) ax.set_ylabel("density") ax.set_xlabel(column) ax.legend() ax.set_title(title) if logscale: ax.set_yscale("log", nonposy='clip') ``` Let's take a look at the $p_T$ and $\eta$ distributions to see if they look as expected: ``` fig, axes = plt.subplots(1, 2, figsize=(15, 5)) plot_electrons(df, "ele_pt", np.linspace(0, 100, 200), ax=axes[0]) plot_electrons(df, "scl_eta", np.linspace(-2.5, 2.5, 50), ax=axes[1]) plt.show() ``` Seems reasonable! The true prompt electrons have the $p_T$ peak that you expect from Z boson decays, and the fakes have the expected exponential spectrum and are predominantly at high pseudorapidity. It's also interesting to look at the distributions for the MVA input variables (also called "features"). 
The `ele_oldsigmaietaieta` for example represents the spread of the electromagnetic shower and is more peaked and has smaller values for true electrons. Let's look at `ele_oldsigmaietaieta` separately for barrel and endcap:

```
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
plot_electrons(df.query("ele_isEB"), "ele_oldsigmaietaieta", 100, ax=axes[0], title="Barrel")
plot_electrons(df.query("ele_isEE"), "ele_oldsigmaietaieta", 100, ax=axes[1], title="Endcap")
plt.show()
```

Look at the X axis! The values are very different for barrel and endcap, which is the case also for other features. That's why we will have different trainings for barrel and endcap (also called EB and EE for Electromagnetic Barrel/Endcap). In Run 2, we even split up the barrel in two different training regions (so 3 in total), because depending on the inner tracker the material budget is very different for the central Ecal barrel and the outer barrel. Naturally, the material budget influences the electron ID variables a lot.

At this point, feel free to look at other variables from the feature list below! Do you know what they represent? Unfortunately they are not explained in this tutorial, but there are many experts in CMS who are happy to explain them to you.

## Training the Boosted Decision Tree Classifiers

Let's write down a list with the names of the features we will use. There are separate lists for EB and EE, because in the endcap we can use the fraction of the preshower energy as an additional feature. There is no preshower detector in the barrel.
```
features_barrel = ["ele_oldsigmaietaieta", "ele_oldsigmaiphiiphi",
                   "ele_oldcircularity", "ele_oldr9", "ele_scletawidth",
                   "ele_sclphiwidth", "ele_oldhe", "ele_kfhits", "ele_kfchi2",
                   "ele_gsfchi2", "ele_fbrem", "ele_gsfhits",
                   "ele_expected_inner_hits", "ele_conversionVertexFitProbability",
                   "ele_ep", "ele_eelepout", "ele_IoEmIop", "ele_deltaetain",
                   "ele_deltaphiin", "ele_deltaetaseed", "rho", "ele_pfPhotonIso",
                   "ele_pfChargedHadIso", "ele_pfNeutralHadIso"]

features_endcap = features_barrel + ["ele_psEoverEraw"]
```

We got to import some more libraries now. Most importantly, we got to import __XGBoost__, to train gradient boosted decision trees. It is one of the three highly optimized libraries for this task, the others being [LightGBM](https://github.com/Microsoft/LightGBM) and [CatBoost](https://catboost.ai/). Feel free to try them out at some point, they should have a similar interface!

We also import the `train_test_split` function from __sklearn__ to shuffle and split the ntuple in a training and testing subset.

Lastly we import xgboost2tmva, which is included in this repository but was originally spotted [here](https://gist.github.com/andreh7/da90266301ac0204e054ab6f02fc6faa). It's meant to save the XGBoost models in the XML format used by TMVA, which is also the format we use in CMSSW for now.

```
from sklearn.model_selection import train_test_split
import xgboost as xgb
import xgboost2tmva
```

Next, we define the training parameters for XGBoost. Besides `n_boost_rounds`, there are a lot of hyperparameters that can be set, follow [this link](https://xgboost.readthedocs.io/en/latest/parameter.html#parameters-for-tree-booster) for a reference.

We don't change the defaults much. We just want to set `n_boost_rounds` to a relatively small value so the training does not take forever in a initial test run, set `eval_metric` to the Area Under the ROC Curver (AUC) and set the objective to binary classification.
In a final training, you would rather set `n_boost_rounds` to a high number like 1000 and rely on early stopping to stop boosting when appropriate.

If you want, you can try out different hyperparameters later on! There are many resources on the internet about hyperparameter tuning for XGBoost.

```
n_boost_rounds = 10

xgboost_params = {'eval_metric':'auc',
                  #'max_depth' : 4,
                  'objective' :'binary:logitraw'}
```

In Run 2, the MVA was trained in 6 categories. The splitting in three $|\eta|$ regions was already discussed before. Additionally, there are 3 separate trainings for $5 < p_T < 10$ GeV in Run 2. We define a list of category names to reasonably label the plots later. However, we will just do the trainings for $p_T$ > 10 GeV here.

```
category_titles = ["EB1_5", "EB2_5", "EE_5", "EB1_10", "EB2_10", "EE_10"]
```

Now this is where we train the BDTs. Please read the inline comments for further explanations.

```
# The index of the training category (0 to 5) is stored in the "EleMVACats" column.
# We group the data frame by this column, which means in the loop we first have "group_df" where "EleMVACats" == 0, then 1, up to 5.
for i, group_df in df.groupby("EleMVACats"):

    # As said before, let's skip the low-pt trainings
    if i < 3:
        continue

    # get the category name
    category = category_titles[i]

    # get the features (either for endcap or barrel)
    features = features_endcap if "EE" in category else features_barrel

    # get the features and the target from the data frame
    X = group_df[features]
    y = group_df["matchedToGenEle"]

    # split X and y up in train and test samples
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)

    # Fortunately we are dealing with pandas DataFrames here, so we can just get the indices corresponding to the testing and training samples.
    # This will come in handy when we want to figure out which rows in the original dataframe were used for training and testing.
    idx_train = X_train.index
    idx_test = X_test.index

    # XGBoost has its own data format, so we have to create these structures.
    # The copies have no specific purpose other than silencing an xgboost warning.
    dmatrix_train = xgb.DMatrix(X_train.copy(), label=np.copy(y_train))
    dmatrix_test = xgb.DMatrix(X_test.copy(), label=np.copy(y_test ))

    # Get the number of positive and negative training examples in this category
    n_pos = np.sum(y_train == 1)
    n_neg = np.sum(y_train == 0)

    print(category + ":")
    print("training on {0} signal and {1} background electrons".format(n_pos, n_neg))

    # There is one additional hyperparameter that we have to set per category: `scale_pos_weight`.
    # It corresponds to a weight given to every positive sample, and it is usually set to
    # n_neg / n_pos when you have imbalanced datasets to balance the total contributions
    # of the positive and negative classes in the loss function
    xgboost_params["scale_pos_weight"] = 1. * n_neg / n_pos

    # In this line, we actually train the model.
    # Notice the `early_stopping_rounds`, which cause the boosting to automatically stop
    # when the test AUC has not improved for 10 rounds. How does xgboost know what the training set is?
    # You pass it some dmatrices with labels as a list of tuples to the `evals` keyword argument.
    # The last entry in this list will be used for the early stopping criterion, in our case `dmatrix_test`.
    model = xgb.train(xgboost_params, dmatrix_train, num_boost_round=n_boost_rounds,
                      evals= [(dmatrix_train, 'train'), (dmatrix_test, 'test')],
                      early_stopping_rounds=10, verbose_eval=False)

    # We want to know if and when the training was early stopped.
    # `best_iteration` counts the first iteration as zero, so we increment by one.
best_iteration = model.best_iteration + 1 if best_iteration < n_boost_rounds: print("early stopping after {0} boosting rounds".format(best_iteration)) print("") # Just in case we want to later implement the BDTs in CMSSW, we use the xgboost2tmva script # to save the model as a TMVA compatible XML file. # Note that the XML will not be pretty, as everything is printed in one line. # You can use the commandline tool `xmllint` with the `--format` option if you want to make it pretty. xgboost2tmva.convert_model(model.get_dump(), input_variables=[(f,'F') for f in features], output_xml='electron_id_{0}.xml'.format(i)) # If we want to load the model again with the xgboost library, we can't use the XML file. # Hence, we also save the model in xgboosts own binary format just to be sure. model.save_model("electron_id_{0}.bin".format(i)) # Now we see why it's good to have the indices corresponding to the train and test set! # We can now calculate classification scores with our freshly-trained model and store them # in a new column `score` of the original DataFrame at the appropriate places. df.loc[idx_train, "score"] = model.predict(dmatrix_train) df.loc[idx_test, "score"] = model.predict(dmatrix_test) # When we look at how the model performs later, we are mostly interested in the performance on the # test set. We can add another boolean column to indicate whether an electron is in the test set or not. df.loc[idx_train, "test"] = False df.loc[idx_test, "test"] = True ``` ## ROC Curves Time for some ROC curves! 
The sklearn library has a very useful function [`metrics.roc_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html) which you pass the scores and the true labels, and it will return you:

* the false positive rate (`fpr`), also known as _background efficiency_
* the true positive rate (`tpr`), also known as _signal efficiency_
* the score thresholds corresponding to a given point on the curve, also known as _working point_

We define a little helper function to quickly plot ROC curves as we like to have them for the Electron MVA: with logarithmic y-axis and only showing signal efficiencies greater than 0.7.

```
from sklearn import metrics

def plot_roc_curve(df, score_column, tpr_threshold=0.7, ax=None, color=None, linestyle='-', label=None):
    if ax is None:
        ax = plt.gca()
    if label is None:
        label = score_column
    fpr, tpr, thresholds = metrics.roc_curve(df["matchedToGenEle"], df[score_column])
    mask = tpr > tpr_threshold
    fpr, tpr = fpr[mask], tpr[mask]
    ax.semilogy(tpr, fpr, label=label, color=color, linestyle=linestyle)
```

Finally we plot three different ROC curves in the 3 training categories:

1. The Run 2 MVA scores, which are stored in the ntuple in the `Fall17IsoV2RawVals` column
2. Our new training applied to the testing set
3. Our new training applied to the training set in order to check for overtraining.

__A note about overtraining__: Overtraining is not bad by itself! Usually you get the best performance on a test set for a model that is slightly overtrained. Too much overtraining however indicates that your model does not generalize well. You can find more information on this topic if you research the "bias vs. variance tradeoff" in machine learning.
```
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

for i, df_group in df.groupby("EleMVACats"):
    if i < 3:
        continue
    df_train = df_group.query("not test")
    df_test = df_group.query("test")
    ax = axes[i%3]
    plot_roc_curve(df_test, "Fall17IsoV2RawVals", ax=ax)
    plot_roc_curve(df_test, "score", ax=ax, label="your new training (test sample)")
    plot_roc_curve(df_train, "score", ax=ax, color="#ff7f02", linestyle='--', label="your new training (train sample)")
    ax.set_title(category_titles[i])
    ax.legend()
    ax.set_xlabel("Signal efficiency")
    ax.set_ylabel("Background efficiency")
plt.show()
```

We could train an identification algorithm on the new Run 3 DY+Jets samples which performs at the same order of magnitude as the Run 2 MVA, but not better. This is a bit unexpected, as the Run 3 samples are different from the Run 2 samples in pileup, detector configuration and calibration.

However, if you look at the [slides about the Run 2 training](https://rembserj.web.cern.ch/rembserj/slides/180321_egamma.pdf) (more precisely slide 6), you see that the previous training was done on millions of electrons, while we only had a few tens of thousands at hand. This illustrates that more often than not, "more data" can be the answer to your machine learning problems.

__Question:__ what part of this notebook would you change to get our new training to perform better?

## Determining Working Points

In this last step, we want to determine 80 and 90 % efficient working points in the 3 training categories. Usually, we want a tighter working point with 80 % signal efficiency and a medium one with 90 % signal efficiency.
We can find these working points by getting the appropriate quantiles of the score distribution in the signal class and compare them between the new training and the Run 2 training: ``` def get_working_points(df, score_column): working_points = {} for i, df_group in df.groupby("EleMVACats"): if i < 3: continue # get the category name category = category_titles[i] working_points[category] = {} df_test = df_group.query("test") signal_mask = df_test["matchedToGenEle"] == 1 df_sig = df_test[signal_mask] df_bkg = df_test[~signal_mask] # Little detail here: as the signal efficiency has a significant turnon before pT around 20 GeV, # we tune the working point to have a given signal efficiency for pT > 20 GeV. wp80, wp90 = np.percentile(df_sig.query("ele_pt > 20")[score_column], [20., 10.]) wp80_bkg_eff = 1.*len(df_bkg[df_bkg[score_column] >= wp80])/len(df_bkg) wp90_bkg_eff = 1.*len(df_bkg[df_bkg[score_column] >= wp90])/len(df_bkg) working_points[category]["wp80"] = wp80 working_points[category]["wp90"] = wp90 print("") print(category) print("bkg. efficiency at 80 % sig. eff: {0:.2f} %".format(wp90_bkg_eff * 100)) print("bkg. efficiency at 90 % sig. eff: {0:.2f} %".format(wp80_bkg_eff * 100)) return working_points print("Finding working points for new training:") working_points = get_working_points(df, "score") print("") print("Finding working points in the same way for Run 2 training:") get_working_points(df, "Fall17IsoV2RawVals"); ``` # Bonus: Differential Performance Studies Usually, we also want to look at signal and background efficiencies differential in $p_T$, $\eta$ and pileup. We won't go into details here as time is running short, but you can find an example on how to produce turnon curves in $p_T$ relying on pandas `groupby` feature in the cell below: ``` fig, axes = plt.subplots(1, 3, figsize=(15, 5)) pt_bins = np.linspace(10, 100, 46) pt_bin_centers = (pt_bins[:-1] + pt_bins[1:])/2. 
bin_indices = np.digitize(df["ele_pt"], pt_bins) - 1 bin_indices[bin_indices == 45] = 44 df["pt_binned"] = pt_bin_centers[bin_indices] def get_signal_efficiency(df, score_column, working_point): df_sig = df.query("matchedToGenEle == 1") return 1.*len(df_sig[df_sig[score_column] >= working_point])/len(df_sig) def get_background_efficiency(df, score_column, working_point): df_bkg = df.query("matchedToGenEle == 0") return 1.*len(df_bkg[df_bkg[score_column] >= working_point])/len(df_bkg) for i, df_group in df.groupby("EleMVACats"): if i < 3: continue wp80 = working_points[category]["wp80"] wp90 = working_points[category]["wp90"] ax = axes[i%3] df_group.groupby("pt_binned").apply(lambda df : get_background_efficiency(df, "score", wp80)).plot(label="background", ax=ax) df_group.groupby("pt_binned").apply(lambda df : get_signal_efficiency(df, "score", wp80)).plot(label="signal", ax=ax) ax.set_ylabel("Efficiency") ax.set_xlabel(r"$p_T$ [GeV]") ax.grid(True) ax.set_title(category_titles[i]) ax.legend() plt.show() df = df.drop("pt_binned", axis=1) ``` We see the turn-on effect is actually very strong, even above $p_T$ > 20 GeV. This would get better with a larger training sample and more boosting rounds.
github_jupyter
# Прогнозирование уровня средней заработной платы в России Известны данные о заработной плате за каждый месяц с января 1993 по август 2016. Необходимо проанализировать данные, подобрать для них оптимальную прогнозирующую модель в классе ARIMA и построить прогноз на каждый месяц на два года вперёд от конца данных. ``` %pylab inline import pandas as pd from scipy import stats import statsmodels.api as sm import matplotlib.pyplot as plt import warnings from itertools import product def invboxcox(y,lmbda): if lmbda == 0: return(np.exp(y)) else: return(np.exp(np.log(lmbda*y+1)/lmbda)) ``` ## 1. Визуальный анализ ряда Загрузим данные и построим график временного ряда ``` salary = pd.read_csv('WAG_C_M.csv', ';', index_col=['month'], parse_dates=['month'], dayfirst=True) plt.figure(figsize(15,7)) salary.WAG_C_M.plot() plt.ylabel('Month average salary') pylab.show() ``` Визуальный анализ ряда показывает, что в данных есть заметный возрастающий тренд и сезонность. Очевидно, что он не стационарен, однако для формальности проверим стационарность с помощью критерию Дики-Фуллера, а также выполним STL-декомпозиция ряда: ``` plt.figure(figsize(15,10)) sm.tsa.seasonal_decompose(salary.WAG_C_M).plot() print "Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(salary.WAG_C_M)[1] ``` ## 2. Стабилизация дисперсии Критерий Дики-Фуллера не отвергает гипотезу нестационарности. Временной ряд отличается переменной дисперсией, поэтому выполним преобразование Бокса-Кокса для стабилизации дисперсии. ``` salary['salary_box'], lmbda = stats.boxcox(salary.WAG_C_M) plt.figure(figsize(15,7)) salary.salary_box.plot() plt.ylabel(u'Transformed average salary') print "Оптимальный параметр преобразования Бокса-кокса: %f" % lmbda print "Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(salary.salary_box)[1] ``` ## 3. 
Выбор порядка дифференцирования Для приведения ряда к стационарному попробуем сезонное дифференцирование, сделаем на продифференцированном ряде STL-декомпозицию и проверим стационарность: ``` salary['salary_box_diff'] = salary.salary_box - salary.salary_box.shift(12) plt.figure(figsize(15,10)) sm.tsa.seasonal_decompose(salary.salary_box_diff[12:]).plot() print "Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(salary.salary_box_diff[12:])[1] ``` Гипотеза нестационарности отвергается, и визуально ряд выглядит лучше — явного тренда больше нет, однако данные выглядят довольно непредсказуемо. Применим обычное дифференцирование: ``` salary['salary_box_diff2'] = salary.salary_box_diff - salary.salary_box_diff.shift(1) plt.figure(figsize(15,10)) sm.tsa.seasonal_decompose(salary.salary_box_diff2[13:]).plot() print "Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(salary.salary_box_diff2[13:])[1] ``` Гипотеза нестационарности по-прежнему отвергается, и визуально ряд выглядит еще лучше — разброс значений меньше и нет переменных повышающих и понижающих участков. ## 4. Выбор начальных приближений для p,q,P,Q Построим графики ACF и PACF полученного ряда: ``` plt.figure(figsize(15,8)) ax = plt.subplot(211) sm.graphics.tsa.plot_acf(salary.salary_box_diff2[13:].values.squeeze(), lags=48, ax=ax) pylab.show() ax = plt.subplot(212) sm.graphics.tsa.plot_pacf(salary.salary_box_diff2[13:].values.squeeze(), lags=48, ax=ax) pylab.show() ``` Из расположения лагов в коррелограмме следуют начальные приближения: Q=0, q=5, P=1, p=5 ## 5. 
Обучение и сравнение моделей-кандидатов, выбор победителя Зададим последовательность значений параметров для перебора ``` ps = range(0, 6) d=1 qs = range(0, 6) Ps = range(0, 2) D=1 Qs = range(0, 1) parameters = product(ps, qs, Ps, Qs) parameters_list = list(parameters) len(parameters_list) ``` Выполним обучениие модели на всех вариантах параметров для нахождения наилучшей про критерию AIC ``` results = [] best_aic = float("inf") warnings.filterwarnings('ignore') for param in parameters_list: #try except нужен, потому что на некоторых наборах параметров модель не обучается try: model=sm.tsa.statespace.SARIMAX(salary.salary_box, order=(param[0], d, param[1]), seasonal_order=(param[2], D, param[3], 12)).fit(disp=-1) #выводим параметры, на которых модель не обучается и переходим к следующему набору except ValueError: print 'wrong parameters:', param continue aic = model.aic #сохраняем лучшую модель, aic, параметры if aic < best_aic: best_model = model best_aic = aic best_param = param results.append([param, model.aic]) warnings.filterwarnings('default') result_table = pd.DataFrame(results) result_table.columns = ['parameters', 'aic'] print result_table.sort_values(by = 'aic', ascending=True).head() ``` Лучшая модель: ``` print best_model.summary() ``` ## 6. Анализ остатков построенной модели Остатки: ``` plt.figure(figsize(15,8)) plt.subplot(211) best_model.resid[13:].plot() plt.ylabel(u'Residuals') ax = plt.subplot(212) sm.graphics.tsa.plot_acf(best_model.resid[13:].values.squeeze(), lags=48, ax=ax) print "Критерий Стьюдента: p=%f" % stats.ttest_1samp(best_model.resid[13:], 0)[1] print "Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(best_model.resid[13:])[1] ``` Остатки несмещены (подтверждается критерием Стьюдента), стационарны (подтверждается критерием Дики-Фуллера и визуально) и неавтокоррелированы (подтверждается критерием Льюнга-Бокса и коррелограммой). 
Посмотрим, насколько хорошо модель описывает данные: ``` salary['model'] = invboxcox(best_model.fittedvalues, lmbda) plt.figure(figsize(15,7)) salary.WAG_C_M.plot() salary.model[13:].plot(color='r') plt.ylabel('Average salary') pylab.show() ``` ## 7. Прогнозирование Построим прогноз на каждый месяц на два года вперёд от конца данных ``` salary2 = salary[['WAG_C_M']] date_list = [datetime.datetime.strptime("01.09.2016", "%d.%m.%Y") + relativedelta(months=x) for x in range(0,24)] future = pd.DataFrame(index=date_list, columns= salary2.columns) salary2 = pd.concat([salary2, future]) salary2['forecast'] = invboxcox(best_model.predict(start=284, end=307), lmbda) plt.figure(figsize(15,7)) salary2.WAG_C_M.plot() salary2.forecast.plot(color='r') plt.ylabel('Average salary') pylab.show() ```
github_jupyter
# Batch learning and evaluation scripts demo For general use, want a more structured pipeline than experimental notebooks provide. This includes: * an experiment definition file * running from the command line / python interpreter * being able to run on a batch system like spice * run one of * a training experiment, which trains classifiers and produces metrics * run inference, which loads previously trained classifiers and performs classifications on the whole dataset ``` import warnings warnings.filterwarnings('ignore') import os import sys import pathlib import matplotlib import matplotlib.pyplot import cartopy.crs as ccrs root_repo_dir = pathlib.Path().absolute().parent sys.path = [os.path.join(root_repo_dir)] + sys.path from classification import experiment ``` ## Set up parameters Define some key paths for the experiment. Paths are not generally defined in experiment description, to make the experiment description more portable. Import definitions include * The root data directory. This should have subdirectories with the XBT input dataset, as well as for outputs. * The names of the input and output subdirectories * The path to JSON experiment description file. 
``` # Set up some site specific parameters for the notebook try: environment = os.environ['XBT_ENV_NAME'] except KeyError: environment = 'pangeo' root_data_dirs = { 'MO_scitools': '/data/users/shaddad/xbt-data/', 'pangeo': '/data/misc/xbt-data/', } env_date_ranges = { 'MO_scitools': (1966,2015), 'pangeo': (1966,2015) } # Set up some dataset specific parameters root_data_dir = root_data_dirs[environment] year_range = env_date_ranges[environment] input_dir_name = 'csv_with_imeta' exp_out_dir_name = 'experiment_outputs' xbt_input_dir = os.path.join(root_data_dir, input_dir_name) xbt_output_dir = os.path.join(root_data_dir, exp_out_dir_name) json_params_path = os.path.join(root_repo_dir, 'examples', 'xbt_param_decisionTree_country.json') ``` Creating the experiment object will load all the definitions from JSON file but does not run the experiment. We have 3 choices of experiment using the following functions: * `run_single_experiment` - Run training, inference and evaluation with a single split, no cross-validation or hyperparameter tuning. Save classifier, metrics and classifications if requested (default is to output results). * `run_cvhpt_experiment` - Run training, inference and evaluation with cross-validation and hyperparameter tuning: * Use outer cross validation based on cruise number * Run hyperparameter tuning on each outer split using grid search. * Run inner cross-validation for each set of parameters being tried. * Save classifiers, metrics and classifications if requested (default is to output results). * Save a json file for running inference in future, which points to files containing trained classifiers. * Calculate a vote-based probability using the ensemble of classifiers trained on different splits. * `run_inference` - Load previous classifiers and run inference. See below for more details. 
``` exp2_cv = experiment.ClassificationExperiment(json_params_path, xbt_input_dir, xbt_output_dir) %%time results_cv, classifiers_cv = exp2_cv.run_cvhpt_experiment() ``` The trained classifier objects are saved to the output directory, one per file. There is also a JSON experiment description file, which is the same as the original description, but with inference added to experiment name and a list of classifier file names. This file can be used to create and run an inference job. The classifier files should be in the same directory as the JSON inference description files. ``` json_inf_path = exp2_cv.inference_out_json_path fig_results = matplotlib.pyplot.figure('xbt_results',figsize=(25,15)) for label1, metrics1 in classifiers_cv.items(): ax_precision = fig_results.add_subplot(3,5,label1 +1, title='precision split {0}'.format(label1)) ax_recall = fig_results.add_subplot(3,5,label1 + 1 + 5 * 1, title='recall split {0}'.format(label1)) ax_f1 = fig_results.add_subplot(3,5,label1 + 1 + 5 * 2, title='f1 split {0}'.format(label1)) results_cv.plot.line(ax=ax_precision, x='year', y=[f'precision_train_{label1}_all',f'precision_test_{label1}_all'], color=['b', 'r'], ylim=(0.7,1.0)) results_cv.plot.line(ax=ax_recall, x='year', y=[f'recall_train_{label1}_all',f'recall_test_{label1}_all'], color=['b', 'r'], ylim=(0.7,1.0)) results_cv.plot.line(ax=ax_f1, x='year', y=[f'f1_train_{label1}_all',f'f1_test_{label1}_all'], color=['b', 'r'], ylim=(0.7,1.0)) ``` ### Inference Once we have trained the classifiers, we want to be able to load from the saved state files and run inference on the whole dataset, with the same results. We can use the JSON file created by the training to run the inference. The JSON inference parameters and the saved classifier object files should be in the same directory. We can then use the `run_inference` function. This will perform the following steps: * load dataset * load previously trained classifiers from the file list defined in the JSON inference description. 
* run inference for each of the classifiers * fill in classifications using the iMeta algorithm where the classifiers could not produce one * calculate a vote-based probability using the ensemble of previously trained classifiers * save classification results ``` exp3_inf = experiment.ClassificationExperiment(json_inf_path, xbt_input_dir, xbt_output_dir) %%time classifiers_reloaded = exp3_inf.run_inference() ```
github_jupyter
# Time Series Anomaly Detection With Autoencoders & LSTMs In this notebook, we will train an LSTM autoencoder model for detecting anomalies in heartbeats, using PyTorch. ## About Dataset This dataset contains a set of N = 5000 univariate time series with 140 timesteps (T = 140). Each sequence corresponds to one heartbeat. Five classes are annotated, corresponding to the following labels: Normal (N), R-on-T Premature Ventricular Contraction (R-on-T PVC), Premature Ventricular Contraction (PVC), Supra-ventricular Premature or Ectopic Beat (SP or EB) and Unclassified Beat (UB). [Dataset Description Link 1](https://link.springer.com/article/10.1007%2Fs10618-014-0388-4) [Dataset Description Link 2](https://ieeexplore.ieee.org/document/8679157) ## Preparation ``` %%capture !pip install arff2pandas !pip install transformers import numpy as np import pandas as pd import torch from sklearn.metrics import (confusion_matrix, classification_report, f1_score, accuracy_score) from sklearn.model_selection import train_test_split from glob import glob import time import copy import shutil import matplotlib.pyplot as plt import seaborn as sns from pylab import rcParams from matplotlib import rc from matplotlib.ticker import MaxNLocator from multiprocess import cpu_count import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset from arff2pandas import a2p from collections import defaultdict from transformers import get_linear_schedule_with_warmup, AdamW from tqdm.auto import tqdm import warnings warnings.simplefilter(action='ignore', category=FutureWarning) %matplotlib inline %config InlineBackend.figure_format='retina' sns.set(style="whitegrid", palette="muted", font_scale=1.2) HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"] sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE)) rcParams['figure.figsize'] = 10, 4 ``` 
Seed everything! ``` RANDOM_SEED = 2112 np.random.seed(RANDOM_SEED) torch.manual_seed(RANDOM_SEED) torch.backends.cudnn.deterministic = True torch.cuda.manual_seed_all(RANDOM_SEED) ``` Initialize the device. ``` device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device.type) ``` Load the dataset. ``` base_path = "/content/drive/MyDrive/Applied AI #6 Source/data/ECG5000/" with open(base_path + "ECG5000_TRAIN.arff") as f: train = a2p.load(f) with open(base_path + "ECG5000_TEST.arff") as f: test = a2p.load(f) train.head() df = train.append(test) df = df.sample(frac=1.0) df.shape new_columns = list(df.columns) new_columns[-1] = 'target' df.columns = new_columns class_names = ['Normal','R on T','PVC','SP','UB'] df.target.value_counts() ax = sns.countplot(df.target) ``` * Normal: 1 * R-on-T Premature Ventricular Contraction (R-on-T PVC): 2 * Premature Ventricular Contraction (PVC): 3 * Supra-ventricular Premature or Ectopic Beat (SP or EB): 4 * Unclassified Beat (UB): 5 # Preprocessing ``` CLASS_NORMAL = 1 normal_df = df[df["target"] == str(CLASS_NORMAL)].drop(labels="target", axis=1) print(normal_df.shape) anomaly_df = df[df["target"] != str(CLASS_NORMAL)].drop(labels="target", axis=1) print(anomaly_df.shape) train_df, val_df = train_test_split(normal_df, test_size=0.15, random_state=RANDOM_SEED) val_df, test_df = train_test_split(val_df, test_size = 0.33, random_state=RANDOM_SEED) print(train_df.shape, val_df.shape, test_df.shape) ``` We wrote a PyTorch dataset class before. We can create a dataset without it, using ```TensorDataset``` and ```RandomSampler```. 
``` BATCH_SIZE = 16 SEQ_LEN = 140 N_FEATURES = 1 def train_dataset(df,batch_size): torch_df = torch.tensor(df.to_numpy()) train_data = TensorDataset(torch_df) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size) return train_dataloader def test_dataset(df, batch_size): torch_df = torch.tensor(df.to_numpy()) val_data = TensorDataset(torch_df) val_sampler = SequentialSampler(val_data) val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size) return val_dataloader train_dataloader = train_dataset(train_df, BATCH_SIZE) val_dataloader = test_dataset(val_df, BATCH_SIZE) test_dataloader = test_dataset(test_df, BATCH_SIZE) anomaly_dataloader = test_dataset(anomaly_df, 1) print(next(iter(train_dataloader))[0].shape) print(next(iter(val_dataloader))[0].shape) print(next(iter(test_dataloader))[0].shape) print(next(iter(anomaly_dataloader))[0].shape) fig, axs = plt.subplots(3,1, figsize=(10,10)) samples = torch.randint(0, 16, (3,)) axs[0].plot(next(iter(train_dataloader))[0][samples[0]].numpy(), color="black") axs[1].plot(next(iter(train_dataloader))[0][samples[1]].numpy(), color="black") axs[2].plot(next(iter(train_dataloader))[0][samples[2]].numpy(), color="black"); ``` # Model 
![cdd.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABhEAAAM0CAYAAAC1UQOhAAAIwHRFWHRteGZpbGUAJTNDbXhmaWxlJTIwaG9zdCUzRCUyMmFwcC5kaWFncmFtcy5uZXQlMjIlMjBtb2RpZmllZCUzRCUyMjIwMjEtMDctMjZUMDYlM0E1MiUzQTAxLjA3MFolMjIlMjBhZ2VudCUzRCUyMjUuMCUyMChYMTElM0IlMjBMaW51eCUyMHg4Nl82NCklMjBBcHBsZVdlYktpdCUyRjUzNy4zNiUyMChLSFRNTCUyQyUyMGxpa2UlMjBHZWNrbyklMjBDaHJvbWUlMkY5MS4wLjQ0NzIuMTY0JTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwZXRhZyUzRCUyMnJjUmNSWUFCSnBlM2JUTFhhMWVDJTIyJTIwdmVyc2lvbiUzRCUyMjE0LjkuMCUyMiUyMHR5cGUlM0QlMjJkZXZpY2UlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjJLaGlaRE51MUxBbzZWbDJJRUVqNSUyMiUyMG5hbWUlM0QlMjJQYWdlLTElMjIlM0U3VnhkazZJNEZQMDFQam9GQ1FSNDNQNmFlZWlwNnFxZXFwMTVaREVxMjBpWUdGdWRYNzlCZ2tqNzBWbHR1YUVyVDVJTGhKaVRjMGdPRndiNGRyYjZ5dU5pJTJCcDJOYURaQXptZzF3SGNEaEJBSmZmbFRSdFpWeFBYRHNJcE1lRHFxWWs0VGVFNyUyRlVIVmdIVjJrSXpwWHNTb2tHTXRFV3JTRENjdHptb2hXTE9hY0xkdUhqVmsyYWdXS2VFSmJ6U2dEejBtYzBiM0QlMkZrNUhZbHBGUXhRMDhXODBuVXpySzdza3F2YjhFeWN2RTg0V3VicGV6bkphN1puRmRUWHFrdk5wUEdMTG5SQyUyQkglMkJCYnpwaW90bWFyVzVxViUyRmRydXNZY2plN2RONWpRWE9pZjhlVTJlNWw3OHpYOGN6NHJsUzFqY2ZDVkQ5VGZtWWwxM0JSM0pubEZGeHNXVVRWZ2VaJTJGZE45R2J6ZDJsWnF5Tkx6VEdQakJVeTZNcmd2MVNJdFlJNVhnZ21RMU14eTlSZXVrckZ6JTJGTDBMNzRxJTJGVktWbGR0M3E5M0NXaFhtZ3JPWExUaFlScXFXbDgwOTJpRXFOR2NMbnRBVHZWQVB1WmhQcURoeG5MJTJCRlRWS0JzaGtWZkMzUDR6U0xSZnJhYmtlc2h1UmtlMXlEamR4UThQd1BxRndNaVpXN2k5UVd0OE5ZeWY3bjY1JTJCN2haMnp5bUp6MnFaa0ZNWUlGT09xM3RjNFc2Z3JEUkRKWkhOdnhreiUyQjRWM3d5ZThGcTNjTTV4djQlMkZwSUh1R0d4YW5iS3JVbjVlNThuVXJTNTNQJTJGNCUyRk9ON1hhZHNZbFZ0ZGREZThHb1BudVUwRmZTNWlEZTl2SlEzZ3ZaQTJRZHZuR2JaTGNzWTM5U0d4JTJCTXhTcEx0ZFY0cEYzUjFHdFo5R05RSlEzVjdVZmNiN0tHcXZHekUyNjExZDdvajNNUzVGbkRPWHU4WktxUU5PUnMlMkIlMkZxcnJneUluMGlRbkFTV25ad1c0QTR3eEpNYm9xQURQaXppJTJGZ2dCWDFmWk9nTjE2eW0lMkJPQXJ0NzNXY1ZXSiUyQmRXSk9kQWFnQyUyQjFhQk84RFlnOFFZV3dYV1VtQWNHRGNIUmxhQkwyQ25wOG5PRUpLZG5tV25GanY5d0RXTm5jVGVPeTlncDYlMkZKVHRBVnFtJTJGWmVZNTk1RVklMkJORGtEUzg0THlFbjZzSGdobHB4bldRc0dzRE8wN0x5QW5VRWZKcmFCWmVkWnkwNTRkbnFuVGFITkUzbTd6bnhETXgwZiUyRnNoQTZHaWhlWHE1WWxFOUYxVlFjODg3UGMlMkI
xcUo2TEt1aXlrMWl1WGdWVkJEb2pDdTJNNkN5ckQzNUdoRUN0UG1PeSUyQnBDMmVvTHlERm52cHh1VVFlZXo2RnB5ZWtmaDVaU1FHJTJGVHc4REZ5R29ST1MwNXJlc0NwYVc5eXBJMmM3R0RkeEZ2UE9Ud3VPa283QUUzZzdMMEthNk1NdSUyRjZzbTJsViUyQkIwVmpnSnNsZ3BqWkZYNEVuN3FacGRVZVFwZyUyRkxUdnVIU0NNcXhmaEslMkJWUmZUWlZOaDFhMnZOR0JrR3pjRHR2d3pyWmlwNG9HOUI0R3ZsS253NmdtSSUyRk1veWcxazI2aUtDNmJoTHNhdFUlMkZQVSUyQkNmT0xTQVR3WDlMbzY5WW1sbTlkcEZZMURyJTJCMDVEUkZCN1RxcThhQk9ld1BldGgwWHNOYTZnJTJCZTVnOE90dXNJcExxZyUyRjJIdkZyYWxuJTJCTXJVUCUyRjN5cnlIUHVPWGR2NFZzRklidllMc3BQVkdleW02aTNDekFZZDkwY2F3a24yVVZHaURKSHVpN3dQMlhaS1RMVU5CVnF0JTJCTHhFJTJGaVJDMzc0WXZqdm1kQkFHaXlOdUt3bW95c0pwOW5IQm9neXI2NXVaJTJGWDVkYWxJbmw0emVwNmJ0dDY2bnpSNmwzclZmMVB4OGEzTHFFQmJQU09PdzY2bnhyRDNpSHdIdE9jeGh6bUUyUFVIZmswJTJCQ0Nub1g1S1ZXT0cwWDVlWiUyQmdFSFdKVzg5dEFCVFh5YVl1dmElMkJhQ1pnYjZoNWo0QnVaM2lCSFBpJTJCcXJxT04wVlNLdmtRQWRKdlREWGdrck1kJTJCbGl1ZEZlMHlwYnhhN1JLbGpIMDhVYyUyQjF4TTRrU2FSSUYxb0tMZWs0VVNSWGptR0t1clhuZEx3Ym9tcENnRGdjNTVFSDJhY1I3TGpGdXhQZkNOVExuM2tDMDg3ZEJtWElvc2JkUFRDRk8xQjFUWkxINTdueGxCelFmOXNmMyUyRndFJTNEJTNDJTJGZGlhZ3JhbSUzRSUzQyUyRm14ZmlsZSUzRbAxzusAACAASURBVHhe7N13nF5VtTDgldBBIKD03ntHkJ7Qu/TeVEDFQuyVC9ixIMjFroCgoNJ7J6H3BJBeQu8gYKEz328fvpk7qeedk0zmnLOf/ceNQ/Z+56xn7XVz9rvmzDuoq6urKwwCBAgQIECAAAECBAZUYNCgQQP6/X1zAgQIECBAgAABAgQIjCuQ2geDNBFsDAIECBAgQIAAAQIDL6CJMPA5cAUECBAgQIAAAQIECIwtoIlgRxAgQIAAAQIECBCoiYAmQk0S4TIIECBAgAABAgQIEOgR0ESwGQgQIECAAAECBAjURKB3E+Gxu5+pyVW5DAIECBAgQIAAAQIEchNYZIX5NBFyS7p4CRAgQIAAAQIE6i+giVD/HLlCAgQIECBAgAABAjkIaCLkkGUxEiBAgAABAgQINE5AE6FxKXPBBAgQIECAAAECBFopoInQyrQKigABAgQIECBAoOkCmghNz6DrJ0CAAAECBAgQINAOAU2EduRRFAQIECBAgAABAi0T0ERoWUKFQ4AAAQIECBAgQKChApoIDU2cyyZAgAABAgQIEGi3gCZCu/MrOgIECBAgQIAAAQJNEdBEaEqmXCcBAgQIECBAgEBWApoIWaVbsAQIECBAgAABAgRqK6CJUNvUuDACBAgQIECAAIGcBTQRcs6+2AkQIECAAAECBAjUR0AToT65cCUECBAgQIAAAQIEegQ0EWwGAgQIECBAgAABAgTqIKCJUIcsuAYCBAgQIECAAAEC4whoItgSBAgQIECAAAECBAjUQUAToQ5ZcA0ECBAgQIAAAQIENBHsAQIECBAgQIAAAQIEaiigiVDDpLgkAgQIECBAgAABAp5EsAcIECBAgAABAgQIEKiDgCZCHbLgGggQIECAAAECBAiMI6CJYEsQIECAAAECBAgQIFAHAU2
EOmTBNRAgQIAAAQIECBDQRLAHCBAgQIAAAQIECBCooYAmQg2T4pIIECBAgAABAgQIeBLBHiBAgAABAgQIECBAoA4Cmgh1yIJrIECAAAECBAgQIDCOgCaCLUGAAAECBAgQIECAQB0ENBHqkAXXQIAAAQIECBAgQEATwR4gQIAAAQIECBAgQKCGApoINUyKSyJAgAABAgQIECDgSQR7gAABAvUTuPaGq2PvA3cvLmz66aePB0c9Vvkij/v1MfHT444q1g8/5Evxhc98ufJrWUiAAAECBPpTQBOhP3W9NgECBAgQIECAAIGKApoIFeEsI0CAQD8KaCL0I66XJkCAAIHaCmgi1DY1LowAAQIECBAgQCBnAU2EnLMvdgIE6iqgiVDXzLguAgQIEOhPAU2E/tT12gQIECBAgAABAgQqCmgiVISzjAABAv0oMCWbCKPvGhW3jbq1uNrVV1k9VltljX68ci9NgAABAgSqC2giVLezkgABAgQIECBAgEC/CWgi9ButFyZAgEBlgSnZRKh8ERYSIECAAIGpLKCJMJXBfTsCBAgQIECAAAECnQhoInSiZA4BAgSmroAmwtT19t0IECBAoB4Cmgj1yIOrIECAAAECBAgQIDCWgCaCDUGAAIH6CUzJJsJxvz4mfnrcUUWQww/5UnzhM1/uCfidd9+JJVZeqPh68423jN8dd0K88cYbcerpp8T5F58bYx4bE//692vxoQ/OFR9efe3Yc5e9Yp211usI7MqRl8d5F58Tt95+S7zw4vOR/r2Ze6554sOrrxU7brdzrPeRDTp6nZdefinOPO/0uP6ma+OBh+6Pf77yz3jrrTdjtllni/nnWyA+suY6sccue8eSiy81wde7YuRl8fFD9iv+7qgjfxq77rhH/PJ3x8Vf/n5yvPzKyzHv3PPFOmutG1/+/NeKOA0CBAgQGDgBTYSBs/edCRAgQIAAAQIECExUQBPB5iBAgED9BAaqifC1L3wzDv78x+PhMQ9NFGW3HfeIo77zsxg8ePAE5zz3/LPx2S9/Km6+7aZJwg7bcJM49qjjY/bZZp/ovF/94X/j2F8eHa+/8fokXytdy+c+OTy++NmvjDdv3CbCXffcGaf89U9jzZtpxpli1HV3R/rTIECAAIGBE9BEGDh735kAAQIECBAgQICAJoI9QIAAgQYJDEQTYfVV14zUAHjq6SdjuummK548WHD+BePFl18sngJITyh0j299+X/i4I99ejzRp599Onbca5t49rlni7+bccYZY5ONNotlllo2urq64r4H7o2rrrmi57WWWGzJOOfUC2LWWWcb77V+fvxP45hf/qznv6+y4qqxxmofjjmHzBlvvv1mPPTwgzHimivHajD8+pjfx1abbTPWa/VuImyzxXZxwSXnjfe90n//5dG/bdAOcakECBBop4AmQjvzKioCBAgQIECAAIGGC3gSoeEJdPkECLRSYCCaCN2QG2+0afzoiJ/EPHPP22ObfqXQp4Z/oufpgvRrf24deUfxK4q6x3vvvRc77bN9jLrjtuI/rbv2+vGLHx8fc31o7rFylBoNn/nSJ+P20bcW/33rzbeNX/38d2PNeeyJR2PjbTeMd955u/gex/74+Pjo1juOl+uXXn4xDvrcx+O20bcUf5d+1dJpJ5w+1rzeTYTuv9h7t31j/70+HjPMMEPcePP1sfBCixTXaxAgQIDAwApoIgysv+9OgAABAgQIECBAYIICmgg2BgECBOonMFBNhBWWWzHOPe3CmHba6cZDSU8XrLvZmvHuu+8Wf3fJWVfGsksv1zPv4ssvjE8e+oni6/TkwbmnXVQ8iTCh8eprr8bQrdeNl//5cvHXF51xeSy/7Ao9U4/91dFx9P/+pPh61x12j59+/5iJJumOf4yO7Xffqvj7D3xg1rj7pgfGmjtuE2G3nfaMn3z36Pol3RURIECAQGgi2AQECBAgQIAAAQIEaiigiVDDpLgkAgSyFxioJsLRP/hF7PzRXSfqv+V
Om8a9999d/P0pvzstNlh3o565+x60R1x9/cji6+N/9pvYdsvtJ5nH9KuK0q8sSuOg/T8Z3/7qET3zU/y3jrolHn/ysThg70/EyiusMtHX6v3h0OnftEf/8fRYc8dtIlx5/jWRfo2SQYAAAQL1E9BEqF9OXBEBAgQIECBAgACBsX4VxWN3P0OEAAECBGogMFBNhKsvviEWWWjRiQrs/8m9YsS1VxV//4fjT4pNh25e/O/0Rv7KH1k2/vPf/xRfj77unphjyByTlOwdY/q8g3P/elEl+UcefSQ23nb94jMX0njkzidjmmmm6Xmt3k2EdE3p2gwCBAgQqKeAJkI98+KqCBAgQIAAAQIEMhfwJELmG0D4BAjUUmCgmggP3P5o8TkBExufGn5gXHTZBcVf/+64E2Lzjbcs/nf6DIMNt1ynsuUH5/xQ3H7NXRNdnxoE6QOfxzz2SDz+5OPF93vw4QfirrvvjBdefH6sdQ/f+URMO820Pf+tdxNh1ZVWi3NOu7DydVpIgAABAv0roInQv75enQABAgQIECBAgEAlAU2ESmwWESBAoF8FBqKJMHjw4Bhz11OTjKt3E+G3vzghttjk/SZC788lqAKT3vRPb/6PO2685fr406knFr8m6V//eq2jl55UE2HDdTeKk393WkevYxIBAgQITH0BTYSpb+47EiBAgAABAgQIECgV0EQoJTKBAAECU11gIJoI6VcApV8FNKkxsSbC7aNvjR333q5Ymn5l0CEHfq5PZoMGDy4+F6F7pA9vPux734g//+3k8V4nNRwWXniRWHG5lWKNVdeMYRtuMtZTEJNqImy80aZxwi/Hf80+XazJBAgQINBvApoI/UbrhQkQIECAAAECBAhUF9BEqG5nJQECBPpLoGlNhIfHPBQbb7tBwTHLzLPEPbc8NFk0x/36mPjpcUf1vMaWm24dW222Tay4/Eqx6MKLxrTTTtfzd2+++WYsvfr/fY6DJsJk0VtMgACBARXQRBhQft+cAAECBAgQIECAwIQFNBHsDAIECNRPoGlNhNffeD1WXHuZeOedtwvMy84ZEUsvucwkYd966614++23YpZZPjDWvDfeeCPWHLpKz68vOuxrR8aB+x080dd6+tmnY51N1uj5+4fveHysJkPvz0TwJEL99rorIkCAQG8BTQT7gQABAgQIECBAgEANBTQRapgUl0SAQPYCTWsipISlX2eUfq1RGp856PPx1eHfmGQef/PHX8YPfvbdGDL7kEhv7v/8h8cV89OHJW+72xbF/55uuunivlsfHqspMO6Lnn3+mXHo1z7T858fHPVYTD/99D1fayJkX04ACBBokIAmQoOS5VIJECBAgAABAgTyEdBEyCfXIiVAoDkCTWwinHr6n+Prh3+5QE6/0uj8v10Siy+2xATRX3r5pdh0+w3j5X++XPz9t778P3Hwxz5d/O/bRt8SO+29ffG/0+c0/OOmB2LmmWae4OukJyC23mWzeGTMwz1/n36VUvr+3UMToTn73pUSIEBAE8EeIECAAAECBAgQIFBDAU2EGibFJREgkL1AE5sI6dcTbbnTJpE+HyGN+eedP3597B9ilRVXHSuf6dcPHfKFg2LUnbcX/32eueeNERde19MoePW1V2P19VeMd959p/j7Qz/9xfjiZ78y3p545rln4vNf+XTcfNtNY/3d7dfcFR+c80OaCNlXEQACBJoooInQxKy5ZgIECBAgQIAAgdYLaCK0PsUCJECggQK9mwjp/08vvOAiHUeRfmr/4rOu6Jnf+0OKhx/ypfjCZ95/WiCN9Eb9EisvVPzv9FP/j9z55CS/z6eGHxgXXXZBMee3vzghtthky7HmP/jwA7HzPttHagSkka59vY9sEKutsnpMM3iaeOiRB+PSKy+O1HBIY4YZZoiTf3tarL3mR8Z6na8c9sX425mn9vy3dddev/iVR3N9cK548eUXY9Qdt431OumDlrs/j+GK866OJRdfqmetJxE63jomEiBAYMA
FNBEGPAUugAABAgQIECBAgMD4ApoIdgUBAgTqJ9C7idDXq0tNhHtv/b9f7zM1mwjpWh99fEwc8sWD4+57/zHJS59/vgXi2KOOj7XWWHu8ef/573/i44fsGzfecsMkXyN9KPO3v3J43Drq5jjjnL8Xc4/+wS9i54/uqonQ141jPgECBGogoIlQgyS4BAIECBAgQIAAAQLjCmgi2BMECBCon0CTmwhJs6urKy698pK48NLzYtQdt8eLL70Qb739VswxZM5YfpnlY7ONt4ydtt9lop91kF7j3XffjbPOPyPOOf/MuPu+u+PVV1+JaaebNuaYfY5YaomlY52114vdd9orPjjnB+OcC8+Kz3/lkCKR6amFU//4fkMhDU8i1G9/uyICBAhMTEATwd4gQIAAAQIECBAgUEMBTYQaJsUlESBAgAABAgQIEMhQQBMhw6QLmQABAgQIECBAoP4Cmgj1z5ErJECAAAECBAgQIJCDgCZCDlkWIwECBAgQIECAQOMENBEalzIXTIAAAQIECBAgQKCVApoIrUyroAgQIECAAAECBJouoInQ9Ay6fgIECBAgQIAAAQLtENBEaEceRUGAAAECBAgQINAyAU2EliVUOAQIECBAgAABAgQaKqCJ0NDEuWwCBAgQIECAAIF2C2gitDu/oiNAgAABAgQIECDQFAFNhKZkynUSIECAAAECBAhkJaCJkFW6BUuAAAECBAgQIECgtgKaCLVNjQsjQIAAAQIECBDIWUATIefsi50AAQIECBAgQIBAfQQ0EeqTC1dCgAABAgQIECBAoEdAE8FmIECAAAECBAgQIECgDgKaCHXIgmsgQIAAAQIECBAgMI6AJoItQYAAAQIECBAgQIBAHQQ0EeqQBddAgAABAgQIECBAQBPBHiBAgAABAgQIECBAoIYCmgg1TIpLIkCAAAECBAgQIOBJBHuAAAECBAgQIECAAIE6CGgi1CELroEAAQIECBAgQIDAOAKaCLYEAQIECBAgQIAAAQJ1ENBEqEMWXAMBAgQIECBAgAABTQR7gAABAgQIECBAgACBGgpoItQwKS6JAAECBAgQIECAgCcR7AECBAgQIECAAAECBOogoIlQhyy4BgIECBAgQIAAAQLjCGgi2BIECBAgQIAAAQIECNRBQBOhDllwDQQIECBAgAABAgQm0USAQ4AAAQIECBAgQIAAgToIdHV1xaCu9H8NAgQIECBAgAABAgQGVKD3kwgDeiG+OQECBAgQIECAAAECBP6/gCaCrUCAAAECBAgQIECgJgKaCDVJhMsgQIAAAQIECBAgQKBHQBPBZiBAgAABAgQIECBAgAABAgQ6EhgxYkQMGzasmHv44YfHEUcc0dE6kwi0SWDo0KGx6KKLxoknntimsMRCgACBSQr4dUY2CAECBAgQIECAAAECBPog8OijjxZvIBkEchNIb56OHDmyCHvIkCExZsyY4k+DQC4CvRtpaf/7tyCXzItzXAH3QvntCU2E/HIuYgIECBAgQIAAAQIEKgq88sorsdpqq8WoUaO8eVrR0LJmCvR+87Q7Ak8jNDOXrrq6QO9G2v777+9phOqUVjZYwL1Qg5M3GZeuiTAZeJYSIECAAAECBAgQIJCXQPr1LUceeaRf5ZJX2kUbEb3fPO0G8TSCrZGTwOjRo4smcu/haYScdoBYuwXcC+W5FzQR8sy7qAkQIECAAAECBAgQ6KNA+sm7xRZbLNKf3jztI57pjRaY0FMI3QF5GqHRqXXxfRA44IAD4qSTThprhacR+gBoaisE3Au1Io2VgtBEqMRmEQECBAgQIECAAAECuQl0/+SdN09zy7x4J/QUQreKhpr9kYNA+v3vqYk8oeFphBx2gBi7BdwL5bsXNBHyzb3ICRAgQIAAAQIECBDoUKD3T95587RDNNNaITCppxA01FqRYkF0IDChpxC6l3kaoQNAU1oh4F6oFWmsHIQmQmU6CwkQIECAAAECBAgQyEVg3J+88+ZpLpkX56SeQtB
Qsz9yEJjUUwjd8XsaIYedIEb3QnnvAU2EvPMvegIECBAgQIAAAQIESgQm9JN33jy1bXIQ6OQpBA21HHZC3jFO6imEbhlPI+S9R3KI3r1QDlmedIyaCPYAAQIECBAgQIAAAQIEJiEwsZ+88+apbdN2gU6eQtBQa/suyDu+Tp5C6BbyNELee6Xt0bsXanuGy+PTRCg3MoMAAQIECBAgQIAAgUwFJvWTd948zXRTZBL2uE8hLLLIIrHZZpvF73//+0Jgiy22iDfeeCNGjhzZI3L44YdHeqPJINAWgU6eQuiO1dMIbcm6OMYVcC9kTyQBTQT7gAABAgQIECBAgAABAhMRKPvJu+5l3jy1hdom0P0UQmoepDrofjM1/ZlG9xumqdmQ/j41E4YMGRLpp7HTnwaBpgtM6CmERRddNNJ/T2OVVVaJO+64Y6wwR40aFauuumrTQ3f9BMYScC9kQ2gi2AMECBAgQIAAAQIECBCYiEAnP3nXvdSbp7ZRmwRSYyA1C7qbB92x/elPfyqaB2nst99+cdJJJ/WE3d1MSM0HTyO0aTfkG0vvpxA22mijYl9fe+21cdhhhxUo3/72t+MTn/hE8d+7ayHt/6uuuipfNJG3TsC9UOtSWjkgTyJUprOQAAECBAgQIECAAIE2C3T6k3fdBp5GaPNuyCu20aNHT/CnqU8++eSieZDGvvvuG6mpMO6Y2Nq8BEXbdIHupxC6mwepOZDG97///aJ50N1E+O53v1v87zS/u5mQmgjd85vu4PoJuBeyB7oFNBHsBQIECBAgQIAAAQIECIwjMO5P3nW/kTRs2LCememNou5f45L+o6cRbKO2C5xyyilF8yCNffbZJ1JTwSDQRoETTzwx0q8uGrcZ8IMf/CC+9a1vFSGnP7/3ve+NFX5qJnQ/ydNGFzHlJeBeKK98l0WriVAm5O8JECBAgAABAgQIEMhOoPsn78b9KdRBgwb1WHR1dRX/u/fvhPc0QnZbJauANRGySrdgJyDwwx/+ML75zW8Wf5P+TE8mGATaKuBeqK2ZrRaXJkI1N6sIECBAgAABAgQIEGipQPrJu/S7sIcPHz7eT6FOqInQzZCaCcccc0ykn2D1wbIt3RyZh/XnP/+5eAIhjb333jtSU8EgkJNA7ybCN77xjUhPJhgE2ijgXqiNWZ28mDQRJs/PagIECBAgQIAAAQIEMhKYVBMhIwahZirwl7/8pWgepLHXXntFaioYBHIS+NGPfhSpeZDG17/+9UhNBYNAbgLuhXLL+PvxaiLkmXdREyBAgAABAgQIECBQQcDBuQKaJa0R6N1E2HPPPSN9bRDISeCoo44qmgdpfO1rX4vUVDAI5CbgXii3jGsi5JlxURMgQIAAAQIECBAgUFnAwbkynYUtEDj11FOLJxDS0ERoQUKF0GeBH//4x0XzII2vfvWrkZoKBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUCp512WtE8SGOPPfaI1FQwCOQk8JOf/KRoHqTxla98JVJTwSCQm4B7odwyromQZ8ZFTYAAAQIECBAgQIBAZQEH58p0FrZA4K9//WvRPEhj9913j9RUMAjkJPDTn/60aB6k8eUvfzlSU8EgkJuAe6HcMq6JkGfGRU2AAAECBAgQIECAQGUBB+fKdBa2QEAToQVJFMJkCfzsZz8rmgdpfOlLX4rUVDAI5CbgXii3jGsi5JlxURMgQIAAAQIECBAgUFnAwbkynYUtEPjb3/5WPIGQxm677RapqWAQyEng6KOPLpoHaXzxi1+M1FQwCOQm4F4ot4xrIuSZcVETIECAAAECBAgQIFBZwMG5Mp2FLRD4+9//XjQP0th1110jNRUMAjkJ/PznPy+aB2l84QtfiNRUMAjkJuBeKLeMayLkmXFREyBAgAABAgQIECBQWcDBuTKdhS0QOP3004vmQRq77LJLpKaCQSAngWOOOaZoHqQxfPjwSE0Fg0BuAu6Fcsu4JkKeGRc1AQI
ECBAgQIAAAQKVBRycK9NZ2AIBTYQWJFEIkyVw7LHHFs2DNA499NBITQWDQG4C7oVyy7gmQp4ZFzUBAgQIECBAgAABApUFHJwr01nYAoEzzjijeAIhjZ133jlSU8EgkJPAL37xi6J5kMbnP//5SE0Fg0BuAu6Fcsu4JkKeGRc1AQIECBAgQIAAAQKVBRycK9NZ2AKBM888s2gepLHTTjtFaioYBHISOO6444rmQRqf+9znIjUVDAK5CbgXyi3jmgh5ZlzUBAgQIECAAAECBAhUFnBwrkxnYQsENBFakEQhTJbA//7v/xbNgzQ++9nPRmoqGARyE3AvlFvGNRHyzLioCRAgQIAAAQIECBCoLODgXJnOwhYInHXWWcUTCGnsuOOOkZoKBoGcBI4//viieZDGZz7zmUhNBYNAbgLuhXLLuCZCnhkXNQECBAgQIECAAAEClQUcnCvTWdgCgbPPPrtoHqSxww47RGoqGARyEvjlL39ZNA/SOOSQQyI1FQwCuQm4F8ot45oIeWZc1AQIECBAgAABAgQIVBZwcK5MZ2ELBHo3ET760Y9G+togkJPAr371q6J5kManP/3pSE0Fg0BuAu6Fcsu4JkKeGRc1AQIECBAgQIAAAQKVBRycK9NZ2AKBc845p3gCIQ1NhBYkVAh9Fvj1r39dNA/S+NSnPhWpqWAQyE3AvVBuGddEyDPjoiZAgAABAgQIECBAoLKAg3NlOgtbIHDuuecWzYM0tt9++0hNBYNATgK/+c1viuZBGp/85CcjNRUMArkJuBfKLeOaCHlmXNQECBAgQIAAAQIECFQWcHCuTGdhCwTOO++8onmQxnbbbRepqWAQyEngt7/9bdE80ETIKetiHVfAvVCee2JQV1dXV56hi5oAAQIECBAgQIAAAQJ9E3Bw7puX2e0S0ERoVz5F03eB3/3ud3HwwQcXC9Of6ckEg0BuAu6Fcsv4+/FqIuSZd1ETIECAAAECBAgQIFBBwMG5ApolrRE4//zziycQ0th2220jNRUMAjkJ/P73v4+DDjqoCDn9mZ5MMAjkJuBeKLeMayLkmXFREyBAgAABAgQIECBQWcDBuTKdhS0QuOCCC4rmQRrbbLNNpKaCQSAngT/84Q9x4IEHFiGnP9OTCQaB3ATcC+WWcU2EPDMuagIECBAgQIAAAQIEKgs4OFems7AFAhdeeGHRPEhj6623jtRUMAjkJPDHP/4xPvGJTxQhpz/TkwkGgdwE3AvllnFNhDwzLmoCBAgQIECAAAECBCoLODhXprOwBQKaCC1IohAmS+CEE06Ij3/848VrpD/TkwkGgdwE3AvllnFNhDwzLmoCBAgQIECAAAECBCoLODhXprOwBQIXXXRR8QRCGltttVWkpoJBICeBE088MT72sY8VIac/05MJBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUCF198cdE8SGPLLbeM1FQwCOQk0LuJcMABB0R6MsEgkJuAe6HcMq6JkGfGRU2AAAECBAgQIECAQGUBB+fKdBa2QEAToQVJFMJkCZx00kmRmgdp7L///pGaCgaB3ATcC+WWcU2EPDMuagIECBAgQIAAAQIEKgs4OFems7AFApdccknxBEIaW2yxRaSmgkEgJ4E//elPRfMgjf322y9SU8EgkJuAe6HcMq6JkGfGRU2AAAECBAgQIECAQGUBB+fKdBa2QODSSy8tmgdpbL755pGaCgaBnAROPvnkonmQxr777hupqWAQyE3AvVBuGddEyDPjoiZAgAABAgQIECBAoLKAg3NlOgtbIHDZZZcVzYM0Nttss0hNBYNATgKnnHJK0TxIY5999onUVDAI5CbgXii3jGsi5JlxURMgQIAAAQIECBAgUFnAwbkynYUtENBEaEEShTBZAn/+85+L5kEae++9d6SmgkEgNwH3QrllXBMhz4yLmgABAgQIECBAgACBygIOzpXpLGyBwOWXX148gZDGpptuGqmpYBDISeA
vf/lL0TxIY6+99tWXswAAIABJREFU9orUVDAI5CbgXii3jGsi5JlxURMgQIAAAQIECBAgUFnAwbkynYUtELjiiiuK5kEam2yySaSmgkEgJ4FTTz21aB6kseeee0ZqKhgEchNwL5RbxjUR8sy4qAkQIECAAAECBAgQqCzg4FyZzsIWCPRuImy88caRvjYI5CRw2mmnFc2DNPbYY49ITQWDQG4C7oVyy7gmQp4ZFzUBAgQIECBAgAABApUFHJwr01nYAoErr7yyeAIhDU2EFiRUCH0W+Otf/1o0D9LYfffdIzUVDAK5CbgXyi3jmgh5ZlzUBAgQIECAAAECBAhUFnBwrkxnYQsErrrqqqJ5kMawYcMiNRUMAjkJ/O1vfyuaB2nstttukZoKBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUCI0aMKJoHaQwdOjRSU8EgkJPA3//+96J5kMauu+4aqalgEMhNwL1QbhnXRMgz46ImQIAAAQIECBAgQKCygINzZToLWyCgidCCJAphsgROP/30onmQxi677BKpqWAQyE3AvVBuGddEyDPjoiZAgAABAgQIECBAoLKAg3NlOgtbIDBy5MjiCYQ0Ntpoo0hNBYNATgJnnHFG0TxIY+edd47UVDAI5CbgXii3jGsi5JlxURMgQIAAAQIECBAgUFnAwbkynYUtELj66quL5kEaG264YaSmgkEgJ4EzzzyzaB6ksdNOO0VqKhgEchNwL5RbxjUR8sy4qAkQIECAAAECBAgQqCzg4FyZzsIWCPRuImywwQaRvjYI5CRw1llnFc0DTYScsi7WcQXcC+W5JwZ1dXV15Rm6qAkQIECAAAECBAgQINA3AQfnvnmZ3S6Ba665pngCIQ1NhHblVjSdCZx99tmx4447FpPTn+nJBINAbgLuhXLL+PvxaiLkmXdREyBAgAABAgQIECBQQcDBuQKaJa0RuPbaa4vmQRrrr79+pKaCQSAngXPOOSd22GGHIuT0Z3oywSCQm4B7odwyromQZ8ZFTYAAAQIECBAgQIBAZQEH58p0FrZA4LrrriuaB2mst956kZoKBoGcBM4999z46Ec/WoSc/kxPJhgEchNwL5RbxjUR8sy4qAkQIECAAAECBAgQqCzg4FyZzsIWCGgitCCJQpgsgfPOOy+233774jXSn+nJBINAbgLuhXLLuCZCnhkXNQECBAgQIECAAAEClQUcnCvTWdgCgeuvv754AiGNddddN1JTwSCQk0DvJsJ2220X6ckEg0BuAu6Fcsu4JkKeGRc1AQIECBAgQIAAAQKVBRycK9NZ2AKBG264oWgepLHOOutEaioYBHISOP/88yM1D9LYdtttIzUVDAK5CbgXyi3jmgh5ZlzUBAgQIECAAAECBAhUFnBwrkxnYQsEejcRPvKRj0T62iCQk8AFF1xQNA/S2GabbSI1FQwCuQm4F8ot45oIeWZc1AQIECBAgAABAgQIVBZwcK5MZ2ELBG688cbiCYQ0NBFakFAh9FngwgsvLJoHaWy99daRmgoGgdwE3AvllnFNhDwzLmoCBAgQIECAAAECBCoLODhXprOwBQI33XRT0TxIY+21147UVDAI5CRw0UUXFc2DNLbaaqtITQWDQG4C7oVyy7gmQp4ZFzUBAgQIECBAgAABApUFHJwr01nYAoGbb765aB6ksdZaa0VqKhgEchK4+OKLi+ZBGltuuWWkpoJBIDcB90K5ZVwTIc+Mi5oAAQIECBAgQIAAgcoCDs6V6SxsgYAmQguSKITJErjkkkuK5kEaW2yxRaSmgkEgNwH3QrllXBMhz4yLmgABAgQIECBAgACBygIOzpXpLGyBwC233FI8gZDGhz/84UhNBYNATgKXXnpp0TxIY/PNN4/UVDAI5CbgXii3jGsi5JlxURMgQIAAAQIECBAgUFnAwbkynYUtELj11luL5kEaa665ZqSmgkEgJ4HLLrusaB6ksdlmm0VqKhgEchNwL5R
bxjUR8sy4qAkQIECAAAECBAgQqCzg4FyZzsIWCPRuIqyxxhqRvjYI5CRw+eWXF82DNDbddNNITQWDQG4C7oVyy7gmQp4ZFzUBAgQIECBAgAABApUFHJwr01nYAoHbbruteAIhDU2EFiRUCH0WuOKKK4rmQRqbbLJJpKaCQSA3AfdCuWVcEyHPjIuaAAECBAgQIECAAIHKAg7OleksbIHA7bffXjQP0lh99dUjNRUMAjkJXHnllUXzII2NN944UlPBIJCbgHuh3DKuiZBnxkVNgAABAgQIECBAgEBlAQfnynQWtkBg1KhRRfMgjdVWWy1SU8EgkJPAVVddVTQP0hg2bFikpoJBIDcB90K5ZVwTIc+Mi5oAAQIECBAgQIAAgcoCDs6V6SxsgYAmQguSKITJEhgxYkTRPEhj6NChkZoKBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUCo0ePLp5ASGPVVVeN1FQwCOQkMHLkyKJ5kMZGG20UqalgEMhNwL1QbhnXRMgz46ImQIAAAQIECBAgQKCygINzZToLWyBwxx13FM2DNFZZZZVITQWDQE4CV199ddE8SGPDDTeM1FQwCOQm4F4ot4xrIuSZcVETIECAAAECBAgQIFBZwMG5Mp2FLRDo3URYeeWVI31tEMhJQBMhp2yLdWIC7oXy3BuDurq6uvIMXdQECBAgQIAAAQIECBDom4CDc9+8zG6XwJ133lk8gZCGJkK7ciuazgSuueaa4gmENDbYYINITQWDQG4C7oVyy/j78Woi5Jl3URMgQIAAAQIECBAgUEHAwbkCmiWtEbjrrruK5kEaK620UqSmgkEgJ4Frr722aB6ksf7660dqKhgEchNwL5RbxjUR8sy4qAkQIECAAAECBAgQqCzg4FyZzsIWCPRuIqy44oqRvjYI5CRw3XXXFc2DNNZbb71ITQWDQG4C7oVyy7gmQp4ZFzUBAgQIECBAgAABApUFHJwr01nYAoF//OMfxRMIaWgitCChQuizwPXXX180D9JYd911IzUVDAK5CbgXyi3jmgh5ZlzUBAgQIECAAAECBAhUFnBwrkxnYQsE7r777qJ5kMYKK6wQqalgEMhJ4IYbbiiaB2mss846kZoKBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUC99xzT9E8SGP55ZeP1FQwCOQkcOONNxbNgzQ+8pGPRGoqGARyE3AvlFvGNRHyzLioCRAgQIAAAQIECBCoLODgXJnOwhYI9G4iLLfccpG+NgjkJHDTTTcVzYM01l577UhNBYNAbgLuhXLLuCZCnhkXNQECBAgQIECAAAEClQUcnCvTWdgCgXvvvbd4AiENTYQWJFQIfRa4+eabi+ZBGmuttVakpoJBIDcB90K5ZVwTIc+Mi5oAAQIECBAgQIAAgcoCDs6V6SxsgcB9991XNA/SWHbZZSM1FQwCOQnccsstRfMgjQ9/+MORmgoGgdwE3AvllnFNhDwzLmoCBAgQIECAAAECBCoLODhXprOwBQK9mwjLLLNMpK8NAjkJ3HrrrUXzII0111wzUlPBIJCbgHuh3DKuiZBnxkVNgAABAgQIECBAgEBlAQfnynQWtkDg/vvvL55ASGPppZeO9LVBICeB2267rWgepLHGGmtEaioYBHITcC+UW8Y1EfLMuKgJECBAgAABAgQIEKgs4OBcmc7CFgg88MADkZ5A0ERoQTKFUEng9ttvL5oHaay++uqRmgoGgdwE3AvllnFNhDwzLmoCBAgQIECAAAECBCoLODhXprOwBQIPPvhg8QRCGksttVSkpoJBICeBUaNGFc2DNFZbbbVITQWDQG4C7oVyy7gmQp4ZFzUBAgQIECBAgAABApUFHJwr01nYAoHeTYQll1wy0tcGgZwERo8eXTQP0lh11VUjNRUMArkJuBfKLeOaCHlmXNQECBAgQIAAAQIECFQWcHCuTGdhCwQeeuih4gmENDQRWpB
QIfRZ4I477iiaB2msssoqkZoKBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUCDz/8cNE8SGOJJZaI1FQwCOQkcOeddxbNgzRWXnnlSE0Fg0BuAu6Fcsu4JkKeGRc1AQIECBAgQIAAAQKVBRycK9NZ2AKB3k2ExRdfPNLXBoGcBHo3EVZaaaVIXxsEchNwL5RbxjUR8sy4qAkQIECAAAECBAgQqCzg4FyZzsIWCDzyyCPFEwhpLLbYYpG+NgjkJHDXXXcVTyCkseKKK0b62iCQm4B7odwyromQZ8ZFTYAAAQIECBAgQIBAZQEH58p0FrZAYMyYMZGeQNBEaEEyhVBJ4B//+EekJxDSWGGFFSJ9bRDITcC9UG4Z10TIM+OiJkCAAAECBAgQIECgsoCDc2U6C1sg8OijjxZPIKSx6KKLRmoqGARyErj77ruLJxDSWH755SN9bRDITcC9UG4Z10TIM+OiJkCAAAECBAgQIECgsoCDc2U6C1sg0LuJsMgii0T62iCQk8A999xTPIGgiZBT1sU6roB7oTz3xKCurq6uPEMXNQECBAgQIECAAAECBPom4ODcNy+z2yXw2GOPFU8gpKGJ0K7ciqYzgXvvvbd4AiGN5ZZbLlJTwSCQm4B7odwy/n68mgh55l3UBAgQIECAAAECBAhUEHBwroBmSWsEHn/88aJ5kMbCCy8cqalgEMhJ4L777iuaB2ksu+yykZoKBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUCvZsICy20UKSvDQI5Cdx///1F8yCNZZZZJlJTwSCQm4B7odwyromQZ8ZFTYAAAQIECBAgQIBAZQEH58p0FrZA4IknniieQEhjwQUXjPS1QSAngQceeKBoHqSx9NJLR2oqGARyE3AvlFvGNRHyzLioCRAgQIAAAQIECBCoLODgXJnOwhYIPPnkk5GeQNBEaEEyhVBJ4MEHHyyaB2kstdRSkZoKBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUCTz31VPEEQhoLLLBApKaCQSAngYceeqhoHqSx5JJLRmoqGARyE3AvlFvGNRHyzLioCRAgQIAAAQIECBCoLODgXJnOwhYI9G4izD///JG+NgjkJPDwww8XzYM0llhiiUhNBYNAbgLuhXLLuCZCnhkXNQECBAgQIECAAAEClQUcnCvTWdgCgaeffrp4AiENTYQWJFQIfRZ45JFHiuZBGosvvnikpoJBIDcB90K5ZVwTIc+Mi5oAAQIECBAgQIAAgcoCDs6V6SxsgcAzzzxTNA/SmG+++SI1FQwCOQmMGTOmaB6ksdhii0VqKhgEchNwL5RbxjUR8sy4qAkQIECAAAECBAgQqCzg4FyZzsIWCPRuIsw777yRvjYI5CTw6KOPFs2DNBZddNFITQWDQG4C7oVyy7gmQp4ZFzUBAgQIECBAgAABApUFHJwr01nYAoFnn322eAIhjXnmmSfS1waBnAQee+yxonmQxiKLLBKpqWAQyE3AvVBuGddEyDPjoiZAgAABAgQIECBAoLKAg3NlOgtbIPDcc89FegJBE6EFyRRCJYHeTYSFF1440tcGgdwE3AvllnFNhDwzLmoCBAgQIECAAAECBCoLODhXprOwBQLPP/988QRCGnPPPXekpoJBICeBxx9/vHgCIY2FFloo0tcGgdwE3AvllnFNhDwzLmoCBAgQIECAAAECBCoLODhXprOwBQK9mwhzzTVXpK8NAjkJPPHEE5GeQEhjwQUXjPS1QSA3AfdCuWVcEyHPjIuaAAECBAgQIECAAIHKAg7OleksbIHACy+8UDyBkMaHPvShSF8bBHISePLJJ4snENJYYIEFIn1tEMhNwL1QbhnXRMgz46ImQIAAAQIECBAgQKCygINzZToLWyDw4osvRnoCQROhBckUQiWBp556qngCIY35558/0tcGgdwE3AvllnFNhDwzLmoCBAgQIEC
AAAECBCoLODhXprOwBQK9mwgf/OAHI31tEMhJ4Omnny6eQEhjvvnmi/S1QSA3AfdCuWVcEyHPjIuaAAECBAgQIECAAIHKAg7OleksbIHASy+9VPwaozTmnHPOSF8bBHISeOaZZ4onENKYd955I31tEMhNwL1QbhnXRMgz46ImQIAAAQIECBAgQKCygINzZToLWyDw8ssvR3oCQROhBckUQiWBZ599tngCIY155pkn0tcGgdwE3AvllnFNhDwzLmoCBAgQIECAAAECBCoLODhXprOwBQL//Oc/iycQ0phjjjkiNRUMAjkJPPfcc8UTCGmkDxlPXxsEchNwL5RbxjUR8sy4qAkQIECAAAECBAgQqCzg4FyZzsIWCPRuIgwZMiTS1waBnASef/754gmENNKHjKevDQK5CbgXyi3jmgh5ZlzUBAgQIECAAAECBAhUFnBwrkxnYQsEXnnlleIJhDQ0EVqQUCH0WeCFF14onkDQROgznQUtEnAv1KJk9iGUQV1dXV19mG8qAQIECBAgQIAAAQIEshVwcM429QKPiFdffbVoHqQx++yzR2oqGARyEnjxxReLJxDSSB8ynpoKBoHcBNwL5Zbx9+PVRMgz76ImQIAAAQIECBAgQKCCgINzBTRLWiPQu4kw22yzFU0Fg0BOAi+99FLRPEgjfch4aioYBHITcC+UW8Y1EfLMuKgJECBAgAABAgQIEKgs4OBcmc7CFgi89tprxRMIacw666yRvjYI5CSQPkw8NQ/SSB8ynpoKBoHcBNwL5ZZxTYQ8My5qAgQIECBAgAABAgQqCzg4V6azsAUC//rXvyI9gaCJ0IJkCqGSQO8PF0+fD5KaCgaB3ATcC+WWcU2EPDMuagIECBAgQIAAAQIEKgs4OFems7AFAv/+97+LJxDS+MAHPhCpqWAQyEnAh4vnlG2xTkzAvVCee8NnIuSZd1ETIECAAAECBAgQIFBBwMG5ApolrRHo3USYZZZZIn1tEMhJoHcTwYeL55R5sfYWcC+U537QRMgz76ImQIAAAQIECBAgQKCCgINzBTRLWiPwn//8p3gCIY2ZZ5450tcGgZwEfLh4TtkW68QE3AvluTc0EfLMu6gJECBAgAABAgQIEKgg4OBcAc2S1gj897//jfQEgiZCa1IqkD4K+HDxPoKZ3koB90KtTGtpUJoIpUQmECBAgAABAgQIECBA4H0BB2c7IWeB3k2EmWaaKdLXBoGcBHp/uLjPBckp82LtLeBeKM/9oImQZ95FTYAAAQIECBAgQIBABQEH5wpolrRG4PXXXy9+jVEaM844Y6SvDQI5CfhckJyyLdaJCbgXynNvaCLkmXdREyBAgAABAgQIECBQQcDBuQKaJa0ReOONNyI9gaCJ0JqUCqSPAj4XpI9gprdSwL1QK9NaGpQmQimRCQQIECBAgAABAgQIEHhfwMHZTshZ4M033yyeQEhjhhlmiNRUMAjkJOBXeuWUbbFOTMC9UJ57QxMhz7yLmgABAgQIECBAgACBCgIOzhXQLGmNQO8mwvTTTx/pa4NATgJ+pVdO2RarJoI90FtAE8F+IECAAAECBAgQIECAQIcCmggdQpnWSoG33nqreAIhjemmmy7S1waBnAR6/0ovT+PklHmxjvVm8qBBPV92dXXByURAEyGTRAuTAAECBAgQIECAAIHJF9BEmHxDr9BcgbfffjvSEwiaCM3NoSufPAFP40yen9XtEHAv1I489jUKTYS+iplPgAABAgQIECBAgEC2Ag7O2aZe4BHRu4kw7bTTFl8bBHIS8DROTtkW68QE3AvluTc0EfLMu6gJECBAgAABAgQIEKgg4OBcAc2S1gi88847xa8xSmOaaaaJ9LVBICcBjbScsi1WTQR7oLeAJoL9QIAAAQIECBAgQIAAgQ4FNBE6hDKtlQLvvvtupCcQNBFamV5BdSCgkdYBkimtF3Av1PoUTzBATYQ88y5qAgQIECBAgAABAgQqCDg4V0CzpDUC7733XvE
EQhqDBw+O1FQwCOQk0LuRpgZyyrxYewu4F8pzP2gi5Jl3URMgQIAAAQIECBAgUEHAwbkCmiWtEejdREi1kL42COQkoAZyyrZYJybgXijPvaGJkGfeRU2AAAECBAgQIEBgigj0PkhOkRf0IrUV6Orqqu21DeSFqYGB1J+631sNTNhbDUzdfTiQ300NqIGB3H91+N4514AmQh12oGsgQIAAAQIECBAg0FABbx41NHEVLjvng/OkuNRAhc3U0CVqwBuoDd26U+yy1YAamGKbqaEvlHMNaCI0dNO6bAIECBAgQIAAAQJ1EPAGah2yMHWuIeeDsybC1Nljdf8uasAbqHXfo/19fWpADfT3Hqv76+dcA5oIdd+dro8AAQIECBAgQIBAjQV6NxEOPtevu6lxqipd2m+3H9SzLueDc6dNBDVQaZvVepEaKE+PfwfKjZo8Qw2UZ08NlBs1eYYaeD97mghN3sWunQABAgQIECBAgMAACzg4D3AC+vnbOziXA6uBcqMmz1AD5dlTA+VGTZ6hBsqzpwbKjZo8Qw1oIjR5/7p2AgQIECBAgAABArUQcHCuRRr67SIcnMtp1UC5UZNnqIHy7KmBcqMmz1AD5dlTA+VGTZ6hBjQRmrx/XTsBAgQIECBAgACBWgg4ONciDf12EQ7O5bRqoNyoyTPUQHn21EC5UZNnqIHy7KmBcqMmz1ADmghN3r+unQABAgQIECBAgEAtBByca5GGfrsIB+dyWjVQbtTkGWqgPHtqoNyoyTPUQHn21EC5UZNnqAFNhCbvX9dOgAABAgQIECBAoBYCDs61SEO/XYSDczmtGig3avIMNVCePTVQbtTkGWqgPHtqoNyoyTPUgCZCk/evaydAgAABAgQIECBQCwEH51qkod8uwsG5nFYNlBs1eYYaKM+eGig3avIMNVCePTVQbtTkGWpAE6HJ+9e1EyBAgAABAgQIEKiFgINzLdLQbxfh4FxOqwbKjZo8Qw2UZ08NlBs1eYYaKM+eGig3avIMNaCJ0OT969oJECBAgAABAgQI1ELAwbkWaei3i3BwLqdVA+VGTZ6hBsqzpwbKjZo8Qw2UZ08NlBs1eYYa0ERo8v517QQIECBAgAABAgRqIeDgXIs09NtFODiX06qBcqMmz1AD5dlTA+VGTZ6hBsqzpwbKjZo8Qw1oIjR5/7p2AgQIECBAgAABArUQmNoH56uPOzDuu+wPY8U+eNrpY6bZ54r5VtgwVt3l6zHnoivXwmZCF3HawUvGkAWXjS3/5/zaXmPvC3NwLk+TGig36j1DDfTNqwmz1UDfsqQG+ubVhNlqoG9ZUgN986rL7EFdXV1ddbkY10GAAAECBAgQIECAQLMEBurgvPTG+8U0089UYL3z5n/jX88+Es/dd0MMmmba2PKw82LB1TavJaSDcy3TMlkXpQb6xqcG+ubVhNlqoG9ZUgN982rCbDXQtyypgb551WW2JkJdMuE6CBAgQIAAAQIECDRQYKAOzvuc9EzMPMe8Y4k984+RcdGRW8c0080Qe/5+TEw/8+y1E3Vwrl1KJvuC1EDfCNVA37yaMFsN9C1LaqBvXk2YrQb6liU10DevuszWRKhLJlwHAQIECBAgQIAAgQYK1OngnPjuvuD4uO43n421DzgqVtnpq7UTdXCuXUom+4LUQN8I1UDfvJowWw30LUtqoG9eTZitBvqWJTXQN6+6zNZEqEsmXAcBAgQIECBAgACBBgrU7eD8zluvx0l7zRlzL712bPeDET2izz9wU9x6ymHFrzzqiq6YZ5l14sP7fDfmXuYjY6k/c/fVcftp340XHrw50mctzLvcerHW/j8qPsegezx914i4/bTvRHrNQYMGxzzLrhNr7HVEzLPsumO91sPX/DVGn/7DeOWp+2O2eZeI9T55XFx93EHjfSZCJ9f298+uGPMuv34Mnma6uO+y38dMs88dOx59S/Fnfw6fiVCuqwbUQPkuafcMNaAG2r3Dy6NTA2qgfJc0f4YmQvN
zKAICBAgQIECAAAECAyZQt4Nzgjhj+Grxr2fHxAGnvVK4PH3XVXHh4VvGbPMsFstufmDx3x648qR45akHYusjL4n5Vxpa/Lcnb78kLv7utvGBuRct5g0ePE3cec7Po+u9d2PnY26PmeecP8Zcf0ZcftRuMdv8S8YymxwQ7737Ttx36e/jvy8/E1scdm4stPqW77/+FSfGiGM/FnMvvVYsscEe8dqzD8f9l59Q/N38Kw3r+WDlTq8tNRFef+W5mHbGWWLx9XaNN159PoYOP6nf866JUE6sBtRA+S5p9ww1oAbavcPLo1MDaqB8lzR/hiZC83MoAgIECBAgQIAAAQIDJlDHg/MFh20aT91xRRx41ttFI+BvhywXXV3vxc7HjCrehE8jPbFw5hfWKP5+l+PuKv7b3z+zQvEhzTv/YnTP5ym89Mjooimx5l5Hxio7fy1OPXDRmHaGmWPnY0fHdDPNWqx747UXi7Xptff47UMRXV1xyv7zx6zzLBrb/+ia4omGNB4ccUpcdfS+sfCa27zfROjq6vzaPrti/PPxu2O3X90XQxZYZqrlWxOhnFoNqIHyXdLuGWpADbR7h5dHpwbUQPkuaf4MTYTm51AEBAgQIECAAAECBAZMoI4H5/O+sVGkX0t00NnvxCtP3hfpp/hX3eUbsfIOXxzL6Y4zfxx3nPmT2Ov3jxZNhlMPWjzW2PPwWGPPI8aa9+JDt8Ws8y4eLz92V6TXXvegY2LF7Q4da85tpx4Rt516ZOx0zO3x3ttvxtlfWSc2+MxvYrktDu6Zl77HyfvOU/yqpdRESE2BTq7tA3MvUsx7963XY4/fPjxVc62JUM6tBt43UgPle6WtM9SAGmjr3u40LjWgBjrdK02ep4nQ5Oy5dgIECBAgQIAAAQIDLFDHg3N68iD96p/9//JyPHbzuXHJ9z46SaXtf3h1vPfeO3H+tzbbisr3AAAgAElEQVSOjb94Siw5dO8Jzk+/AmnEMQfE1kdcHAuuvsVYc9LnH1zxkz1i82+eVTzlcOVP94ptv3dFzL/yxmPNO+er68UMH5ijaCJ0em3zrrBB0USYYZYhsf1R107VjGsilHOrgfeN1ED5XmnrDDWgBtq6tzuNSw2ogU73SpPnaSI0OXuunQABAgQIECBAgMAAC9Tt4PzWf1+Nk/acM+ZbccPY9vtXxaM3nBWX/nCnWHWXr8cCq2wyQa0PLbFGvPjwbXHBYZvFxl/+Syy54Z4TbiL8/885mFAT4aGrTy0aB1v+zwXx9uuvxRU/2bP43wuvufVYr5WeUJhx1g8WTYROry01HVITYcbZPjTWh0VPjdRrIpQrq4H3jdRA+V5p6ww1oAbaurc7jUsNqIFO90qT52kiNDl7rp0AAQIECBAgQIDAAAvU7eD8j/OPi+t/+/lY58Cfx0rbD48XHxkVZw5fPVbd+Wux1v4/GkvrxYdvj9dffT7mW3Gj+M8LT8RfP71MrLn3d2L13Q8ba971vzs0Zp1nsfjgYqvG+d8aFusedGysuN3nx5rT/atcdjnuznjnzdfj7C+v3XMNPRO7uuLk/eeLuZZcs2gidHpt004/kybCVNjnr7zySgwZMqTP30kNvE+mBvq8dWq3QA34d2BCmzKnZrIaUAO518Ck/mHSRKjdP9suiAABAgQIECBAgEBzBOr0Bupz990QFx2xZUwz3Yyx+28ejOlnni263ns3Tj14iXj7v6/FrsffEzPPMW+Bmz5AOTUX3vz3P2PvE56IwdNMF6d9aulI8aQPTU4fnpzGP5+4J07/3Mqx2q7fiNV2+3b8+WMLFh+6vMsv7uj5kOb0Gn//zPIxeLoZYq/fjYmu6IpTP7FoTDP9jLFzmjf9TMVrPXzNacUTCt0frNzxtU07vSbCVCiJHXbYIVZbbbU49NBD+9RMUANR1JEamAqbtJ+/hRrw70Dub6CqATWQew1oIvTzP7RengABAgQIECBAgECuAgP1BurSG+8X0/z/N+fTBw6/8tQD8fw
DN8W0081Y/JR/788ieOzm8+LSH+wYMw2ZJ1bY+pCYfpYhcf/lf4z0JMKwL/wplhq2b5G+x2+9oPj8hCELLhvLbPrx6Hrvnbjr3GNj8OBpYqdjRxW/huihkX+JK4/ep5iz7KYfj/fefSfuu/R38e8XHo/Nv3VOz68vevSmc+KyH+wUcy66Uiyz6cfiPy89FekpidScmGeZdYprTKPTa/PrjPq/wkaMGBHDhg0rGgjDhw/vuJmgBtRA/+/OqfMd1IB/B3J/A1UNqIHca0ATYer8e+u7ECBAgAABAgQIEMhOYKDeQO0NPXja6WPmOeeL+VccWvzaoiELLTdeHp75x8i4/a/fjefvvyli0KCYc5EVi89JWGSt7cea+/SdV8atpx4RLz54a0w306xFM2LtA46KD8y1cM+8J26/OEb97ftFEyI9wTDPcusWvwJpnmXXGeu1nrz9krj1L4fHy4/eGTPPOX+sudeRce+lv4vpZvxATxMhLejk2jQRpk5pDR06NEaOHFl8s06bCWpADUyd3Tl1vosaiPDvwNh7LadfZ5QiVwNqYNz/b5tbDUzsXxu/zmjq/DvsuxAgQIAAAQIECBBopcDUfgO1lYg1Diq3g3P3T6H2TklZM0EN1HgDT4FLUwPlDTU1MAU2Wo1fQg2ogRpvz6lyabnVgCbCVNlWvgkBAgQIECBAgACBvAS8edTufOd4cO79U6idNBPUgBpom4AaaFtGJy8e/w78n9/Emsr+HZi8PVb31TnWwIRy4kmEuu9U10eAAAECBAgQIECgxgIOzjVOzhS4tBwPzhN6GmFSzQQ1MAU2Wo1fQg2Mn5xx30hVAzXewFPg0tSAGpgC26jRL5FjDWgiNHrLungCBAgQIECAAAEC9RPw5lH9cjIlr6j3wXlKvm4bXqv7jdQjjjiiJ5yDz+1qQ2hi6CWgBia+HdRAHqWiBtRAHjt94lFqIrxv40mE3CtB/AQIECBAgAABAgQmQ0ATYTLwGrDUm0d9S5ImQt+8mjBbDfQtS2qgb15NmK0G+pYlNdA3rybM1kTQRGjCPnWNBAgQIECAAAECBGotoIlQ6/RM9sV582jihLPPPnsMHz48jjzyyJ5J3jya7C1XuxdQA2qgdptyKl+QGlADU3nL1e7baSJoItRuU7ogAgQIECBAgAABAk0T0ERoWsb6dr05HpzLPhOhu3mQGgjp17mogb7tqabNVgPjZ0wNNG0XT971qgE1MHk7qPmrc6yBCWXNrzNq/l4WAQECBAgQIECAAIEBE6jrG6iXfn+HePSmc+ITp78e00w/4yR93n79X3HHmT+JMdefEf96bkwMnnb6mGPh5WOpYfvFclseHIMGDS7WX/ebz8bdFxxfar3k0L1j4y+eElf8ZI94+Jq/xqDB08R+Jz8fM8w65wTXnv75VeLlR++MpTfeP4YOP7H09afmhBwPzkOHDo2RI0eOxzzuG6fdE9TA+DtSDUzNKp3y30sN+Heg967y78D/afh3wL1QV1e+n32kiTDl/731igQIECBAgAABAgSyEWj6G6ipgXDOV9eLlx+7K+Zacs2YY5EV4923Xo/n7rsh/v3C47HYujvHZl/7e8SgQUVD4Om7rurJ7b+efSSeHH1ZzL/S0Jh9gWV6/vvcS68dy2z6sZ4mQvqLYV/4Uyw1bN/x9sVrzz4Spx28RPHfNREGvmwm9BTCxN40aksTQQ1Met/l9gaqGvDvwLgVoQYi/DvgXqi7LjQRBv5ezRUQIECAAAECBAgQINBAgaY3Ee48+2dx4x+/HBsc8utYbstP9mTgvXfeiit/tnc8ct3pscW3z4lF1tp+vOykv7v8qF1j6KEnxNKbHDDe36cnEdKcWT64QMy11Idjs6+fPt6cO8/6aYw+88fxxqsvaCLUYP/3/gnssjeN2tJEUAOaCL0F1IB/B3JvIqgBNZB7DUzsX0VPItTgRtUlECBAgAABAgQIEGiqQNObCJf/ePd49IYz4xNnvFH82qHeI/1qo1M
PWjyW3+rTsf6nf1m5iZDWP3DFibHfn1+MaaabYazXSU9BzLHQcnHfZX/QRBjgIuj+CexOmwdtaSKoAU2EbgE14N+BCVVDTk8iqAE1kHsNTOpfRE2EAb5R9e0JECBAgAABAgQINFmg6U2Ea391SNxz0a9i2+9dEfOvvPF4qUi/bmimIXPHdDN+oHITYesjL4kLDts0tjr8wlhoja16Xue//3w2/nzAArHFYefFxd/ZRhNhgAthhx12iFVXXTW6PzC508tRA+VP46iBTnfTwM5TA/4dyP0NVDWgBnKvAU2Egf132HcnQIAAAQIECBAg0FqBpr+Bmj7Q+KwvrRXpd9wuvu7OsehHdoj5VhoaM80+d2nOOv11Rukph5P3nTsWX3+34tcmdY97Lvp13Pynb8Q+Jz0df9xlZk2EUvH+nfDKK6/EkCFD+vxN1EB5E0EN9HlbDcgCNeDfgdzfQFUDaiD3GtBEGJB/fn1TAgQIECBAgAABAu0XaPobqClDz959TVz9y0/GK0/c25OwORdZqfhQ5ZU+Ojymn3n2CSay0ybCQWe/E1cdvW88dccVsc+JTxUf0pzGhYdvUTQrNjr0hPj9jtNpIjS0XNRAeRNBDTR0c3d42WpADXS4VVo7TQ2ogdZu7l6B+XVGOWRZjAQIECBAgAABAgT6SaANB+dumucfuCmeuO2ieOrOK+P5+26I9959p/hQ5O1+MCJmm2/J8QT70kQYc/0ZcdmPdokdfnpTzL30WvHWf1+NP+09V2zy1dOKD23WROinDToVXlYNdPbmkRqYCptxgL6FGlADA7T1avNt1YAaqM1m7McL0UToR1wvTYAAAQIECBAgQKDtAm06OPfO1Zv//mfcceaPY/TpPyo+KyF9ZsK4oy9NhHfe+E+ctM+HYuUdvhQf3ud78eCIU+Ka4z8Z+53yQgyednpNhAYXihro7M0jNdDgTV5y6WpADbR3d3cWmRpQA53tlGbP0kRodv5cPQECBAgQIECAAIEBFWjywfn1V5+PG//wpVj4w9vEEhvsMUHHi47YKp4cfVmk3+k+eJppx5rTlyZCWnjxd7aNf7/wWOxy3F1x2Q93jq7ois2/cWbxxIMnEQZ0G0/WN1cDnb15pAYma5vVerEaUAO13qBT4eLUgBqYCttswL+FJsKAp8AFECBAgAABAgQIEGiuQJMPzu+89XqcuPtsMe8KG07wSYOUlSt+skeMueGs+MQZr8egQYMnq4lw36W/j6v/96DY9fi7iw9z3uDTv4qlhu2ridDc7V9cuRro/M0jNdDwzT6Ry1cDaqCdO7vzqNSAGuh8tzR3piZCc3PnygkQIECAAAECBAgMuECTD84J78qj94mHRvw51tjz8Fhtt2+P9bTB03deGRd9Z5tYaPUtY/NvnjWedV+fREhPPpyy33yxwGqbxdN3XBn7nfJ8TD/LEE2EAd/Fk3cBaqDzN4/UwOTttbquVgNqoK57c2pdlxpQA1Nrrw3k99FEGEh935sAAQIECBAgQIBAwwXqfnBedvMDY9DgacZTXnrj/WOeZdeJN157Mc792vrxylP3x0xD5ol5l1svpp1xlnj1qfvj+QdujpnnmDd2+MmN8YG5F5nsJkJ6gfS9nr33ulhw1c1i6+9cWrymX2fU7CJQA52/eaQGmr3XJ3b1akANtHNndx6VGlADne+W5s7URGhu7lw5AQIECBAgQIAAgQEXqPvBeWJAQw89IZbe5IDir9MHvt55ztHx6I1nx2vPPhLvvvV6zPLBBWORtbaL1Xb9Zsw4+1wTfJm+PomQXuTOs34aN57wlVj/07+K5bf6lCbCgO/gyb8ANdC3N4/UwOTvubq9ghpQA3Xbk1P7etSAGpjae24gvp8mwkCo+54ECBAgQIAAAQIEWiJQ14NzS3gHPIzfbj+o5xq6uroG/HrqeAFqoI5ZmXLXpAbKLdVAuVGTZ6iB8uypgXKjJs9QA+9nTxOhybvYtRMgQIAAAQIECBAYYAEH5wFOQD9
/ewfncmA1UG7U5BlqoDx7aqDcqMkz1EB59tRAuVGTZ6gBTYQm71/XToAAAQIECBAgQKAWAg7OtUhDv12Eg3M5rRooN2ryDDVQnj01UG7U5BlqoDx7aqDcqMkz1IAmQpP3r2snQIAAAQIECBAgUAsBB+dapKHfLsLBuZxWDZQbNXmGGijPnhooN2ryDDVQnj01UG7U5BlqQBOhyfvXtRMgQIAAAQIECBCohYCDcy3S0G8X4eBcTqsGyo2aPEMNlGdPDZQbNXmGGijPnhooN2ryDDWgidDk/evaCRAgQIAAAQIECNRCwMG5Fmnot4twcC6nVQPlRk2eoQbKs6cGyo2aPEMNlGdPDZQbNXmGGtBEaPL+de0ECBAgQIAAAQIEaiHg4FyLNPTbRTg4l9OqgXKjJs9QA+XZUwPlRk2eoQbKs6cGyo2aPEMNaCI0ef+6dgIECBAgQIAAAQK1EHBwrkUa+u0iHJzLadVAuVGTZ6iB8uypgXKjJs9QA+XZUwPlRk2eoQY0EZq8f107AQIECBAgQIAAgVoIODjXIg39dhEOzuW0aqDcqMkz1EB59tRAuVGTZ6iB8uypgXKjJs9QA5oITd6/rp0AAQIECBAgQIBALQQcnGuRhn67CAfnclo1UG7U5BlqoDx7aqDcqMkz1EB59tRAuVGTZ6gBTYQm71/XToAAAQIECBAgQKAWAg7OtUhDv12Eg3M5rRooN2ryDDVQnj01UG7U5BlqoDx7aqDcqMkz1IAmQpP3r2snQIAAAQIECBAgUAsBB+dapKHfLsLBuZxWDZQbNXmGGijPnhooN2ryDDVQnj01UG7U5BlqQBOhyfvXtRMgQIAAAQIECBCohUDvg3MtLshF9JtAV1dXv712k19YDTQ5e327djUwYS810Ld91OTZakANNHn/Tolrz7kGBnXlHP2U2D1egwABAgQIECBAgEDGAt48yif5jo7ePMpnt084UjWgBtSAZvKE9oB7oXwqI+d/BzQR8tnnIiVAgAABAgQIECAwxQUcnKc4aW1fMOeD86SSogZqu2Wn+IWpAU2EKb6pGvaCakANNGzLTvHLzbkGNBGm+HbyggQIECBAgAABAgQItFXgiCOOiCOPPDIOP/zwSP/bIJCbgBrILePiHVdADdgTuQuogTx3gCZCnnkXNQECBAgQIECAAAECfRR45ZVXYrHFFov055AhQ2LMmDHFnwaBXATUQC6ZFufEBNSAvZG7gBrIdwdoIuSbe5ETIECAAAECBAgQINAHge6fvOte4mmEPuCZ2goBNdCKNApiMgTUwGTgWdoKATXQijRWCkIToRKbRQQIECBAgAABAgQI5CTQ+yfvuuP2NEJOO0CsasAeyF1ADeS+A8SvBvLeA5oIeedf9AQIECBAgAABAgQIdCAw7k/edS/xNEIHeKa0QkANtCKNgpgMATUwGXiWtkJADbQijZWD0ESoTGchAQIECBAgQIAAAQI5CEzoJ++64/Y0Qg47QIxqwB7IXUAN5L4DxK8G7AFNBHuAAAECBAgQIECAAAECkxCY2E/edS/xNILt03YBNdD2DIuvTEANlAn5+7YLqIG2Z7g8Pk2EciMzCBAgQIAAAQIECBDIVGBSP3nXTeJphEw3RyZhq4FMEi3MiQqoAZsjdwE1kPsOeD9+TQT7gAABAgQIECBAgAABAhMRKPvJu+5lnkawhdoqoAbamllxdSqgBjqVMq+tAmqgrZntW1yaCH3zMpsAAQIECBAgQIAAgUwEOvnJu24KTyNksikyC1MNZJZw4Y4noAZsitwF1EDuO+D/4tdEsBcIECBAgAABAgQIECAwAYFOf/Kue6mnEWyjtgmogbZlVDx9FVADfRUzv20CaqBtGa0ejyZCdTsrCRAgQIAAAQIECBBoqcC4P3m30UYbRTpIDxs2rCfiq666qvhvI0eOLP6bpxFauhkyDUsNZJp4YfcIqAGbIXcBNZD7Dhg7fk0E+4EAAQIECBAgQIAAAQLjCHT/5F1382D
o0KHFjEGDBvXM7OrqKv73iBEjepoJnkawldoioAbakklxVBVQA1XlrGuLgBpoSyanTByaCFPG0asQIECAAAECBAgQINASgfSTdwcccEAMHz48upsH3aFNqInQ/XepmXDMMcfEiSeeWDyVYBBoqoAaaGrmXPeUElADU0rS6zRVQA00NXP9d92aCP1n65UJECBAgAABAgQIEGiZwKSaCC0LVTgEJiigBmyM3AXUQO47QPxqIM89oImQZ95FTYAAAQIECBAgQIBABQEH5wpolrRKQA20Kp2CqSCgBiqgWdIqATXQqnR2HIwmQsdUJhIgQIAAAQIECBAgkLuAg3PuO0D8asAeyF1ADeS+A8SvBvLcA5oIeeZd1AQIECBAgAABAgQIVBBwcK6AZkmrBNRAq9IpmAoCaqACmiWtElADrUpnx8FoInRMZSIBAgQIECBAgAABArkLODjnvgPErwbsgdwF1EDuO0D8aiDPPaCJkGfeRU2AAAECBAgQIECAQAUBB+cKaJa0SkANtCqdgqkgoAYqoFnSKgE10Kp0dhyMJkLHVCYSIECAAAECBAgQIJC7gINz7jtA/GrAHshdQA3kvgPErwby3AOaCHnmXdQECBAgQIAAAQIECFQQcHCugGZJqwTUQKvSKZgKAmqgApolrRJQA61KZ8fBaCJ0TGUiAQIECBAgQIAAAQK5Czg4574DxK8G7IHcBdRA7jtA/Gogzz2giZBn3kVNgAABAgQIECBAgEAFAQfnCmiWtEpADbQqnYKpIKAGKqBZ0ioBNdCqdHYcjCZCx1QmEiBAgAABAgQIECCQu4CDc+47QPxqwB7IXUAN5L4DxK8G8twDmgh55l3UBAgQIECAAAECBAhUEHBwroBmSasE1ECr0imYCgJqoAKaJa0SUAOtSmfHwWgidExlIgECBAgQIECAAAECuQs4OOe+A8SvBuyB3AXUQO47QPxqIM89oImQZ95FTYAAAQIECBAgQIBABQEH5wpolrRKQA20Kp2CqSCgBiqgWdIqATXQqnR2HIwmQsdUJhIgQIAAAQIECBAgkLuAg3PuO0D8asAeyF1ADeS+A8SvBvLcA5oIeeZd1AQIECBAgAABAgQIVBBwcK6AZkmrBNRAq9IpmAoCaqACmiWtElADrUpnx8FoInRMZSIBAgQIECBAgAABArkLODjnvgPErwbsgdwF1EDuO0D8aiDPPaCJkGfeRU2AAAECBAgQIECAQAUBB+cKaJa0SkANtCqdgqkgoAYqoFnSKgE10Kp0dhyMJkLHVCYSIECAAAECBAgQIJC7gINz7jtA/GrAHshdQA3kvgPErwby3AOaCHnmXdQECBAgQIAAAQIECFQQcHCugGZJqwTUQKvSKZgKAmqgApolrRJQA61KZ8fBaCJ0TGUiAQIECBAgQIAAAQK5Czg4574DxK8G7IHcBdRA7jtA/Gogzz2giZBn3kVNgAABAgQIECBAgEAFAQfnCmiWtEpADbQqnYKpIKAGKqBZ0ioBNdCqdHYcjCZCx1QmEiBAgAABAgQIECCQu4CDc+47QPxqwB7IXUAN5L4DxK8G8twDmgh55l3UBAgQIECAAAECBAhUEHBwroBmSasE1ECr0imYCgJqoAKaJa0SUAOtSmfHwWgidExlIgECBAgQIECAAAECuQs4OOe+A8SvBuyB3AXUQO47QPxqIM89oImQZ95FTYAAAQIECBAgQIBABQEH5wpolrRKQA20Kp2CqSCgBiqgWdIqATXQqnR2HIwmQsdUJhIgQIAAAQIECBAgkLuAg3PuO0D8asAeyF1ADeS+A8SvBvLcA5oIeeZd1AQIECBAgAABAgQIVBBwcK6AZkmrBNRAq9IpmAoCaqACmiWtElADrUpnx8FoInRMZSIBAgQIECBAgAABArkLODjnvgPErwbsgdwF1EDuO0D8aiDPPaCJkGfeRU2AAAECBAgQIECAQAUBB+cKaJa0SkANtCqdgqkgoAYqoFnSKgE10Kp0dhyMJkL
HVCYSIECAAAECBAgQIJC7gINz7jtA/GrAHshdQA3kvgPErwby3AOaCHnmXdQECBAgQIAAAQIECFQQcHCugGZJqwTUQKvSKZgKAmqgApolrRJQA61KZ8fBaCJ0TGUiAQIECBAgQIAAAQK5Czg4574DxK8G7IHcBdRA7jtA/Gogzz2giZBn3kVNgAABAgQIECBAgEAFAQfnCmiWtEpADbQqnYKpIKAGKqBZ0ioBNdCqdHYcjCZCx1QmEiBAgAABAgQIECCQu4CDc+47QPxqwB7IXUAN5L4DxK8G8twDmgh55l3UBAgQIECAAAECBAhUEHBwroBmSasE1ECr0imYCgJqoAKaJa0SUAOtSmfHwWgidExlIgECBAgQIECAAAECuQs4OOe+A8SvBuyB3AXUQO47QPxqIM89oImQZ95FTYAAAQIECBAgQIBABQEH5wpolrRKQA20Kp2CqSCgBiqgWdIqATXQqnR2HIwmQsdUJhIgQIAAAQIECBAgkLuAg3PuO0D8asAeyF1ADeS+A8SvBvLcA5oIeeZd1AQIECBAgAABAgQIVBBwcK6AZkmrBNRAq9IpmAoCaqACmiWtElADrUpnx8FoInRMZSIBAgQIECBAgAABArkLODjnvgPErwbsgdwF1EDuO0D8aiDPPaCJkGfeRU2AAAECBAgQIECAQAUBB+cKaJa0SkANtCqdgqkgoAYqoFnSKgE10Kp0dhyMJkLHVCYSIECAAAECBAgQIJC7gINz7jtA/GrAHshdQA3kvgPErwby3AOaCHnmXdQECBAgQIAAAQIECFQQcHCugGZJqwTUQKvSKZgKAmqgApolrRJQA61KZ8fBaCJ0TGUiAQIECBAgQIAAAQK5Czg4574DxK8G7IHcBdRA7jtA/Gogzz2giZBn3kVNgAABAgQIECBAgEAFAQfnCmiWtEpADbQqnYKpIKAGKqBZ0ioBNdCqdHYcjCZCx1QmEiBAgAABAgQIECCQu4CDc+47QPxqwB7IXUAN5L4DxK8G8twDmgh55l3UBAgQIECAAAECBAhUEHBwroBmSasE1ECr0imYCgJqoAKaJa0SUAOtSmfHwWgidExlIgECBAgQIECAAAECuQs4OOe+A8SvBuyB3AXUQO47QPxqIM89oImQZ95FTYAAAQIECBAgQIBABQEH5wpolrRKQA20Kp2CqSCgBiqgWdIqATXQqnR2HIwmQsdUJhIgQIAAAQIECBAgkLuAg3PuO0D8asAeyF1ADeS+A8SvBvLcA5oIeeZd1AQIECBAgAABAgQIVBBwcK6AZkmrBNRAq9IpmAoCaqACmiWtElADrUpnx8FoInRMZSIBAgQIECBAgAABArkLODjnvgPErwbsgdwF1EDuO0D8aiDPPaCJkGfeRU2AAAECBAgQIECAQAUBB+cKaJa0SkANtCqdgqkgoAYqoFnSKgE10Kp0dhyMJkLHVCYSIECAAAECBAgQIJC7gINz7jtA/GrAHshdQA3kvgPErwby3AOaCHnmXdQECBAgQIAAAQIECFQQcHCugGZJqwTUQKvSKZgKAmqgApolrRJQA61KZ8fBaCJ0TGUiAQIECBAgQIAAAQK5Czg4574DxK8G7IHcBdRA7jtA/Gogzz2giZBn3kVNgAABAgQIECBAgEAFAQfnCmiWtEpADbQqnYKpIKAGKqBZ0ioBNdCqdHYcjCZCx1QmEiBAgAABAgQIECCQu4CDc+47QPxqwB7IXUAN5L4DxK8G8twDmgh55l3UBAgQIECAAAECBAhUEHBwroBmSasE1ECr0imYCgJqoAKaJa0SUAOtSmfHwWgidExlIgECBAgQIECAAAECuQs4OOe+A8SvBuyB3AXUQO47QPxqIM89oImQZ95FTYAAAQIECBAgQIBABQEH50wt4V4AACAASURBVApolrRKQA20Kp2CqSCgBiqgWdIqATXQqnR2HIwmQsdUJhIgQIAAAQIECBAgkLuAg3PuO0D8asAeyF1ADeS+A8SvBvLcA5o
IeeZd1AQIECBAgAABAgQIVBBwcK6AZkmrBNRAq9IpmAoCaqACmiWtElADrUpnx8FoInRMZSIBAgQIECBAgAABArkLODjnvgPErwbsgdwF1EDuO0D8aiDPPaCJkGfeRU2AAAECBAgQIECAQAUBB+cKaJa0SkANtCqdgqkgoAYqoFnSKgE10Kp0dhyMJkLHVCYSIECAAAECBAgQIJC7gINz7jtA/GrAHshdQA3kvgPErwby3AOaCHnmXdQECBAgQIAAAQIECFQQcHCugGZJqwTUQKvSKZgKAmqgApolrRJQA61KZ8fBaCJ0TGUiAQIECBAgQIAAAQK5Czg4574DxK8G7IHcBdRA7jtA/Gogzz2giZBn3kVNgAABAgQIECBAgEAFAQfnCmiWtEpADbQqnYKpIKAGKqBZ0ioBNdCqdHYcjCZCx1QmEiBAgAABAgQIECAwrkDvgySddgt0dXW1O8CK0amBinANXKYGJpw0NdDAzVzxktWAGqi4dVqzLOca0ERozTYWCAECBAgQIECAAIGpL+DNo6lvPlDfMeeD86TM1cBA7cip/33VgDdQp/6uq9d3VANqoF47cupfTc41oIkw9feb70iAAAECBAgQIECgNQLeQG1NKksDyfngrIlQuj2ymKAGvIGaxUafRJBqQA2ogXyfytREyH33i58AAQIECBAgQIDAZAj0biIcfG6+B6vJIKz10t9uP6jn+rx5VP7mkRqo9XaudHFqoJzNvwPlRk2eoQbKs6cGyo2aPEMNvJ89TYQm72LXToAAAQIECBAgQGCABRycBzgB/fztHZzLgdVAuVGTZ6iB8uypgXKjJs9QA+XZUwPlRk2eoQY0EZq8f107AQIECBAgQIAAgVoIODjXIg39dhEOzuW0aqDcqMkz1EB59tRAuVGTZ6iB8uypgXKjJs9QA5oITd6/rp0AAQIECBAgQIBALQQcnGuRhn67CAfnclo1UG7U5BlqoDx7aqDcqMkz1EB59tRAuVGTZ6gBTYQm71/XToAAAQIECBAgQKAWAg7OtUhDv12Eg3M5rRooN2ryDDVQnj01UG7U5BlqoDx7aqDcqMkz1IAmQpP3r2snQIAAAQIECBAgUAsBB+dapKHfLsLBuZxWDZQbNXmGGijPnhooN2ryDDVQnj01UG7U5BlqQBOhyfvXtRMgQIAAAQIECBCohYCDcy3S0G8X4eBcTqsGyo2aPEMNlGdPDZQbNXmGGijPnhooN2ryDDWgidDk/evaCRAgQIAAAQIECNRCwMG5Fmnot4twcC6nVQPlRk2eoQbKs6cGyo2aPEMNlGdPDZQbNXmGGtBEaPL+de0ECBAgQIAAAQIEaiHg4FyLNPTbRTg4l9OqgXKjJs9QA+XZUwPlRk2eoQbKs6cGyo2aPEMNaCI0ef+6dgIECBAgQIAAAQK1EHBwrkUa+u0iHJzLadVAuVGTZ/w/9u4DTIoiffz4uwssGZaco0hWkqKiIIgRFVHUUwwYMAe4M53eeaIXfncYDvXMWTGjgmdEFxAUBBUFRAGRoOS4xCUs7P952//M9cTumZ3QNf3t59lnYanurvpUvcxOv1NVxIBz7xEDzkYmlyAGnHuPGHA2MrkEMUASweTxS90RQAABBBBAAAEEEPCEQKbfOE97eIQs/OSZkLbnVyyQqrUbSJMu/aT72X+Uuq0P9YRNtEq8dmU7KWzeUU7+y3ueraO9Yrxxdu4mYsDZyF6CGEjMy4TSxEBivUQMJOZlQmliILFeIgYS8/JK6byysrIyr1SGeiCAAAIIIIAAAggggIBZAtl649z+uIulQkFVC6t0zy7ZvnaprFs4U/IqVJST7/yvNO9xoicheePsyW4pV6WIgcT4iIHEvEwoTQwk1kvEQGJeJpQmBhLrJWIgMS+vlCaJ4JWeoB4IIIAAAggggAACCBgokK03zhe+sEaq1WkcIrbm+8/kw7sHSYVKleX8p5dJQbXanhPljbPnuqTcFSIGEiMkBhLzMqE0MZBYLxE
DiXmZUJoYSKyXiIHEvLxSmiSCV3qCeiCAAAIIIIAAAgggYKCAl944K9+C9x+RL564Xo645F/S7axbPSfKG2fPdUm5K0QMJEZIDCTmZUJpYiCxXiIGEvMyoTQxkFgvEQOJeXmlNEkEr/QE9UAAAQQQQAABBBBAwEABr71xLt1bIi8MqysN2x8hp/9jalB0/eJZ8vW4O60lj8qkTBp1OEoOv/Cv0rDDkSHqaxZMkzmv/VU2/DRbdK+Fxp2Olt7D/2ntYxA4Vs+fKnNeu0f0mnl5+dKo41HSa9hoadSxT8i1fp7+unw3/v+keNUiqdX4IDn6qodl2sNXROyJ4KZub17fVRp3PkbyK1SShZ88LVVrN5QzH/jK+p7Ogz0RnHWJAWLAeZTkdgligBjI7RHu3DpigBhwHiXmlyCJYH4f0gIEEEAAAQQQQAABBLIm4LU3zgrx1qgesn3tMrnktWLLZfX8KfLBXSdLrUZtpOOJI6yfLZ78ghSvWiyD7v5Ymh7S3/rZyjkfy0d/PU1qNGxtlcvPryDzJv5byg7sl6Fj50i1uk1l2Yy35NN/nSu1mraTDgMvkQP7S2XhpKdl1+Y1ctKd70qLnif/dv2i52Xqg5dKw/a95aC+58m2tT/Lok+fs/6t6SEDghsru62bJhFKitdJxSrVpe3R58jureul/6gX0t7vJBGciYkBYsB5lOR2CWKAGMjtEe7cOmKAGHAeJeaXIIlgfh/SAgQQQAABBBBAAAEEsibgxTfO7995vKyaWyQj3tlnJQLeuLaTlJUdkKFjv7UewuuhMxbe/n0v69/Pfni+9bM3r+tibdI89KHvgvspbFr6nZWUOGzY3dJt6G3y6ojWUrFyNRn64HdSqWpN67zd2zZa5+q1z3tyiUhZmYwb3lRqNmotg/853ZrRoMdPU8fJlAcukpaHnfpbEqGszH3dru8qW35ZIOc+tlAKm3XIWH+TRHCmJgaIAedRktsliAFiILdHuHPriAFiwHmUmF+CJIL5fUgLEEAAAQQQQAABBBDImoAX3zj/9/ZjRZclumJCqRSvXCj6Kf7uZ98uhw75Q4jT3LfHyNy375VhTy+3kgyvXtFWep1/l/Q6f3RIuY1LvpGajdvK5hXzRa/d54qx0vX0kSFlvnl1tHzz6t1y1tg5cmDfHplwy1HS97onpNNJVwbL6T1euqiRtdSSJhE0KeCmbjUatrLK7d9bIuc9+XNG+5okgjM3MfCbETHgPFZytQQxQAzk6th22y5igBhwO1ZMLkcSweTeo+4IIIAAAggggAACCGRZwItvnHXmgS79M/yVzbJi9rvy8d/OiKs0+P+myYEDpfLen46T4/4wTtr1vyBqeV0CaerYS2TQ6I+kec+TQsro/gdF954nJ97xjjXLYfJ9w+S0vxVJ00OPCyk38dajpXKNOlYSwW3dGnfpayURKlcvlMH/+jyjPU4SwZmbGPjNiBhwHiu5WoIYIAZydWy7bRcxQAy4HSsmlyOJYHLvUXcEEEAAAQQQQAABBLIs4LU3znt3bZUXzq8rTbr2k9P+PkWWz3xHJv3fWdL97D9Ks24Do2rVP6iXbPz5G3n/zhPkuJtfkXb9zo+eRPj/+xxESyIsmfaqlTg4+S/vy76SbVJ07/nWn1seNijkWjpDoUrNelYSwW3dNOmgSYQqteqHbBadia4nieCsTAz8ZkQMOI+VXC1BDBADuTq23baLGCAG3I4Vk8uRRDC596g7AggggAACCCCAAAJZFvDaG+fv33tYZjx5oxw14t9yyOBRsnHpt/L2qJ7Sfeht0nv4P0O0Nv48R0q2rpcmXY+VnRt+ldev6SCHXXCP9PzdnSHlZjw1Umo2aiP12nSX9/40QPpc8aB0Pf3GkDKBpVzOfnielO4pkQk3HxGsQ7BgWZm8NLyJNGh3mJVEcFu3igVVSSJkYJwXFxdLYWFhwnciBn4jIwYSHjqeO4EY4HUg2qD0UzKZGCAG/B4D8V6YSCJ47mWbCiGAAAI
IIIAAAgggYI6Alx6grls4Uz4cfbJUqFRFfvfET1JQrZaUHdgvr155kOzbtU3OeeQHqVansYWrGyhrcmHPji1ywXO/Sn6FSvLa1e1F26ObJuvmyXps+fUHGX/DodLjnNulx7l/lpcvbW5tunz2Q3ODmzTrNd68rrPkV6osw55aJmVSJq9e3loqFFSRoVquoKp1rZ+nv2bNUAhsrOy6bhULSCJkICSGDBkiPXr0kJEjRyaUTCAGxIojYiADgzTNtyAGeB3w+wNUYoAY8HsMkERI8wstl0cAAQQQQAABBBBAwK8C2XqA2v64i6XC/384rxsOF69aLOsXz5KKlapYn/K370WwYvZ/ZdI/zpSqhY2ky6BrpaB6oSz69FnRmQgDfv+iHDzgIqv7fvn6fWv/hMLmHaXD8ZdJ2YFSmf/ug5KfX0HOevBbaxmiJZ+9IpMfuNAq0/H4y+TA/lJZOOkp2bHhFznxTxODyxctnzVRPvnHWVK39SHS4fhLZeemVaKzJDQ50ajDUVYd9XBbN5YzSn+ETZ06VQYMGGAlEEaNGuU6mUAMEAPpH52ZuQMxwOuA3x+gEgPEgN9jgCRCZl5vuQsCCCCAAAIIIIAAAr4TyNYDVDt0fsUCqVa3iTTt2t9atqiwRaeIfljz/Wcy5/W/yvpFs0Ty8qRuq67WPgmteg8OKbt63mT5+tXRsvGnr6VS1ZpWMuKIS/4lNRq0DJb7dc5H8u0bf7eSEDqDoVGnPtYSSI06HhVyrZVzPpavX7lLNi+fJ9XqNpXDht0tP056SipVqRFMIugJbupGEiEzodW/f3/57LPPrJu5TSYQA8RAZkZnZu5CDIjwOhA61vy0nJG2nBggBsL/t/VbDMR6tWE5o8y8DnMXBBBAAAEEEEAAAQRyUiDTD1BzEtHDjfLbG+fAp1DtXeKUTCAGPDyAU1A1YsA5oUYMpGCgefgSxAAx4OHhmZGq+S0GSCJkZFhxEwQQQAABBBBAAAEE/CXAw6Pc7m8/vnG2fwrVTTKBGCAGck2AGMi1Hi1fe3gd+J9frKQyrwPlG2NeP9uPMRCtT5iJ4PWRSv0QQAABBBBAAAEEEPCwAG+cPdw5KaiaH984R5uNEC+ZQAykYKB5+BLEQGTnhD9IJQY8PIBTUDVigBhIwTAy+hJ+jAGSCEYPWSqPAAIIIIAAAggggID3BHh45L0+SWWN7G+cU3ndXLhW4EHq6NGjg8258t2yXGgabbAJEAOxhwMx4I9QIQaIAX+M9NitJInwmw0zEfweCbQfAQQQQAABBBBAAIFyCJBEKAeeAafy8CixTiKJkJiXCaWJgcR6iRhIzMuE0sRAYr1EDCTmZUJpkggkEUwYp9QRAQQQQAABBBBAAAFPC5BE8HT3lLtyPDyKTVi7dm0ZNWqU3H333cFCPDwq95Dz3AWIAWLAc4MywxUiBoiBDA85z92OJAJJBM8NSiqEAAIIIIAAAggggIBpAiQRTOuxxOrrxzfOTnsiBJIHmkDQ5VyIgcTGlGmliYHIHiMGTBvF5asvMUAMlG8EmX+2H2MgWq+xnJH5Y5kWIIAAAggggAACCCCQNQGvPkCd9PchsnzWRLl8fIlUKKgS12dfyXaZ+/a9smzGW7J93TLJr1ggdVp2loMHXCydTr5S8vLyrfO/eOJ6WfD+I47W7fpfIMf9YZwU3Xue/Dz9dcnLryAXv7ReKtesG/Xc8Td2k83L50n744ZL/1HPO14/kwX8+Ma5f//+8tlnn0Uwhz84DRQgBiJHJDGQyShN/b2IAV4H7KOK14H/afA6wO9CZWX+3fuIJELqX2+5IgIIIIAAAggggAACvhEw/QGqJhAm3nq0bF4xXxq0O0zqtOoq+/eWyLqFM2XHhl+kTZ+hcsJtb4rk5VkJgdXzpwT7dvvapbLyu0+k6SH9pXazDsGfN2x/hHQ4/tJgEkH/YcDvX5SDB1wUMS62rV0qr115kPVzkgjZD5tosxBiPTTKlSQCMRB/3PntASoxwOt
AeEQQAyK8DvC7UCAuSCJk/3c1aoAAAggggAACCCCAAAIGCpieRJg34X758tmbpe+1j0unk68K9sCB0r0y+f4LZOkX4+WkP0+UVr0HR/SO/tun/zpH+o98TtoPvCTi33UmgpapXq+ZNDj4cDnhj+Mjysx75z757u0xsnvrBpIIHhj/9k9gOz00ypUkAjFAEsEuQAzwOuD3JAIxQAz4PQZivSoyE8EDv6hSBQQQQAABBBBAAAEETBUwPYnw6ZjfyfKZb8vlb+22lh2yH7q00atXtJXOp1wjx1zzaNJJBD1/cdHzcvHLG6VCpcoh19FZEHVadJKFnzxDEiHLQRD4BLbb5EGuJBGIAZIIAQFigNeBaNHgp5kIxAAx4PcYiPeKSBIhy7+ocnsEEEAAAQQQQAABBEwWMD2J8Plj18oPHz4mp/2tSJoeelxEV+hyQ1ULG0qlKjWSTiIMuvtjef/O4+WUuz6QFr1OCV5n15a18vIlzeSkO/8rH91zKkmELAfCkCFDpHv37hLYMNltdYgB59k4xIDb0ZTdcsQArwN+f4BKDBADfo8BkgjZfR3m7ggggAACCCCAAAII5KyA6Q9QdUPjd27qLbrGbds+Q6X1kUOkySH9pWrtho595nY5I53l8NJFDaXtMedayyYFjh8+fFxmv3i7XPjCann27GokERzF01uguLhYCgsLE74JMeCcRCAGEh5WWTmBGOB1wO8PUIkBYsDvMUASISsvv9wUAQQQQAABBBBAAIHcFzD9Aar20NoF02Xao1dJ8a8/BjusbqtDrE2VDzljlBRUqx21I90mEa6YUCpTHrhIVs0tkgufX2Vt0qzHB3edZCUrjh35nDx9ZiWSCIaGCzHgnEQgBgwd3C6rTQwQAy6HSs4WIwaIgZwd3LaGsZyRH3qZNiKAAAIIIIAAAgggkCaBXHjjHKBZv3iW/PrNh7Jq3mRZv3CmHNhfam2KfPo/pkqtJu0iBBNJIiyb8ZZ88s+zZch9s6Rh+96yd9dWefGCBjLw1tesTZtJIqRpgGbgssSAu4dHxEAGBmOWbkEMEANZGnqeuS0xQAx4ZjCmsSIkEdKIy6URQAABBBBAAAEEEMh1gVx642zvqz07tsjct8fId+P/ae2VoHsmhB+JJBFKd++UFy6sL4cOuUkOv/Bv8tPUcTL9kavk4nEbJL9iAUkEgwOFGHD38IgYMHiQO1SdGCAGcnd0u2sZMUAMuBspZpciiWB2/1F7BBBAAAEEEEAAAQSyKmDyG+eSrevly2dukpaHnyoH9T0vquOHo0+Rld99Irqme36FiiFlEkki6Ikf3XOa7NiwQs5+eL588n9DpUzK5MTb37ZmPDATIavDuFw3JwbcPTwiBso1zDx9MjFADHh6gGagcsQAMZCBYZb1W5BEyHoXUAEEEEAAAQQQQAABBMwVMPmNc+neEnn+d7WkcZd+UWcaaK8U3XueLJv5jlz+Vonk5eWXK4mwcNLTMu0/V8g5jyywNnPue81jcvCAi0gimDv8rZoTA+4fHhEDhg/2GNUnBoiB3BzZ7ltFDBAD7keLuSVJIpjbd9QcAQQQQAABBBBAAIGsC5j8xlnxJj9woSyZ+rL0Ov8u6XHun0NmG6yeN1k+vOdUadHzZDnxjncirBOdiaAzH8Zd3ESa9ThBVs+dLBePWy8F1QtJImR9FJevAsSA+4dHxED5xppXzyYGiAGvjs1M1YsYIAYyNdayeR+SCNnU594IIIAAAggggAACCBgu4PU3zh1PHCF5+RUilNsfN1wadTxKdm/bKO/edowUr1okVQsbSeNOR0vFKtVl66pFsn7xbKlWp7EMufdLqdGwVbmTCHoBvdfaH7+Q5t1PkEH3TLKuyXJGZgcBMeD+4RExYPZYj1V7YoAYyM2R7b5VxAAx4H60mFuSJIK5fUfNEUAAAQQQQAABBBDIuoDX3zjHAuo/8jlpP/AS6591w9d5Ex+Q5V9OkG1rl8r+vSVSvV5zadX7dOlxzh1SpXa
DqJdJdCaCXmTeO/fJl8/dIsdc85h0PuVqkghZH8HlrwAxkNjDI2Kg/GPOa1cgBogBr43JTNeHGCAGMj3msnE/kgjZUOeeCCCAAAIIIIAAAgjkiIBX3zjnCG/Wm/Hk4LxgHcrKyrJeHy9WgBjwYq+krk7EgLMlMeBsZHIJYsC594gBZyOTSxADv/UeSQSTRzF1RwABBBBAAAEEEEAgywK8cc5yB6T59rxxdgYmBpyNTC5BDDj3HjHgbGRyCWLAufeIAWcjk0sQAyQRTB6/1B0BBBBAAAEEEEAAAU8I8MbZE92QtkrwxtmZlhhwNjK5BDHg3HvEgLORySWIAefeIwacjUwuQQyQRDB5/FJ3BBBAAAEEEEAAAQQ8IcAbZ090Q9oqwRtnZ1piwNnI5BLEgHPvEQPORiaXIAace48YcDYyuQQxQBLB5PFL3RFAAAEEEEAAAQQQ8IQAb5w90Q1pqwRvnJ1piQFnI5NLEAPOvUcMOBuZXIIYcO49YsDZyOQSxABJBJPHL3VHAAEEEEAAAQQQQMATArxx9kQ3pK0SvHF2piUGnI1MLkEMOPceMeBsZHIJYsC594gBZyOTSxADJBFMHr/UHQEEEEAAAQQQQAABTwjwxtkT3ZC2SvDG2ZmWGHA2MrkEMeDce8SAs5HJJYgB594jBpyNTC5BDJBEMHn8UncEEEAAAQQQQAABBDwhwBtnT3RD2irBG2dnWmLA2cjkEsSAc+8RA85GJpcgBpx7jxhwNjK5BDFAEsHk8UvdEUAAAQQQQAABBBDwhABvnD3RDWmrBG+cnWmJAWcjk0sQA869Rww4G5lcghhw7j1iwNnI5BLEAEkEk8cvdUcAAQQQQAABBBBAwBMCvHH2RDekrRK8cXamJQacjUwuQQw49x4x4GxkcgliwLn3iAFnI5NLEAMkEUwev9QdAQQQQAABBBBAAAFPCPDG2RPdkLZK8MbZmZYYcDYyuQQx4Nx7xICzkckliAHn3iMGnI1MLkEMkEQwefxSdwQQQAABBBBAAAEEPCHAG2dPdEPaKsEbZ2daYsDZyOQSxIBz7xEDzkYmlyAGnHuPGHA2MrkEMUASweTxS90RQAABBBBAAAEEEPCEgP2NsycqRCXSJlBWVpa2a5t8YWLA5N5LrO7EQHQvYiCxcWRyaWKAGDB5/Kai7n6OgbwyP7c+FaOHayCAAAIIIIAAAggg4GMBHh75p/N568jDI/+M9ugtJQaIAWKAZHK0McDvQv6JDD+/DpBE8M84p6UIIIAAAggggAACCKRcgDfOKSf17AX9/MY5XqcQA54dsimvGDFAEiHlg8qwCxIDxIBhQzbl1fVzDJBESPlw4oIIIIAAAggggAACCCDgZYHi4mIZPXq0PPjggwlVs3bt2vLdd99J69atEzqPwgh4UWDnzp0yefJk+fDDD+W5556T3bt3x62mjv/nn39ehgwZ4sXmUCcEogroOJ83b57Mnz/f+gr8ecuWLQmLHXvssTJhwgQpLCxM+FxOQCCdAjt27JClS5fKzz//bH2F/zlVD75HjhwpY8eOTWdTuLaHBUgieLhzqBoCCCCAAAIIIIAAAgikT0AfBl1yySWydetWVzd55513eIDqSopCXhWYMWOGFBUVWcmDqVOnuq6mJhC0fPfu3V2fQ0EEMi2gD04DSYLA90WLFqWkGnfddZeVfOZAIFsC69evj5og0KTBmjVrkq5W3bp15aCDDpKNGzfKsmXLol6nVatWVhK5f//+Sd+HE80XIIlgfh/SAgQQQAABBBBAAAEEEEhSYPny5TJ48GDrE6rxDj59lyQwp2VVYOHChVbCIPCVzKevu3XrZj08IoGQ1a7k5jYB/dS1fVZBIGGgs8zcHg0aNJBDDjlEDj30UOu7Jonfe++9iNM1gaYJZx6eupWlXHkE9HeSWLMJ3H7gIdr9W7RoIW3btrWSBYHvgT/Xq1fPOkVn2ES7h/7+owk0ZuCUp2dz41ySCLnRj7QCAQQQQAABBBBAAAE
EEhQoLS2Ve++91/qK93BVH6LqMkYcCHhdQD+pak8a6MOoeEevXr3kuOOOk/r168ttt90WUVTHvs5A4OGR13s+d+unYzg8YbB48eKEGty1a9eQhIEmDvShqv2I9gCV5YsSYqawCwFdNi7WskM61vX3kmSOihUrWgkCe5LAniyoUqVK3MtqouzMM88MKUMCLZmeyO1zSCLkdv/SOgQQQAABBBBAAAEEEIgi8O9//9tKHjgtAcAyLgwfLwvs378/mDTQZYq++uqruNXV/Tw0aTBw4EDre+PGja3y+inTu+++O+TcM844w5qBQALByyMgd+q2ffv2qLMLEvn0tc4u0ARBYHZB4HtBQUFcKE0S9+jRI6QMyxflztjKdEs2b94cc9mhX3/9Nenq6O8jsWYTtGnTJunr6om6183EiROD1+D//3Jx5uzJJBFytmtpGAIIIIAAAggggAACCIQLPProo1byQJcMsB9dunSRBQsWRIDphrO6bwIHAl4R+Oabb0JmG+zduzdm1WrVqmUlCwJfOs6jHeEPkIYPH24lEDgQSIfAkiVLIhIGP/30U0K30iWI7MsRacKgefPmCV0jUHjUqFHy4IMPWn/l09dJEfrupJUrV8ZcdmjTpk1JezRp0iRiuaHA7IJGjRolfd14J+oyYHXq1AmOf/2/X18TOBAIFyCJwJhAAAEEEEAAAQQQQACBnBd49tlnreSBrhFvP9q1aye33HKLXHnllZKXlxfyb/pJPJ3iz4FANgV0o0v7EkVr166NW51+/foFkwZ9+/Z1VXWdobBixQqrrM7S0YeqHAiUV2Dbtm1WsiB8OSL9uduj1kXgbQAAIABJREFUYcOGITMLArMLKlWq5PYSjuV0v4+5c+cKyxc5UvmmgC4rpMsLxVp6SJclSvYIX3bIvgRRjRo1kr1s0udp0uDSSy8VZh8kTeibE0ki+KaraSgCCCCAAAIIIIAAAv4TePnll63kgT4gsh/6iVVNHtx4443BH9uTCK1atbL2QWApF/+NmWy3WB+wBpIGukTRDz/8ELdKOrsgsDyRzjioWbNmQk2wfwqVmTcJ0VHYJqAzCcKTBTrjIJEjMLPAvhxRs2bNErlEwmV1VpouBcPyRQnTGX+C/l8bnigI/F2Tt8ke1atXjzmbQBMG+fn5yV46LefpbEtNpJE8TgtvTl2UJEJOdSeNQQABBBBAAAEEEEAAARV46623rOTBrFmzQkB0A1lNHuhX+MwD+9+//fZb6001BwKZEJg2bVowcTB9+vS4t9TlLuxLFOksgvIcunGyLl2hs2769+9fnktxrg8E9MHrvHnzIhIGuqeB20OXZQnft0D/rpvDZvrQca/JYsZ+puUzcz+duaWJgWjJAt2IPtlD99+ItYlx06ZNk71sVs7TRFp5X0eyUnFumnEBkggZJ+eGCCCAAAIIIIAAAgggkC6B999/X8aMGSP6UNZ+6KezA8mDKlWqRL19IInAJ1LT1TtcNyDw/fffhyxRFO8BbOXKlUOSBj179kwp5NixY60HqCTNUsqaExdbvHhxxHJEicwu0P9Tw/ct0GSBaQ9Zc6Izc7gR8ZYd2rFjR9It1xmJ9kSB/c/MUkyalRMNFiCJYHDnUXUEEEAAAQQQQAABBBD4TeDTTz+1Zh5MmjQphETXzQ4kD5ze9OsDL10TWz+ZzYFAKgXWrFkTskRRYP+BWPfo3bt3yBJFXlv+IpU2XCv7Alu3bo06uyCRB7CNGzcOJgzsswwqVKiQ/QZSA6MFdu3aFXMTY00gHDhwIKn2aYI22myCtm3bWj8vKChI6rqchECuCpBEyNWepV0IIIAAAggggAACCPhA4PPPP7eSB++++25Ea2+66SYrgaBLZ7g5dEkX3WDQKdng5lqU8bfAnj17QmYazJkzJy6IbvBtX6JIl8rgQCAdAjq7IHw5In0Q6/bQhFb47AL9O7ML3ApSLprAhg0bYm5ivHr16qTR6tSpE3M2QcuWLZO+Lici4EcBkgh+7HX
ajAACCCCAAAIIIICA4QJfffWVlTx48803I1py3XXXWckDXYogkYN1gRPRomy4gO6/EdgQWb/H+3Rs3bp1Q5IGHTp0ABSBlArohtmBZIF9w+NEZhfo/hvRliNiZkxKu8o3F9MZWEuXLg3OKrAvQ6TjNdlDN9/WmQPRZhXoPkgcCCCQGgGSCKlx5CoIIIAAAggggAACCCCQAQF9GKbJg5deeinibpdffrnceuut0r59+wzUhFv4XUDXhg8kDYqKimTjxo1xSQYMGGAlDgYOHChHHXWU3/lofwoFFi1aFDG7QB/Wuj00KaBLEIVvdqxJBA4E3Ars3bs37rJD+u/JHLokVqxNjHXpoWrVqiVzWc5BAIEEBUgiJAhGcQQQQAABBBBAAAEEEMi8gC7BocmDp59+OuLmF154oTXzQB+AcSCQLoHNmzeHzDTQB7fxjm7duoXMNuBBV7p6xj/X3bJli7XRcfhyRDt37nSNoMsORVuOiNkFrgl9XVDHoH02gf3Pv/zyS9I2NWvWjLnskCYKOBBAIPsCJBGy3wfUAAEEEEAAAQQQQAABBGII6EMJTR785z//iShx9tlnW8kD3YSWA4F0CNiXJ5o5c2bcW7Ro0SIkadC8efN0VIlr+kRg4cKFEQmDZcuWuW69fno7fGaB/l03QOZAIJ7AqlWrrBkF0ZIFTjOu4l1Xx15g0+LA98AMA8YlYxIB7wuQRPB+H1FDBBBAAAEEEEAAAQR8J7B+/XoreXDfffdFtP3000+3kgd9+/b1nQsNTq/A3LlzQ5YoKikpiXlDnVkQWJ5IvzMTJr19k6tX1xku0WYX7Nq1y3WTdXZBtOWI8vLyXF+Dgv4R2L9/f8zZBJo4SGTshatpciA8QRBIFNSqVcs/yLQUgRwUIImQg51KkxBAAAEEEEAAAQQQMFVg69atVvJAv8LXTz7hhBOs5IF+50AgFQK//vpryBJFK1eujHvZPn36BGcb6B4HHAgkIvDjjz9GJAx0Q3e3R8WKFaPOLmjUqJHbS1DOJwLbt2+PmyhIlqFq1aoxNzHWZIHOgOFAAIHcFCCJkJv9SqsQQAABBBBAAAEEEDBKYM+ePcHkwbZt20Lq3q9fPyt5cNpppxnVJirrPQFdO96+RJGuLR/v6NixY8gSRXXq1PFeo6iR5wR0dkH4vgX693gzW8Ib0axZs6gJA881lgplTWDdunUxlx1au3Zt0vWqX79+zNkELNOWNCsnImC8AEkE47uQBiCAAAIIIIAAAgggYLZAYObBhg0bQhqiex1o8kD3PuBAIFmBGTNmBBMHU6ZMiXuZBg0aBJMGAwcOtD5xy4FAPIEffvghYnbBihUrXKNVqlQpuNGxfQ+Dhg0bur4GBXNXQPfB0P0Jou1REJ5wT0ShZcuWIRsZ25cgIlmaiCRlEfCPAEkE//Q1LUUAAQQQQAABBBBAwFMCDz/8sDX7QJeUsR/6IE2TBxdeeKGn6ktlzBDQDWntsw22bNkSs+K69IbuZxD4YpNuM/o4G7XctGlT1NkFu3fvdl0d/RR3+GbHhxxyiOvzKZh7Ajo7JdYmxvpz3b8gmaOgoCDmJsaaHK1cuXIyl+UcBBDwsQBJBB93Pk1HAAEEEEAAAQQQQCAbAk899ZSMGTNGlixZEnL7Dh06WMmDyy+/PBvV4p6GCugm3PakgT54i3f06tUrJHGgD9s4ELAL6OyC8OWIEpldoGNKkwPhCQNmF/hznGkCKtZsAqd9WOKJFRYWxlx2qHXr1v7EptUIIJA2AZIIaaPlwggggAACCCCAAAIIIGAXePHFF62ZB99//30ITKtWrazkwXXXXQcYAo4C+slce9Jg9uzZcc/Rh2mBmQa6RFHjxo0d70EBfwhs3Lgx6uwC3aPF7dGiRYuI5Yi6du3q9nTK5YiAzqiLlijQn8WbDeXU/KZNm8ZcdkiXX+NAAAEEMiVAEiFT0twHAQQQQAABBBBAAAGfCrzxxhtW8uDrr78OEdCHuZo8+MMf/uBTGZr
tVuCbb74JSRzs3bs35qm1atUKmWnQpUsXt7ehXA4LLFiwICJh8Msvv7husS7/Em12AQ9yXRMaXXDfvn1xlx1KJPFkh8jPz485m0CXHapevbrRblQeAQRyR4AkQu70JS1BAAEEEEAAAQQQQMBTAhMnTrSSB1988UVIvXQJBk0e6JduKsqBQLjA8uXLpaioKJg4WLt2bVykfv36BRMHffv2BdTHArpB+/z58yMSBok85NVNZ8MTBiSjcn9Qbd26NeayQ/p/UrJHjRo1Ys4m0A2N8/Lykr005yGAAAIZEyCJkDFqboQAAggggAACCCCAgD8EPv74Yyt5oA+B7UeVKlWCyYOaNWv6A4NWuhLYtm1byEwD/dR4vEMf6NqXKGI8uWLOuUK6NFr43gXhG7XHa7TOLrDvWxD4c/369XPOigb9JrB69WpZunRp1GSBJqCSPXS/C505oEkB/W7/c5MmTZK9LOchgAACnhEgieCZrqAiCCCAAAIIIIAAAgiYLTB16lQrefDBBx+ENEQ/ZRmYecDDObP7OJW1nzZtWjBxMH369LiX1odwgaSBfmfT0FT2hPevpZtnR5tdEG9Zq/BW6d4r4bMLOnfu7P3GU8OEBMrKyiKWHdJ9CQKJg507dyZ0PXth/X8nPEEQSBbUrl076etyIgIIIGCCAEkEE3qJOiKAAAIIIIAAAggg4GGBL7/80koevP322xG1vPHGG60EQvPmzT3cAqqWCQH91Lh9Q+Tt27fHvK1+QtyeNOjZs2cmqsg9PCCgyYLwhMHKlStd10xnPNlnFwQSB/Xq1XN9DQp6W2DHjh3BpIB9VkEgWaCJhGQOHTuxZhPoDAOW30tGlXMQQCBXBEgi5EpP0g4EEEAAAQQQQAABBDIs8N1331nJg1deeSXizldddZWVPNAHMhz+FFizZk1I0sBpTfHevXuHLFGkG45y5K7AunXros4u0A1s3R46u0ATBvakQadOndyeTjkPC+jsk1jLDun/LckedevWjZhNEFiCqEWLFslelvMQQACBnBcgiZDzXUwDEUAAAQQQQAABBBBIrcCPP/5oJQ+ee+65iAsPHz7cSh6wCWlqzU24mm5ca59pMGfOnLjVbteuXchsgwYNGpjQTOqYhED4vgX691WrVrm+UtWqVaPOLtAHwhzmCmhi0b7UkP3PuslxsocmAwKJgfA9CpiRkqwq5yGAgN8FSCL4fQTQfgQQQAABBBBAAAEEXAosW7bMSh489thjEWecd955VvKAZWdcYuZIsVmzZoUkDg4cOBCzZfrA175EUYcOHXJEgWYEBHR2QbSEQWlpqWskXXc+fDkiZhe45vNUwd27d8ecTaCzDBKZdWJvWMWKFWPOJtCkgSadOBBAAAEEUitAEiG1nlwNAQQQQAABBBBAAIGcE1i9erWVPBg7dmxE24YMGWIlD/r06ZNz7aZBkQJLliwJSRps2LAhLtOAAQOCiQPGSO6MKF1zPlqyQP+vcHtUq1YtuNGxPWlQp04dt5egnAcENm/eHHM2wa+//pp0DXWj4lizCdq0aZP0dTkRAQQQQCA5AZIIyblxFgIIIIAAAggggAACOS+gD4c0eaBf+/fvD2nvySefbCUP9JPlHLkroGPAvkTRokWL4ja2W7duIbMN9EExh9kCa9eujZowCP8/IV4r9aFv+OyCjh07mg3jo9rrxtaxlh3atGlT0hJNmjQJJgrsGxpr8qBRo0ZJX5cTEUAAAQRSL0ASIfWmXBEBBBBAAAEEEEAAAaMFdu3aZSUOxowZI/pn+6GfLNfkwSmnnGJ0G6l8bIEpU6ZIUVGRlTyYOXNmXCpde9y+RFHz5s2hNVRAl6KaP39+RMIgkdkF1atXD84uOOSQQ4KJA2YXeHtQ6HJTsTYx1p+XlJQk3QBNDoQnCAI/q1GjRtLX5UQEEEAAgcwKkETIrDd3QwABBBBAAAEEEEDAswL6yeLAzAP9BLr9OOqoo6zkwZlnnunZ+lOx5ATmzp0bMtsgPHF
kv6rOLLAnDXTmAYd5AmvWrIk6uyDenhbhrdRPiwcSBYFZBuxz4d2xsG3btpizCXS/m2QPTRyFb15sTxrk5+cne2nOQwABBBDwkABJBA91BlVBAAEEEEAAAQQQQCBbArrfgSYQwj91rBsla/JAN07myA0BXafcvkSRLlUS79C9DAKJA52JwmGOgCYGo80u0CSC20M/LW6fVRBIGBQWFrq9BOUyJKBLT+myQ9GWHtJNr5M9GjRoEHU2gSYPmjVrluxlOQ8BBBBAwCABkggGdRZVRQABBBBAAAEEEEAg1QKPPfaYlTwI/yRq586dreTBJZdckupbcr0MC+jMgsDyRJo80A1x4x26Vr19tgFL0WS4w5K8nSYAoyUMEpldoJ8gD08YtG/fPskacVo6BOwJAvsSRPrzHTt2JH3LVq1ahSQK7LMJSBglzcqJCCCAQM4IkETIma6kIQgggAACCCCAAAIIuBd47rnnrOTBjz/+GHKSfrJUkwdXX321+4tR0nMCM2bMCM420D0O4h36KWN70qBdu3aeaw8V+p+Azi7QRJB+2ZMG+il0t4fOLrBvdBz4c+3atd1egnJpEtCkX6xNjPXniSSF7FWsXLly3GWHCgoK0tQiLosAAgggkAsCJBFyoRdpAwIIIIAAAggggAACLgVeeeUVK3nw3XffhZyhS1Jo8mDkyJEur0QxLwksXLgwZImiLVu2xKxehQoVQpIGvXv39lJTqItNYNWqVVFnF5SVlbl20qRQ+OyCgw8+2PX5FEy9wMaNG2MuO6R9nuyhs4bsMwjsf27ZsmWyl+U8BBBAAAEEhCQCgwABBBBAAAEEEEAAAR8IvP3221by4Msvvwxpbb169azkgX6xAaY5A2HDhg0hSxTpJ5TjHb169QpJHPCpY2/1dWlpadSNjhNZx75mzZohswsCiYNatWp5q7E+qc2KFSvEvtyQ/c/FxcVJK2jCV5MD9gRBYGPj+vXrJ31dTkQAAQQQQCCeAEkExgcCCCCAAAIIIIAAAjks8MEHH1jJg6lTp4a0UpczCSQPqlatmsMCudE0XcLGvhny7Nmz4zasdevWIUmDJk2a5AZEDrRCN7KOtndBIk3T2QW6BJF9SSKWoUpEsPxl9+7dG3M2gSb19N+TOXSmULQEQeBn1apVS+aynIMAAggggEC5BEgilIuPkxFAAAEEEEAAAQQQ8KaAbqSryYOPP/44pIIVK1YMJg/YMNebfReo1TfffBOSOIj3UFI/bW7f16BLly7ebpwPardv376oswvWr1/vuvXar/ZEQWB2gc464Ei/gC4LFphBED6rQGcaJHto/8VadkhnFXAggAACCCDgNQGSCF7rEeqDAAIIIIAAAggggEA5BL744gsreTBx4sSIq/z+97+XW2+9VRo3blyOO3BqugSWL19uJQ00AaTfnTbK7devXzBx0Ldv33RVi+u6ENDZBeEbHetsg0QO3acgPGHA7IJEBJMrq3sQxFp2SPcuSPbQ/2cDywyFf+f/4GRVOQ8BBBBAIFsCJBGyJc99EUAAAQQQQAABBBBIocDXX39tJQ/eeOONiKtee+211uwDXeKGwzsC27ZtC5lpsGDBgriV09kF9tkGrHWf+b7U2SCBpYjsSQPdo8LtUbt27eBGx4Gkgc4wYHaBW8HEyh04cCDuskO7du1K7IK20pocCCQIwpcgIj6TZuVEBBBAAAEPCpBE8GCnUCUEEEAAAQQQQAABBNwKfP/991by4MUXX4w45bLLLrOSBx07dnR7OcqlWWDatGnBxMH06dPj3k33MbAnDUgCpblzwi7/66+/Rswu0HhL5Gjfvn3E7AJ92MyRWoHt27fHnE2gswySPXS/mFibGGvyQJeH40AAAQQQQMAPAiQR/NDLtBEBBBBAAAEEEEAg5wSWLFliJQ+efPLJiLZdcMEFVvKgW7duOddu0xqkswsCyxPpEkX6sDPWUVBQIAMHDgwmDnr27Glac42s7549e6JudJzI7ILCwsLg7IL
AvgX6XTcw50iNwLp166wZBdH2KHBa+iteDerXrx+x7FBgVkHz5s1TU3muggACCCCAgOECJBEM70CqjwACCCCAAAIIIOAvAf10tCYPHn744YiGDx061EoeHHHEEf5C8VBr16xZE7JEke5zEO/o3bt3yGyDChUqeKg1uVeVX375JWJ2gdMyUuEKHTp0iEgYsBluasbKsmXLQhIF9qSBLv+V7NGyZcuQjYztexTUrVs32ctyHgIIIIAAAr4RIIngm66moQgggAACCCCAAAImC+inojV5oF/hx2mnnWYlD3SjXY7MCuin2HWGQeBrzpw5cSugG+Xalyhq0KBBZivsk7vt3r076uyCRDbK1dkFumdB+GbH1atX94li6ptZUlISd9mh0tLSpG6qs3jCNy+271FQuXLlpK7LSQgggAACCCDwmwBJBEYCAggggAACCCCAAAIeFtBP3waSB/rA2n4cf/zxVvLgxBNP9HALcq9qs2fPDlmiSDdujXXUqVMnZIki/RQ7R2oFVqxYEZEw+OGHHxK6ie4bYl+GSBMHbdq0SegaFP5NYNOmTTFnE6xcuTJpJk3qxNrEmP1CkmblRAQQQAABBFwJkERwxUQhBBBAAAEEEEAAAQQyK7B3795g8mDr1q0hNz/mmGOs5MHgwYMzWymf3k33n7DPNnBaK3/AgAHB2QZ9+vTxqVrqm62zC+bNmxexHJE+tHZ7aFInfGaB/r1atWpuL0E5EdFl1XSpIftyQ4E/b968OWmjpk2bRl12SGcVMGsnaVZORAABBBBAoNwCJBHKTcgFEEAAAQQQQAABBBBIrcD9998vY8aMkfXr14dc+PDDD7eSB+ecc05qb8jVQgT0Iag9abBo0aK4QrqBtX2JIh5Il39A6V4S8+fPD0kY/PjjjwlduFOnThGzC/jEujvCffv2xdzEWJMF4bOi3F1VJD8/P+ZsAk0UsFSUW0nKIYAAAgggkFkBkgiZ9eZuCCCAAAIIIIAAAgjEFPjPf/5jzT7QzV/thy6zosmDiy66CL00CUyZMsVKHBQVFcnMmTPj3qV58+YhSxTp3zmSE9A18nV2QXjCIJFPs+vGuNFmF1StWjW5SvnkLJ3hFG0mgf7MaUPweEQ1atSIuYmxJgry8vJ8IkwzEUAAAQQQyB0Bkgi505e0BAEEEEAAAQQQQMBQgaefftpKHixevDikBe3bt7eSByNGjDC0Zd6t9ty5c0NmG+zatStmZXVmgX2mgc484EhcQB9Mhy9HtHDhwoQupLMLwhMGzC6ITbh69ergRsZLly4NWYIofKZTIh3RsGHDkESBfRPjJk2aJHIpyiKAAAIIIICAAQIkEQzoJKqIAAIIIIAAAgggkJsCL730kpU80E9h24+WLVtayYPrr78+NxuehVbpGu72JYqcNnjVvQwCiQPd44DDvYAmZKLNLtiyZYvri9SrVy/q7IIqVaq4voYfCpaVlcWcTaAzCnbu3Jk0gyZnNDlgTxAENjauXbt20tflRAQQQAABBBAwT4Akgnl9Ro0RQAABBBBAAAEEDBd48803reTBV199FdIS/XSvJg9uvvlmw1uY/errg+zA8kT6XR9qxzs6dOgQskSRbsDL4SywbNmyiIRBorMLOnfuHJEwaNWqlfPNfVJCEwGxNjHWn2siIZlDEzLREgSBn1WqVCmZy3IOAggggAACCOSgAEmEHOxUmoQAAggggAACCCDgTYF3333XSh58/vnnIRXUT/Vq8kC/CgoKvFl5A2o1Y8aM4GwD3eMg3tGgQYOQJYratWtnQAuzV0V9kB3Yt8A+y6C4uNh1perXrx+x0bHu98HsArE2UQ9fbiiQOFizZo1r4/CCul9ErNkELVq0SPq6nIgAAggggAAC/hIgieCv/qa1CCCAAAIIIIAAAlkQmDRpkpU8+PTTT0PuXrly5WDyoFatWlmomdm31E+825coirdcToUKFUKSBr179za78WmsvT7MDl+OaNGiRQndsUuXLhGzC3SZLj8
fuidErESBbnKc7KHJgMAyQ4HvgdkEuiwUBwIIIIAAAgggUF4BkgjlFeR8BBBAAAEEEEAAAQRiCEybNs1KHrz33nsRJQIzD/QT8RzuBDZs2BBMGhQVFVlLvMQ7evbsGbJEEbM8QrV27NgRnF0QmGWg3xOZXaDjV2cThG92rAkyvx27d++OuYmxjtV9+/YlRVKxYsWYswk0aVC1atWkrstJCCCAAAIIIICAWwGSCG6lKIcAAggggAACCCCAgEuBWbNmWcmDt956K+KMG264wZp9wFIizpj79+8PmWkwe/bsuCfpRrCBzZD1e5MmTZxv4pMS+hA7fDmixYsXJ9T6rl27RiQM/DaON2/eHHM2gW7eneyhS5qFzyYI/L1NmzbJXpbzEEAAAQQQQACBlAiQREgJIxdBAAEEEEAAAQQQQEBk7ty5VvLg5ZdfjuC48sorreQBa+/HHynffPNNSOJg7969MU/QJaDsSQNdQsfvx/bt26POLkhkuRydXaAzC8JnF/hlJsfKlSutWS7Rlh7atGlT0kNMk1rhyw0Flh1q1KhR0tflRAQQQAABBBBAIN0CJBHSLcz1EUAAAQQQQAABBHJeQNfm1+TBs88+G9HWiy++2Eoe6Ke4OSIFdJ34wL4GukTR2rVr4zL17ds3uESR/tnPx5IlSyISBj/99FNCJLoUUfhyRM2bN0/oGqYVLi0tjZogCCQNSkpKkm5SrE2MNXlQs2bNpK/LiQgggAACCCCAQDYFSCJkU597I4AAAggggAACCBgtoA/ANXnw6KOPRrTj3HPPtZIHhx12mNFtTHXlt23bFjLTYMGCBXFvobML7LMN/LgBtc4u0I2Owzc7Vku3R8OGDSNmFuhMg0qVKrm9hFHl1MY+m8A+q2DZsmVJt6V69eoxNzHWBEJ+fn7S1+ZEBBBAAAEEEEDAqwIkEbzaM9QLAQQQQAABBBBAwLMC+ml5TR488MADEXU844wzrOTB0Ucf7dn6Z7piusF0YLbB9OnT495el3yxJw10nwM/HTqTwL7JsSYOdMZBIkf4MkT692bNmiVyCSPKahzGWnZo3bp1SbdBl3MKLDMU/j0XHZOG4kQEEEAAAQQQ8I0ASQTfdDUNRQABBBBAAAEEECivwJYtW6zkgX7pkij246STTrKSBwMHDizvbYw/X2cX2Jco0k/Sxzp0nX1NGqibfu/Zs6fx7XfTAP2kfPjMAv17PKvw6+o6+tESBhUrVnRTBSPKRNuXIJA4SMQqvLGtWrUKSRTYkwWFhYVG2FBJBBBAAAEEEEAgUwIkETIlzX0QQAABBBBAAAEEjBXQNdIDyYMdO3aEtKN///5W8mDQoEHGtq+8FV+zZk3IEkW6zFO8o3fv3iGzDSpUqFDeKnj6fJ1dEL4ckT4Id3vk5eVF7Fug+xjkwqfid+3aFXM2gRodOHDALVNIucqVK8fcxFgTBn7ZJDopPE5CAAEEEEAAAQTCBEgiMCQQQAABBBBAAAEEEIghoA8wA8mDTZs2hZQ68sgjreTBWWed5Tu/vXv3im6CHJhtMGfOnLgG7dq1C0ka6HIxuXhs3bo16uyC8MRTvLY3btw46uwCkxMtGzdutBIF0fYoWLVqVdJDoU6dOjGXHdKZBhwIIIAAAggggAACqREgiZAaR66CAAIIIIAAAgggkGMCDz74oJVACH/I2b17dyt5MGzYsBxrcfzmzJ49O2SJonifENeHu4F9DXSZog4dOuSc1eLFiyMSBonMLtANeHU2QfhyRE2bNjXSasWKFRJj3snBAAAgAElEQVRYeih8CSJdBizZQ2db6MyBaHsU1K9fP9nLch4CCCCAAAIIIIBAAgIkERLAoigCCCCAAAIIIIBA7gs8/vjjVvJAH4Taj06dOlnJg0svvTT3EUSszXwDMw30+4YNG+K2e8CAAcHEQZ8+fXLGqLi4OLjRsX0Pg507d7puo24WHS1hYNLsAp19EmsTY/25/nsyhxrE2sRYf16tWrVkLss5CCCAAAI
IIIAAAikUIImQQkwuhQACCCCAAAIIIGCuwPPPP28lD3744YeQRrRp08ZKHlxzzTXmNs5FzfXT4vYlihYtWhT3rG7duoUsUZQLD3u1zeGbHYcnk+Kh6OwCnVkQPrtAkwgmHJowCV92KJA40JkGyR41a9aMuYlx27Ztk70s5yGAAAIIIIAAAghkSIAkQoaguQ0CCCCAAAIIIICANwVee+01K3kQvq6/LiujyYNRo0Z5s+IpqNWUKVOCsw1mzJgR94rNmzcPWaJI/27qoQmT+fPnRyQMEpldoOMj2uwCTSR4+dDlucKXGwokDnTvgmQP3ctBEwLRZhXov3EggAACCCCAAAIImCtAEsHcvqPmCCCAAAIIIIAAAuUQeOedd6zkwcyZM0OuUrduXSt5oF8mLTfjhmLu3LkhSxTt2rUr5mk6syCwr4F+15kHJh4LFy6MWI5o2bJlrpuiYyB8ZoH+3asPxnWvilibGOvP4/W5E4omCcITBYGkQa1atZxO598RQAABBBBAAAEEDBUgiWBox1FtBBBAAAEEEEAAgeQEPvzwQyt5oJ/Ctx/Vq1cPJg9yYWkebdvKlStDlijSv8c7dC+DQOJA9zgw6dDZBboUUfhyRIk8NNdNfO2zCwJ/zsvL8xTF9u3bY25inMjmzuGNqlq1asxNjDV5ULFiRU85UBkEEEAAAQQQQACBzAiQRMiMM3dBAAEEEEAAAQQQyLKAbg6syYOPPvoopCb6SfPAzAOdhWDyoQ/M7Zsh68yDeEeHDh2spMHAgQOt73Xq1DGi+T/++GPEckTLly93XXd9GB5tdkGjRo1cXyPdBdetWxdz2aG1a9cmffv69evHXHbI5CWqkgbhRAQQQAABBBBAAAFHAZIIjkQUQAABBBBAAAEEEDBZQNf61+TBhAkTIpqh+x1oAkHXtzf10PYFEgfhsyvC29SgQYOQJYratWvn6WZv3rw5ZGZBYJZBSUmJ63rr7IJoCQPXF0hjQV1WKbBxcfgSRNu2bUv6zi1btgwmCsL3KDA9UZY0CicigAACCCCAAAIIJC1AEiFpOk5EAAEEEEAAAQQQ8LKAbpSsyQPdODn8uOaaa6zkQZs2bbzchKh1W7RoUcgSRbqMT6xDZ1nY9zXo3bu3Z9urswvClyNasWKF6/pWqlQpuBRRIGmgyxFlc3aBJjtibWKsPy8tLXXdPnvBgoKCmLMJNGlQuXLlpK7LSQgggAACCCCAAAIIRBMgicC4QAABBBBAAAEEEMgpgQULFljJgxdeeCGiXZdeeqmVPOjUqZMxbd6wYUPIEkVLliyJW/eePXsGEwe6TJE+cPbSsWnTpoh9CzR5sHv3btfV1GV3wmcXaMIgG4e2xz6bwJ40cNqDIl59CwsLY84maN26dTaayj0RQAABBBBAAAEEfCpAEsGnHU+zEUAAAQQQQACBXBPQB7maPHjiiScimjZs2DAredC9e3fPN3v//v0hSYPZs2fHrbM+ULbPNmjSpIln2vjDDz9ELEf0yy+/uK6fJkDsGx0HEgcNGzZ0fY1UFPz1119jLjukSy4le+gyWuHLDQX+nuk2JtsGzkMAAQQQQAABBBDIfQGSCLnfx7QQAQQQQAABBBDIaQH9tLcmDx566KGIdp511llW8uDII4/0tIEuvVRUVBRMHuzduzdmfWvVqhWSNOjSpUvW27Zx48bgRseBfQv0+549e1zXrUWLFsHZBYHEQdeuXV2fX56C+/bti7vsUCKzJOz1yM/Pj1h2yJ40qF69enmqzbkIIIAAAggggAACCGREgCRCRpi5CQIIIIAAAggggECqBfTBtSYP9KusrCzk8oMGDbKSB/3790/1bVNyveXLl4fMNlizZk3c6/bt2zeYOOjXr19K6pDsRXS5KHuiYP78+ZLI7AJdrz/a7ALd9Dmdx9atW2POJtD+SPaoUaNGzNkEmjDIy8tL9tKchwACCCCAAAIIIICAJwRIIniiG6gEAggggAACCCCAgFuB7du3y3333Sd
jxoyJWEdf9wDQ5MFJJ53k9nIZKbdt27aQpIE+iI936OwC+xJFOvsg04fuxaAJgvDNjuPNkgivY8uWLSMSBumcOaHJGF3WKtoeBevXr0+aUJcWirbskP7MS8tHJd1ATkQAAQQQQAABBBBAII4ASQSGBwIIIIAAAggggIARArrkTGDmQXFxcUidjz76aCt5cMYZZ3imLdOnTw8uUaR/jnfog2h70iDTG+d+//33EbMLdB8At4fOLgjf6Fj/Xr9+fbeXcFVOZ5zYEwThyYIdO3a4uk60QmquSYFoyYLatWsnfV1ORAABBBBAAAEEEEDAdAGSCKb3IPVHAAEEEEAAAQR8IPDAAw9YCYS1a9eGtPawww6zkgfnnntu1hV0dsHkyZODXzr7INahGwbbkwa9evXKSP310/iB2QX274nMLmjVqlXE7ILOnTunrP47d+6MmSjQpEH40lVub1ylSpW4yw5VqlTJ7aUohwACCCCAAAIIIICArwRIIviqu2ksAggggAACCCBglsAjjzxiJQ9WrFgRUnHdcFeTBxdffHHWGqRL59iTBk7r6vfu3TskcVChQoW01l2TBOHLEekm1G4PfegebXZBvXr13F4iZjlNZixdujRqsmD16tVJX79u3boxZxPoxs0cCCCAAAIIIIAAAgggkLgASYTEzTgDAQQQQAABBBBAIM0CzzzzjJU8WLRoUcid2rVrJ7feeqtcccUVaa5B5OX10/qaNCgqKrK+z5kzJ24dtK722Qbp2jhYH8iH71ugyQNd/sntoUv56GbH9g2Pyzu7QJMqgURB+BJEuslxsocmA9q2bRt1VkEqEhzJ1ovzEEAAAQQQQAABBBDIVQGSCLnas7QLAQQQQAABBBAwUGDcuHFW8kAfitsPfXCsMw9uuOGGjLZq9uzZIbMN9u/fH/P+derUCUkadOzYMeV1VZfw5YhWrVrl+j5Vq1aNOrtAP8Gf6LFnz56Ymxhr0iCRJIb93hUrVgxJEITvUaBt4EAAAQQQQAABBBBAAIHMCZBEyJw1d0IAAQQQQAABBBCIITB+/HgreaAP7e2Hfnpfkwf6lYljyZIlIUmDDRs2xL3tgAEDgomDPn36pKyK69atC9noOJA8KC0tdX0PnV0QvhxRp06dXJ+vBTdv3hx1NoHOMPjll18Supa9sG5UHGs2QZs2bZK+LicigAACCCCAAAIIIIBA6gVIIqTelCsigAACCCCAAAIIuBR47733rOTBtGnTQs6oVatWMHlQuXJll1dLvNiWLVtCligKXz4p/IrdunULmW1QrVq1xG9qO0M3CQ7ft0ATBonsC6B1sC9DFEgcuJ1doPsk2Jcbsi9BtGnTpqTb16RJk4hEQWBWQaNGjZK+LicigAACCCCAAAIIIIBAZgVIImTWm7shgAACCCCAAAIIiMgnn3xiJQ/0u/0oKCgIJg/00+rpOKZMmRKcbTBjxoy4t2jevHlI0qA8m/OuXbs2ZHZBIHkQb4mk8Mrpp/TDZxc4LZuksxdibWKsyYOSkpKkmTUpEL7cUODvNWvWTPq6nIgAAggggAACCCCAAALeESCJ4J2+oCYIIIAAAggggEDOC0yfPt1KHvz3v/+NaOvNN99sJRAaNmyYUoe5c+eGLFG0a9eumNfXT/XbN0PWmQeJHgcOHIjYt0BnF6xZs8b1papXrx51doHuuxDt2LZtW8xNjJctW+b6vuEFtR6xlh3SZEF+fn7S1+ZEBBBAAAEEEEAAAQQQMEOAJIIZ/UQtEUAAAQQQQAABowV0rwNNHujeB+HH9ddfbyUPWrZsmZI26vI8kydPlqKiIuu7/j3eoXsZBBIHusdBIocmBqItR6SJBLeHPqQPX46oQ4cOEafrTAb7skP2P+seCskeuu9ErNkEzZo1S/aynIcAAggggAACCCCAAAI5IkASIUc6kmYggAACCCCAAAJeFNBP4GvyYNy4cRHVGzFihJU8aN++fbmqrjMLNFkQ+NKZB/EOfUBvn23gZu8ATQp
oWwIbHAe+JzK7oEaNGlFnFxQWFgara9+PIHwJou3btyft1KpVq6iJAk0e2O+f9A04EQEEEEAAAQQQQAABBHJWgCRCznYtDUMAAQQQQAABBLInsHjxYhkzZow888wzEZW46KKLrOSBfvo+2UP3MggkDXSPg3iHftLenjRo165d3PK6qXFgdoF9loFuguz20Ifz4bMLNFmiCY9Ymxhr0iCR/RHsddHNpwPLDkWbVaB7TXAggAACCCCAAAIIIIAAAskIkERIRo1zEEAAAQQQQAABBKIKrFixwpp58Mgjj0T8+znnnGMlDw4//PCE9RYtWhSyRNGWLVtiXkPX6R84cGAwcdC7d++oZfWBvX1mQeDPumyQ20NnF4RvdKxLAG3YsCHqHgWrVq1ye+mIcrofQqxlh3SmAQcCCCCAAAIIIIAAAgggkA4BkgjpUOWaCCCAAAIIIICAzwR0TX5NHtx///0RLR88eLCVPDjmmGNcq+hDePsSRUuWLIl7bs+ePUNmG+gn8+2Hzi6IthxRIrMLdAZDYHZBkyZNRDdh3rNnT0SyIF6CwwlAExD2RIH9z/Xr13c6nX9HAAEEEEAAAQQQQAABBFIuQBIh5aRcEAEEEEAAAQQQ8I9AcXGxlTzQr3379oU0/MQTT7SSB8cff7wjiO45ENgIWZMHuhFzvKN169YhSQN9qK9HaWlpxL4FmjxIZOPhmjVrWsmCFi1aSL169aRSpUpWskA3aA4sRaR/T+aoUKFCzNkEmjDQxAQHAggggAACCCCAAAIIIOAlAZIIXuoN6oIAAggggAACCBgisHv37mDyIHzD3379+smtt94qp556atzWzJkzJ2SJor1798Ysrw/27UsUdenSRXRpoGjLESVCqIkCTUDo9fPy8mTHjh2imyXrskzJHnqtaMsOBX6W7HU5DwEEEEAAAQQQQAABBBDIhgBJhGyoc08EEEAAAQQQQMBQAV3+JzDzYOPGjSGtOOKII6yZB0OHDo3auuXLl4csUaQP6+Mdffv2tWYbaFKisLAwYjmi9evXu1asWrWq6HJAVapUsWYr6JJDOosi2aNRo0ZWoiBasqBx48bJXpbzEEAAAQQQQAABBBBAAAHPCZBE8FyXUCEEEEAAAQQQQMCbAg899JCVQNBlfexHt27drOTBBRdcEPJznaFgX6JowYIFcRumswt0E2TdF0CXENLNlOfPn299JXLoZse6bNCuXbsillhK5Dpt27YV/YqWKKhVq1Yil6IsAggggAACCCCAAAIIIGCsAEkEY7uOiiOAAAIIIIAAApkRePLJJ63kQfjmxh07drSSB5dddlmwItOnTw8uUaR/jnfofgMHH3ywtZSQPvBfvHix6IbKbo/8/HzRvRSSPXR2QqzZBJo8qFixYrKX5jwEEEAAAQQQQAABBBBAIGcESCLkTFfSEAQQQAABBBBAILUCL7zwgpU8CJ9BoJsaa/Lg2muvtf5NN0IOfG3bti1mJXR2QMOGDUWXRFq7dm1qKxvjarqEUazZBM2bN89IHbgJAggggAACCCCAAAIIIGCyAEkEk3uPuiOAAAIIIIAAAmkQeP31163kwTfffBNydd2A+Oqrr7Y+vR9IGug+B/EO/TS/7kGQzqNly5bBREH40kN169ZN5625NgIIIIAAAggggAACCCCQ8wIkEXK+i2kgAggggAACCCDgTmDChAlW8mDGjBkhJ+geAz169JAdO3bIt99+6+5iKSxVUFAQczaBzjLQzZI5EEAAAQQQQAABBBBAAAEE0iNAEiE9rlwVAQQQQAABBBAwRuCjjz6ykgc6u8B+6J4DuvSQfqX7KCwsjEgUBGYV6PJJHAgggAACCCCAAAIIIIAAAtkRIImQHXfuigACCCCAAAIIZF1g6tSpVvLggw8+yEhdmjZtai2FFG2PAt0rgQMBBBBAAAEEEEAAAQQQQMB7AiQRvNcn1AgBBBBAAAEEEEirgC5bdOutt8pPP/2U0vvozIVYmxhr8qB69eopvR8
XQwABBBBAAAEEEEAAAQQQSL9Anoikf356+tvBHVwIZGIpAhfV8FyRvDwNAw4/CBAD0XuZGPDD6P+tjcQAMeCf0R69pcSA30cA7UcAAQQQQAABBBBAAIFkBEgiJKNm6Dm8cebhkaFDN2XVJgaIgZQNJkMvRAwQA4YO3ZRVmxhIGSUXQgABBBBAAAEEEEAAAR8JkETwUWfzxpmHRz4a7lGbSgwQA8QAkw+jjQFm4/gnMngd8E9f01IEEEAAAQQQQAABBBBInUBIEqFs29epuzJX8oRAXq3DgvXgjbPzA1RiwBPDNqWVIAacOe0PUIkBZy/TShADzj1GDDgbmVyCGDC596g7AggggAACCCCAAAIIeEGAJIIXeiGNdeCNszMuD4+cjUwuQQw49x4x4GxkcgliwLn3iAFnI5NLEAMm9x51RwABBBBAAAEEEEAAAS8IkETwQi+ksQ68cXbG5eGRs5HJJYgB594jBpyNTC5BDDj3HjHgbGRyCWLA5N6j7ggggAACCCCAAAIIIOAFAZIIXuiFNNaBN87OuDw8cjYyuQQx4Nx7xICzkckliAHn3iMGnI1MLkEMmNx71B0BBBBAAAEEEEAAAQS8IEASwQu9kMY68MbZGZeHR85GJpcgBpx7jxhwNjK5BDHg3HvEgLORySWIAZN7j7ojgAACCCCAAAIIIICAFwRIInihF9JYB944O+Py8MjZyOQSxIBz7xEDzkYmlyAGnHuPGHA2MrkEMWBy71F3BBBAAAEEEEAAAQQQ8IIASQQv9EIa68AbZ2dcHh45G5lcghhw7j1iwNnI5BLEgHPvEQPORiaXIAZM7j3qjgACCCCAAAIIIIAAAl4QIInghV5IYx144+yMy8MjZyOTSxADzr1HDDgbmVyCGHDuPWLA2cjkEsSAyb1H3RFAAAEEEEAAAQQQQMALAiQRvNALaawDb5ydcXl45GxkcgliwLn3iAFnI5NLEAPOvUcMOBuZXIIYMLn3qDsCCCCAAAIIIIAAAgh4QYAkghd6IY114I2zMy4Pj5yNTC5BDDj3HjHgbGRyCWLAufeIAWcjk0sQAyb3HnVHAAEEEEAAAQQQQAABLwhkPImwcVOxNGhzvGPbp3/8tBxzVHfHcpksMO71D+SiK/4iX332ohzWo3Mmb530vXjj7EyX6YdHxIBzn6SyBDHgrEkMOBsFSvA64N7KpJLEgPveIgbcW1ESAQQQQAABBBBAAAEEEMgVgawlEdq3aykD+h0W0/GWkRfLQW2ae8qZN86e6o6UVSZbD4+IgZR1YdwLkURwdiYGnI1IIrg3MrEkMeC+1/hdyL0VJRFAAAEEEEAAAQQQQACBXBHIWhJh+LDT5PnHRxvlyBtno7rLdWWz9fCIGHDdReUqSBLBmY8YcDYiieDeyMSSxID7XuN3IfdWlEQAAQQQQAABBBBAAAEEckWAJEICPckb5wSwDCrKwyP3nUUMuLcyqSQx4L63iAH3ViaVJAbc9xYx4N6KkggggAACCCCAAAIIIIBArgh4Pokw4b2pcuawm+XzSc/Ivx95WT6dMltKS/fLccceLv/+5x9CljwqKdkjfx3ztLw2/mNZu36TtGrRRK667CwZec35EnhAsHNXidz9f0/J629Nsso0a9JAhp17svz5lhFSpUpBsF/Xb9gst/3lYXnvo+myd+8+Oe/sk6R3ry4y4vq/huyJsGfPXvn7fc/KuNc+kFVrNkjTxg3kgt+dLHfeOkIqV/7teuMnFMk5F98mH7z1kNx4y72yes0GuWXUxTL69ivTPo74FLYzsdcfHhEDzn0YrwQx4OxHDPA64DxKcrsEMUAM5PYIp3UIIIAAAggggAACCCCAQPkEjEki1KtbW84eMlDOGnycrPhljdz8p7HSulVTmTvjVUugrKxMBpx6lUz74lu55ILTrQf+076YI6+O/1juvG2E3POnq2X37r3Sf9CVMvubBVaZHt06yMxZ86wyuj/DpAmPSMWKFaxyPY4ZJkuXr5Lrrzx
XmjZpIC+99oF13+Kt24NJhAMHDsigoSPl06mz5NILB0uv7p3ku/mL5OkXJsgJA46U98ePlfz8/GASoUH9OjKw/+FSq2YNOf2UvnLayX3L13suzuYBqjOSKQ+PiAHnvoxWghhwdiMGeB1wHiW5XYIYIAZye4TTOgQQQAABBBBAAAEEEECgfAJZSyJ0bN9aBvbvHbX2Bx/Uwpo9oEfgU9jh68f/9V9Py1/+/rjM//J16dr5IHnznU/l3OF/lAfH3Cw3Xn1e8LpDzr/Jmr2wftkn8tTz78io2+6XR+6/Ta694pxgmXv++ZTc9Y8n5NlH/2IlAx5+4nVrxsCEV++XM0491iq3q2S3HHncJTJ/wZJgEkGTD8Mu+5M8+dCf5IpLzgxe74VX3pNLrh4t418aI0PPOC6YRDj/7JPklWf/Xr4eS/BsHqA6g2Xr4REx4Nw3qShBDDgrEgMivA44j5NcLkEMEAO5PL5pGwIIIIAAAggggAACCCBQXoGsJRHiVfzYY3rK1A+eDEkivPbcP+R3Q08MnhZIGkx+73FrFsHwq+6S19/+RLaunBpcRkgLr1q9XnbsLBFNTBw/+ForCbDmp4+tGQeBQ5dBqttqgJx43JEy8bUH5OQzb5B5C36S1Ys/CqlmILnw1WcvymE9OltLFOlyR0vnvSuVKlUMlt23r1RadTlNLjj3FHnusbuCSYQXn7xHLjpvUHn7LKHzeYDqzJWth0fEgHPfpKIEMeCsSAyI8DrgPE5yuQQxQAzk8vimbQgggAACCCCAAAIIIIBAeQWylkQIn1kQqyGBmQiTJj4iJww4Ilgs8PNPJj4qxw/oLceecqW118BP370T06RN18HSuFE9mVn0XESZToedbSUfvvviFenQ8yxp1LCeTPvoqZByn0yZJSeecV1wJkLPvhfIt3MXxbyf7ttQ9N/HgkmEQF3L22mJnM8DVGetbD08Igac+yYVJYgBZ0Vi4DcjXgecx0quliAGiIFcHdu0CwEEEEAAAQQQQAABBBBIhYAxSYTwB/DhSYRjTrxcNmzcIovmvB3TpXWX06VJ4/pRkwiaOKhdq4bMnvqidOw1VGrVrG792X58XDTTmqUQmInQrc/5snFTsbzw+N1R71mnTk1rn4TAxspT3n9C+vftlYp+c30NHqA6U5ny8IgYcO7LaCWIAWc3YuA3I14HnMdKrpYgBoiBXB3btAsBBBBAAAEEEEAAAQQQSIVAziQRdG+Cd96bIltXfiYFBZWCNl98OVcef+YtGX3HlXL5dffIgh+Xytolk6RChfxgGd1IuU7L/jLoxGPkrXFjZNDQG63Nl9f9/ElIuUefelOuu+lfwSTCGef9Qd7/+HMpXjlValSvFrze/v0HZPyET+XQrgdLpw5tSCKkYqS6uEZxcbEUFha6KBlaJFceHhED0bveT0kEYoDXAb8n0ogBYsDvMZDwL0GcgAACCCCAAAIIIIAAAgi4EMiZJMK41z+Qi674S8Qmx+dfdoe1OfPG5UVWMuHmP42Vx/59u1x9+dAgzz8feF5uH/2f4MbKT78wQa644W/y1MN/lhHDh1jldJ+DXv0uDNlY+ZkXJ8qI6/8qt990qfzjruuC13v2pXethMXYf91kbRDNTAQXIzEFRYYMGSI9evSQkSNHJpRMyJUkAjFAEoEY4HXA7w9QiQFiwO8xkIJfp7gEAggggAACCCCAAAIIIBAhkLUkQvt2La0NkWMdJww4UoaecZyVADhz2M3itJRLael+GXDqVTJj1jy56rKzpFvX9jL186/ltfGTZMxfb5RbRl5sbZzZ54RLZd73S+Tyi8+QHod2kC+/mi8vvvq+9O3TQ3S5IZ2hoNfS5ZG+/vYHuXbEOdL+4FYy7rUP5OdlK63liwLLGWliQe+psx3OPesEGdD3MFm8ZIU8+vR46di+tXw+6WlrhgJJhMxE3tSpU2XAgAFWAmHUqFGukwnZSiIQA5kZF36aiUAM8Drg9weoxAAx4PcYyMwrK3dBAAE
EEEAAAQQQQAABvwlkLYngBH3TDRfKfX8f5TqJoNfbsXOX3PX3J+SNdz6xHvYffFBLGXXtMLnsosHB223dtsMq89a7RbJ+wxZp2byxDDv3ZLnjpkutjZUDx/Ydu+SO0f+RNyd8Kjt2lMgpJ/aRc888Qc4d/sdgEkHL7txVIn8b84yVrFi9doM0blhPTh/UT0bffqXUr/fb0jokEZx6O3X/3r9/f/nss8+sC7pNJmQrieDUamLAScjdv/spiaAixACvA+GRQQw4J5V5HeB3IXevKJRCAAEEEEAAAQQQQAABBPwpkPEkgj+Zs9dqvz08CnwK1S7ulEzI9MOj7I0Gf96ZGHBOqBEDuR0bxAAxkNsj3Ll1fosBZxFKIIAAAggggAACCCCAAAKJCZBESMzLuNJ+fONs/yS2m2QCD1CNG9YJVZgY+B9XrIQaMZDQkDKuMDFADBg3aFNcYT/GQIoJuRwCCCCAAAIIIIAAAgj4XIAkQo4PAD++cY42GyFeMoEHqLkdBMRAZP+GJxOIAWIg1wR4Hci1Hi1fe/z4OlA+Mc5GAAEEEEAAAQQQQAABBEIFSCLk+Iiwv3HO8aYm3LzAg9TRo0cHzy3b9nXC1+EEbwsQA7H7hxjw9thNVe2IAWIgVWPJ1OuQRDC156g3AggggAACCCCAAAIIeEWAJIJXeiJN9eDhUWKwJBES8zKhNDGQWC8RA3ExZW8AACAASURBVIl5mVCaGEisl4iBxLxMKE0SwYReoo4IIIAAAggggAACCCDgZQGSCF7unRTUjYdHsRFr164to0aNkrvvvjtYiIdHKRh0HrsEMUAMeGxIZrw6xAAxkPFB57EbkkTwWIdQHQQQQAABBBBAAAEEEDBOgCSCcV2WWIX9+MbZaS3sQPJAEwi6nAvrwSc2pkwrTQxE9hgxYNooLl99iQFioHwjyPyz/RgD5vcaLUAAAQQQQAABBBBAAAEvCXg2iTDk/Jtk4vufScn6GVKlSkFcs+07dsm9D74ob02cLMtWrJKCSpWkc8e2cvH5p8qVl54p+fn51vnX3zxGHnnyDUf/C849RcY9/Vc579I75PW3JkmFCvmyfumnUrdOrajndutzvsz7/icZPuw0ef7x/62v73ijDBTw4xvn/v37y2effRahG/7gNFDAq0kEYiA1AUIM/M+RGOB1oKysLDWB5fGr8DrA70L2IerH1wGPhyjVQwABBBBAAAEEEEAAAcMEjE8iaALh6BMuk/kLlshhPTpL184HScnuPTJz1jz5ZeVaGXrGcfLmi/+yPm2uCYEp0/+3ce7SZavkkymzpH/fXtLh4FbBrjvisK5y6YWDg0kE/YcXn7xHLjpvUET3Ll2+Sg469Azr5yQRsj/6o81CiPXgNFeSCMRA/HHnt4dHxACvA+ERQQyI8DrA70KBuPBLIi37v5FRAwQQQAABBBBAAAEEEMglAeOTCPc/PE5u/tNYeXzsHXLVZWcF+2bv3n1ywYg/y/gJRTLxtQdk8KB+Ef2m/3bOxbfJc4/dJZdccHrEv+tMhPETPpVmTRrK4b06y/iXxkSUue+hl2TM2Bdlw8YtJBE8EBn2T586PTTKlSQCMUASwS5ADPA64PckAjFADPg9Bjzw6xhVQAABBBBAAAEEEEAAgRwTMD6J8LtLbpe3350suzfMtJYdsh/LVqyWtocMlmtGnC2PPvDHpJMI11x+tjz/8nuycfmnUrly6NJKOguiU4c28syLE0kiZDk4Ap/Adps8yJUkAjFAEiEgQAzwOhAtGvw0E4EYIAb8HgNZ/lWM2yOAAAIIIIAAAggggECOChifRLj2D/+Ux54eL0X/fUyOO/bwiG7S5YYaNqgjNapXSzqJ8PE7/5HjB18rH7z1kJxyQp/gddau2yTNOpwi/33j33Lq2SNJImQ5SIYMGSLdu3eXwIbJbqtj+p4IxABJhIAAMcDrgN8foBIDxIDfY8Dt7z6UQwABBBBAAAEEEEAAAQQSETA+iaAbGvceMFx0jdu
hg4+TIaf1t/Y4aNigrqOD2+WMdJZDw7YnyLlnHW8tmxQ4Hn/mLbl99H9k9eKPpFqjo0kiOIqnt0BxcbEUFhYmfBPTkwjEAEmEgAAxwOuA3x+gEgPEgN9jIOFfgjgBAQQQQAABBBBAAAEEEHAhYHwSQds4fca3ctXIf8iPi5YFm3xIl3ZWUmHUdcOkdq0aUSncJhFKt8yWi674ixR9NltWLfrQ2qRZj5OGXG8lK3RPhUp1jyCJ4GLAebGI6UkEYoAkQnnjihhw3huH14HyjjJvn08MEAPeHqHUDgEEEEAAAQQQQAABBBDIrkBOJBEChLO+/l4+/GSGTP7sK5k5e56Ulu6XZk0bytQPnpB2bVtESCeSRHhr4mQ5+6JbZdaUF6R3ry6yddsOadDmeHntuX/I4EHHkkTI7jgu191z4eERMRB7CPhpPfhkA4EYcPcAldeBZEeY988jBogB749SaogAAggggAACCCCAAAIIZE8gp5IIdsYtxdtkzNgX5Z8PPG/tlaB7JoQfiSQRdu4qkfqtB8pNN1wof7vzWhn3+gdy1Y3/kA3LP5WCSpVIImRvDJf7zrn08IgYiBwOJBGcQ4QYcPcAldcB57FkagligBgwdexSbwQQQAABBBBAAAEEEEAgEwJGJxHWb9gsN90xVk496Rg57+wTo3qdctaN8smUL0X3NahYsUJImUSSCHriaeeMkhW/rpH5X74uQy+81dqH4e2X77VmPLCcUSaGa3ruYfLDI2LAeUyQRHBh9P+XaNOSZdu+dj4hQyWGnH+TTHz/MylZP0OqVCmIeldiwLkziAEXRsSAtTTjJRecHoF13qV3yPgJn4ou6cXvQs5jiRIIIIAAAggggAACCCCAQC4KGJ1EKCnZI7Wa9ZN+R/eMOtNAO0zf/L7z3ylSsv4Lyc/PL1cS4ekXJsgVN/xNFsx+Q3r3Hy6Pjb1dLjpvEEkEwyPD5CQCMeA8+HiA6sLI4AeoxICL/q11WLCQJr85IgV4HXA3E0Hl+F2ICEIAAQQQQAABBBBAAAEE/CdgdBJBu+vCEXfKy298KHfdfqX8+ZbLQ2Yb6N4Ip54zUk4+vo+888p9Eb2b6EwE/cRrk4NPlhMGHCGTp30l65d+IoW1a5JEMDxuTH54RAw4Dz6SCC6MDE4iEAMu+pckgiMSrwPukwj8LuQ4nCiAAAIIIIAAAggggAACCOScgOeTCCOGD5EKFUJnEGgvDB92mhzV+1DZuKlYjjnxcln00wpp1LCuHH1kd6lerYr199nfLJDGjerJl5Ofl1YtmpQ7iaAX0Ht98eVcK5EwaeIj1jVZzsjsuPD6wyNioHzjiySCsx8x4P4BKq8DzuPJxBLEADFg4rilzggggAACCCCAAAIIIIBApgQ8n0SIBWFfu1c3u3zg4ZdlwntTZenyVVKye480b9pQTj+ln9xx86XSoH6dqJdJdCaCXuS+h16SW/78oDz279vl6suHkkTI1EhN4328/vCIGChf55NEcPYjBhJ7gMrrgPOYMq0EMUAMmDZmqS8CCCCAAAIIIIAAAgggkEkBzyYRMomQy/fiAapz73r14ZFzzSnhRoAYcFYiBpyNTC5BDDj3HjHgbGRyCWLA5N6j7ggggAACCCCAAAIIIOAFAZIIXuiFNNaBN87OuDw8cjYyuQQx4Nx7xICzkckliAHn3iMGnI1MLkEMmNx71B0BBBBAAAEEEEAAAQS8IEASwQu9kMY68MbZGZeHR85GJpcgBpx7jxhwNjK5BDHg3HvEgLORySWIAZN7j7ojgAACCCCAAAIIIICAFwRIInihF9JYB944O+Py8MjZyOQSxIBz7xEDzkYmlyAGnHuPGHA2MrkEMWBy71F3BBBAAAEEEEAAAQQQ8IIASQQv9EIa68AbZ2dcHh45G5lcghhw7j1iwNnI5BLEgHPvEQPORiaXIAZM7j3qjgACCCCAAAIIIIAAAl4QIInghV5IYx144+yMy8MjZyOTSxADzr1HDDgbmVyCGHD
uPWLA2cjkEsSAyb1H3RFAAAEEEEAAAQQQQMALAiQRvNALaawDb5ydcXl45GxkcgliwLn3iAFnI5NLEAPOvUcMOBuZXIIYMLn3qDsCCCCAAAIIIIAAAgh4QYAkghd6IY114I2zMy4Pj5yNTC5BDDj3HjHgbGRyCWLAufeIAWcjk0sQAyb3HnVHAAEEEEAAAQQQQAABLwiQRPBCL6SxDrxxdsbl4ZGzkckliAHn3iMGnI1MLkEMOPceMeBsZHIJYsDk3qPuCCCAAAIIIIAAAggg4AUBkghe6IU01oE3zs64PDxyNjK5BDHg3HvEgLORySWIAefeIwacjUwuQQyY3HvUHQEEEEAAAQQQQAABBLwgQBLBC72QxjrwxtkZl4dHzkYmlyAGnHuPGHA2MrkEMeDce8SAs5HJJYgBk3uPuiOAAAIIIIAAAggggIAXBEgieKEX0lgH3jg74/LwyNnI5BLEgHPvEQPORiaXIAace48YcDYyuQQxYHLvUXcEEEAAAQQQQAABBBDwgkBIEsELFaIO6RMoKytL38UNvrL94ZHBzaDqLgSIgehIxICLwZMjRYgBYiBHhnLSzSAGkqbjRAQQQAABBBBAAAEEEPCxAEkEH3U+b5x5eOSj4R61qcQAMUAMkEyONgZIpPknMngd8E9f01IEEEAAAQQQQAABBBBInQBJhNRZev5KvHHmAarnB2maK0gMEANpHmKevzwxQAx4fpCmuYLEQJqBuTwCCCCAAAIIIIAAAgjkpEBemc/eTY0ePVruvvtuueuuu0T/zIGA3wSIAb/1OO0NFyAGGBN+FyAG/D4CaD8CCCCAAAIIIIAAAgggkJiAr5IIxcXF0qZNG9HvhYWFsmzZMus7BwJ+ESAG/NLTtDOWADHA2PC7ADHg9xFA+xFAAAEEEEAAAQQQQACBxAV8lUQIfPIuwMRshMQHDGeYLUAMmN1/1L78AsRA+Q25gtkCxIDZ/UftEUAAAQQQQAABBBBAAIFsCPgmiWD/5F0AmtkI2Rhy3DNbAsRAtuS5r1cEiAGv9AT1yJYAMZAtee6LAAIIIIAAAggggAACCJgt4JskQvgn7wLdxmwEswcwtXcvQAy4t6JkbgoQA7nZr7TKvQAx4N6KkggggAACCCCAAAIIIIAAAv8T8EUSIdon7wIEzEYgHPwgQAz4oZdpYzwBYoDx4XcBYsDvI4D2I4AAAggggAACCCCAAALJC/giiRDrk3cBNmYjJD+AONMMAWLAjH6ilukTIAbSZ8uVzRAgBszoJ2qJAAIIIIAAAggggAACCHhRIOeTCPE+eRfoEGYjeHFoUqdUCRADqZLkOqYKEAOm9hz1TpUAMZAqSa6DAAIIIIAAAggggAACCPhTIOeTCE6fvAt0O7MR/BkAfmg1MeCHXqaN8QSIAcaH3wWIAb+PANqPAAIIIIAAAggggAACCJRPIKeTCG4+eRfgYzZC+QYSZ3tTgBjwZr9Qq8wJEAOZs+ZO3hQgBrzZL9QKAQQQQAABBBBAAAEEEDBJIKeTCG4/eRfoMGYjmDR0qasbAWLAjRJlclmAGMjl3qVtbgSIATdKlEEAAQQQQAABBBBA4P+1dyfQclVV3sA3IMhyYFBBcUKcULRVBNFGhgQBmYmANDNhVhzIElDBIYnTpzilnUBFjYgKGiQo0jImcQS02xbl6+XQoP2J2oBtVCQOaL5V9fo9M7zkVKXOqzrn3l/WcjUNp+4997f3f73U2a/eI0CAwNoEGjtEWPU773bffffovJGePn36hMeiRYu6/27JkiXdf+fTCMLSJAEZaFI1Pcu6CMjAuqh5TZMEZKBJ1fQsBAgQIECAAAECBAgQGJ1AY4cI4995Nz48mDZtWld5vfXWm9Bevnx5958XL148MUzwaYTRNaM75xWQgbyerlafgAzUVzM7zisgA3k9XY0AAQIECBAgQIAAAQJtFWjkEKHznXczZ86MWbNmxfjwYLzAkw0Rxv9bZ5gwb968mD9/fvdTCf4QqFV
ABmqtnH3nEpCBXJKuU6uADNRaOfsmQIAAAQIECBAgQIBAeQKNHCKsjXltQ4TyymNHBPILyEB+U1esS0AG6qqX3eYXkIH8pq5IgAABAgQIECBAgACBJgsYIjS5up6NwCQCDo+0RdsFZKDtHeD5ZUAPECBAgAABAgQIECBAgEA/AoYI/WhZS6ABAg6PGlBEjzCQgAwMxOfFDRCQgQYU0SMQIECAAAECBAgQIEBgiAKGCEPEdisCJQg4PCqhCvYwSgEZGKW+e5cgIAMlVMEeCBAgQIAAAQIECBAgUI+AIUI9tbJTAlkEHB5lYXSRigVkoOLi2XoWARnIwugiBAgQIECAAAECBAgQaI2AIUJrSu1BCYwJODzSCW0XkIG2d4DnlwE9QIAAAQIECBAgQIAAAQL9CBgi9KNlLYEGCDg8akARPcJAAjIwEJ8XN0BABhpQRI9AgAABAgQIECBAgACBIQoYIgwR260IlCDg8KiEKtjDKAVkYJT67l2CgAyUUAV7IECAAAECBAgQIECAQD0Chgj11MpOCWQRcHiUhdFFKhaQgYqLZ+tZBGQgC6OLECBAgAABAgQIECBAoDUChgitKbUHJTAm4PBIJ7RdQAba3gGeXwb0AAECBAgQIECAAAECBAj0I2CI0I+WtQQaIODwqAFF9AgDCcjAQHxe3AABGWhAET0CAQIECBAgQIAAAQIEhihgiDBEbLciUIKAw6MSqmAPoxSQgVHqu3cJAjJQQhXsgQABAgQIECBAgAABAvUIGCLUUys7JZBFwOFRFkYXqVhABiounq1nEZCBLIwuQoAAAQIECBAgQIAAgdYIGCK0ptQelMCYgMMjndB2ARloewd4fhnQAwQIECBAgAABAgQIECDQj4AhQj9a1hJogIDDowYU0SMMJCADA/F5cQMEZKABRfQIBAgQIECAAAECBAgQGKKAIcIQsd2KQAkCDo9KqII9jFJABkap794lCMhACVWwBwIECBAgQIAAAQIECNQjYIhQT63slEAWAYdHWRhdpGIBGai4eLaeRUAGsjC6CAECBAgQIECAAAECBFojYIjQmlJ7UAJjAg6PdELbBWSg7R3g+WVADxAgQIAAAQIECBAgQIBAPwKGCP1oWUugAQIOjxpQRI8wkIAMDMTnxQ0QkIEGFNEjECBAgAABAgQIECBAYIgChghDxHYrAiUIODwqoQr2MEoBGRilvnuXICADJVTBHggQIECAAAECBAgQIFCPgCFCPbWyUwJZBBweZWF0kYoFZKDi4tl6FgEZyMLoIgQIECBAgAABAgQIEGiNgCFCa0rtQQmMCTg80gltF5CBtneA55cBPUCAAAECBAgQIECAAAEC/QgYIvSjZS2BBgg4PGpAET3CQAIyMBCfFzdAQAYaUESPQIAAAQIECBAgQIAAgSEKGCIMEdutCJQg4PCohCrYwygFZGCU+u5dgoAMlFAFeyBAgAABAgQIECBAgEA9AoYI9dTKTglkEXB4lIXRRSoWkIGKi2frWQRkIAujixAgQIAAAQIECBAgQKA1AoYIrSm1ByUwJuDwSCe0XUAG2t4Bnl8G9AABAgQIECBAgAABAgQI9CNgiNCPlrUEGiDg8KgBRfQIAwnIwEB8XtwAARloQBE9AgECBAgQIECAAAECBIYoYIgwRGy3IlCCgMOjEqpgD6MUkIFR6rt3CQIyUEIV7IEAAQIECBAgQIAAAQL1CBgi1FMrOyWQRcDhURZGF6lYQAYqLp6tZxGQgSyMLkKAAAECBAgQIECAAIHWCBgitKbUHpTAmIDDI53QdgEZaHsHeH4Z0AMECBAgQIAAAQIECBAg0I+AIUI/WtYSaICAw6MGFNEjDCQgAwPxeXEDBGSgAUX0CAQIECBAgAABAgQIEBiigCHCELHdikAJAg6PSqiCPYxSQAZGqe/eJQjIQAlVsAcCBAgQIECAAAECBAjUI2CIUE+t7JRAFgGHR1kYXaRiARmouHi2nkVABrIwuggBAgQIECBAgAA
BAgRaI2CI0JpSe1ACYwIOj3RC2wVkoO0d4PllQA8QIECAAAECBAgQIECAQD8Chgj9aFlLoAECDo8aUESPMJCADAzE58UNEJCBBhTRIxAgQIAAAQIECBAgQGCIAoYIQ8R2KwIlCDg8KqEK9jBKARkYpb57lyAgAyVUwR4IECBAgAABAgQIECBQj4AhQj21slMCWQQcHmVhdJGKBWSg4uLZehYBGcjC6CIECBAgQIAAAQIECBBojYAhQmtK7UEJjAk4PNIJbReQgbZ3gOeXAT1AgAABAgQIECBAgAABAv0IGCL0o2UtgQYIODxqQBE9wkACMjAQnxc3QEAGGlBEj0CAAAECBAgQIECAAIEhChgiDBHbrQiUIODwqIQq2MMoBWRglPruXYKADJRQBXsgQIAAAQIECBAgQIBAPQKGCPXUyk4JZBFweJSF0UUqFpCBiotn61kEZCALo4sQIECAAAECBAgQIECgNQKGCK0ptQclMCbg8EgntF1ABtreAZ5fBvQAAQIECBAgQIAAAQIECPQjYIjQj5a1BBog4PCoAUX0CAMJyMBAfF7cAAEZaEARPQIBAgQIECBAgAABAgSGKGCIMERstyJQgoDDoxKqYA+jFJCBUeq7dwkCMlBCFeyBAAECBAgQIECAAAEC9QgYItRTKzslkEXA4VEWRhepWEAGKi6erWcRkIEsjC5CgAABAgQIECBAgACB1ggYIrSm1B6UwJiAwyOd0HYBGWh7B3h+GdADBAgQIECAAAECBAgQINCPgCFCP1rWEmiAgMOjBhTRIwwkIAMD8XlxAwRkoAFF9AgECBAgQIAAAQIECBAYooAhwhCx3YpACQIOj0qogj2MUkAGRqnv3iUIyEAJVbAHAgQIECBAgAABAgQI1CNgiFBPreyUQBYBh0dZGF2kYgEZqLh4tp5FQAayMLoIAQIECBAgQIAAAQIEWiNgiNCaUntQAmMCDo90QtsFZKDtHeD5ZUAPECBAgAABAgQIECBAgEA/AoYI/WhZS6ABAg6PGlBEjzCQgAwMxOfFDRCQgQYU0SMQIECAAAECBAgQIEBgiAKGCEPEdisCJQg4PCqhCvYwSgEZGKW+e5cgIAMlVMEeCBAgQIAAAQIECBAgUI+AIUI9tbJTAlkEHB5lYXSRigVkoOLi2XoWARnIwugiBAgQIECAAAECBAgQaI2AIUJrSu1BCYwJODzSCW0XkIG2d4DnlwE9QIAAAQIECBAgQIAAAQL9CBgi9KNlLYEGCDg8akARPcJAAjIwEJ8XN0BABhpQRI9AgAABAgQIECBAgACBIQoYIgwR260IlCDg8KiEKtjDKAVkYJT67l2CgAyUUAV7IECAAAECBAgQIECAQD0Chgj11MpOCWQRcHiUhdFFKhaQgYqLZ+tZBGQgC6OLECBAgAABAgQIECBAoDUChgitKbUHJTAm4PBIJ7RdQAba3gGeXwb0AAECBAgQIECAAAECBAj0I2CI0I+WtQQaIODwqAFF9AgDCcjAQHxe3AABGWhAET0CAQIECBAgQIAAAQIEhihgiDBEbLciUIKAw6MSqmAPoxSQgVHqu3cJAjJQQhXsgQABAgQIECBAgAABAvUIGCLUUys7JZBFwOFRFkYXqVhABiounq1nEZCBLIwuQoAAAQIECBAgQIAAgdYIGCK0ptQelMCYgMMjndB2ARloewd4fhnQAwQIECBAgAABAgQIECDQj4AhQj9a1hJogIDDowYU0SMMJCADA/F5cQMEZKABRfQIBAgQIECAAAECBAgQGKKAIcIQsd2KQAkCDo9KqII9jFJABkap794lCMhACVWwBwIECBAgQIAAAQIECNQjYIhQT63slEAWAYdHWRhdpGIBGai4eLaeRUAGsjC6CAECBAgQIECAAAECBFojYIjQmlJ7UAJjAg6PdELbBWSg7R3g+WVADxAgQIAAAQIECBAgQIBAPwKGCP1oWUugAQIOjxpQRI8wkIAMDMTnxQ0QkIEGFNEjECBAgAA
BAgQIECBAYIgChghDxHYrAiUIODwqoQr2MEoBGRilvnuXICADJVTBHggQIECAAAECBAgQIFCPgCFCPbWyUwJZBBweZWF0kYoFZKDi4tl6FgEZyMLoIgQIECBAgAABAgQIEGiNgCFCa0rtQQmMCTg80gltF5CBtneA55cBPUCAAAECBAgQIECAAAEC/QisFxHL+3mBtfUKLF+u1JNVb8XDlHqra+e9CMjA5Eoy0Ev3NGONDMhAMzp53Z9CBtbdzisJECBAgAABAgQIEGivgCFCi2rvjbPDoxa1+6SPKgMyIAOGyYbJ7U6BrwPtrr+nJ0CAAAECBAgQIEBg3QQMEdbNrcpXeePsALXKxs24aRmQgYztVOWlZEAGqmzcjJuWgYyYLkWAAAECBAgQIECAQGsEVhoiLP/dd1rz4G150PU22XHiUb1xTh8eyUDzkiED6Zqu9PPRfR1Ig1W2QgbSBZOBtFHNK2Sg5urZOwECBAgQIECAAAECJQgYIpRQhSncgzfOaVyHR2mjmlfIQLp6MpA2qnmFDKSrJwNpo5pXyEDN1bN3AgQIECBAgAABAgRKEDBEKKEKU7gHb5zTuA6P0kY1r5CBdPVkIG1U8woZSFdPBtJGNa+QgZqrZ+8ECBAgQIAAAQIECJQgYIhQQhWmcA/eOKdxHR6ljWpeIQPp6slA2qjmFTKQrp4MpI1qXiEDNVfP3gkQIECAAAECBAgQKEHAEKGEKkzhHrxxTuM6PEob1bxCBtLVk4G0Uc0rZCBdPRlIG9W8QgZqrp69EyBAgAABAgQIECBQgoAhQglVmMI9eOOcxnV4lDaqeYUMpKsnA2mjmlfIQLp6MpA2qnmFDNRcPXsnQIAAAQIECBAgQKAEAUOEEqowhXvwxjmN6/AobVTzChlIV08G0kY1r5CBdPVkIG1U8woZqLl69k6AAAECBAgQIECAQAkChgglVGEK9+CNcxrX4VHaqOYVMpCungykjWpeIQPp6slA2qjmFTJQc/XsnQABAgQIECBAgACBEgQMEUqowhTuwRvnNK7Do7RRzStkIF09GUgb1bxCBtLVk4G0Uc0rZKDm6tk7AQIECBAgQIAAAQIlCBgilFCFKdyDN85pXIdHaaOaV8hAunoykDaqeYUMpKsnA2mjmlfIQM3Vs3cCBAgQIECAAAECBEoQGPoQ4e5fL40tttkz+exfu/rC2OUfn5VcN8wFF196VRx7yhvj20suih23326Yt17ne3njnKYb9uGRDKRrknOFDKQ1ZSBtNL7C14HerWpaKQO9V0sGereykgABAgQIECBAgAABAk0RGNkQ4clPfGxM323HNTqefcZx8YRtHl2UszfORZUj22ZGdXgkA9lKuNYLGSKknWUgbWSI0LtRjStloPeq+btQ71ZWEiBAgAABAgQIECBAoCkCIxsiHH/UATH/gjlVOXrjXFW5et7sqA6PZKDnEg200BAhzScDaSNDhN6NalwpA71Xzd+FereykgABAgQIECBAgAABAk0RMEToo5LeOPeBVdFSh0e9F0sGereqaaUM9F4tGejdqqaVMtB7tWSgdysrCRAgQIAAAQIECBAg0BSB4ocIC69cHC866qz4+jUfi/d+8NNx3aKb4777/hp77P6ceO/bX7XSjzxatuxP6MAPvwAAIABJREFU8ebzLoxLFlwdv7rz17H1Y7aK0048JM546ZExfkDwh3uXxdz/89G49LJrumsetdUWcdTh+8Trzz45Nt54o4m63nnX/8Rr3vj+uPIrX4s///kvccRhL4yddnhanPzyN6/0OxH+9Kc/x1vf9fG4+JKr4o5f3hWPfMQWcfQ/7RNvePXJcf/7j11vwcLr48XHvSauuux98cqz3xm/+OVdcfas42LOOadOeR/5Luw0cemHRzKQruHaVshA2k8GfB1Id0mzV8iADDS7wz0dAQIECBAgQIAAAQIEBhOoZojw0IdsGofNeEEcctAe8bP/+mWc9bp58bitHxnf++ZnuwLLly+
P6fufFl/9xndj5tEHdg/8v/qNf4vPLrg63vCak+NNr3tJ/PGPf45p+50aN//rrd012z9z2/jWTbd013R+P8M1Cz8Y97vfBt112+9yVNz20zvi5aceHo/caov41CVXde+79Le/nxgi/O1vf4v9Dj0jrlt8U5xwzEGxw7OeGv/+/R/GhZ9cGHtNf158ecG8WH/99SeGCFs8bPN4wbTnxCYPflAcuO+uccA+uw5WvR5e7QA1jVTL4ZEMpGs52QoZSLvJgK8D6S5p9goZkIFmd7inI0CAAAECBAgQIECAwGACIxsiPOXJj4sXTNtp0t0/6QmP6X56oPNn/LuwV/358W9+x4XxxrdeEN+/8dJ4+nZPiM9ffl0cfvxr45/POyte+ZIjJq4748gzu59euPP2a+Oj8y+PWa95d3zw3a+J00958cSaN739ozH7bR+Oj3/ojd1hwPs/fGn3EwMLP/vuOHj/3bvr7l32x3jeHjPj+7f+ZGKI0Bk+HHXi6+Ij73tdnDLzRRPX++RnroyZL5kTCz51Xhx68B4TQ4QjD3thfObjbx2sYn2+2gFqGmxUh0cykK5NjhUykFaUgQhfB9J90uQVMiADTe5vz0aAAAECBAgQIECAAIFBBUY2RFjbxnff5dmx+KqPrDREuOQTb4t/OnTviZeNDw1uuPKC7qcIjj9tdlz6hWvjtz9fPPFjhDqL7/jFnXHPH5ZFZzCx50Gnd4cAv/zx1d1PHIz/6fwYpIdsPT323uN5ccUl74l9XvSKuOXWH8cvfvSVlbY5Plz49pKLYsftt+v+iKLOjzu67ZYvxoYb3m9i7V/+cl9s/bQD4ujD941PnD97Yohw0UfeFMcesd+gNevr9Q5Q01yjOjySgXRtcqyQgbSiDET4OpDukyavkAEZaHJ/ezYCBAgQIECAAAECBAgMKjCyIcKqnyxY04OMfxLhmis+GHtNf+7EsvF/f+0VH4o9p+8Uu+97avd3Dfz43y9fo8k2Tz8oHvHwh8a3rv/EamueuuNh3eHDv3/jM7Htsw+Jh2/50PjqVz660rprF90Uex/8solPIjx716Pju9/74Rrv1/m9Ddd/6fyJIcL4XgctWj+vd4Ca1hrV4ZEMpGuTY4UMpBVlYMzI14F0rzR1hQzIQFN723MRIECAAAECBAgQIEAgh0A1Q4RVD+BXHSLssvdJcdfdv4kf/tsX1ujyuKcdGFs94mGTDhE6g4NNN3lQ3Lz4onjKDofGJg9+YPefV/xz9fXf6n5KYfyTCM/c+ci4+9dL45MXzJ30nptv/uDu70kY/8XKi7784Zi26w456tbzNRygpqlqOTySgXQtJ1shA2k3GRgz8nUg3StNXSEDMtDU3vZcBAgQIECAAAECBAgQyCHQmCFC53cTXH7lovjtz5fERhttOGHzjRu/Fxd87LKYc+6pcdLL3hS3/sdt8aufXBMbbLD+xJrOL1Le/LHTYr+9d4nLLj4v9jv0ld1fvvzf/3ntSus+9NHPx8vOfMfEEOHgI14VX77667H054vjQQ98wMT1/vrXv8WChdfFM57+pHjqttsYIuTo1B6usXTp0thss816WLnykqYcHsnA5KVv0xBBBnwdaPsgTQZkoO0Z6PsvQV5AgAABAgQIECBAgACBHgQaM0S4+NKr4thT3rjaLzk+8sRzu7+c+e6fXt8dJpz1unlx/nvPiZecdOgEz9vfMz/OmfOBiV+sfOEnF8Ypr3hLfPT9r4+Tj5/RXdf5PQc77HbMSr9Y+WMXXREnv/zNcc6ZJ8TbZr9s4nof/9QXuwOLee84s/sLon0SoYdOzLBkxowZsf3228cZZ5zR1zChKUMEGTBEkAFfB9p+gCoDMtD2DGT465RLECBAgAABAgQIECBAYDWBkQ0RnvzEx3Z/IfKa/uw1/Xlx6MF7dAcALzrqrEj9KJf77vtrTN//tPjmTbfEaSceEs98+pNj8de/E5csuCbOe/Mr4+wzjuv+4syd9zohbvnBT+Kk4w6O7Z+
xbdz47e/HRZ/9cuy68/bR+XFDnU8odK7V+fFI3/nu/43TT35xPPlJW8fFl1wV/3n7z7s/vmj8xxl1Bgude3Y+7XD4IXvF9F13jB/95GfxoQsXxFOe/Lj4+jUXdj+hYIgwnOQtXrw4pk+f3h0gzJo1q+dhwqiGCDIwnL5o0ycRZMDXgbYfoMqADLQ9A8P5yuouBAgQIECAAAECBAi0TWBkQ4QU9JmvOCbe9dZZPQ8ROte75w/3xuy3fjg+d/m13cP+Jz3hsTHr9KPixGMPmrjdb393T3fNZV+8Pu686zfx2Ec/Io46fJ8498wTur9YefzP7++5N86d84H4/MLr4p57lsW+e+8ch79orzj8+NdODBE6a/9w77J4y3kf6w4rfvGru+IRWz40Dtxvt5hzzqnxsIeO/WgdQ4RUtfP992nTpsWSJUu6F+x1mDCqIULqqWUgJdTbf2/TEKEjIgO+DqyaDBlID5V9HfB3od6+olhFgAABAgQIECBAgACBdgoMfYjQTubRPXXbDo/Gvwt1RfHUMGHYh0ej64Z23lkG0gM1GWh2NmRABprd4emna1sG0iJWECBAgAABAgQIECBAoD8BQ4T+vKpb3cY3zit+J3YvwwQHqNW1dV8bloG/c61poCYDfbVUdYtlQAaqa9rMG25jBjITuhwBAgQIECBAgAABAi0XMERoeAO08Y3zZJ9GWNswwQFqs0MgA6vXd9VhggzIQNMEfB1oWkUHe542fh0YTMyrCRAgQIAAAQIECBAgsLKAIULDO2LFN84Nf9S+H2/8IHXOnDkTr13+u+/0fR0vKFtABtZcHxkou3dz7U4GZCBXL9V6HUOEWitn3wQIECBAgAABAgQIlCJgiFBKJaZoHw6P+oM1ROjPq4bVMtBflWSgP68aVstAf1WSgf68alhtiFBDleyRAAECBAgQIECAAIGSBQwRSq5Ohr05PFoz4qabbhqzZs2KuXPnTixyeJSh6Qq7hAzIQGEtOfTtyIAMDL3pCruhIUJhBbEdAgQIECBAgAABAgSqEzBEqK5k/W24jW+cUz8Le3x40BkgdH6ci58H319P1bZaBlavmAzU1sWD7VcGZGCwDqr/1W3MQP1V8wQECBAgQIAAAQIECJQkUOwQYcaRZ8YVX14Sy+78Zmy88UZrNfv9PffGO//5orjsihvi9p/dERttuGFs95THx3FH7h+nnvCiWH/99buvf/lZ58UHP/K5pP/Rh+8bF1/45jjihHPj0suuiQ02WD/uvO26eMjmm0z62mfufGTc8oMfx/FHHRDzL/j7z9dP3mgIC9r4xnnatGmxZMmS1XRXPTgdX1DqEEEG8gREBv7uKAO+DixfvjxPsAq/iq8D/i60You28etA4RG1PQIECBAgQIAAAQIEKhOofojQGSA8f68T4/u3/iR23H67ePp2T4hlf/xTfOumW+K/fv6rOPTgPeLzF72j+93mnYHAoq/9/Rfn3nb7HXHtopti2q47xLZP2nqidM/d8elxwjEHTQwROv/hoo+8KY49Yr/VynvbT++IJzzj4O6/N0QYffdP9imENR2cNmWIIANr77u2HR7JgK8DqyZCBiJ8HfB3ofFctGWQNvq/kdkBAQIECBAgQIAAAQJNEqh+iPDu918cZ71uXlww79w47cRDJmrz5z//JY4++fWxYOH1ccUl74mD9ttttbp1/tuLj3tNfOL82THz6ANX+++dTyIsWHhdPGqrLeM5O2wXCz513mpr3vW+T8V58y6Ku+7+jSFCAclY8btPU4dGTRkiyIAhwooCMuDrQNuHCDIgA23PQAF/HbMFAgQIECBAgAABAgQaJlD9EOGfZp4TX/jiDfHHu77V/bFDK/65/We/iMf/w0Hx0pMPiw+957XrPER46UmHxfxPXxl3//S6uP/9V/7RSp1PQTx1223iYxddYYgw4nCMfwd2r8ODpgwRZMAQYVxABnwdmCwNbfokggzIQNszMOK/irk9AQIECBAgQIAAAQINFah
+iHD6q94e51+4IK7/0vmxx+7PWa1MnR83tOUWm8eDHviAdR4iXH35B2LPg06Pqy57X+y7184T1/nVf/86HrXtvvGlz7039j/sDEOEEYdkxowZ8axnPSvGf2Fyr9up/XciyIAhwriADPg60PYDVBmQgbZnoNe/+1hHgAABAgQIECBAgACBfgSqHyJ0fqHxTtOPj87PuD30oD1ixgHTur/jYMstHpJ06PXHGXU+5bDl4/eKww/Zs/tjk8b/XPCxy+KcOR+IX/zoK/GAhz/fECEpPrULli5dGptttlnfN6l9iCADhgjjAjLg60DbD1BlQAbanoG+/xLkBQQIECBAgAABAgQIEOhBoPohQucZv/bN78ZpZ7wt/uOHt0888j887YndocKslx0Vm27yoEkpeh0i3Pebm+PYU94Y1y+5Oe744b90f0lz588LZ7y8O6zo/E6FDR/yXEOEHhquxCW1DxFkwBBh0FzJQPp34/g6MGiXlf16GZCBsjvU7ggQIECAAAECBAgQIDBagUYMEcYJb/rOD+Jfrv1m3LDk2/Gtm2+J++77azzqkVvG4qs+HE98/GNWk+5niHDZFTfEYce+Om5a9MnYaYenxW9/d09ssc2ecckn3hYH7be7IcJo+3iguzfh8EgG1twCbfp58OsaBBno7QDV14F17bDyXycDMlB+l9ohAQIECBAgQIAAAQIERifQqCHCioy/Wfq7OG/eRfH298zv/q6Ezu9MWPVPP0OEP9y7LB72uBfEma84Jt7yhtPj4kuvitNe+ba466fXxUYbbmiIMLoeHvjOTTo8koHV28EQIR0RGejtANXXgXQv1bpCBmSg1t61bwIECBAgQIAAAQIECAxDoOohwp13/U+cee682P+Fu8QRh+09qde+h7wyrl10Y3R+r8H97rfBSmv6GSJ0XnjAi2fFz/7fL+P7N14ahx7z6u7vYfjCp9/Z/cSDH2c0jHadmnvUfHgkA+meMEToweh/f0RbZ+Xy330n/YIhrZhx5JlxxZeXxLI7vxkbb7zRpHeVgXQxZKAHIxno/mjGmUcfuBrWESecGwsWXhedH+nl70LpXrKCAAECBAgQIECAAAECTRSoeoiwbNmfYpNH7Ra7Pf/Zk37SoFOwzpvfy7+0KJbd+Y1Yf/31BxoiXPjJhXHKK94St978udhp2vFx/rxz4tgj9jNEqDwZNQ8RZCDdfA5QezCq+ABVBnqo7yY7TizqDL/9WV3A14HePonQkfN3IQkiQIAAAQIECBAgQIBA+wSqHiJ0ynXMyW+IT3/uX2L2OafG688+aaVPG3R+N8L+Lz4j9tlz57j8M+9arbr9fhKh8x2vWz1pn9hr+nPjhq9+O+687drYbNMHGyJUnpuaD49kIN18hgg9GFU8RJCBHupriJBE8nWg9yGCvwsl28kCAgQIECBAgAABAgQINE6g+CHCycfPiA02WPkTBJ0qHH/UAfGPOz0j7v710thl75Pihz/+WTx8y4fE85/3rHjgAzbu/v83/+ut8YiHPzRuvGF+bP2YrQYeInQu0LnXN278XneQcM0VH+xe048zqjsXpR8eycBg/WWIkPaTgd4PUH0dSPdTjStkQAZq7Ft7JkCAAAECBAgQIECAwLAEih8irAlixZ/d2/lll+95/6dj4ZWL47af3hHL/vinePQjt4wD990tzj3rhNjiYZtPepl+P4nQuci73vepOPv1/xznv/eceMlJhxoiDKtTp/A+pR8eycBgxTdESPvJQH8HqL4OpHuqthUyIAO19az9EiBAgAABAgQIECBAYJgCxQ4RhonQ5Hs5QE1Xt9TDo/TOrehFQAbSSjKQNqp5hQykqycDaaOaV8hAzdWzdwIECBAgQIAAAQIEShAwRCihClO4B2+c07gOj9JGNa+QgXT1ZCBtVPMKGUhXTwbSRjWvkIGaq2fvBAgQIECAAAECBAiUIGCIUEIVpnAP3jincR0epY1qXiED6erJQNqo5hUykK6eDKSNal4hAzV
Xz94JECBAgAABAgQIEChBwBChhCpM4R68cU7jOjxKG9W8QgbS1ZOBtFHNK2QgXT0ZSBvVvEIGaq6evRMgQIAAAQIECBAgUIKAIUIJVZjCPXjjnMZ1eJQ2qnmFDKSrJwNpo5pXyEC6ejKQNqp5hQzUXD17J0CAAAECBAgQIECgBAFDhBKqMIV78MY5jevwKG1U8woZSFdPBtJGNa+QgXT1ZCBtVPMKGai5evZOgAABAgQIECBAgEAJAoYIJVRhCvfgjXMa1+FR2qjmFTKQrp4MpI1qXiED6erJQNqo5hUyUHP17J0AAQIECBAgQIAAgRIEDBFKqMIU7sEb5zSuw6O0Uc0rZCBdPRlIG9W8QgbS1ZOBtFHNK2Sg5urZOwECBAgQIECAAAECJQgYIpRQhSncgzfOaVyHR2mjmlfIQLp6MpA2qnmFDKSrJwNpo5pXyEDN1bN3AgQIECBAgAABAgRKEDBEKKEKU7gHb5zTuA6P0kY1r5CBdPVkIG1U8woZSFdPBtJGNa+QgZqrZ+8ECBAgQIAAAQIECJQgYIhQQhWmcA/eOKdxHR6ljWpeIQPp6slA2qjmFTKQrp4MpI1qXiEDNVfP3gkQIECAAAECBAgQKEHAEKGEKkzhHrxxTuM6PEob1bxCBtLVk4G0Uc0rZCBdPRlIG9W8QgZqrp69EyBAgAABAgQIECBQgsBKQ4QSNmQPUyewfPnyqbt4xVde8fCo4sew9R4EZGByJBnooXkaskQGZKAhrbzOjyED60znhQQIECBAgAABAgQItFjAEKFFxffG2eFRi9p90keVARmQAcPkyXrAIK09yfB1oD219qQECBAgQIAAAQIECOQTMETIZ1n8lbxxdoBafJNO8QZlQAamuMWKv7wMyEDxTTrFG5SBKQZ2eQIECBAgQIAAAQIEGimw3vKWvZuaM2dOzJ07N2bPnh2df/aHQNsEZKBtFfe8qwrIgJ5ou4AMtL0DPD8BAgQIECBAgAABAgT6E2jVEGHp0qWxzTbbROf/brbZZnH77bd3/68/BNoiIANtqbTnXJOADOiNtgvIQNs7wPMTIECAAAECBAgQIECgf4FWDRHGv/NunMmnEfpvGK+oW0AG6q6f3Q8uIAODG7pC3QIyUHf97J4AAQIECBAgQIAAAQKjEGjNEGHF77wbh/ZphFG0nHuOSkAGRiXvvqUIyEAplbCPUQnIwKjk3ZcAAQIECBAgQIAAAQJ1C7RmiLDqd96Nl82nEepuYLvvXUAGereyspkCMtDMunqq3gVkoHcrKwkQIECAAAECBAgQIEDg7wKtGCJM9p134wQ+jSAObRCQgTZU2TOuTUAG9EfbBWSg7R3g+QkQIECAAAECBAgQILDuAq0YIqzpO+/G2XwaYd0byCvrEJCBOupkl1MnIANTZ+vKdQjIQB11sksCBAgQIECAAAECBAiUKND4IcLavvNuvCA+jVBia9pTLgEZyCXpOrUKyECtlbPvXAIykEvSdQgQIECAAAECBAgQINBOgcYPEVLfeTdedp9GaGcA2vDUMtCGKnvGtQnIgP5ou4AMtL0DPD8BAgQIECBAgAABAgQGE2j0EKGX77wb5/NphMEayavLFJCBMutiV8MTkIHhWbtTmQIyUGZd7IoAAQIECBAgQIAAAQI1CTR6iNDrd96NF8ynEWpqXXvtRUAGelGypskCMtDk6nq2XgRkoBclawgQIECAAAECBAgQIEBgbQKNHSKs+p13u+++e3TeSE+fPn3CY9GiRd1/t2TJku6/82kEYWmSgAw0qZqeZV0EZGBd1LymSQIy0KRqehYCBAgQIECAAAECBAiMTqCxQ4Tx77wbHx5Mmzatq7zeeutNaC9fvrz7z4sXL54YJvg0wuia0Z3zCshAXk9Xq09ABuqrmR3nFZCBvJ6uRoAAAQIECBAgQIAAgbYKNHKI0PnOu5kzZ8asWbNifHgwXuDJhgjj/60zTJg3b17Mnz+/+6kEfwjUKiADtVbOvnMJyEAuSde
pVUAGaq2cfRMgQIAAAQIECBAgQKA8gUYOEdbGvLYhQnnlsSMC+QVkIL+pK9YlIAN11ctu8wvIQH5TVyRAgAABAgQIECBAgECTBQwRmlxdz0ZgEgGHR9qi7QIy0PYO8PwyoAcIECBAgAABAgQIECBAoB8BQ4R+tKwl0AABh0cNKKJHGEhABgbi8+IGCMhAA4roEQgQIECAAAECBAgQIDBEAUOEIWK7FYESBBwelVAFexilgAyMUt+9SxCQgRKqYA8ECBAgQIAAAQIECBCoR8AQoZ5a2SmBLAIOj7IwukjFAjJQcfFsPYuADGRhdBECBAgQIECAAAECBAi0RsAQoTWl9qAExgQcHumEtgvIQNs7wPPLgB4gQIAAAQIECBAgQIAAgX4EDBH60bKWQAMEHB41oIgeYSABGRiIz4sbICADDSiiRyBAgAABAgQIECBAgMAQBQwRhojtVgRKEHB4VEIV7GGUAjIwSn33LkFABkqogj0QIECAAAECBAgQIECgHgFDhHpqZacEsgg4PMrC6CIVC8hAxcWz9SwCMpCF0UUIECBAgAABAgQIECDQGgFDhNaU2oMSGBNweKQT2i4gA23vAM8vA3qAAAECBAgQIECAAAECBPoRMEToR8taAg0QcHjUgCJ6hIEEZGAgPi9ugIAMNKCIHoEAAQIECBAgQIAAAQJDFDBEGCK2WxEoQcDhUQlVsIdRCsjAKPXduwQBGSihCvZAgAABAgQIECBAgACBegQMEeqplZ0SyCLg8CgLo4tULCADFRfP1rMIyEAWRhchQIAAAQIECBAgQIBAawQMEVpTag9KYEzA4ZFOaLuADLS9Azy/DOgBAgQIECBAgAABAgQIEOhHwBChHy1rCTRAwOFRA4roEQYSkIGB+Ly4AQIy0IAiegQCBAgQIECAAAECBAgMUcAQYYjYbkWgBAGHRyVUwR5GKSADo9R37xIEZKCEKtgDAQIECBAgQIAAAQIE6hEwRKinVnZKIIuAw6MsjC5SsYAMVFw8W88iIANZGF2EAAECBAgQIECAAAECrREwRGhNqT0ogTEBh0c6oe0CMtD2DvD8MqAHCBAgQIAAAQIECBAgQKAfAUOEfrSsJdAAAYdHDSiiRxhIQAYG4vPiBgjIQAOK6BEIECBAgAABAgQIECAwRAFDhCFiuxWBEgQcHpVQBXsYpYAMjFLfvUsQkIESqmAPBAgQIECAAAECBAgQqEfAEKGeWtkpgSwCDo+yMLpIxQIyUHHxbD2LgAxkYXQRAgQIECBAgAABAgQItEbAEKE1pfagBMYEHB7phLYLyEDbO8Dzy4AeIECAAAECBAgQIECAAIF+BAwR+tGylkADBBweNaCIHmEgARkYiM+LGyAgAw0ookcgQIAAAQIECBAgQIDAEAUMEYaI7VYEShBweFRCFexhlAIyMEp99y5BQAZKqII9ECBAgAABAgQIECBAoB4BQ4R6amWnBLIIODzKwugiFQvIQMXFs/UsAjKQhdFFCBAgQIAAAQIECBAg0BoBQ4TWlNqDEhgTcHikE9ouIANt7wDPLwN6gAABAgQIECBAgAABAgT6ETBE6EfLWgINEHB41IAieoSBBGRgID4vboCADDSgiB6BAAECBAgQIECAAAECQxQwRBgitlsRKEHA4VEJVbCHUQrIwCj13bsEARkooQr2QIAAAQIECBAgQIAAgXoEDBHqqZWdEsgi4PAoC6OLVCwgAxUXz9azCMhAFkYXIUCAAAECBAgQIECAQGsEDBFaU2oPSmBMwOGRTmi7gAy0vQM8vwzoAQIECBAgQIAAAQIECBDoR6DYIcLcuXNjzpw5/TxLEWs7e549e3YRe7GJugVkoO762f3gAjIwuKEr1C0gA3XXz+4JECBAgAABAgQIECDQFAFDhMyVNETIDNriyzk8anHxPXpXQAY0QtsFZKDtHeD5CRAgQIAAAQIECBAgUIaAIULmOhgiZAZt8eUcHrW4+B7dEEEPEDBI0wMECBAgQIAAAQI
ECBAgUIhAFUOE1592anT+V+qft3z4I9H5X+ePIUKpVapvXysOEWSgvvrZ8eACMjC4oSvULSADddfP7gkQIECAAAECBAgQINAUAUOEDJU0RMiA6BKrCTg80hRtF5CBtneA55cBPUCAAAECBAgQIECAAAECJQgYImSogiFCBkSXMETQAwRWEXCAqiXaLiADbe8Az0+AAAECBAgQIECAAIEyBAwRMtTBECEDoksYIugBAoYIeoDASgKGCBqCAAECBAgQIECAAAECBEoQMETIUAVDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMEQGNlrcAAAFMUlEQVTIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIo
TMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTMETIUBJDhAyILmGIoAcIGCLoAQKGCHqAAAECBAgQIECAAAECBIoTqGKIUJzaWjY0Z86cmD17dk1bttdCBebOnRudfqrtjwzUVrFy9ysD5dbGzoYjIAPDcXYXAgQIECBAgAABAgQIEFi7gCFC5g5xgJoZtMWXc3jU4uJ79K6ADGiEtgvIQNs7wPMTIECAAAECBAgQIECgDAFDhMx1METIDNriyzk8anHxPbohgh4gYJCmBwgQIECAAAECBAgQIECgEIH/D/cUpmnuuxgCAAAAAElFTkSuQmCC) ``` class Encoder(nn.Module): def __init__(self, seq_len, n_features, hidden_dim = 64): super(Encoder, self).__init__() self.seq_len = seq_len self.n_features = n_features self.hidden_dim_1 = hidden_dim self.hidden_dim_2 = hidden_dim * 2 self.lstm1 = nn.LSTM( input_size = n_features, hidden_size = self.hidden_dim_2, num_layers = 1, batch_first = True ) self.lstm2 = nn.LSTM( input_size = self.hidden_dim_2, hidden_size = self.hidden_dim_1, num_layers = 1, batch_first = True ) def forward(self, x): # x: [B, seq_len] == [B, 140] x = torch.unsqueeze(x, dim = 2).float() # x = x[:,:,None].float() out, (hidden, _) = self.lstm1(x) #[B, seq_len, hidden_dim_1] out, (hidden, _) = self.lstm2(out) #[1, B, hidden_dim_2] out = torch.squeeze(hidden, dim = 0) # [B, hidden_dim_2] return out class Decoder(nn.Module): def __init__(self, seq_len, n_features = 1, encoder_hidden_dim = 64): super(Decoder, self).__init__() self.seq_len = seq_len self.n_features = n_features self.encoder_hidden_dim = encoder_hidden_dim self.hidden_dim = encoder_hidden_dim * 2 self.lstm1 = nn.LSTM( input_size = encoder_hidden_dim, hidden_size = encoder_hidden_dim, num_layers = 1, batch_first = True ) self.lstm2 = nn.LSTM( input_size = encoder_hidden_dim, hidden_size = self.hidden_dim, num_layers = 1, batch_first = True ) self.fcn = nn.Linear(self.hidden_dim, n_features) def forward(self, x): # x: [B, encoder_hidden_dim] batch_size = x.size(0) x = x.repeat(self.seq_len, self.n_features) # x: [B * seq_len, encoder_hidden_dim] x = x.reshape(batch_size, -1, self.encoder_hidden_dim) # x: [B, seq_len, encoder_hidden_dim] x, 
(hidden, _) = self.lstm1(x) x, (hidden, _) = self.lstm2(x) x = self.fcn(x) # self.hidden_dim -> 1 #[b, 140, 1] x = torch.squeeze(x, dim = -1) return x class LSTMAE(nn.Module): def __init__(self, seq_len, n_features, device, hidden_dim = 64): super(LSTMAE, self).__init__() self.encoder = Encoder(seq_len, n_features, hidden_dim).to(device) self.decoder = Decoder(seq_len, n_features, hidden_dim).to(device) def forward(self, x): x = self.encoder(x) x = self.decoder(x) return x model = LSTMAE(SEQ_LEN, N_FEATURES, device, hidden_dim = 128).to(device) model_parameters = filter(lambda p: p.requires_grad, model.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) print(f"Number of trainable parameters: {params}") print(model) model(next(iter(anomaly_dataloader))[0].to(device)).shape ``` # Training ``` def train(model, optimizer, criterion, train_dataloader, val_dataloader, device, EPOCHS, EVALUATE_EVERY, scheduler = None, gradient_clipping = False, gradient_clipping_rate = 1., evaluate_epoch = False): model = model.to(device) total = len(train_dataloader) * EPOCHS log = defaultdict(list) log["train_loss"] = [] log["eval_loss"] = [] loss_a = [] with tqdm(total = total, desc="Training Round") as tt: for epoch in range(EPOCHS): total_loss, batch_loss, batch_counts = 0, 0, 0 for step, batch in enumerate(train_dataloader): model.train() batch_counts += 1 b_inputs, = tuple(t.to(device) for t in batch) optimizer.zero_grad() out = model(b_inputs) loss = criterion(out, b_inputs) total_loss += loss.item() loss.backward() optimizer.step() loss_a.append(loss.item()) if gradient_clipping: torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clipping_rate) if scheduler != None: scheduler.step() tt.update() if evaluate_epoch: val_loss = evaluate(model, criterion, val_dataloader, device) print(30*"*") print(f"{epoch+1}/{EPOCHS}:") print(f" - Train Loss: {np.mean(loss_a)}") print(f" - Validation Loss: {val_loss}") log["train_loss"].append(total_loss / 
len(train_dataloader)) log["eval_loss"].append(val_loss) if log["eval_loss"][np.argmin(log["eval_loss"])] == val_loss: print(f"Best model is stored with eval loss {val_loss}.") best_model = model print("Done.") tt.close() return model, best_model, log def evaluate(model, criterion, val_dataloader, device): total = len(val_dataloader) loss_a = [] with tqdm(total = total, leave=False, position=0, desc="Validation Round") as ee: val_loss, val_batch_loss, val_batch_counts = 0, 0, 0 for step, batch in enumerate(val_dataloader): model.eval() val_batch_loss += 1 b_inputs, = tuple(t.to(device) for t in batch) with torch.no_grad(): out = model.forward(b_inputs) loss = criterion(out, b_inputs) loss_a.append(loss.detach().cpu().numpy()) #val_batch_loss += loss.item() #val_loss += loss.item() ee.update() ee.close() #return val_loss/total return np.mean(loss_a) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") optimizer = torch.optim.Adam(model.parameters()) criterion = torch.nn.L1Loss(reduction="sum") EPOCHS = 10 EVALUATE_EVERY = 100 optimizer = AdamW( model.parameters(), lr=0.0005, eps=1e-9, betas = (0.9, 0.98) ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0, num_training_steps=len(train_dataloader) * 5 ) model, best_model, log = train( model = model, optimizer = optimizer, criterion = criterion, train_dataloader = train_dataloader, val_dataloader = val_dataloader, device = device, EPOCHS = EPOCHS, EVALUATE_EVERY = EVALUATE_EVERY, evaluate_epoch=True ) torch.save(best_model.state_dict(), "/content/drive/MyDrive/Applied AI #6 Source/Time Series/anomaly_ckpts/best_model.pt") torch.save(model.state_dict(), "/content/drive/MyDrive/Applied AI #6 Source/Time Series/anomaly_ckpts/model.pt") model = LSTMAE(SEQ_LEN, N_FEATURES, device, hidden_dim = 128).to(device) model.load_state_dict(torch.load("/content/drive/MyDrive/Applied AI #6 Source/Time Series/anomaly_ckpts/best_model.pt")) plt.plot(log["train_loss"], label = "train loss", 
color = "black") plt.plot(log["eval_loss"], label = "eval loss", color = "blue") plt.xlabel("Epoch") plt.ylabel("Loss") plt.legend() plt.show() ``` # Evaluation ``` def thresholding(model, criterion, dataset, device): preds, losses = [], [] total = len(dataset) dataset = train_dataset(dataset, 1) with tqdm(total = total) as ee: with torch.no_grad(): for step, batch in enumerate(dataset): model.eval() b_inputs, = tuple(t.to(device) for t in batch) out = model.forward(b_inputs) loss = criterion(out, b_inputs) preds.append([out.detach().cpu().numpy(), b_inputs.cpu().numpy()]) losses.append(loss.detach().cpu().numpy()) ee.update() ee.close() return preds, losses reconstructions, losses = thresholding(model, criterion, train_df, device) sns.distplot(losses, bins=50, kde=True); threshold = np.mean(losses) print(threshold) def torch_classification_report(model, criterion, df_all, device, threshold, print_report = True, disable=False): all_data = TensorDataset(torch.tensor(df_all.iloc[:,:-1].to_numpy()), torch.tensor(df_all["target"].to_numpy())) all_sampler = SequentialSampler(all_data) all_dataloader = DataLoader(all_data, sampler=all_sampler, batch_size=1) total = len(all_dataloader) preds = [] targets = [] with tqdm(total = total, disable=disable) as ee: with torch.no_grad(): for step, batch in enumerate(all_dataloader): model.eval() b_inputs, b_targets = tuple(t.to(device) for t in batch) out = model.forward(b_inputs) loss = criterion(out, b_inputs) targets.append(b_targets.item()) if loss> threshold: preds.append(1) else: preds.append(0) ee.update() ee.close() f1 = f1_score(targets, preds) acc = accuracy_score(targets, preds) if print_report: print(classification_report(targets, preds)) return acc, f1 def density_classification_report(model, criterion, df_all, device, losses, disable): f1s = [] accs = [] for loss in losses: acc, f1 = torch_classification_report( model = model, criterion = criterion, df_all = df_all, device = device, threshold = loss, print_report = 
False, disable = disable ) f1s.append(f1) accs.append(acc) return accs, f1s anomaly_df = anomaly_df.reset_index(drop = True) test_df = test_df.reset_index(drop = True) anomaly_df["target"] = 1 test_df["target"] = 0 all_df = pd.concat([anomaly_df,test_df]).reset_index(drop = True) acc, f1 = torch_classification_report(model, criterion, all_df, device, threshold, disable=False) print(f"Accuracy: {acc}\nF1: {f1}") fig, axs = plt.subplots(1,6,figsize=(30,5)) for i in range(6): axs[i].plot(reconstructions[i][0][0,:]) axs[i].plot(reconstructions[i][1][0,:]) ```
github_jupyter
# Getting started First some dependencies need to be installed. Make sure to restart your runtime after installation. For all map operations we're using OSMnx, check it out if you haven't already: https://github.com/gboeing/osmnx ``` !pip install osmnx ``` # Creating the maps ``` import osmnx as ox from IPython.display import Image import networkx as nx import numpy as np from matplotlib import cm import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap, LinearSegmentedColormap %matplotlib inline ox.config(log_console=True) ox.__version__ ``` ## Choosing a location Selecting the location of your maps is as simple as a Google search. Just add the location to the list. You may add more than one entry to the list; to get a connected street network you should only do that for neighboring cities. ### Tips & Tricks: - This may take a while, especially for large cities - Limit yourself to cities or counties instead of entire states if you don't want to wait forever - If you want to use a box instead of a location, uncomment the first line and use this instead - For smaller cities, you may want to change `network_type='drive'` to `network_type='all'` to add more details to your map ``` # Using a bounding box instead of a place: # G = ox.graph_from_bbox(47.01,47.10,9.5,9.2, retain_all=True, simplify=False, network_type='all') # Using a place places = ["Berlin, Germany"] G = ox.graph_from_place(places, retain_all=True, simplify=True, network_type='drive') # Fallback if roads do not contain any speed info. Adapt to your area speeds = { 'primary': 100, 'secondary': 80, 'track': 10, 'cycleway': 10, 'footpath': 2 } ox.speed.add_edge_speeds(G, hwy_speeds=speeds, fallback=1, precision=1) ox.speed.add_edge_travel_times(G, precision=1) ``` ## Shortest path visualization Now we can compute the shortest paths to a node on our map. If you prefer the maps created by road type skip to that section.
This section calculates the shortest path from every node on the map to an object specified. The roadwith is then adapted depending on how often that strech of road is used to get to get to the object. The more often a strech of road is used, the wider it gets. ### Finding your target The shortest paths to a specified point are computed. This point is specified by an ID set by OPenStreetMap. To find the ID of your target, head over to OpenStreetMaps https://www.openstreetmap.org/ Find the location of your map and right click on the object you want to use as a target, select `Query features`. In the nearby features list, select the road you like. Extend the `Nodes` object and copy the ID any node. It doesn't matter if it's not exactly the node next to your house, the visualization wont be precise enough to distigush that later anyways. Paste the node as node_id: ``` node_id = 8901809522 # Colors used in the visulization. Adapt them to your taste street_color = "#434142" bg_color = "#FDD540" if not node_id in G.nodes(): print("Node not in graph. 
Please select a different node in OpenStreetMap.") # Calculate the shortest paths u = [] v = [] key = [] data = [] for uu, vv, kkey, ddata in G.edges(keys=True, data=True): u.append(uu) v.append(vv) ddata["uses"] = 0 key.append(kkey) data.append(ddata) paths = nx.shortest_path(G, target=8901809522, weight='travel_time') uses_max = 0 # Count how often each road is used as a shortest path for path in paths.items(): path_nodes = path[1] for x in range(len(path_nodes) - 1): G[path_nodes[x]][path_nodes[x+1]][0]['uses'] += 1 if G[path_nodes[x]][path_nodes[x+1]][0]['uses'] > uses_max: uses_max = G[path_nodes[x]][path_nodes[x+1]][0]['uses'] # List to store colors road_colors = [] road_widths = [] for id, item in zip(key, data): linewidth = item["uses"] / uses_max color = street_color road_colors.append(color) road_widths.append(linewidth) road_widths = np.array(road_widths) road_widths = np.tanh(road_widths * 100.0) + 0.003 hsv_modified = cm.get_cmap('hsv', 256) img_folder = "images" extension = "png" fig, ax = ox.plot_graph(G, node_size=0, close=False, dpi=1000, bgcolor=bg_color, save=True, edge_color=road_colors, edge_linewidth=road_widths, edge_alpha=1) fig.tight_layout(pad=0) fig.savefig("Berlin.png", dpi=1000, format="png", transparent=False) # View in full size Image("Berlin.png") ``` ## Highway visualisation If you prefer to plot the width of the highways according to their importance, this might help: ``` street_color = "#434142" bg_color = "#FDD540" # Calculate the shortest paths u = [] v = [] key = [] data = [] for uu, vv, kkey, ddata in G.edges(keys=True, data=True): u.append(uu) v.append(vv) key.append(kkey) data.append(ddata) road_colors = [] road_widths = [] for id, item in zip(key, data): linewidth = item["speed_kph"] color = street_color road_colors.append(color) road_widths.append(linewidth) # Normalize road width by max speed. 
Adapt to the max speed on your map(in kph) road_widths = np.array(road_widths) / 130 # Make highways "pop" more (optional) road_widths = np.power(road_widths, 2) hsv_modified = cm.get_cmap('hsv', 256) img_folder = "images" extension = "png" fig, ax = ox.plot_graph(G, node_size=0, close=False, dpi=1000, bgcolor=bg_color, save=True, edge_color=road_colors, edge_linewidth=road_widths, edge_alpha=1) fig.tight_layout(pad=0) fig.savefig("Berlin.png", dpi=1000, format="png", transparent=False) # View in full size Image("Berlin.png") ```
github_jupyter
# Sparse-Group Lasso Inductive Matrix Completion via ADMM ``` import numpy as np %matplotlib inline import matplotlib.pyplot as plt ``` Fix the random state ``` random_state = np.random.RandomState(0x0BADCAFE) ``` ## Problem? ``` PROBLEM = "classification" if True else "regression" ``` ### Synthetic data ``` assert PROBLEM in ("classification", "regression") ``` Produce a low rank matrix ``` n_samples, n_objects = 19990, 201 n_rank, n_features = 5, 20 n_samples, n_objects = 1990, 2010 n_rank, n_features = 5, 20 n_samples, n_objects = 199, 2010 n_rank, n_features = 5, 20 n_samples, n_objects = 199, 201 n_rank, n_features = 5, 200 n_samples, n_objects = 75550, 40 n_rank, n_features = 5, 20 n_samples, n_objects = 199, 201 n_rank, n_features = 5, 20 n_samples, n_objects = 1990, 2010 n_rank, n_features = 5, 100 n_samples, n_objects = 550, 550 n_rank, n_features = 5, 25 ``` Transform the problem ``` from sgimc.utils import make_imc_data, sparsify X, W_ideal, Y, H_ideal, R_full = make_imc_data( n_samples, n_features, n_objects, n_features, n_rank, scale=(0.05, 0.05), noise=0, binarize=(PROBLEM == "classification"), random_state=random_state) ``` Drop the bulk of the values from $R$ ``` R, mask = sparsify(R_full, 0.10, random_state=random_state) ``` Plot the matrix ``` fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(111, title="The synthetic matrix") ax.imshow(R.todense(), cmap=plt.cm.RdBu, origin="upper") print("Observed entries: %d / %d" % (R.nnz, np.prod(R.shape))) plt.show() ``` # The IMC problem ``` from sgimc import IMCProblem ``` The IMC problem is: $$\begin{aligned} & \underset{W, H}{\text{miminize}} & & \sum_{(i,j)\in \Omega} l(p_{ij}, R_{ij}) + \nu_W \sum_{m=1}^{d_1} \bigl\| W' e_m \bigr\|_2 + \nu_H \sum_{m=1}^{d_2} \bigl\| H' e_m \bigr\|_2 + \mu_W \bigl\| W \bigr\|_1 + \mu_H \bigl\| H \bigr\|_1 \,, \\ & \text{with} & & p_{ij} = e_i'\, X W \, H' Y'\, e_j \,, \end{aligned}$$ where $X \in \mathbb{R}^{n_1 \times d_1}$, $Y \in \mathbb{R}^{n_2 \times d_2}$, $W 
\in \mathbb{R}^{d_1\times k}$ and $H \in \mathbb{R}^{d_2\times k}$. ### Quadratic Approximation The target objective without regularization (holding $H$ fixed) is $$ F(W; H) = \sum_{(i,j)\in \Omega} l(p_{ij}, R_{ij}) \,, $$ in which $p = p(W) = (e_i' X W H' Y' e_j)_{(i,j)\in \Omega}$ are the current predictions. The Quadratic Approximation to $F$ around $W_0$ is $$ Q(W; W_0) = F(W_0) + \nabla F(W_0)' \delta + \frac12 \delta' \nabla^2 F(W_0) \delta \,, $$ for $\delta = \mathtt{vec}(W - W_0)$. Now the gradient of $F$ w.r.t. vec-form of $W$ is $$ \nabla F(W_0) = \mathtt{vec}\bigl( X' g Y H \bigr) \,, $$ with $g = g(W_0) = (l{'}_{p}(p(W_0)_{ij}, R_{ij}))_{(i,j)\in \Omega}$ is $\Omega$-sparse matrix of first-order (gradient) data. For a matrix $D \in \mathbb{R}^{d_1 \times k}$ $$ \nabla^2F(W_0)\, \mathtt{vec}(D) = \mathtt{vec}\Bigl( X' \underbrace{\bigl\{h \odot (X D H'Y')\bigr\}}_{\Omega-\text{sparse}} YH \Bigr) \,, $$ where $h = h(W_0) = (l{''}_{pp}(p(W_0)_{ij}, R_{ij}))_{(i,j)\in \Omega}$ is the $\Omega$-sparse matrix of the second order (hessian) values and $\odot$ is the element-wise matrix product. The quadratic approximation with respect to $H$ around $H_0$ holding $W$ and $\Sigma$ fixed is similar up to transposing $R$ and swapping $X \leftrightarrow Y$ and $W \leftrightarrow H$ in the above formulae. **Note** that although the expressions for the gradient and the hessian-vector product presented above are identical to the fast operations in *section 3.1* of [H. Yu et al. (2014)](http://bigdata.ices.utexas.edu/publication/993/), the fomulae here have been derived independently. In fact, they are obvious products of simple block-matrix and **vech** algebra. The implementation below is, however, completely original (although, nothing special). 
#### Implementation details To compute the gradient and the hessian-vector product we need the following "elementary" operations: * $\mathtt{Op}_d: D \mapsto (e_i' X D H'Y' e_j)_{(i, j)\in \Omega}$ -- a map of some $\mathbb{R}^{d_1\times k}$ dense $D$ to a $\mathbb{R}^{n_1\times n_2}$ $\Omega$-sparse matrix $S$; * $\mathtt{Op}_s: S \mapsto X'S YH$ mapping an $\mathbb{R}^{n_1\times n_2}$ $\Omega$-sparse $S$ to a $\mathbb{R}^{d_1\times k}$ dense matrix $D$. The gradient becomes $$ \nabla F(W_0) = \mathtt{vec}(\mathtt{Op}_s(g)) \,, $$ and the hessian-vector product transforms into $$ \nabla^2 F(W_0)\,\mathtt{vec}(D) = \mathtt{vec}\bigl(\mathtt{Op}_s\bigl(h\odot \mathtt{Op}_d(D)\bigr)\bigr) \,. $$ In fact the predictions $p = p(W_0)$ also form an $\mathbb{R}^{n_1\times n_2}$ $\Omega$-matrix, that cam be computed by $p(W_0) = \mathtt{Op}_d(W_0)$. The gradient $g(W_0)$ and hessian $h(W_0)$ statistics are also $\Omega$-sparse, and can be computed by element-wise application of $l'_p$ and $l''_{pp}$ to $p$. Similar formulae hold for $H$ with appropriate re-labellings and transpositions. **Note** that the sparsity structure remains unchanged and the thin matrix $YH$ can be cached, since both $H$ and $Y$ fit in memory and $k < d_2 \ll n_2$. ``` # from sgimc import op_s, op_d ``` Define the objectives The $l_2$ loss $l(p, t) = \frac12 (p-t)^2$. ``` from sgimc.qa_objective import QAObjectiveL2Loss ``` The log-loss $l(p, t) = \log \bigl(1 + e^{-t p}\bigr)$ for $t\in \{-1, +1\}$ and $p\in \mathbb{R}$. \begin{align} \sigma(x) &= \frac1{1+e^{-x}} \,, \\ \sigma'(x) &= -\frac{- e^{-x}}{(1+e^{-x})^2} = \frac{e^{-x}}{1+e^{-x}} \frac1{1+e^{-x}} = (1-\sigma(x))\,\sigma(x) \,, \\ l(p, t) &= \log \bigl(1 + e^{-t p}\bigr) = \log \bigl(1 + e^{- \lvert p \rvert}\bigr) - \min\bigl\{t p, 0\bigr\} \,, \\ l_p'(p, t) &= \frac{-t e^{-t p}}{1 + e^{-t p}} = -t (1 - \sigma(t p)) \,, \\ l_p''(p, t) &= (1 - \sigma(t p))\sigma(t p) = (1 - \sigma(p))\sigma(p) \,. 
\end{align} ``` from sgimc.qa_objective import QAObjectiveLogLoss ``` Huber loss: $$ l(x; \epsilon) = \begin{cases} \frac12 x^2 & \text{if } \lvert x \rvert \leq \epsilon\,, \\ \epsilon \bigl(\lvert x \rvert - \frac\epsilon2\bigr) & \text{otherwise} \end{cases} \,. $$ Therefore $$ l_p' = \begin{cases} x & \text{if } \lvert x \rvert \leq \epsilon\,, \\ \epsilon \frac{x}{\lvert x \rvert} & \text{otherwise} \end{cases} \,, $$ and $$ l_{pp}'' = \begin{cases} 1 & \text{if } \lvert x \rvert \leq \epsilon\,, \\ 0 & \text{otherwise} \end{cases} \,. $$ ``` from sgimc.qa_objective import QAObjectiveHuberLoss ``` Choose the objective ``` if PROBLEM == "classification": QAObjectiveLoss = QAObjectiveLogLoss else: QAObjectiveLoss = QAObjectiveL2Loss # QAObjectiveHuberLoss problem = IMCProblem(QAObjectiveLoss, X, Y, R, n_threads=4) ``` ### Optimisation Fix $H$ and consider the problem with respect to $W$: $$\begin{aligned} & \underset{W \in \mathbb{R}^{d_1\times k}}{\text{miminize}} & & Q(W; W_0) + \sum_{m=1}^{d_1} \nu_m \bigl\| W' e_m \bigr\|_2 + \mu_m \bigl\| W' e_m \bigr\|_1 + \frac{\kappa_m}2 \bigl\| W' e_m \bigr\|_2^2 \,. \end{aligned}$$ Let's move to an equivalent problem by splitting the variables in the objective, introducing linear consensus constraints and adding $d_1$ ridge-like regularizers (augmenation) $$\begin{aligned} & \underset{Z_m, \delta_m \in \mathbb{R}^{k\times 1}}{\text{miminize}} & & Q(\delta; W_0) + \sum_{m=1}^{d_1} \nu_m \bigl\| Z_m \bigr\|_2 + \mu_m \bigl\| Z_m \bigr\|_1 + \frac{\kappa_m}2 \bigl\| Z_m \bigr\|_2^2 + \frac1{2\eta} \sum_{m=1}^{d_1} \bigl\| \delta_m - (Z_m - W_0'e_m) \bigr\|_2^2 \,, \\ & \text{subject to} & & Z_m - \delta_m = W_0' e_m\,, m=1 \ldots d_1 \,, \end{aligned}$$ with $\sum_{m=1}^{d_1} e_m \delta_m' = \delta$. The objective is convex and the constraints are linear, which means that Strong Duality holds for this problem. 
The lagrangian is \begin{align} \mathcal{L}(Z_m, \delta_m; \lambda_m) &= F(W_0) + \nabla F(W_0)' \mathtt{vec}(\delta) + \frac12 \mathtt{vec}(\delta)' \nabla^2 F(W_0) \mathtt{vec}(\delta) \\ & + \sum_{m=1}^{d_1} \nu_m \bigl\| Z_m \bigr\|_2 + \mu_m \bigl\| Z_m \bigr\|_1 + \frac{\kappa_m}2 \bigl\| Z_m \bigr\|_2^2 \\ & + \frac1\eta \sum_{m=1}^{d_1} \lambda_m'\bigl(\delta_m - (Z_m - W_0'e_m)\bigr) + \frac1{2\eta} \sum_{m=1}^{d_1} \bigl\| \delta_m - (Z_m - W_0'e_m) \bigr\|_2^2 \,. \end{align} Note the following expressions \begin{align} \sum_{m=1}^{d_1} \lambda_m'\bigl(\delta_m - (Z_m - W_0'e_m)\bigr) &= \mathtt{tr}\bigl((\delta - (Z - W_0))\Lambda'\bigr) \,, \\ \sum_{m=1}^{d_1} \bigl\| \delta_m - (Z_m - W_0'e_m) \bigr\|_2^2 &= \Bigl\| \delta - (Z - W_0) \Bigr\|_\text{F}^2 \,, \\ \end{align} where $\Lambda = \sum_{m=1}^{d_1}e_m \lambda_m'$ and $Z = \sum_{m=1}^{d_1}e_m Z_m'$. #### Sub-0 Consider the following subproblem ($\mathtt{Sub}_0^\text{QA}$): $$\begin{aligned} & \underset{\delta \in \mathbb{R}^{d_1\times k}}{\text{miminize}} & & \nabla F(W_0)' \mathtt{vec}(\delta) + \frac12 \mathtt{vec}(\delta)' \nabla^2 F(W_0) \mathtt{vec}(\delta) \\ % & & & + \frac1\eta % \mathtt{tr}\bigl((\delta - (Z - W_0))\Lambda'\bigr) % + \frac1{2\eta} % \Bigl\| \delta - (Z - W_0) \Bigr\|_\text{F}^2 & & & + \frac1{2\eta} \Bigl\| \delta + W_0 - Z + \Lambda \Bigr\|_\text{F}^2 - \frac1{2\eta} \| \Lambda \|_\text{F}^2 \,. \end{aligned}$$ The first-order-conditions for this convex problem w.r.t. $\mathtt{vec}(\delta)$ are $$ \nabla F(W_0) + \nabla^2 F(W_0) \mathtt{vec}(\delta) + \frac1\eta \mathtt{vec}\bigl( \delta - (\underbrace{Z - W_0 - \Lambda}_{D}) \bigr) = 0 \,. $$ Since computing the inverse of the hessian is out of the quiestion, we use Conjugate Gradient method to solve for $\delta$, because it queries the hessian only through matrix-vector priducts, which are efficicnetly computable. 
The map $\mathtt{Sub}_0^\text{QA}(D; \eta)$ returns the $\delta$ which satisfies $$\Bigl( \nabla^2 F(W_0) + \frac1\eta I\Bigr)\mathtt{vec}(\delta) = \frac1\eta \mathtt{vec}\bigl(D \bigr) - \nabla F(W_0) \,. $$ ``` from sgimc.algorithm.admm import sub_0_cg ``` Using a more comprehensive solver, like `L-BFGS` we can tackle the original objective, instead of its Quadratic approximation. Consider the subproblem ($\mathtt{Sub}_0^\text{Orig}$): $$\begin{aligned} & \underset{W \in \mathbb{R}^{d_1\times k}}{\text{miminize}} & & F(W; H) + \frac1{2\eta} \Bigl\|W - Z + \Lambda \Bigr\|_\text{F}^2 - \frac1{2\eta} \| \Lambda \|_\text{F}^2 \,. \end{aligned}$$ The L-BFGS requires the gradient of the final objective: $$ \nabla F(W) + \frac1\eta \mathtt{vec}\bigl( W - (Z - \Lambda) \bigr) \,. $$ ``` from sgimc.algorithm.admm import sub_0_lbfgs ``` #### Sub-m The next set of subproblems is represented by the following problem ($\mathtt{Sub}_m$): $$\begin{aligned} & \underset{Z_m \in \mathbb{R}^{k\times 1}}{\text{miminize}} & & \mu_m \bigl\| Z_m \bigr\|_1 + \nu_m \bigl\| Z_m \bigr\|_2 + \frac{\kappa_m}2 \bigl\| Z_m \bigr\|_2^2 \\ % & & & + \frac1\eta \lambda_m'\bigl(\delta_m - (Z_m - W_0'e_m)\bigr) % + \frac1{2\eta} \bigl\| \delta_m - (Z_m - W_0'e_m) \bigr\|_2^2 & & & + \frac1{2\eta} \bigl\| (\delta_m + W_0'e_m + \lambda_m) - Z_m\bigr\|_2^2 - \frac1{2\eta} \| \lambda_m \|_2^2 \,. \end{aligned}$$ After a **lot of math** this problem admits a closed form solution: $$ Z_m = \frac1{1 + \kappa_m \eta} \biggl(1 - \frac{\nu_m \eta}{\|S(V_m; \mu_m \eta)\|_2}\biggr)_+ S(V_m; \mu_m \eta) \,, $$ where $V_m = \delta_m + W_0'e_m + \lambda_m$ and $$ S(u; \mu_m \eta) = \Bigl(\Bigl(1 - \frac{\mu_m \eta}{\lvert u_i \rvert}\Bigr)_+ u_i\Bigr)_{i=1}^k\,, $$ is the **soft_thresholding** operator. The map $\mathtt{Sub}_m(D; \eta)$ returns $Z_m$ defined above. 
``` from sgimc.algorithm.admm import sub_m ``` #### ADMM Thus the QA-ADMM for $W$ around $W_0$ with $H$ fixed is the follwing iterative procedure: \begin{align} Z^{t+1}_m &= \mathtt{Sub}_m(W^t_m + \lambda^t_m) \,,\, m = 1\ldots d_1 \,,\\ W^{t+1} &= \mathtt{Sub}_0(Z^{t+1} - W_0 - \Lambda^t) + W_0 \,,\\ % W^{t+1} &= \mathtt{Sub}_0(Z^t - W_0 - \Lambda^t) + W_0 \,,\\ % Z^{t+1}_m &= \mathtt{Sub}_m(W^{t+1}_m + \lambda^t_m) \,,\, m = 1\ldots d_1 \,,\\ \Lambda^{t+1} &= \Lambda^t + (W^{t+1} - Z^{t+1})\,,\\ \end{align} where $W^{t+1}_m$ is the $m$-th row of $W^{t+1}$, $Z^{t+1}_m$ is the $m$-th row of $Z^{t+1}$ and $\lambda_m$ is the $m$-th row of $\Lambda$. These iterations necessarily converge to a fixed point, which is the solution of the original optimisation problem. If stopped early, the current values of $W^t$ and $Z^t$ would be close to each other, however $Z^t$ would be sparse and $W^t$ -- dense. Note that we can also consider ADMM with a linear approximation of $F$ w.r.t. $W$ at $W_0$, instead of the quadratic (LA-ADMM). This way the algorithm reduces to prox-gradient descent with step $\eta$. Although it does not utilize the second order infromation, it can be fused with Nesterov's Accelerated gradient. ``` from sgimc.algorithm import admm_step def step_qaadmm(problem, W, H, C, eta, method="l-bfgs", sparse=True, n_iterations=50, rtol=1e-5, atol=1e-8): approx_type = "quadratic" if method in ("cg",) else "linear" Obj = problem.objective(W, H, approx_type=approx_type) return admm_step(Obj, W, C, eta, sparse=sparse, method=method, n_iterations=n_iterations, rtol=rtol, atol=atol) from sgimc.algorithm.decoupled import step as decoupled_step def step_decoupled(problem, W, H, C, eta, rtol=1e-5, atol=1e-8): Obj = problem.objective(W, H, approx_type="linear") return decoupled_step(Obj, W, C, eta, rtol=rtol, atol=atol) ``` Ad-hoc procedure. No guarantees for convergence. 
``` # def step_adhoc(problem, W, H, C, eta, rtol=1e-5, atol=1e-8): # Obj = problem.objective(W, H, approx_type="quadratic") # delta = sub_0_cg(np.zeros_like(W), Obj, eta=eta, tol=1e-8) # return sub_m(delta + W, *C, eta=eta) # def QA_argmin(D, Obj, tol=1e-8): # # set up the CG arguments # x = D.reshape(-1).copy() # b = - Obj.grad().reshape(-1) # Ax = lambda x: Obj.hess_v(x.reshape(D.shape)).reshape(-1) # n_iter = simple_cg(Ax, b, x, tol=tol) # return x.reshape(D.shape) ``` Thus Sparse Group IMC via QA-ADMM is the following iterative procedure: * $W^{t+1} = \mathtt{ADMM}\bigl(W^t; H^t\bigr)$, * $H^{t+1} = \mathtt{ADMM}\bigl(H^t; W^{t+1}\bigr)$, until convergence. ``` from sgimc import imc_descent ``` The loss information: value and regularization on the train data and value of the full matrix. ``` from sgimc.utils import performance ``` ### Illustration ``` step_fn = step_qaadmm # step_fn = step_decoupled ``` $$\bigl(C_\mathtt{lasso}, C_\mathtt{group}, C_\mathtt{ridge}\bigr) = C \,.$$ It seems that it must hold $C_\mathtt{lasso} > C_\mathtt{group}$ so that individual sparsity precedes group sparsity. ``` if PROBLEM == "classification": C = 1e0, 1e-1, 1e-3 eta = 1e0 else: # C = 2e-5, 2e-3, 0 C = 2e-3, 2e-4, 1e-4 # 1e-2 eta = 1e1 if step_fn == step_decoupled: eta = 1e-3 ``` Let's see what the feature coefficients look like. ``` from sgimc.utils import plot_WH, plot_loss ``` Initialization ``` K = 10 # n_rank # K = n_rank W_0 = random_state.normal(size=(X.shape[1], K)) H_0 = random_state.normal(size=(Y.shape[1], K)) # W_0 = W_ideal.copy() # + random_state.normal(scale=0.1, size=(X.shape[1], K)) # H_0 = H_ideal.copy() # + random_state.normal(scale=0.1, size=(Y.shape[1], K)) ``` Now in this experiment the ideal solution is a unit matrix stacked atop a zero matrix.
``` plot_WH(W_ideal, H_ideal) loss_arr, exp_type, norm_type = performance( problem, W_ideal, H_ideal, C, R_full) print("The loss on the initial guess is:") print("%.3e + %.3e -- partial matrix" % (loss_arr[0, -1], loss_arr[1, -1])) print("%.3e -- full matrix" % loss_arr[3, -1]) print("score %.4f" % loss_arr[2, -1]) plt.imshow(np.dot(W_ideal, H_ideal.T)) ``` The initial guess is: ``` plot_WH(W_0, H_0) loss_arr, exp_type, norm_type = performance( problem, W_0, H_0, C, R_full) print("The loss on the initial guess is:") print("%.3e + %.3e -- partial matrix" % (loss_arr[0, -1], loss_arr[1, -1])) print("%.3e -- full matrix" % loss_arr[3, -1]) print("score %.4f" % loss_arr[2, -1]) plt.imshow(np.dot(W_0, H_0.T)) ``` Run! ``` W, H = W_0.copy(), H_0.copy() ``` Setup the parameters of the subalgorithm. ``` step_kwargs = { "C": C, # the regularizr constants (C_lasso, C_group, C_ridge) "eta": eta, # the eta of the ADMM (larger - faster but more unstable) "rtol": 1e-5, # the relative tolerance for stopping the ADMM "atol": 1e-8, # the absolute tolerance "method": "l-bfgs", # the method to use in Sub_0 "n_iterations": 2, # the number of iterations of the inner ADMM } # n_iterations = 2, 10, 25, 50 ``` Run the alternating minimization with ADMM as the sub-algorithm. 
``` W, H = imc_descent(problem, W, H, step_fn, # the inner optimization step_kwargs=step_kwargs, # asrtguments for the inner optimizer n_iterations=1000, # the number of outer iterations (Gauss-Siedel) return_history=True, # Record the evolution of the matrices (W, H) rtol=1e-5, # relative stopping tolerance for the outer iterations atol=1e-7, # absolute tolerance verbose=True, # show the progress bar check_product=True, # use the product W H' for stopping ) ``` Inspect ``` plot_WH(abs(W[..., -1]), abs(H[..., -1])) loss_arr, exp_type, norm_type = performance(problem, W, H, C, R_full) print("The loss on the final estimates is:") print("%.3e + %.3e -- partial matrix" % (loss_arr[0, -1], loss_arr[1, -1])) print("%.3e -- full matrix" % loss_arr[3, -1]) print("score %.4f" % loss_arr[2, -1]) plt.imshow(np.dot(W[..., -1], H[..., -1].T)) fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(111, title="Elementwise loss value") R_hat = problem.prediction(W[..., -1], H[..., -1]) ax.imshow(problem.loss(R_hat, R_full), cmap=plt.cm.hot, origin="upper") plt.show() print(str(np.array(["#", "."])[np.isclose(W[..., -1], 0)*1]).replace("' '", "")) print(str(np.array(["#", "."])[np.isclose(H[..., -1], 0)*1]).replace("' '", "")) np.linalg.norm(W[..., -1], 2, axis=-1) np.linalg.norm(H[..., -1], 2, axis=-1) fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(111) ax.imshow(~ np.isclose(np.dot(W[..., -1], H[..., -1].T), np.dot(W_ideal, H_ideal.T)), cmap=plt.cm.binary_r) plt.show() plt.hist(abs(np.dot(W[..., -1], H[..., -1].T)).reshape(-1), bins=20) ; plt.hist(abs(R_hat).reshape(-1), bins=200) ; from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection='3d', xlabel="col", ylabel="row") ZZ = problem.loss(R_hat, R_full) mesh_ = np.meshgrid(*[np.linspace(0, 1, num=n) for n in ZZ.shape[::-1]]) surf = ax.plot_surface(*mesh_, ZZ, alpha=0.5, lw=0, antialiased=True, cmap=plt.cm.coolwarm) fig.colorbar(surf, shrink=0.5, aspect=10) 
ax.view_init(37, 15) ZZ[~mask].std() ZZ[mask].std() from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection='3d', xlabel="col", ylabel="row") ZZ = np.dot(W[..., -1], H[..., -1].T) mesh_ = np.meshgrid(*[np.linspace(0, 1, num=n) for n in ZZ.shape[::-1]]) surf = ax.plot_surface(*mesh_, ZZ, alpha=0.5, lw=0, antialiased=True, cmap=plt.cm.coolwarm) fig.colorbar(surf, shrink=0.5, aspect=10) ax.view_init(37, 15) plot_loss(loss_arr, exp_type, norm_type, fig_size=4, max_cols=4, yscale="log") ``` <hr/>
github_jupyter
# Advanced Model with BathNormalization & Adam using keras The **conv-block-model** looks like: (the shape will be different) <div align="center"> <img src="images/version2/conv-block.png" height="255" width="1000" /><br> </div> The whole **model** looks like: <div align="center"> <img src="images/version2/model.png" height="248" width="1000" /><br> </div> The **detail model** : <div align="center"> <img src="images/version2/model-params.png" height="400" width="800" /><br> </div> ``` import h5py import numpy as np from keras import layers import keras.backend as K from keras.models import Model import matplotlib.pyplot as plt from keras.callbacks import Callback from keras.layers import Input, Dense, Activation, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D K.set_image_data_format('channels_last') %matplotlib tk ``` ## 1. Build the function of defining model ``` def AdvancedModel(X_shape): X_input = Input(shape=X_shape, name='X_input') # 1st CONV-BLOCK # Conv2D: strides default (1,1), padding default 'valid' # Not use bias but center(offset, or named: 'beta') Z1 = Conv2D(filters=8, kernel_size=(3,3), use_bias=False, kernel_initializer='glorot_uniform', name='conv_1')(X_input) Z2 = BatchNormalization(scale=False, moving_variance_initializer='glorot_uniform', name='bn_2')(Z1) A = Activation('relu', name='relu_3')(Z2) X = AveragePooling2D(pool_size=(3,3), strides=(1,1), name='avg_pool_4')(A) # 2nd CONV-BLOCK Z1 = Conv2D(filters=12, kernel_size=(5,5), use_bias=False, kernel_initializer='glorot_uniform', name='conv_5')(X) Z2 = BatchNormalization(scale=False, moving_variance_initializer='glorot_uniform', name='bn_6')(Z1) A = Activation('relu', name='relu_7')(Z2) X = AveragePooling2D(pool_size=(5,5), strides=(1,1), name='avg_pool_8')(A) # 3rd CONV-BLOCK Z1 = Conv2D(filters=16, kernel_size=(5,5), use_bias=False, kernel_initializer='glorot_uniform', name='conv_9')(X) Z2 = BatchNormalization(scale=False, moving_variance_initializer='glorot_uniform', 
name='bn_10')(Z1) A = Activation('relu', name='relu_11')(Z2) X = AveragePooling2D(pool_size=(5,5), strides=(1,1), name='avg_pool_12')(A) # 4th CONV-BLOCK Z1 = Conv2D(filters=20, kernel_size=(5,5), use_bias=False, kernel_initializer='glorot_uniform', name='conv_13')(X) Z2 = BatchNormalization(scale=False, moving_variance_initializer='glorot_uniform', name='bn_14')(Z1) A = Activation('relu', name='relu_15')(Z2) X = AveragePooling2D(pool_size=(5,5), strides=(1,1), name='avg_pool_16')(A) # flatten X = Flatten()(X) # 5th NN-BLOCK Z1 = Dense(units=128, use_bias=False, kernel_initializer='glorot_uniform', name='fc_17')(X) Z2 = BatchNormalization(scale=False, moving_variance_initializer='glorot_uniform', name='bn_18')(Z1) X = Activation('relu', name='relu_19')(Z2) # 6th NN-BLOCK Z1 = Dense(units=64, use_bias=False, kernel_initializer='glorot_uniform', name='fc_20')(X) Z2 = BatchNormalization(scale=False, moving_variance_initializer='glorot_uniform', name='bn_21')(Z1) X = Activation('relu', name='relu_22')(Z2) # 7th NN-BLOCK Z1 = Dense(units=32, use_bias=False, kernel_initializer='glorot_uniform', name='fc_23')(X) Z2 = BatchNormalization(scale=False, moving_variance_initializer='glorot_uniform', name='bn_24')(Z1) X = Activation('relu', name='relu_25')(Z2) # 8th NN-BLOCK Y = Dense(units=1, activation='sigmoid', kernel_initializer='glorot_uniform', name='fc_26')(X) model = Model(inputs = X_input, outputs = Y, name='AdvancedModel') return model ``` ## 2. Create a callback to record the losses during training. ``` class LossRecorder(Callback): def on_train_begin(self, logs={}): self.losses = [] def on_batch_end(self, batch, logs={}): self.losses.append(logs.get('loss')) ``` ## 3. Create a model ``` model = AdvancedModel(X_shape=(32,32,3)) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) ``` ## 4. 
Load the dataset ``` with h5py.File('datasets/car-v2.h5', 'r', driver='core') as f: DIVIDE = 6400 X_test, Y_test = f['X'][DIVIDE:], f['Y'][DIVIDE:].reshape(-1, 1) X_train, Y_train = f['X'][:DIVIDE], f['Y'][:DIVIDE].reshape(-1, 1) print('X_shape: ', X_train.shape) print('Y_shape: ', Y_train.shape) loss_recorder = LossRecorder() ``` ## 5. OK, it's time to train the model! ``` model.fit(X_train, Y_train, batch_size = 32, epochs = 10, callbacks=[loss_recorder]) ``` ## 6. Evaluate the performance of model ``` preds = model.evaluate(X_test, Y_test) print ("Final Loss \t = " + str(preds[0])) print ("Test Accuracy \t = " + str(preds[1])) ``` The loss and accuracy are both little worse than training. But it's still ok. ## 7. Now, let's show the loss. ``` num = len(loss_recorder.losses) plt.plot(range(num), loss_recorder.losses) plt.xlabel('number_of_batch') plt.ylabel('loss') plt.show() ``` <div align="center"> <img src="images/version2/loss.png" height="500" width="1000" /><br> </div> ## 8. Check the model archtecture. ``` model.summary() ``` **Also can save the model.** ``` with open('model.txt','w') as fh: # Pass the file handle in as a lambda function to make it callable model.summary(print_fn=lambda x: fh.write(x + '\n')) ``` **save the weights** ``` model.save_weights('trained_params/weights_v2.h5') ```
github_jupyter
# Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) hand-written digit database. The process will be broken down into the following steps: >1. Load and visualize the data 2. Define a neural network 3. Train the model 4. Evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch. ``` # import libraries import torch import numpy as np ``` --- ## Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html) Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets. ``` from torchvision import datasets import torchvision.transforms as transforms # number of subprocesses to use for data loading num_workers = 0 # how many samples per batch to load batch_size = 20 # convert data to torch.FloatTensor transform = transforms.ToTensor() # choose the training and test datasets train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform) test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform) # prepare data loaders train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers) ``` ### Visualize a Batch of Training Data The first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data. 
``` import matplotlib.pyplot as plt %matplotlib inline # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(20): ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) ax.imshow(np.squeeze(images[idx]), cmap='gray') # print out the correct label for each image # .item() gets the value contained in a Tensor ax.set_title(str(labels[idx].item())) ``` ### View an Image in More Detail ``` img = np.squeeze(images[1]) fig = plt.figure(figsize = (12,12)) ax = fig.add_subplot(111) ax.imshow(img, cmap='gray') width, height = img.shape thresh = img.max()/2.5 for x in range(width): for y in range(height): val = round(img[x][y],2) if img[x][y] !=0 else 0 ax.annotate(str(val), xy=(y,x), horizontalalignment='center', verticalalignment='center', color='white' if img[x][y]<thresh else 'black') ``` --- ## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting. 
``` import torch.nn as nn import torch.nn.functional as F # define the NN architecture class Net(nn.Module): def __init__(self): super(Net, self).__init__() # number of hidden nodes in each layer (512) hidden_1 = 512 hidden_2 = 512 # linear layer (784 -> hidden_1) self.fc1 = nn.Linear(28 * 28, hidden_1) # linear layer (n_hidden -> hidden_2) self.fc2 = nn.Linear(hidden_1, hidden_2) # linear layer (n_hidden -> 10) self.fc3 = nn.Linear(hidden_2, 10) # dropout layer (p=0.2) # dropout prevents overfitting of data self.dropout = nn.Dropout(0.2) def forward(self, x): # flatten image input x = x.view(-1, 28 * 28) # add hidden layer, with relu activation function x = F.relu(self.fc1(x)) # add dropout layer x = self.dropout(x) # add hidden layer, with relu activation function x = F.relu(self.fc2(x)) # add dropout layer x = self.dropout(x) # add output layer x = self.fc3(x) return x # initialize the NN model = Net() print(model) ``` ### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax funtion to the output layer *and* then calculates the log loss. ``` # specify loss function (categorical cross-entropy) criterion = nn.CrossEntropyLoss() # specify optimizer (stochastic gradient descent) and learning rate = 0.01 optimizer = torch.optim.SGD(model.parameters(), lr=0.01) ``` --- ## Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. 
Update average training loss The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data. ``` # number of epochs to train the model n_epochs = 50 model.train() # prep model for training for epoch in range(n_epochs): # monitor training loss train_loss = 0.0 ################### # train the model # ################### for data, target in train_loader: # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update running training loss train_loss += loss.item()*data.size(0) # print training statistics # calculate average loss over an epoch train_loss = train_loss/len(train_loader.dataset) print('Epoch: {} \tTraining Loss: {:.6f}'.format( epoch+1, train_loss )) ``` --- ## Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate it's performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class as well as looking at its overall loss and accuracy. ``` # initialize lists to monitor test loss and accuracy test_loss = 0.0 class_correct = list(0. for i in range(10)) class_total = list(0. 
for i in range(10)) model.eval() # prep model for evaluation for data, target in test_loader: # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update test loss test_loss += loss.item()*data.size(0) # convert output probabilities to predicted class _, pred = torch.max(output, 1) # compare predictions to true label correct = np.squeeze(pred.eq(target.data.view_as(pred))) # calculate test accuracy for each object class for i in range(batch_size): label = target.data[i] class_correct[label] += correct[i].item() class_total[label] += 1 # calculate and print avg test loss test_loss = test_loss/len(test_loader.dataset) print('Test Loss: {:.6f}\n'.format(test_loss)) for i in range(10): if class_total[i] > 0: print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % ( str(i), 100 * class_correct[i] / class_total[i], np.sum(class_correct[i]), np.sum(class_total[i]))) else: print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i])) print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % ( 100. * np.sum(class_correct) / np.sum(class_total), np.sum(class_correct), np.sum(class_total))) ``` ### Visualize Sample Test Results This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions. 
``` # obtain one batch of test images dataiter = iter(test_loader) images, labels = dataiter.next() # get sample outputs output = model(images) # convert output probabilities to predicted class _, preds = torch.max(output, 1) # prep images for display images = images.numpy() # plot the images in the batch, along with predicted and true labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(20): ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) ax.imshow(np.squeeze(images[idx]), cmap='gray') ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())), color=("green" if preds[idx]==labels[idx] else "red")) ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Effect-of-Economic,-Social,-and-Cultural-Opportunity-on-Academic-Performance" data-toc-modified-id="Effect-of-Economic,-Social,-and-Cultural-Opportunity-on-Academic-Performance-1">Effect of Economic, Social, and Cultural Opportunity on Academic Performance</a></span><ul class="toc-item"><li><span><a href="#by-Ken-Norton" data-toc-modified-id="by-Ken-Norton-1.1">by Ken Norton</a></span></li></ul></li><li><span><a href="#Investigation-Overview" data-toc-modified-id="Investigation-Overview-2">Investigation Overview</a></span><ul class="toc-item"><li><span><a href="#Dataset-Overview" data-toc-modified-id="Dataset-Overview-2.1">Dataset Overview</a></span></li><li><span><a href="#Countries-with-the-most-disadvantaged-students" data-toc-modified-id="Countries-with-the-most-disadvantaged-students-2.2">Countries with the most disadvantaged students</a></span></li><li><span><a href="#Countries-with-the-most-advantaged-students" data-toc-modified-id="Countries-with-the-most-advantaged-students-2.3">Countries with the most advantaged students</a></span></li><li><span><a href="#Nations-with-the-highest-economic,-social,-and-cultural-status" data-toc-modified-id="Nations-with-the-highest-economic,-social,-and-cultural-status-2.4">Nations with the highest economic, social, and cultural status</a></span></li><li><span><a href="#Nations-with-the-lowest-economic,-social,-and-cultural-status" data-toc-modified-id="Nations-with-the-lowest-economic,-social,-and-cultural-status-2.5">Nations with the lowest economic, social, and cultural status</a></span></li><li><span><a href="#Overall-Literacy-Scores-by-Opportunity" data-toc-modified-id="Overall-Literacy-Scores-by-Opportunity-2.6">Overall Literacy Scores by Opportunity</a></span></li></ul></li></ul></div> # Effect of Economic, Social, and Cultural Opportunity on Academic Performance ## by Ken Norton # Investigation Overview 
In this investigation, I wanted to look at how economic, social, and cultural opportunity affects academic performance worldwide. ## Dataset Overview Around 510,000 students in 65 economies took part in the PISA 2012 assessment of reading, mathematics and science. The dataset included more than 600 dimensions, including an index of how the student's economic, social, and cultural status compared to the broader population. ``` # import all packages and set plots to be embedded inline import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline # suppress warnings from final output import warnings warnings.simplefilter("ignore") %config InlineBackend.figure_format = 'retina' %matplotlib inline plt.style.use('fivethirtyeight') plt.style.use('seaborn-poster') # load in the dataset into a pandas dataframe df = pd.read_csv('data/pisa2012_clean.csv') df_clean = df.copy() ``` ## Countries with the most disadvantaged students Several countries had > 50% of their students from disadvantaged backgrounds. Indonesia, Turkey, Peru, Brazil, and Thailand had the highest percentage of disadvantaged students. ``` fig, ax = plt.subplots() fig.set_size_inches(8.5, 11) dt = (df_clean.query('disadvantaged == 1').groupby('country')['student_id'].count() / df_clean.groupby('country')['student_id'].count()).sort_values() * 100 dt.dropna().plot(kind="barh", fontsize=11) ax.set_ylabel("Country", fontsize=14) ax.set_xlabel("Percentage of Students Disadvantaged", fontsize=14) plt.show() ``` ## Countries with the most advantaged students On the other hand, Iceland had by far the largest percentage of advantaged students. No other country had more than 30%. Canada, Qatar, Norway, and Finland rounded out the top five. 
``` fig, ax = plt.subplots() fig.set_size_inches(8.5, 11) dt = (df_clean.query('advantaged == 1').groupby('country')['student_id'].count() / df_clean.groupby('country')['student_id'].count()).sort_values() * 100 dt.dropna().plot(kind="barh", fontsize=11) ax.set_ylabel("Country", fontsize=14) ax.set_xlabel("Percentage of Students Advantaged", fontsize=14) plt.show() ``` ## Nations with the highest economic, social, and cultural status When we look at the overall economic, social, and cultural status for each nation, we can see that the top countries are primarily free nations on the _Freedom House_ Civil Liberties scale. ``` cnt_sort = df_clean.groupby('country')['ESCS'].mean().sort_values( ascending=False).dropna().head(10) fig, ax = plt.subplots() fig.set_size_inches(11, 8.5) sns.boxplot(data=df_clean, y='country', x='ESCS', order=cnt_sort.index.get_level_values('country'), dodge=False, palette='RdBu_r', hue='civil_liberties') ax.set_ylabel("Country", fontsize=12) ax.set_xlabel("ESCS (0=average, +/- stdev)", fontsize=12) legend = ax.legend(loc='best', title_fontsize=10).set_title( 'Civil Liberties\n(1=most free,\n6=least free)') ax.tick_params(labelsize=13) plt.show() ``` ## Nations with the lowest economic, social, and cultural status The bottom countries score worse on the _Freedom House_ Civil Liberties scale. Costa Rica is the only country in the bottom ten that scores 1 (most free) on this index. We found that freer countries perform better on the overall academic literacy score than less free countries (p=0.00). 
``` cnt_sort = df_clean.groupby('country')['ESCS'].mean().sort_values( ascending=False).dropna().tail(10) fig, ax = plt.subplots() fig.set_size_inches(11, 8.5) sns.boxplot(data=df_clean, y='country', x='ESCS', order=cnt_sort.index.get_level_values('country'), dodge=False, palette='RdBu_r', hue='civil_liberties') ax.set_ylabel("Country", fontsize=12) ax.set_xlabel("ESCS (0=average, +/- stdev)", fontsize=12) legend = ax.legend(loc='best', title_fontsize=10).set_title( 'Civil Liberties\n(1=most free,\n6=least free)') ax.tick_params(labelsize=13) plt.show() ``` ## Overall Literacy Scores by Opportunity We defined children to be "advantaged" if they were +1 or higher standard deviations above the mean in opportunity. They are "disadvantaged" if the are -1 or lower standard deviations below the mean. We can see that children who are advantaged perform better in overall academic literacy and children that are disadvantaged perform worse. This effect is statistically significant (p=0.00). ``` df1 = pd.DataFrame(df.query('disadvantaged == 1'), columns=['overall_literacy']).assign(opp='Disadvantaged') df2 = pd.DataFrame(df_clean.query('advantaged == 0 and disadvantaged == 0'), columns=['overall_literacy' ]).assign(opp='Neither Advantaged\nnor Disadvantaged') df3 = pd.DataFrame(df_clean.query('advantaged == 1'), columns=['overall_literacy']).assign(opp='Advantaged') cdf = pd.concat([df1, df2, df3]) mdf = pd.melt(cdf, id_vars=['opp'], var_name=['overall_literacy']) fig, ax = plt.subplots() fig.set_size_inches(11, 8.5) ax = sns.violinplot(x='opp', y='value', data=mdf, palette='RdBu') ax.set_xlabel(None) ax.set_ylabel("Overall Literacy") ax.set_title("Overall Literacy by Opportunity") plt.show() ``` > Once you're ready to finish your presentation, check your output by using nbconvert to export the notebook and set up a server for the slides. 
From the terminal or command line, use the following expression: > > `jupyter nbconvert <file_name>.ipynb --to slides --post serve --template output_toggle` > This should open a tab in your web browser where you can scroll through your presentation. Sub-slides can be accessed by pressing 'down' when viewing its parent slide. Make sure you remove all of the quote-formatted guide notes like this one before you finish your presentation!
github_jupyter
# Mask Dataset. To make the training mask dataset we have used the approximation proposed here https://spark-in.me/post/playing-with-dwt-and-ds-bowl-2018. For that we have transformed each mask into an image with 3 different channels. 1. Binnary mask 2. Border mask 3. Energy mask 4. Eroded mask 1 iter 5. Eroded mask 3 iter 6. Eroded mask 7 iter For the script to work, the original kaggle train.csv must be in the `data/raw/internal/` folder and the 'data/interim/train_mask/images/' directory must exist Al new images and object are saved in `data/interim/train_mask/` and `data/interim/train_mask/images/` All images are saved and called by its asociated train image ID ``` import pickle import get_root from src.utils import rle_utils as rle from scipy import sparse import sys import numpy as np import pandas as pd import collections from skimage.morphology import thin from PIL import Image from skimage.segmentation.boundaries import find_boundaries from scipy import ndimage from numba import jit import os import matplotlib.pyplot as plt from scipy import sparse import sys DATA_RAW_INTERNAL_DIR = '../../data/raw/internal/' DATA_INTERIM_MASK_DIR = '../../data/interim/train_mask/images/' TRAIN_DIR = os.path.join(DATA_RAW_INTERNAL_DIR, "train") TEST_DIR = os.path.join(DATA_RAW_INTERNAL_DIR, "test") TRAIN_CSV = os.path.join(DATA_RAW_INTERNAL_DIR, "train.csv") pd.read_csv(TRAIN_CSV).head() def preprocess(image_dir: str, df_path: str, path: str, width: int = 520, height: int = 704) -> None: """ Script for transform individual data masks into 3-channels image 1 - Binnary mask 2 - Border mask 3 - Energy mask 4 - Eroded mask 1 iter 5 - Eroded mask 3 iter 6 - Eroded mask 7 iter Will add more in the future And save it in path :param df: path to csv with all image's ids and it's respectives rle's masks :param path: Path to save masks """ df = pd.read_csv(df_path) image_info = collections.defaultdict(dict) temp_df = df.groupby('id')['annotation'].agg(lambda x: list(x)).reset_index() 
for index, row in temp_df.iterrows(): image_info[index] = { 'image_id': row['id'], 'image_path': os.path.join(image_dir, row['id'] + '.png'), 'annotations': row["annotation"] } total = len(image_info) # Iterate over each image: for idx in range(total): img_path = image_info[idx]["image_path"] img = np.array(Image.open(img_path), dtype=np.int16) img = img[:, :, None] info = image_info[idx] mask = np.zeros((len(info['annotations']), width, height), dtype=np.uint8) labels = [] for m, annotation in enumerate(info['annotations']): sub_mask = rle.decode(annotation, (width, height)) sub_mask = Image.fromarray(sub_mask) sub_mask = np.array(sub_mask) > 0 mask[m, :, :] = sub_mask labels.append(1) num_objs = len(labels) new_labels = [] new_masks = [] for i in range(num_objs): try: new_labels.append(labels[i]) new_masks.append(mask[i, :, :]) except ValueError: print("Error in xmax xmin") pass nmx = np.zeros((len(new_masks), width, height), dtype=np.uint8) for i, n in enumerate(new_masks): nmx[i, :, :] = n img_mask = np.maximum.reduce(nmx) img_bound = boundaries(nmx) img_ener = energy(nmx) img_ero1 = eroded(nmx, 1) img_ero3 = eroded(nmx, 3) img_ero7 = eroded(nmx, 7) print('DONE[X] ' + str(image_info[idx]["image_id"])+ '.npy' + ' --------> ' + str(idx + 1) + '/' + str(total)) print() np.save(path + str(image_info[idx]["image_id"])+ '.npy', np.dstack(( img_mask, img_bound, img_ener, img_ero1, img_ero3, img_ero7 ))) def boundaries(nmx): return np.maximum.reduce( [find_boundaries(i) for i in nmx] ) def energy(nmx): return np.maximum.reduce( [ndimage.distance_transform_edt(i) for i in nmx] ) def eroded(nmx, pixl): return np.maximum.reduce( [thin(i,pixl) for i in nmx] ) preprocess(TRAIN_DIR, TRAIN_CSV, DATA_INTERIM_MASK_DIR) ``` ## Saving All Images: Another option is to save all previously created and stored images as a single numpy object. We are going to use two different approaches: 1. Save all images as numpy arrays 2. 
Save all the images as sparse matrix and then make them dense to operate on them The first approach is faster but the object takes up much more space (5 gb) while the second option is slower but the object takes up less space (800mb) The objects are python dict(), the key is the asociated image ID and the value is the matrix/array ``` # dict of numpy arrays mascaras = dict() for l in os.listdir(DATA_INTERIM_MASK_DIR): if 'npy' in l: mascaras[l[:-4]] = np.load(DATA_INTERIM_MASK_DIR+l, allow_pickle=True) # dict of sparse matrix mascaras_ = dict() for l in os.listdir(DATA_INTERIM_MASK_DIR): if 'npy' in l: mask = np.load(DATA_INTERIM_MASK_DIR+l, allow_pickle=True) mask_ = [] for i in range(6): mask_.append(sparse.csr_matrix(mask[:,:,i])) mascaras_[l[:-4]] = mask_ # Save list of numpy arrays with open("../../data/interim/train_mask/npy_mask.pkl", "wb") as output_file: pickle.dump(mascaras, output_file) # Save list of sparse arrays with open("../../data/interim/train_mask/sparse_mask.pkl", "wb") as output_file: pickle.dump(mascaras_, output_file) ``` ## Testing Datasets ``` with open("../../data/interim/train_mask/sparse_mask.pkl", 'rb') as f: msk_ = pickle.load(f) with open("../../data/interim/train_mask/npy_mask.pkl", 'rb') as f: mascaras = pickle.load(f) plt.imshow(msk_['f8902ee8890c'][0].toarray()) plt.imshow(mascaras['f8902ee8890c'][:,:,5]) ``` ## Testing Times sparse is ~400x times slower than numpy ``` from datetime import datetime s = datetime.now() for i in range(100000): a = msk_['f8902ee8890c'][1].toarray() print(datetime.now() - s) from datetime import datetime s = datetime.now() for i in range(100000): a = mascaras['f8902ee8890c'][:,:,2] print(datetime.now() - s) def preprocess2(image_dir: str, df_path: str, path: str, width: int = 520, height: int = 704) -> None: """ Script for transform individual data masks into 3-channels image 1 - Binnary mask 2 - Border mask 3 - Energy mask 4 - Eroded mask 1 iter 5 - Eroded mask 3 iter 6 - Eroded mask 7 iter Will add 
more in the future And save it in path :param df: path to csv with all image's ids and it's respectives rle's masks :param path: Path to save masks """ df = pd.read_csv(df_path) image_info = collections.defaultdict(dict) temp_df = df.groupby('id')['annotation'].agg(lambda x: list(x)).reset_index() for index, row in temp_df.iterrows(): image_info[index] = { 'image_id': row['id'], 'image_path': os.path.join(image_dir, row['id'] + '.png'), 'annotations': row["annotation"] } total = len(image_info) # Iterate over each image: for idx in range(total): img_path = image_info[idx]["image_path"] img = np.array(Image.open(img_path), dtype=np.int16) img = img[:, :, None] info = image_info[idx] mask = np.zeros((len(info['annotations']), width, height), dtype=np.uint8) labels = [] for m, annotation in enumerate(info['annotations']): sub_mask = rle.decode(annotation, (width, height)) sub_mask = Image.fromarray(sub_mask) sub_mask = np.array(sub_mask) > 0 mask[m, :, :] = sub_mask labels.append(1) num_objs = len(labels) new_labels = [] new_masks = [] for i in range(num_objs): try: new_labels.append(labels[i]) new_masks.append(mask[i, :, :]) except ValueError: print("Error in xmax xmin") pass nmx = np.zeros((len(new_masks), width, height), dtype=np.uint8) for i, n in enumerate(new_masks): nmx[i, :, :] = n img_mask = np.maximum.reduce(nmx) img_bound = boundaries(nmx) img_ener = energy(nmx) img_ero1 = eroded(nmx, 1) img_ero3 = eroded(nmx, 3) img_ero7 = eroded(nmx, 7) print('DONE[X] ' + str(image_info[idx]["image_id"])+ '.npy' + ' --------> ' + str(idx + 1) + '/' + str(total)) print() return np.dstack(( img_mask, img_bound, img_ener, img_ero1, img_ero3, img_ero7 )) def boundaries(nmx): return np.maximum.reduce( [find_boundaries(i) for i in nmx] ) def energy(nmx): return np.maximum.reduce( [ndimage.distance_transform_edt(i) for i in nmx] ) def eroded(nmx, pixl): return np.maximum.reduce( [thin(i,pixl) for i in nmx] ) a = preprocess2(TRAIN_DIR, TRAIN_CSV, DATA_INTERIM_MASK_DIR) 
plt.imshow(a[:,:,5]) ```
github_jupyter
# **Intruder Detection and Tracking** ## Setup Google Drive ``` from google.colab import drive drive.mount('/content/gdrive') !cp -r /content/gdrive/My\ Drive/Academic/Person/All/Demo /content/ !cp -r /content/gdrive/My\ Drive/Academic/Person/All/test /content/ !cp -r /content/gdrive/My\ Drive/Academic/Person/All/yolo-coco /content/ !cp -r /content/gdrive/My\ Drive/Academic/Person/All/Person_reID_baseline_pytorch /content/ !unzip /content/gdrive/My\ Drive/Academic/Person/Dataset.zip -d /content/Person_reID_baseline_pytorch ``` ## Install ``` %%bash pip3 install pretrainedmodels pip install pyairmore ``` ## Communication ``` from ipaddress import IPv4Address from pyairmore.request import AirmoreSession from pyairmore.services.messaging import MessagingService c = "192.168.0.197" ip = IPv4Address(c) s = AirmoreSession(ip) print("Running:", s.is_server_running) wa = s.request_authorization() print("Authorization:",wa) service = MessagingService(s) ``` ## Importing Libraries ``` import numpy import random import numpy as npy import matplotlib.pyplot as plt import os import time import cv2 from shutil import copyfile import argparse import scipy.io import torch import numpy as np from torchvision import datasets import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt import PIL from PIL import Image from IPython.display import Image as image1 ``` ## Prepare - Market1501 ``` %%bash cd /content/Person_reID_baseline_pytorch/Market mkdir pytorch download_path = '/content/Person_reID_baseline_pytorch/Market' if not os.path.isdir(download_path): print('please change the download_path') save_path = download_path + '/pytorch' if not os.path.isdir(save_path): os.mkdir(save_path) query_path = download_path + '/query' query_save_path = download_path + '/pytorch/query' if not os.path.isdir(query_save_path): os.mkdir(query_save_path) for root, dirs, files in os.walk(query_path, topdown=True): for name in files: if not name[-3:]=='jpg': continue ID = name.split('_') 
src_path = query_path + '/' + name dst_path = query_save_path + '/' + ID[0] if not os.path.isdir(dst_path): os.mkdir(dst_path) copyfile(src_path, dst_path + '/' + name) query_path = download_path + '/gt_bbox' if os.path.isdir(query_path): query_save_path = download_path + '/pytorch/multi-query' if not os.path.isdir(query_save_path): os.mkdir(query_save_path) for root, dirs, files in os.walk(query_path, topdown=True): for name in files: if not name[-3:]=='jpg': continue ID = name.split('_') src_path = query_path + '/' + name dst_path = query_save_path + '/' + ID[0] if not os.path.isdir(dst_path): os.mkdir(dst_path) copyfile(src_path, dst_path + '/' + name) gallery_path = download_path + '/bounding_box_test' gallery_save_path = download_path + '/pytorch/gallery' if not os.path.isdir(gallery_save_path): os.mkdir(gallery_save_path) for root, dirs, files in os.walk(gallery_path, topdown=True): for name in files: if not name[-3:]=='jpg': continue ID = name.split('_') src_path = gallery_path + '/' + name dst_path = gallery_save_path + '/' + ID[0] if not os.path.isdir(dst_path): os.mkdir(dst_path) copyfile(src_path, dst_path + '/' + name) train_path = download_path + '/bounding_box_train' train_save_path = download_path + '/pytorch/train_all' if not os.path.isdir(train_save_path): os.mkdir(train_save_path) for root, dirs, files in os.walk(train_path, topdown=True): for name in files: if not name[-3:]=='jpg': continue ID = name.split('_') src_path = train_path + '/' + name dst_path = train_save_path + '/' + ID[0] if not os.path.isdir(dst_path): os.mkdir(dst_path) copyfile(src_path, dst_path + '/' + name) train_path = download_path + '/bounding_box_train' train_save_path = download_path + '/pytorch/train' val_save_path = download_path + '/pytorch/val' if not os.path.isdir(train_save_path): os.mkdir(train_save_path) os.mkdir(val_save_path) for root, dirs, files in os.walk(train_path, topdown=True): for name in files: if not name[-3:]=='jpg': continue ID = name.split('_') 
src_path = train_path + '/' + name dst_path = train_save_path + '/' + ID[0] if not os.path.isdir(dst_path): os.mkdir(dst_path) dst_path = val_save_path + '/' + ID[0] #first image is used as val image os.mkdir(dst_path) copyfile(src_path, dst_path + '/' + name) ``` ## Demo ``` %%bash # cd /content/Person_reID_baseline_pytorch # python3 test.py --gpu_ids 0 --name ft_ResNet50 --test_dir "./Market/pytorch" --batchsize 32 --which_epoch 59 cd /content/Person_reID_baseline_pytorch/ python3 demo.py --test_dir "./Market/pytorch" import cv2 from google.colab.patches import cv2_imshow # /Market/pytorch/gallery/0342/0342_c1s2_006291_02.jpg img = cv2.imread('/content/Person_reID_baseline_pytorch/Market/pytorch/gallery/0252/0252_c6s1_058251_02.jpg', cv2.IMREAD_UNCHANGED) cv2_imshow(img) # cv2.imshow(cv2.imread('', cv2.IMREAD_UNCHANGED)) ``` ## Prepare - Test Data ``` download_path = '/content/test' gallery_path = download_path + '/crops' gallery_save_path = '/content/Person_reID_baseline_pytorch/Market/pytorch/gallery' if not os.path.isdir(gallery_save_path): os.mkdir(gallery_save_path) for root, dirs, files in os.walk(gallery_path, topdown=True): for name in files: if not name[-3:]=='jpg': continue ID = name.split('_') src_path = gallery_path + '/' + name dst_path = gallery_save_path + '/' + ID[0] if not os.path.isdir(dst_path): os.mkdir(dst_path) copyfile(src_path, dst_path + '/' + name) !rm -r "/content/Person_reID_baseline_pytorch/Market/pytorch/query/1502" # download_path = '/content/test' # query_path = download_path + '/crops' # query_save_path = download_path + '/pytorch/query' # if not os.path.isdir(query_save_path): # os.mkdir(query_save_path) # for root, dirs, files in os.walk(query_path, topdown=True): # for name in files: # if not name[-3:]=='jpg': # continue # ID = name.split('_') # src_path = query_path + '/' + name # dst_path = query_save_path + '/' + ID[0] # if not os.path.isdir(dst_path): # os.mkdir(dst_path) # copyfile(src_path, dst_path + '/' + name) ``` ## 
Produce Result file ``` %%bash cd /content/Person_reID_baseline_pytorch python3 test.py --gpu_ids 0 --name ft_ResNet50 --test_dir "./Market/pytorch" --batchsize 32 --which_epoch 59 ``` ## Confirmation ``` def imshow(path, title=None): """Imshow for Tensor.""" im = plt.imread(path) plt.imshow(im) if title is not None: plt.title(title) plt.pause(0.001) def sort_img(qf, ql, qc, gf, gl, gc): query = qf.view(-1,1) # print(query.shape) score = torch.mm(gf,query) score = score.squeeze(1).cpu() score = score.numpy() # predict index index = np.argsort(score) #from small to large index = index[::-1] # index = index[0:2000] # good index query_index = np.argwhere(gl==ql) #same camera camera_index = np.argwhere(gc==qc) #good_index = np.setdiff1d(query_index, camera_index, assume_unique=True) junk_index1 = np.argwhere(gl==-1) junk_index2 = np.intersect1d(query_index, camera_index) junk_index = np.append(junk_index2, junk_index1) mask = np.in1d(index, junk_index, invert=True) index = index[mask] return index def prepare_test(): # /content/Person_reID_baseline_pytorch/Market/pytorch/gallery # /content/Person_reID_baseline_pytorch/Market/pytorch/query # /content/test/pytorch/gallery # /content/test/pytorch/query image_datasets = {'gallery': datasets.ImageFolder('/content/Person_reID_baseline_pytorch/Market/pytorch/gallery'),'query':datasets.ImageFolder('/content/Person_reID_baseline_pytorch/Market/pytorch/query')} result = scipy.io.loadmat('/content/Person_reID_baseline_pytorch/pytorch_result.mat') query_feature = torch.FloatTensor(result['query_f']) query_cam = result['query_cam'][0] query_label = result['query_label'][0] gallery_feature = torch.FloatTensor(result['gallery_f']) gallery_cam = result['gallery_cam'][0] gallery_label = result['gallery_label'][0] query_feature = query_feature.cuda() gallery_feature = gallery_feature.cuda() i = 3368 index = sort_img(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam) query_path, _ = 
image_datasets['query'].imgs[i] query_label = query_label[i] print(query_path) print('Top 10 images are as follows:') try: # Visualize Ranking Result # Graphical User Interface is needed fig = plt.figure(figsize=(16,4)) ax = plt.subplot(1,2,1) ax.axis('off') imshow(query_path,'query') for i in range(10): ax = plt.subplot(1,11,i+2) ax.axis('off') img_path, _ = image_datasets['gallery'].imgs[index[i]] label = gallery_label[index[i]] imshow(img_path) if label == query_label: ax.set_title('%d'%(i+1), color='green') else: ax.set_title('%d'%(i+1), color='red') print(img_path) except RuntimeError: for i in range(10): img_path = image_datasets.imgs[index[i]] print(img_path[0]) print('If you want to see the visualization of the ranking result, graphical user interface is needed.') fig.savefig("show.png") prepare_test() import cv2 from google.colab.patches import cv2_imshow # /Market/pytorch/gallery/0342/0342_c1s2_006291_02.jpg img = cv2.imread('/content/Person_reID_baseline_pytorch/Market/pytorch/query/1502/1502_c6s6_694161_04.jpg', cv2.IMREAD_UNCHANGED) cv2_imshow(img) # cv2.imshow(cv2.imread('', cv2.IMREAD_UNCHANGED)) import cv2 from google.colab.patches import cv2_imshow # /Market/pytorch/gallery/0342/0342_c1s2_006291_02.jpg img = cv2.imread('/content/Person_reID_baseline_pytorch/Market/pytorch/gallery/0000/0000_c6s5_694262_00.jpg', cv2.IMREAD_UNCHANGED) cv2_imshow(img) # cv2.imshow(cv2.imread('', cv2.IMREAD_UNCHANGED)) def prepare(filepath,j): return 1 ``` ## Detection ``` def main1(n1,j): y1 = 0 y11 = 0 n = n1 global ld # Global variable for showing last detected time # load the COCO class labels our YOLO model was trained on - *preset lpath = os.path.sep.join(['yolo-coco', "coco.names"]) la = open(lpath).read().strip().split("\n") # derive the paths to the YOLO weights and model configuration - *preset weightsPath = os.path.sep.join(['yolo-coco', "yolov3.weights"]) configPath = os.path.sep.join(['yolo-coco', "yolov3.cfg"]) # load our YOLO object detector trained on 
COCO dataset (80 classes) - *preset net = cv2.dnn.readNetFromDarknet(configPath, weightsPath) # load input to get its dimensions im = cv2.imread(n) (H, W) = im.shape[:2] # Colour for the labels npy.random.seed(42) colours = npy.random.randint(0, 255, size=(len(la), 3),dtype="uint8") # Naming layers - *preset ln = net.getLayerNames() ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()] # construct a blob from the input image and then perform a forward - *preset # pass of the YOLO object detector, giving us our bounding boxes and associated probabilities blob = cv2.dnn.blobFromImage(im, 1 / 255.0, (416, 416),swapRB=True, crop=False) net.setInput(blob) start = time.time() out = net.forward(ln) end = time.time() box1 = [] classID1 = [] confidence1 = [] for o in out: for det in o: s1 = det[5:] classID = npy.argmax(s1) confidence = s1[classID] if confidence > 0.5: box = det[0:4] * npy.array([W, H, W, H]) (cX, cY, w1, h1) = box.astype("int") x = int(cX - (w1 / 2)) y = int(cY - (h1 / 2)) box1.append([x, y, int(w1), int(h1)]) confidence1.append(float(confidence)) classID1.append(classID) # apply non-maxima suppression to suppress weak, overlapping bounding - *preset id1 = cv2.dnn.NMSBoxes(box1, confidence1, 0.5, 0.3) if len(id1) > 0: for i in id1.flatten(): temp = [] (x, y) = (box1[i][0], box1[i][1]) (w, h) = (box1[i][2], box1[i][3]) cl = [int(c) for c in colours[classID1[i]]] text = "{}".format(la[classID1[i]]) if text == "person": ct=time.time() cv2.rectangle(im, (x, y), (x + w, y + h), cl, 2) cv2.putText(im, str(i)+", "+str(x)+" "+(str(y)), (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,0.5, cl, 2) cv2.imwrite("test/detected/d{}.jpg".format(j),im) im_refined = cv2.imread("test/detected/d{}.jpg".format(j)) crop = im_refined[int(y+2):int(y+h-2),int(x+2):int(x+w-2)] new = cv2.resize(crop,(64,128)) cv2.imwrite("test/crops/0000_c6s5_6942{}_0{}.jpg".format(j,i),new) y1 = prepare("test/crops/0000_c1s1_6942{}_0{}.jpg".format(j,i),j) return y1 ``` ## Main module ``` def call(given): 
yes=0 yes1=0 vid1 = "Demo/demo{}.mp4".format(given) # count = [] # count.append(0) frames = 45 # cv2.VideoCapture(0) - If you want webcam cap = cv2.VideoCapture(vid1) i,j,ld = 0,0,0 while True: r, f = cap.read() # takes the fram if r: #cv2.imshow('frame', f) f = cv2.resize(f,(400,300)) if i%frames == 0: try: j = j+1 s = "test/overall/ss{}.jpg".format(j) cv2.imwrite(s,f) yes = main1(s,j) if yes==1: yes1=1 except: pass i=i+1 if cv2.waitKey(1) & 0xFF == ord('q'): # Press Q to quit break else: break cap.release() return yes1 !rm -r "/content/test/crops/" c = call(1) print(c) ``` ## Plotting ``` def person(points1,p1,p2): initial1 = p1 initial2 = p2 if (p1,p2) in [(1,5),(5,5),(5,1),(1,1)]: l = [1,4,6] random.shuffle(l) for i in range(3): c = call(l[i]) if c==1: if i==0: if p1==1: p1+=1 else: p1-=1 if i==1: if p1==1: p1+=1 if p2==1: p2-=1 else: p2+=1 else: p1-=1 if p2==1: p2-=1 else: p2+=1 if i==2: if p2==5: p2-=1 else: p2+=1 else: l = [1,2,3,4,5,6,7,8] random.shuffle(l) for i in range(8): c = call(l[i]) if c==1: if i==0: p2=p2+1 if i==1: p1+=1 p2+=1 if i==2: p1+=1 if i==3: p1+=1 p2-=1 if i==4: p2-=1 if i==5: p1-=1 p2-=1 if i==6: p1-=1 if i==7: p2+=1 p1-=1 if (p1,p2) in points1: return person(points1,initial1,initial2) elif p1 not in range(1,6) or p2 not in range(1,6): return person(points1, initial1, initial2) else: points1.append((p1,p2)) print(points1) message = "Person found at "+str(p1)+","+str(p2) #service.send_message("9449277201", message) print(message) return points1 ``` ## Display ``` points = [] points1 = [(1,5)] for i in range(5): points1 = person(points1,points1[-1][0],points1[-1][1]) for k in range(1,6): for l in range(1,6): if (k,l) not in points1: points.append((k,l)) x = list(map(lambda x: x[0], points)) y = list(map(lambda x: x[1], points)) x1 = list(map(lambda x1: x1[0], points1)) y1 = list(map(lambda x1: x1[1], points1)) plt.xticks(npy.arange(1, 6, 1)) plt.yticks(npy.arange(1, 6, 1)) plt.scatter(x,y,c="g") plt.scatter(x1,y1,c="r") plt.grid(True) 
plt.plot(x1,y1,'--'); plt.show() plt.savefig('test/graphs/g{}.png'.format(i)) ```
github_jupyter
# Classification ## Premier League 2018/2019 season W/D/L prediction program ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df = pd.read_csv(r'D:\git_space\data\Premier-League_18-19seasons_matches.csv') df.head() df.columns df.info() # 의미없는 column 삭제 meaningless_data = ['timestamp', 'date_GMT', 'status', 'attendance', 'referee', 'Game Week', 'Pre-Match PPG (Home)', 'Pre-Match PPG (Away)', 'average_goals_per_match_pre_match', 'btts_percentage_pre_match', 'over_15_percentage_pre_match', 'over_25_percentage_pre_match', 'over_35_percentage_pre_match', 'over_45_percentage_pre_match', 'over_15_HT_FHG_percentage_pre_match', 'over_05_HT_FHG_percentage_pre_match', 'over_15_2HG_percentage_pre_match', 'over_05_2HG_percentage_pre_match', 'average_corners_per_match_pre_match', 'average_cards_per_match_pre_match', 'odds_ft_over15', 'odds_ft_over25', 'odds_ft_over35', 'odds_ft_over45', 'odds_btts_yes', 'odds_btts_no', 'stadium_name'] # 경기시작 전에 알 수 없는 column 삭제(예측을 해야 하기 때문에 경기 기록 data를 사용하면 안된다) unknown_data = ['total_goal_count', 'total_goals_at_half_time', 'home_team_goal_count_half_time', 'away_team_goal_count_half_time', 'home_team_goal_timings', 'away_team_goal_timings', 'home_team_corner_count', 'away_team_corner_count', 'home_team_yellow_cards', 'home_team_red_cards', 'away_team_yellow_cards', 'away_team_red_cards', 'home_team_first_half_cards', 'home_team_second_half_cards', 'away_team_first_half_cards', 'away_team_second_half_cards', 'home_team_shots', 'away_team_shots', 'home_team_shots_on_target', 'away_team_shots_on_target', 'home_team_shots_off_target', 'away_team_shots_off_target', 'home_team_fouls', 'away_team_fouls', 'home_team_possession', 'away_team_possession',] df = df.drop(meaningless_data + unknown_data, axis=1) df.head() df.isnull().sum() # 홈팀 득점 수 - 어웨이팀 득점 수 column 생성 df['difference'] = df['home_team_goal_count'] - df['away_team_goal_count'] df.head() # difference column의 data가 양수(홈팀 승)면 
2반환, 0(무승부)이면 1반환, 음수(홈팀 패)면 0반환 def func(x): if x > 0: return 2 elif x == 0: return 1 else: return 0 # 홈팀의 승무패 정보를 담는 column 생성 df['home_team_result'] = df['difference'].apply(lambda x : func(x)) df # 팀의 정보를 하나의 column에 담았으니 쓸모 없어진 홈팀, 어웨이팀 득점 수, difference column은 삭제한다 df.drop(['home_team_goal_count', 'away_team_goal_count', 'difference'], axis=1, inplace=True) df # 새롭게 추가할 data는 home, away team의 근 3년간 시즌에서의 최종 승점의 정보이다 # 현재 18/19 시즌이기 때문에 15/16, 16/17, 17/18 시즌의 성적을 알아보자 df['home_team_name'].value_counts() season_1516 = {'Brighton & Hove Albion': 89*0.5, 'Cardiff City': 68*0.5, 'AFC Bournemouth': 42, 'Southampton': 63, 'Huddersfield Town': 51*0.5, 'Wolverhampton Wanderers': 58*0.5, 'Chelsea': 50, 'Watford': 45, 'Tottenham Hotspur': 70, 'West Ham United': 62, 'Liverpool': 60, 'Manchester United': 66, 'Burnley': 93*0.5, 'Crystal Palace': 42, 'Fulham': 51*0.5, 'Leicester City': 81, 'Manchester City': 66, 'Newcastle United': 37, 'Everton': 47, 'Arsenal': 71} season_1617 = {'Brighton & Hove Albion': 93*0.5, 'Cardiff City': 62*0.5, 'AFC Bournemouth': 46, 'Southampton': 46, 'Huddersfield Town': 81*0.5, 'Wolverhampton Wanderers': 58*0.5, 'Chelsea': 93, 'Watford': 40, 'Tottenham Hotspur': 86, 'West Ham United': 45, 'Liverpool': 76, 'Manchester United': 69, 'Burnley': 40, 'Crystal Palace': 41, 'Fulham': 80*0.5, 'Leicester City': 44, 'Manchester City': 78, 'Newcastle United': 94*0.5, 'Everton': 61, 'Arsenal': 75} season_1718 = {'Brighton & Hove Albion': 40, 'Cardiff City': 90*0.5, 'AFC Bournemouth': 44, 'Southampton': 36, 'Huddersfield Town': 37, 'Wolverhampton Wanderers': 99*0.5, 'Chelsea': 70, 'Watford': 41, 'Tottenham Hotspur': 77, 'West Ham United': 42, 'Liverpool': 75, 'Manchester United': 81, 'Burnley': 54, 'Crystal Palace': 44, 'Fulham': 88*0.5, 'Leicester City': 47, 'Manchester City': 100, 'Newcastle United': 44, 'Everton': 49, 'Arsenal': 63} for team, point in season_1617.items(): point = point * 1.3 season_1617[team] = point for team, point in 
season_1718.items(): point = point * 1.5 season_1718[team] = point data1516 = pd.DataFrame.from_dict([season_1516]) data1617 = pd.DataFrame.from_dict([season_1617]) data1718 = pd.DataFrame.from_dict([season_1718]) total_points = data1516 + data1617 + data1718 mean_of_points = total_points / 3 mean_of_points # 원래의 dataframe에 각 팀의 승점에 해당되는 점수를 넣는다. for team in mean_of_points.columns: for index, teams in enumerate(df['home_team_name'].values): if str(teams) == str(team): df['home_team_name'].values[index] = mean_of_points[team].values[0] # values들은 list 형태에 저장돼 있기에 index 0값을 불러 가져옴 for team in mean_of_points.columns: for index, teams in enumerate(df['away_team_name'].values): if str(teams) == str(team): df['away_team_name'].values[index] = mean_of_points[team].values[0] # values들은 list 형태에 저장돼 있기에 index 0값을 불러 가져옴 df # team_name 에서 team_point 로 이름 변경 df = df.rename({'home_team_name':'home_team_point', 'away_team_name':'away_team_point'}, axis='columns') df # 이를 해주는 이유는 dataframe에 새로 추가해준 승점 정보를 숫자형으로 확실하게 바꿔주기 위함이다. 
df['home_team_point'] = pd.to_numeric(df['home_team_point']) df['away_team_point'] = pd.to_numeric(df['away_team_point']) df from sklearn.preprocessing import MinMaxScaler target = df['home_team_result'] features = df.drop('home_team_result', axis=1, inplace=False) scaler = MinMaxScaler() scaler.fit(features) df_scaled = scaler.transform(features) soccer_df_scaled = pd.DataFrame(data=df_scaled, columns=features.columns) soccer_df_scaled ``` ### Machine Learning model for classification ``` from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import GradientBoostingClassifier from xgboost import XGBClassifier from lightgbm import LGBMClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score X_train, X_test, y_train, y_test = train_test_split(soccer_df_scaled, target, test_size=0.2) dt = DecisionTreeClassifier() rf = RandomForestClassifier() lr = LogisticRegression() ada = AdaBoostClassifier() gbm = GradientBoostingClassifier() xgb = XGBClassifier(n_estimators=400, learning_rate=0.1, max_depth=3) lgbm = LGBMClassifier(n_estimators=400) knn = KNeighborsClassifier(n_neighbors=8) dt_predictions = [] rf_predictions = [] lr_predictions = [] ada_predictions = [] gbm_predictions = [] xgb_predictions = [] lgbm_predictions = [] knn_predictions = [] # 20번 반복하여 나온 정확도 값의 평균 for _ in range(20): dt.fit(X_train, y_train) dt_pred = dt.predict(X_test) dt_predictions.append(accuracy_score(y_test, dt_pred)) rf.fit(X_train, y_train) rf_pred = rf.predict(X_test) rf_predictions.append(accuracy_score(y_test, rf_pred)) lr.fit(X_train, y_train) lr_pred = lr.predict(X_test) lr_predictions.append(accuracy_score(y_test, lr_pred)) ada.fit(X_train, y_train) ada_pred = ada.predict(X_test) 
ada_predictions.append(accuracy_score(y_test, ada_pred)) gbm.fit(X_train, y_train) gbm_pred = gbm.predict(X_test) gbm_predictions.append(accuracy_score(y_test, gbm_pred)) xgb.fit(X_train, y_train) xgb_pred = xgb.predict(X_test) xgb_predictions.append(accuracy_score(y_test, xgb_pred)) evals = [(X_test, y_test)] lgbm.fit(X_train, y_train, early_stopping_rounds=100, eval_metric='logloss', eval_set=evals, verbose=True) lgbm_pred = lgbm.predict(X_test) lgbm_predictions.append(accuracy_score(y_test, lgbm_pred)) knn.fit(X_train, y_train) knn_pred = knn.predict(X_test) knn_predictions.append(accuracy_score(y_test, knn_pred)) print('Decision Tree:', sum(dt_predictions) / 20) print('Random Forest:', sum(rf_predictions) / 20) print('Logistic Regression:', sum(lr_predictions) / 20) print('Adaboost:', sum(ada_predictions) / 20) print('GBM:', sum(gbm_predictions) / 20) print('XGB:', sum(xgb_predictions) / 20) print('LGBM:', sum(lgbm_predictions) / 20) print('KNN', sum(knn_predictions) / 20) ``` ### VotingClassifier 로 정확도 높이기 시도 ``` from sklearn.ensemble import VotingClassifier # 정확도가 제일 높았던 두 분류기 선택 vc = VotingClassifier(estimators=[('LR', lr), ('Adaboost', ada)], voting='soft') vc.fit(X_train, y_train) vc_pred = vc.predict(X_test) print(accuracy_score(y_test, vc_pred)) ``` ### 하이퍼 파라미터 수정 ``` from sklearn.model_selection import GridSearchCV param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100], 'penalty': ['l1', 'l2']} # 그리드 서치 진행 grid_search = GridSearchCV(LogisticRegression(), param_grid, cv=5, scoring='accuracy') grid_search.fit(X_train, y_train) print(grid_search.best_params_) print(grid_search.best_score_) estimator = grid_search.best_estimator_ grid_pred = estimator.predict(X_test) print(accuracy_score(y_test, grid_pred)) ``` ### stacking ensemble로 조금만 더 높여보자 ``` X_train, X_test, y_train, y_test = train_test_split(soccer_df_scaled, target, test_size=0.2) knn = KNeighborsClassifier(n_neighbors=4) rf = RandomForestClassifier(n_estimators=100) dt = DecisionTreeClassifier() ada = 
AdaBoostClassifier(n_estimators=100) lr = LogisticRegression(penalty='l1', C=10) knn.fit(X_train, y_train) rf.fit(X_train, y_train) dt.fit(X_train, y_train) ada.fit(X_train, y_train) lr.fit(X_train, y_train) knn_pred = knn.predict(X_test) rf_pred = rf.predict(X_test) dt_pred = dt.predict(X_test) ada_pred = ada.predict(X_test) lr_pred = lr.predict(X_test) print('Score of KNN: ', accuracy_score(y_test, knn_pred)) print('Score of Random Forest: ', accuracy_score(y_test, rf_pred)) print('Score of Decision Tree: ', accuracy_score(y_test, dt_pred)) print('Score of AdaBoostClassifier: ', accuracy_score(y_test, ada_pred)) print('Score of Logistic Regression: ', accuracy_score(y_test, lr_pred)) pred = np.array([knn_pred, rf_pred, dt_pred, lr_pred]) print(pred.shape) pred = np.transpose(pred) print(pred.shape) ada.fit(pred, y_test) pred_final = ada.predict(pred) print('Score of Stacking Ensemble: ', accuracy_score(y_test, pred_final)) ```
github_jupyter
``` import copy import json import pathlib from distutils import dir_util from dask.distributed import Client, LocalCluster from urllib.request import urlretrieve from laserfarm import Retiler, DataProcessing, GeotiffWriter, Classification from laserfarm import MacroPipeline ``` # Macro-ecology LiDAR point-cloud processing pipeline ## 0. Data Retrieval and Cluster Setup Files produced by the pipeline will be saved in the `tmp_folder` directory. ``` tmp_folder = pathlib.Path('/var/tmp') ``` We start by checking whether the test data set is available locally, we otherwise retrieve it from the AHN3 repository. ``` testdata_files = ['C_41CZ2.LAZ'] file_paths = [tmp_folder/f for f in testdata_files] for file_path in file_paths: if not file_path.is_file(): url = 'https://geodata.nationaalgeoregister.nl/ahn3/extract/ahn3_laz' url = '/'.join([url, file_path.name]) urlretrieve(url, file_path) ``` We then setup the cluster that we will use for the computation using `dask`. For this example, the cluster consists of 2 processes (workers). Note: it is important that single-threaded workers are employed for the tasks that require `laserchicken`! ``` cluster = LocalCluster(processes=True, n_workers=2, threads_per_worker=1, local_directory=tmp_folder/'dask-worker-space') cluster ``` ## 1. Retiling The first step in the pipeline is to retile the retrieved point-cloud files to a regular grid, splitting the original data into smaller chuncks that are easier to handle for data processing. The boundaries of the grid and the number of tiles along each axis are set to: ``` grid = { 'min_x': -113107.8100, 'max_x': 398892.1900, 'min_y': 214783.8700, 'max_y': 726783.87, 'n_tiles_side': 256 } ``` The retiling of multiple input files consists of independent tasks, which are thus efficiently parallelized. The input controlling all the steps of the retiling is organized in a dictionary. 
``` # set path where output will be written retiling_out_path = tmp_folder/'retiled' retiling_input = { 'setup_local_fs': { 'input_folder': tmp_folder.as_posix(), 'output_folder': retiling_out_path.as_posix() }, 'set_grid': grid, 'split_and_redistribute': {}, 'validate': {} } retiling_macro = MacroPipeline() for file_path in file_paths: retiler = Retiler(input_file=file_path.name, label=file_path.stem) retiler.config(retiling_input) retiling_macro.add_task(retiler) retiling_macro.setup_cluster(cluster=cluster) # run! retiling_macro.run() retiling_macro.print_outcome() ``` ## 2. Feature Extraction Once the files are splitted into tiles of a manageable size, we proceed to the feature extraction stage, which is performed using `laserchicken`. We choose the following two example features: ``` feature_names = ['mean_normalized_height', 'std_normalized_height'] ``` The base input dictionary for this step looks like: ``` # set path where output will be written dp_out_path = tmp_folder/'targets' dp_input = { 'setup_local_fs': { 'input_folder': retiling_out_path.as_posix(), 'output_folder': dp_out_path.as_posix() }, 'load': {}, 'normalize': { 'cell_size': 1 }, 'generate_targets': { 'tile_mesh_size' : 10.0, 'validate' : True, 'validate_precision': 1.e-3, **grid }, 'extract_features': { 'feature_names': feature_names, 'volume_type': 'cell', 'volume_size': 10 }, 'export_targets': {}, 'clear_cache': {} } ``` Note: `laserchicken` caches the KDTree computed for the point cloud. In order to free up the memory of the `dask` workers at the end of each tile's feature extraction, we need to clear the cache (see `clear_cache` in the input dictionary above). 
The tiles to which the original input file has been retiled are listed in a record file located in the retiling output directory:
```
tiles = []
for file_path in file_paths:
    record_file = '_'.join([file_path.stem, 'retile_record.js'])
    with pathlib.Path(retiling_out_path/record_file).open() as f:
        record = json.load(f)
        assert record['validated']
        tiles += [pathlib.Path(retiling_out_path/tile) for tile in record['redistributed_to']]
print([t.as_posix() for t in tiles])
```
Each tile can be processed independently, so that again one can run the tasks in a parallel fashion.
```
dp_macro = MacroPipeline()
for tile in tiles:
    # parse tile index from the directory name
    tile_index = [int(n) for n in tile.name.split('_')[1:]]
    dp = DataProcessing(input=tile.name, label=tile.name, tile_index=tile_index)
    dp.config(dp_input)
    dp_macro.add_task(dp)
dp_macro.setup_cluster(cluster=cluster)

# run!
dp_macro.run()
dp_macro.print_outcome()
```
## 3. Classification of target points

We can classify the target points according to their ground type, based on given cadaster data. To mark the types of the points in the point cloud, we can add a new column `ground_type` to the target point cloud. We can use the class code of TOP10NL as the identifier.

0. Unclassified
1. Gebouw
2. Inrichtingselement
3. Terrein (Polygon)
4. Spoorbaandeel
5. Waterdeel
6. GeografischGebied (Point)
7. FunctioneelGebied
8. Plaats
9. RegistratiefGebied
10. Hoogte
11. Relief (Line String)
12. Wegdeel

Here we present an example where we classify the points that fall on waterbodies with the given shp files of waterbody polygons. We will classify the target points according to the shape files provided in `testdata/shp`:
```
shp_path = pathlib.Path('./testdata/shp/')
```
The pipeline will automatically find out the relevant shp file. We will add a new column `ground_type`, and mark all points which fall in the waterbody polygons with `5`, which is the `waterdeel` identifier.
We set up the input for the classification pipeline as follows:
```
# set path where output will be written
cl_out_path = tmp_folder/'classified_target_point'

classification_input = {
    'setup_local_fs': {
        'input_folder': dp_out_path.as_posix(),
        'output_folder': cl_out_path.as_posix()
    },
    'locate_shp': {'shp_dir': shp_path.absolute().as_posix()},
    'classification': {'ground_type': 5},
    'export_point_cloud': {'overwrite':True}
}
```
Then we execute the pipeline:
```
cl_macro = MacroPipeline()
for tile in tiles:
    tile_path = (dp_out_path/tile.name).with_suffix('.ply')
    cl = Classification(input_file=tile_path.as_posix(), label=tile.name)
    cl.config(classification_input)
    cl_macro.add_task(cl)
cl_macro.setup_cluster(cluster=cluster)

# run!
cl_macro.run()
cl_macro.print_outcome()
```
## 4. GeoTIFF Export

The last step of the pipeline is the transformation of the extracted features and the added ground type from the point-cloud data, 'rasterized' on the target grid, into a GeoTIFF file. In this case, the construction of the geotiffs (one per feature) can be performed in parallel:
```
# set path where output will be written
gw_out_path = tmp_folder/'geotiffs'

gw_input = {
    'setup_local_fs': {'input_folder': cl_out_path.as_posix(),
                       'output_folder': gw_out_path.as_posix()},
    'parse_point_cloud': {},
    'data_split': [1, 1],
    'create_subregion_geotiffs': {'output_handle': 'geotiff'}
}

geotiff_macro = MacroPipeline()
feature_names.append('ground_type')
for feature_name in feature_names:
    gw = GeotiffWriter(bands=feature_name, label=feature_name)
    gw.config(gw_input)
    geotiff_macro.add_task(gw)
geotiff_macro.setup_cluster(cluster=cluster)

# run!
geotiff_macro.run()
geotiff_macro.print_outcome()
```
Finally, we stop the client and the scheduler of the cluster.
```
cluster.close()
```
github_jupyter