code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="kLSLxAdBGHSd" # # LSN - simulation test code # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="WYpfpG7UGLnX" # Package imports # %matplotlib inline import tensorflow.compat.v1 as tf import tf_slim as slim import numpy as np import random import time import math import pandas as pd from datetime import datetime # - # # LSN Class and other functions # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="4nyoHfbQY-YJ" # Main slim library class siamese_net(object): def __init__(self, net_arch): self.input_L = tf.placeholder(tf.float32, [None, net_arch['MR_shape']],name='baseline') self.input_R = tf.placeholder(tf.float32, [None, net_arch['MR_shape']],name='follow_up') self.aux_gen = tf.placeholder(tf.float32, [None,1],name='apoe') #apoe4 status self.aux_clinical = tf.placeholder(tf.float32, [None,net_arch['aux_shape']-1],name='clinical_attr') self.labels = tf.placeholder(tf.float32, [None,net_arch['output']],name='trajectory') self.is_training = True #toggles dropout in slim self.dropout = 1 with tf.variable_scope("siamese") as scope: self.branch_L = self.mlpnet_slim(self.input_L, net_arch) scope.reuse_variables() self.branch_R = self.mlpnet_slim(self.input_R, net_arch) # Create metrics self.distance = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(self.branch_L,self.branch_R),2),1)) self.preds = self.get_predictions(net_arch) self.loss = self.get_loss() self.accuracy = self.get_accuracy() #Adds all the numerical MRI features to the siamese_net def mlpnet_slim(self, X, net_arch): with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(net_arch['reg'])): # The code below creates fully connected layers for 
the siamese net for l in range(net_arch['n_layers']): net = slim.fully_connected(X, net_arch['l{}'.format(l+1)], normalizer_fn=slim.batch_norm,scope='fc{}'.format(l)) net = slim.dropout(net, self.dropout, is_training=self.is_training) # MR output MR_predictions = slim.fully_connected(net, net_arch['MR_output'], normalizer_fn=slim.batch_norm, scope='MR_prediction') return MR_predictions #Later also return end_points # Auxilary branch for age, gender, genetics etc def auxnet(self,net_arch): #utilizes multiplicative modulation to incorporate genetics #utilizes concatination to add other clinical attributes such as age and gender distance_vec = tf.concat([self.branch_L,self.branch_R],1,name='MR_embed_concat') distance_vec_mod = tf.multiply(distance_vec,self.aux_gen) distance_vec_mod_aux = tf.concat([distance_vec_mod,self.aux_clinical],1) with tf.name_scope('aux_layers'): aux_predictions = slim.fully_connected(distance_vec_mod_aux, net_arch['aux_output'], activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(net_arch['reg']), normalizer_fn=slim.batch_norm) aux_predictions = slim.dropout(aux_predictions, self.dropout, is_training=self.is_training) return aux_predictions def get_predictions(self, net_arch): if net_arch['use_aux']: embed_vec = self.auxnet(net_arch) print('Using MR and aux features') else: embed_vec = tf.concat([self.branch_L,self.branch_R],1,name='MR_embed_concat') print('Using only MR features') penult_predict = slim.fully_connected(embed_vec, net_arch['output'], activation_fn=tf.nn.softmax, normalizer_fn=slim.batch_norm, scope='aux_prediction') return penult_predict #-------------- net with basic/raw TF code (without slim) --------------# #-------------- not used when slim is used (preferred)------------------# def mlpnet(self, X): l1 = self.mlp(X,layer_config['MR_shape'],layer_config['l1'],name='l1') l1 = tf.nn.dropout(l1,self.dropout) l2 = self.mlp(l1,layer_config['l1'],layer_config['l2'],name='l2') l2 = tf.nn.dropout(l2,self.dropout_f) l3 
= self.mlp(l2,layer_config['l2'],layer_config['l3'],name='l3') l3 = tf.nn.dropout(l3,self.dropout) l4 = self.mlp(l3,layer_config['l3'],layer_config['l4'],name='l4') l4 = tf.nn.dropout(l4,self.dropout) output = self.mlp(l4,layer_config['l4'],layer_config['output'],name='output') return output def mlp(self, input_,input_dim,output_dim,name="mlp"): with tf.variable_scope(name): w = tf.get_variable('w',[input_dim,output_dim],tf.float32,tf.random_normal_initializer(mean = 0.001,stddev=0.02)) b = tf.get_variable('b',[output_dim],tf.float32,tf.constant_initializer(0.1)) return tf.nn.relu(tf.matmul(input_,w)+b) #-----------------------------------------------------------------------# # Set methods for class variables def set_dropout(self, dropout): self.dropout = dropout def set_train_mode(self,is_training): self.is_training = is_training # Get methods for loss and acc metrics def get_loss(self): return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.labels,logits=self.preds)) def get_accuracy(self): correct_preds = tf.equal(tf.argmax(self.labels,1), tf.argmax(self.preds,1)) return tf.reduce_mean(tf.cast(correct_preds, tf.float32)) # Other helper functions def next_batch(s,e,mr_inputs,aux_inputs,labels): input1 = mr_inputs[s:e,0] input2 = mr_inputs[s:e,1] input3 = aux_inputs[s:e,:] y = labels[s:e,:] return input1,input2,input3,y def check_data_shapes(data,net_arch): check = True n_layers = net_arch['n_layers'] if data['X_MR'].shape[1] != 2: print('wrong input data dimensions - need MR data for two branches') check = False elif data['X_MR'].shape[2] != net_arch['MR_shape']: print('input MR data <-> LSN arch mismatch') check = False elif data['X_aux'].shape[1] != net_arch['aux_shape']: print('input aux data <-> LSN arch mismatch') check = False elif data['y'].shape[1] != net_arch['output']: print('number of classes (2,3) <-> LSN arch mismatch') check = False else: for l in range(n_layers): try: _ = net_arch['l{}'.format(l+1)] except: print('Specify number 
of nodes for layer {}'.format(l)) check = False return check # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="ykpbbpRMha_r" # Train and test defs def train_lsn(sess, lsn, data, optimizer, n_epochs, batch_size, dropout, validate_after, verbose): valid_frac = int(0.1*len(data['y'])) # Split into train and valid data for hyperparam tuning X_MR_train = data['X_MR'][:1-valid_frac] X_aux_train = data['X_aux'][:1-valid_frac] y_train = data['y'][:1-valid_frac] X_MR_valid = data['X_MR'][1-valid_frac:] X_aux_valid = data['X_aux'][1-valid_frac:] y_valid = data['y'][1-valid_frac:] total_batch = int(len(y_train)/batch_size) #print('total_batch {}'.format(total_batch)) train_acc_list = [] valid_acc_list = [] train_loss_list = [] valid_loss_list = [] # Training cycle for epoch in range(n_epochs): avg_loss = 0. avg_acc = 0. start_time = time.time() # Loop over all batches for i in range(total_batch): s = i * batch_size e = (i+1) *batch_size # Fit training using batch data MR_L_batch,MR_R_batch,aux_batch,y_batch = next_batch(s,e,X_MR_train,X_aux_train,y_train) # Train pass lsn.set_dropout(dropout) _,distance,preds,loss_value,acc_value=sess.run([optimizer,lsn.distance,lsn.preds,lsn.loss,lsn.accuracy], feed_dict={lsn.input_L:MR_L_batch, lsn.input_R:MR_R_batch, lsn.aux_gen:aux_batch[:,0:1], lsn.aux_clinical:aux_batch[:,1:], lsn.labels:y_batch}) avg_loss += loss_value avg_acc +=acc_value*100 duration = time.time() - start_time if verbose: print('epoch %d time: %.2f loss %0.4f acc %0.2f' %(epoch,duration,avg_loss/total_batch,avg_acc/total_batch)) #Compute perf on entire training and validation sets (no need after every epoch) if epoch%validate_after == 0: train_acc = lsn.accuracy.eval(feed_dict={lsn.input_L:X_MR_train[:,0,:],lsn.input_R:X_MR_train[:,1,:], lsn.aux_gen:X_aux_train[:,0:1],lsn.aux_clinical:X_aux_train[:,1:], lsn.labels:y_train}) valid_acc = lsn.accuracy.eval(feed_dict={lsn.input_L:X_MR_valid[:,0,:],lsn.input_R:X_MR_valid[:,1,:], 
lsn.aux_gen:X_aux_valid[:,0:1],lsn.aux_clinical:X_aux_valid[:,1:], lsn.labels:y_valid}) train_loss = lsn.loss.eval(feed_dict={lsn.input_L:X_MR_train[:,0,:],lsn.input_R:X_MR_train[:,1,:], lsn.aux_gen:X_aux_train[:,0:1],lsn.aux_clinical:X_aux_train[:,1:], lsn.labels:y_train}) valid_loss = lsn.loss.eval(feed_dict={lsn.input_L:X_MR_valid[:,0,:],lsn.input_R:X_MR_valid[:,1,:], lsn.aux_gen:X_aux_valid[:,0:1],lsn.aux_clinical:X_aux_valid[:,1:], lsn.labels:y_valid}) print('performance on entire train and valid subsets') print('epoch {}\t train_acc:{}\n'.format(epoch,train_acc)) train_acc_list.append(train_acc) valid_acc_list.append(valid_acc) train_loss_list.append(train_loss) valid_loss_list.append(valid_loss) # Post training: Compute preds and metrics for entire train data X_MR_train = data['X_MR'] X_aux_train = data['X_aux'] y_train = data['y'] train_feature_L = lsn.branch_L.eval(feed_dict={lsn.input_L:X_MR_train[:,0,:]}) train_feature_R = lsn.branch_R.eval(feed_dict={lsn.input_R:X_MR_train[:,1,:]}) train_preds= lsn.preds.eval(feed_dict={lsn.input_L:X_MR_train[:,0,:],lsn.input_R:X_MR_train[:,1,:], lsn.aux_gen:X_aux_train[:,0:1],lsn.aux_clinical:X_aux_train[:,1:]}) train_metrics = {'train_feature_L':train_feature_L,'train_feature_R':train_feature_R,'train_preds':train_preds, 'train_loss':train_loss_list,'train_acc':train_acc_list, 'valid_loss':valid_loss_list,'valid_acc':valid_acc_list} return lsn, train_metrics def test_lsn(sess,lsn,data): print('Testing model') lsn.set_dropout(1) lsn.set_train_mode(False) X_MR_test = data['X_MR'] X_aux_test = data['X_aux'] y_test = data['y'] #print(lsn.dropout) test_feature_L = lsn.branch_L.eval(feed_dict={lsn.input_L:X_MR_test[:,0,:]}) test_feature_R = lsn.branch_R.eval(feed_dict={lsn.input_R:X_MR_test[:,1,:]}) test_preds = lsn.preds.eval(feed_dict={lsn.input_L:X_MR_test[:,0,:],lsn.input_R:X_MR_test[:,1,:], lsn.aux_gen:X_aux_test[:,0:1],lsn.aux_clinical:X_aux_test[:,1:]}) test_acc = 
lsn.accuracy.eval(feed_dict={lsn.input_L:X_MR_test[:,0,:],lsn.input_R:X_MR_test[:,1,:], lsn.aux_gen:X_aux_test[:,0:1],lsn.aux_clinical:X_aux_test[:,1:], lsn.labels:y_test}) test_metrics = {'test_feature_L':test_feature_L,'test_feature_R':test_feature_R,'test_preds':test_preds,'test_acc':test_acc} print('Accuracy test set %0.2f' % (100 * test_acc)) return lsn, test_metrics # - # # Generate simulation data # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 376, "status": "ok", "timestamp": 1528988676971, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-rmY3GxNQ0TY/AAAAAAAAAAI/AAAAAAAAEdE/SePqq1Clrc4/s50-c-k-no/photo.jpg", "userId": "104945077116245412065"}, "user_tz": 240} id="CoG7X_s41pch" outputId="4cc20727-22d6-437f-d5f9-bbb89efc12a2" sampx = 10000 X_MR = np.squeeze(np.random.rand(2*sampx,2,100)) X_aux = np.squeeze(np.random.rand(2*sampx,5)) # Modify feature distributions of the first half of the sample X_MR[:sampx] = X_MR[:sampx]/1.5 X_aux[:sampx] = X_aux[:sampx]/2.0 y = np.concatenate((np.ones(sampx),np.zeros(sampx))) y = np.squeeze(np.vstack((1-y,y)).T) # Shuffle data before train-test split indx_shuff = np.arange(2*sampx) np.random.shuffle(indx_shuff) X_MR = np.squeeze(X_MR[indx_shuff]) X_aux = np.squeeze(X_aux[indx_shuff]) y = np.squeeze(y[indx_shuff]) # Train-test splits train_frac = int(0.9*(2*sampx)) X_MR_train = X_MR[:train_frac] X_aux_train = X_aux[:train_frac] y_train = y[:train_frac] X_MR_test = X_MR[train_frac:] X_aux_test = X_aux[train_frac:] y_test = y[train_frac:] subject_idx_test = indx_shuff[train_frac:] print('shapes of X_MR_train:{}, X_aux_train:{}, y_train:{}, \n\t X_MR_test:{}, X_aux_test:{}, y_test:{}'.format(X_MR_train.shape,X_aux_train.shape,y_train.shape,X_MR_test.shape,X_aux_train.shape,y_test.shape)) # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", 
"height": 85} colab_type="code" executionInfo={"elapsed": 243, "status": "ok", "timestamp": 1528988735549, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-rmY3GxNQ0TY/AAAAAAAAAAI/AAAAAAAAEdE/SePqq1Clrc4/s50-c-k-no/photo.jpg", "userId": "104945077116245412065"}, "user_tz": 240} id="BoTQj0-C5oD2" outputId="8bb452a7-b82d-45fb-f0c5-2438bcfa5783" # Check differences in feature distributions print('train distributions') class_0 = np.mean(X_aux_train[y_train[:,0]==0],axis=0) class_1 = np.mean(X_aux_train[y_train[:,0]==1],axis=0) print(class_0,class_1) print('test distributions') class_0 = np.mean(X_aux_test[y_test[:,0]==0],axis=0) class_1 = np.mean(X_aux_test[y_test[:,0]==1],axis=0) print(class_0,class_1) # - # # Train and test a sample LSN # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 510} colab_type="code" executionInfo={"elapsed": 2258, "status": "ok", "timestamp": 1528990262472, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-rmY3GxNQ0TY/AAAAAAAAAAI/AAAAAAAAEdE/SePqq1Clrc4/s50-c-k-no/photo.jpg", "userId": "104945077116245412065"}, "user_tz": 240} id="tvUx-7fivyzv" outputId="17b66e25-2448-47a4-9224-022ba21e34e5" # training params lr = 0.001 n_epochs = 10 validate_after = 2 batch_size = 100 dropout = 0.8 #keep_prob verbose = False # Do you want to print perf after every epoch?? save_model = True save_model_path = '/test/' net_arch = {'MR_shape':100,'n_layers':4,'l1':50,'l2':50,'l3':25,'l4':25,'l5':25,'MR_output':10, 'use_aux':True,'aux_shape':5,'aux_output':2,'output':2,'reg':0.1} # minimal perf df --> append CV related attributes downstream. 
perf_df = pd.DataFrame(columns=['subject_id','label','pred_prob','pred_label']) tf.reset_default_graph() with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess: # Train model data = {'X_MR':X_MR_train,'X_aux':X_aux_train,'y':y_train} if check_data_shapes(data,net_arch): print('train data <-> net_arch check passed') lsn = siamese_net(net_arch) optimizer = tf.train.AdamOptimizer(learning_rate = lr).minimize(lsn.loss) tf.global_variables_initializer().run() saver = tf.train.Saver() cur_time = datetime.time(datetime.now()) print('\nStart training time: {}'.format(cur_time)) lsn, train_metrics = train_lsn(sess, lsn, data, optimizer, n_epochs, batch_size, dropout,validate_after,verbose) #Save trained model if save_model: print('saving model at {}'.format(save_model_path + 'lsn_example')) saver.save(sess, save_model_path + 'lsn_example') cur_time = datetime.time(datetime.now()) print('End training time: {}\n'.format(cur_time)) else: print('train data <-> net_arch check failed') # Test model data = {'X_MR':X_MR_test,'X_aux':X_aux_test,'y':y_test} if check_data_shapes(data,net_arch): print('test data <-> net_arch check passed') _,test_metrics = test_lsn(sess,lsn,data) # populate perf dataframe perf_df['subject_id'] = subject_idx_test perf_df['label'] = np.argmax(y_test,1) perf_df['pred_prob'] = list(test_metrics['test_preds']) perf_df['pred_label'] = np.argmax(test_metrics['test_preds'],1) else: print('test data <-> net_arch check failed') # - # # Restore dave TF session with pretrained model with tf.Session() as sess: new_saver = tf.train.import_meta_graph(save_model_path + 'lsn_example.meta') new_saver.restore(sess, tf.train.latest_checkpoint(save_model_path)) _,test_metrics = test_lsn(sess,lsn,data) # # Plots # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 729} colab_type="code" executionInfo={"elapsed": 10585, "status": "ok", "timestamp": 1528990279108, "user": {"displayName": "<NAME>", 
"photoUrl": "//lh4.googleusercontent.com/-rmY3GxNQ0TY/AAAAAAAAAAI/AAAAAAAAEdE/SePqq1Clrc4/s50-c-k-no/photo.jpg", "userId": "104945077116245412065"}, "user_tz": 240} id="uwCrzzgCQJJP" outputId="34495065-a74d-41cb-fdee-3f5f847e6956" import matplotlib.pyplot as plt import seaborn as sns from sklearn.manifold import TSNE plt.figure(figsize=(16,9)) plt.style.use('seaborn-white') sns.set(font_scale=2) train_loss = train_metrics['train_loss'] valid_loss = train_metrics['valid_loss'] train_acc = train_metrics['train_acc'] valid_acc = train_metrics['valid_acc'] plt.subplot(2,2,1) plt.plot(train_loss,label='train'); plt.plot(valid_loss,label='valid'); plt.title('Loss (innerloop)') plt.xlabel('number of epoch x{}'.format(validate_after)) plt.legend() plt.subplot(2,2,2) plt.plot(train_acc,label='train'); plt.plot(valid_acc,label='valid'); plt.plot(np.tile(test_metrics['test_acc'],len(train_acc)),'--',label='test') plt.title('Acc') plt.xlabel('number of epoch x{}'.format(validate_after)) plt.legend() # + #Plot TSNE tsne_sampx = 500 #Too slow for large number of samples train_embed_L = train_metrics['train_feature_L'] train_embed_R = train_metrics['train_feature_R'] test_embed_L = test_metrics['test_feature_L'] test_embed_R = test_metrics['test_feature_R'] train_tsne_embed_L = TSNE(n_components=2,init='pca').fit_transform(train_embed_L[:tsne_sampx]) test_tsne_embed_L = TSNE(n_components=2,init='pca').fit_transform(test_embed_L[:tsne_sampx]) train_tsne_embed_R = TSNE(n_components=2,init='pca').fit_transform(train_embed_R[:tsne_sampx]) test_tsne_embed_R = TSNE(n_components=2,init='pca').fit_transform(test_embed_R[:tsne_sampx]) train_df_L = pd.DataFrame(columns=['x','y','labels','subset']) test_df_L = pd.DataFrame(columns=['x','y','labels','subset']) train_df_R = pd.DataFrame(columns=['x','y','labels','subset']) test_df_R = pd.DataFrame(columns=['x','y','labels','subset']) train_df_L['x'] = train_tsne_embed_L[:,0] train_df_L['y'] = train_tsne_embed_L[:,1] train_df_L['labels'] = 
y_train[:tsne_sampx] train_df_L['subset'] = np.tile('train_L',len(y_train[:tsne_sampx])) train_df_R['x'] = train_tsne_embed_R[:,0] train_df_R['y'] = train_tsne_embed_R[:,1] train_df_R['labels'] = y_train[:tsne_sampx] train_df_R['subset'] = np.tile('train_R',len(y_train[:tsne_sampx])) test_df_L['x'] = test_tsne_embed_L[:,0] test_df_L['y'] = test_tsne_embed_L[:,1] test_df_L['labels'] = y_test[:tsne_sampx] test_df_L['subset'] = np.tile('test_L',len(y_test[:tsne_sampx])) test_df_R['x'] = test_tsne_embed_R[:,0] test_df_R['y'] = test_tsne_embed_R[:,1] test_df_R['labels'] = y_test[:tsne_sampx] test_df_R['subset'] = np.tile('test_R',len(y_test[:tsne_sampx])) plot_df = train_df_L.append(train_df_R).append(test_df_L).append(test_df_R) sns.lmplot(x='x',y='y',hue='labels',col='subset',col_wrap=2, fit_reg=False, markers='.',data=plot_df,size=4); # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 369} colab_type="code" executionInfo={"elapsed": 804, "status": "ok", "timestamp": 1528989626700, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-rmY3GxNQ0TY/AAAAAAAAAAI/AAAAAAAAEdE/SePqq1Clrc4/s50-c-k-no/photo.jpg", "userId": "104945077116245412065"}, "user_tz": 240} id="2IA_YZkNHaVy" outputId="871ac4c9-2385-4a70-fcab-c2aa52e13aae" # Plot preds train_features = train_metrics['train_preds'] test_features = test_metrics['test_preds'] train_df = pd.DataFrame(columns=['x','y','labels','subset']) test_df = pd.DataFrame(columns=['x','y','labels','subset']) train_df['x'] = train_features[:,0] train_df['y'] = train_features[:,1] train_df['labels'] = y_train train_df['subset'] = np.tile('train',len(y_train)) test_df['x'] = test_features[:,0] test_df['y'] = test_features[:,1] test_df['labels'] = y_test test_df['subset'] = np.tile('test',len(y_test)) plot_df = train_df.append(test_df) sns.lmplot(x='x',y='y',hue='labels',col='subset',fit_reg=False, markers='.',data=plot_df); # + colab={"autoexec": {"startup": 
false, "wait_interval": 0}} colab_type="code" id="N6VZGx8n8Y1O" # -
notebooks/.ipynb_checkpoints/LSN_sim_testcode-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Regression Example: Predict the Price of a House # Here we will use some housing data to predict the price of a house based on a number of data points. # The data contains a lot of issues we will need to solve before we can get a result: # # * String values (all features need to be numbers so we can do math on them) # * null values # * NaN values # * enumerable values (e.g. Sale Condition = ["Normal", "Abnormal", "Partial",...]) # * Some of the data points may not be relevant to the sale price # * Some data points might be best combined into a single data point (added or multiplied) # # We won't have time to address all of the issues, but let's explore some of them by importing the data and using dataframes and matplotlib to visualize it. # Import the common packages for exploring Machine Learning # %matplotlib notebook import numpy as np # <-- common convention for short names of packages... 
import pandas as pd import sklearn import matplotlib.pyplot as plt import matplotlib from sklearn import datasets, linear_model from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, r2_score # + # Load housing data into DataFrame (Pandas reads & writes CSVs and many other data formats) # data source: linked from https://ww2.amstat.org/publications/jse/v19n3/decock.pdf # Download this file to our Jupyter filesystem # !wget http://www.amstat.org/publications/jse/v19n3/decock/AmesHousing.txt; # file is 'Tab Separated' with a generic extension, so tell Pandas which separator to use: \t df = pd.read_csv('AmesHousing.txt',sep='\t'); # + # DataFrames in Pandas are easy to sample or use head(n) or tail(n) # df.head(3) # df.tail(3) df.sample(10) # - # Wow, 82 is lots of columns - let's sort them so we can find what we're looking for more easily df.columns.sort_values() # + # Maybe we want to grab just a subset of data columns - it's easy with Pandas. # Don't forget the double [[]] syntax for multiple selections # let's start with the easy stuff and grab only the numeric columns df2 = df[['SalePrice','Lot Area','Bedroom AbvGr','Year Built','Yr Sold','1st Flr SF', '2nd Flr SF','BsmtFin SF 1','BsmtFin SF 2']] df2.head(3) # - # Maybe you want to use Pandas Dataframes to engineer a new aggregate feature column # It's easy to do opeations across columns (eg. 
add all the Square Footage columns into a new one 'Total SF') df3 = pd.DataFrame(df2['1st Flr SF']+df2['2nd Flr SF']+df2['BsmtFin SF 1']+df2['BsmtFin SF 2'], columns=['Total SF']) df3.head(5) # Combining DataFrame's is easy to # use 'axis=1' for adding columns (features); 'axis=0' for more rows (examples) df4 = pd.concat([df2,df3],axis=1) df4.sample(3) # <-- now we have a new 'Total SF' feature column appended # ## Let's Try a Simple Linear Regression # Let's see if we can predict a Sale Price based on single feature 'Gross Living Area' # Create a new DataFrame with only the data we need data = df[['SalePrice','Gr Liv Area']] data.head(5) # sklearn complains if these are shape [100,] vs [100,1] # just one of the many gotchas you'll find :) X = data['Gr Liv Area'].values.reshape(-1,1) # Y is typically used for the Truth Labels Y = data['SalePrice'].values.reshape(-1,1) # %matplotlib notebook # Let's plot Square Foot vs Sale Price to understand our data plt.xlabel('Gross Living Area in $ft^2$') plt.ylabel('Sale Price in $') plt.plot(X,Y,'rx'); # ## Let's Split the Data so We Can Evaluate How We'll We Can Predict # + # use SKLearns builtin method to split our data & shuffle it into test & train # Split the data into training/testing sets # By default, train_test_split will split the data into 75%/25% train/test housing_X_train, housing_X_test, housing_Y_train, housing_Y_test = train_test_split( X,Y, random_state=3 ) print('housing_X_train',len(housing_X_train),'examples') print('housing_X_test',len(housing_X_test),'examples') print('housing_Y_train',len(housing_Y_train),'examples') print('housing_Y_test',len(housing_Y_test),'examples') # Create linear regression object regr = linear_model.LinearRegression(normalize=True) # regr = linear_model.SGDRegressor(n_iter=100) # - # The fit function will train the model using the training set regr.fit(housing_X_train, housing_Y_train) # + # Make predictions using the testing set housing_Y_pred = regr.predict(housing_X_test) # 
The coefficients # print('Coefficients: \n', regr.coef_) # TODO: this number is huge... print("Mean squared error: %.2f" % mean_squared_error(housing_Y_test, housing_Y_pred)) # Explained variance score: 1 is perfect prediction print('Variance score: %.2f' % r2_score(housing_Y_test, housing_Y_pred)) # + # %matplotlib notebook # Plot outputs plt.scatter(housing_X_train, housing_Y_train, alpha=.5, color='black', label='train') plt.scatter(housing_X_test, housing_Y_test, alpha=.5, color='red', label='test') plt.plot(housing_X_test, housing_Y_pred,color='blue', linewidth=3) plt.xticks() plt.yticks() plt.legend() plt.xlabel('Gross Living Area in $ft^2$') plt.ylabel('Sale Price in $') plt.show(); # - # This looks OK, but how well did we really do? # # Let's create a histogram showing how off we were from the truth. # # If our model is good, we'll have a lot of hits in the middle and a nice tall bell curve. # + # %matplotlib notebook # Plot hist of predictions vs actual y_lr = np.reshape(housing_Y_test,housing_Y_test.shape[0]) yhat_lr = np.reshape(housing_Y_pred,housing_Y_pred.shape[0]) ydiff_lr = np.subtract(y_lr,yhat_lr) plt.ylim([0,40]) plt.hist(ydiff_lr,bins=100,range=[-100000, 100000]) plt.title('Linear Regression Single Variable') plt.xlabel('Difference: Predicted vs Sale Price') plt.ylabel('Number of Predictions') plt.show(); # - # This doesn't look that great. # # We can do better if we consider multiple features of data and use a more complex model. # # ## Adding a Neural Network # # For simplicity, let's start with just using all of the numerical columns in the data. # We aren't going to worry about featurizing non-numeric fields yet since there is probably useful data already in the dataset that won't require a lot of work to setup. # # There are a lot of neural networks and tools to choose from. 
In this example, we are going to use an [MLPRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html), built into scikit-learn # + # sklearn.neural_network.MLPRegressor # sklearn.neural_network.MLPRegressor(hidden_layer_sizes=(100, ), activation=’relu’, solver=’adam’, alpha=0.0001, # batch_size=’auto’, learning_rate=’constant’, learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True, # random_state=None, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True, # early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08) from sklearn.neural_network import MLPRegressor # Use all numerical columns to predict 'SalePrice' columns = list([ 'Gr Liv Area', # this is our single linear regression point of reference # can we do better by adding these other features? '1st Flr SF', '2nd Flr SF', '3Ssn Porch', 'Bedroom AbvGr', 'Bsmt Full Bath', 'Bsmt Half Bath', 'Bsmt Unf SF', 'BsmtFin SF 1', 'BsmtFin SF 2', 'Enclosed Porch', 'Fireplaces', 'Full Bath', 'Garage Area', 'Garage Cars', 'Garage Yr Blt', 'Half Bath', 'Kitchen AbvGr', 'Lot Area', 'Lot Frontage', 'Low Qual Fin SF', 'Mas Vnr Area', 'Mo Sold', 'Open Porch SF', 'Pool Area', 'TotRms AbvGrd', 'Total Bsmt SF', 'Wood Deck SF', 'Year Built', 'Year Remod/Add', 'Yr Sold' ]) print(columns) # Create new dataframe with columns X_NN = df[columns] print(X_NN.shape) #sklearn complains if these are shape [100,] vs [100,1] Y_NN = df['SalePrice'].values.reshape(-1,1) print(Y_NN.shape) # remove NaN values & replace with 0's X_NN = X_NN.fillna(0) X_NN = X_NN.values # convert to plain NumPy array # TODO: scaling & centering data # scale & center our data from sklearn import preprocessing X_scaled = preprocessing.scale(X_NN); # use SKLearns builtin method to split our data & shuffle it into test & train # Split the data into training/testing sets housing_X_train_nn, housing_X_test_nn, housing_Y_train_nn, housing_Y_test_nn = 
train_test_split( X_scaled,Y_NN, random_state=2# what if we lock down the random seed number? (set to 1, 3, 10, etc) ) print('housing_X_train',len(housing_X_train_nn),'examples') print('housing_X_test',len(housing_X_test_nn),'examples') score1 = r2_score(housing_Y_test, housing_Y_pred) print('\nLinear Regression (Single variable) VARIANCE:',round(score1,2)) # you can run this multiple times to check the variable starting points # each run will be different--and may be significantly difference since the initialization variables will change # and that will affect how the model converges # for i in range(2,8): # try setting one of the variables to i # Explore settings logarithmically (0.1, 0.01, 0.001, 0.00001) nn_regr = MLPRegressor( # what if we change our layer sizes? hidden_layer_sizes=(2,8,2), # what if we change our learning rate? learning_rate_init=0.01, # what if we change our activation function? (relu, tanh, identity) activation='relu', max_iter=2000, random_state=2, # if set to None, this is random, to an int, static seed # set this to True to see how well we are learning over the iterations verbose=False ); # Train it nn_regr.fit(housing_X_train_nn,housing_Y_train_nn.reshape(housing_Y_train_nn.size)) # Make predictions using the testing set housing_Y_pred_nn = nn_regr.predict(housing_X_test_nn) # Variance scores or Linear Regression vs NN score2 = r2_score(housing_Y_test_nn, housing_Y_pred_nn) #print("Mean squared error: %.2f" % mean_squared_error(housing_Y_test_nn, housing_Y_pred_nn)) # Explained variance score: 1 is perfect prediction answer = ((score2-score1)/score1)*100 print( 'NN MLP Regression (Multi variable) VARIANCE: {} {:0.0f}% ({:0.2f}x) over 1 variable linear regression'.format( round(score2,2), answer, score2/score1 ) ) pd.DataFrame(nn_regr.loss_curve_).plot() # - # ## So How is That Bell Curve? 
# + # %matplotlib notebook # Plot histogram of difference between predicted & actual sale price outputs y_nn = np.reshape(housing_Y_test_nn,housing_Y_test_nn.shape[0]) yhat_nn = np.reshape(housing_Y_pred_nn,housing_Y_pred_nn.shape[0]) ydiff = np.subtract(y_nn,yhat_nn) plt.ylim([0,40]) plt.hist(ydiff,bins=100,range=[-100000, 100000]) plt.title('Neural Net MLP Muli Variable') plt.xlabel('Difference: Predicted vs Sale Price') plt.ylabel('Number of Predictions') plt.show(); # - # That's significantly better! # # But can we do even better? # # ## Let's Visualize The Relationship Each Feature Has With Price # # We can plot a chart of each feature mapped to the sales price to easily see how a feature corresponds to the rising price. # # If a feature doesn't show a correlation with increased price, this is not likely a good feature for our model to consider--and we can omit it from the data. # + # Notes: # Data not strongly correllate # remove 'Bsmt Unf SF', # adjust 'Garage Yr Blt' - notice poor logic of replacing NA values with 0 - created outliers/data scaling issue # watch out for scaling - Lot Area # %matplotlib notebook # How many columns do we have? print(len(columns)) # for each feature, show how it relates to sales price for i in range(0,len(columns)): # by specifying a figure, the plotter will create multiple figures plt.figure(i) plt.scatter(X_NN[:,i], Y, alpha=.2, color='blue', label='train samples') # plt.scatter(housing_X_train[:,i], housing_Y_train, alpha=.3, color='black', label='train samples') # plt.scatter(housing_X_test[:,i], housing_Y_test, alpha=.3, color='red', label='test samples') # plt.scatter(housing_X_test[:,i], housing_Y_pred_nn,color='magenta', linewidth=1, alpha=.5, label='predictions') plt.xticks() plt.yticks() plt.legend() plt.xlabel(columns[i]) plt.ylabel('Sale Price in $') plt.show(); # - # ## Which Features Are Useful? # ## Which Features Are Not? # Let's remove all of the features that seem less useful and run it again... 
# # Just go up to the cell that has our feature columns and comment out (command+/) any fields you want to remove.
#
# What's the best score you can get?
#
# ## Box Plots
#
# Some of the scatter plots only have whole numbers on the x-axis. This makes it difficult to look at them and decide whether the features represented in those graphs are useful or not since the graphs are not continuous. The following box plots make it easier to look at these features and decide whether they are useful based on the amount of overlap between the box plots in each graph. If there is a lot of overlap between box plots, then the feature they are representing is not useful. If there is not a lot of overlap between box plots, then the feature they are representing is useful.
#
# For information on what box plots are and how to read them, see https://en.wikipedia.org/wiki/Box_plot.

# +
# %matplotlib notebook
import numpy


def createBoxplot(attribute, figNum):
    """Box-plot the sale price for each nonzero value of a discrete feature.

    attribute: feature name; must match an entry of the global ``columns``
    figNum: matplotlib figure number; the chart is also saved to fig<figNum>.png

    Zeros are excluded because for these features a 0 usually means the house
    simply lacks the item (no fireplace, no half bath, ...), not a measurement.
    """
    # locate the data column for this attribute
    for i in range(0, len(columns)):
        if columns[i] == attribute:
            a = X_NN[:, i]
            break
    else:
        raise ValueError("unknown attribute: " + attribute)

    # Group sale prices by attribute value with a boolean mask, skipping 0.
    # BUG FIX: the original sliced Y sequentially using per-value counts, which
    # silently assumed the column was sorted ascending AND that its values were
    # the consecutive integers 1..N; masking on the actual value is correct for
    # any row order and any value set.
    data_to_plot = [Y[a == v] for v in numpy.unique(a) if v != 0]

    fig = plt.figure(figNum, figsize=(9, 6))
    ax = fig.add_subplot(111)
    ax.boxplot(data_to_plot)
    plt.xlabel(attribute)
    plt.ylabel("Sale Price in $")
    # BUG FIX: save after labelling so the axis labels appear in the PNG
    fig.savefig("fig" + str(figNum) + ".png", bbox_inches='tight')


createBoxplot("Bedroom AbvGr", 1)
createBoxplot("Kitchen AbvGr", 2)
createBoxplot("Half Bath", 3)
createBoxplot("Garage Cars", 4)
createBoxplot("Full Bath", 5)
createBoxplot("Fireplaces", 6)
createBoxplot("Bsmt Half Bath", 7)
createBoxplot("Bsmt Full Bath", 8)
createBoxplot("TotRms AbvGrd", 9)
createBoxplot("Mo Sold", 10)
# -

# ## The Number of Zeros in Each Column
#
Many of the scatter plots show features with 0s in their data. Most of the time, these 0s are just noise. For example, for the feature "2nd Flr SF", many houses had 0 in their data for this feature meaning that they did not have a second floor. It would be more meaningful for the graph to just display the data for houses that have a second floor. To do that, the houses that don't have a second floor would be excluded from the graph. It is still useful to know how many houses do not have a second floor though which is why the number of 0s for each feature should be counted even if they are excluded from the graphs. # The box plots exclude 0s from their graphs as some houses have 0s in their feature vector. That makes the resulting visualizations more meaningful. #calculate the number of 0s in the columns with at least one 0. These 0s are left out of the box chart data for i in range(0, len(columns)): numZeros = 0 a = X_NN[:,i] for j in range(0, len(a)): if(a[j]) == 0: numZeros = numZeros + 1 if(numZeros > 0): print(columns[i] + ": " + str(numZeros)) # ## Aggregate Graphs # The following graph shows the relationship between the total square footage in a house and the price of the house. It is an example of an aggregate graph because it adds up three different features provided in the data, "1st Flr SF", "2nd Flr SF', and "Total Bsmt SF", to compute the total square footage of each house. # Aggregate graphs can show potentially useful relationships that aren't immediately obvious from the data provided. 
# + # %matplotlib #Show a plot of the total square footage in a house compared with the price firstFloorIndex = 0 secondFloorIndex = 0 basementIndex = 0 for i in range(0, len(columns)): if(columns[i] == '1st Flr SF'): firstFloorIndex = i elif(columns[i] == '2nd Flr SF'): secondFloorIndex = i elif(columns[i] == 'Total Bsmt SF'): basementIndex = i sumSquareFootage = [] sumSquareFootage = X_NN[:, firstFloorIndex] sumSquareFootage = X_NN[:, secondFloorIndex] + sumSquareFootage sumSquareFootage = X_NN[:, basementIndex] + sumSquareFootage plt.figure(1) plt.scatter(sumSquareFootage, Y, color = 'blue', label = 'train samples') plt.xticks() plt.yticks() plt.legend() plt.xlabel('Total Square Footage') plt.ylabel('Sale Price in $') plt.show() # - # ## Next Up # [Where Do You Go From Here?](07%20-%20From%20Here.ipynb)
06 - Regression Examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/kingslyt/pytorch-book/blob/main/Exercise_1_v2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="8BYLSP0fW5uN" # # Exercise 1: Image Classifier (AlexNet) # + [markdown] id="shQlWCbJTY-x" # # Download an image file to Colab. # - Note: local files will be deleted when notebook instance is closed # + colab={"base_uri": "https://localhost:8080/"} id="FM9cosXOORrX" outputId="4a411ad0-e050-4cb6-8f42-f5110444dc9d" import urllib.request url = "https://upload.wikimedia.org/wikipedia/commons/4/4a/Northern_Diamondback_Water_Snake_%28Nerodia_rhombifer_rhombifer%29_-_Flickr_-_GregTheBusker_%282%29.jpg" urllib.request.urlretrieve(url, 'snake.jpg') url = "https://raw.githubusercontent.com/joe-papa/pytorch-beginners-course/main/assets/imagenet_class_labels.txt" urllib.request.urlretrieve(url, 'imagenet_class_labels.txt') # + [markdown] id="FjYpvJ4Rlxcv" # # Load Image Data # # + colab={"base_uri": "https://localhost:8080/", "height": 287} id="Zp2COVN4l09-" outputId="b07e16f5-4fe6-47b1-d65a-7cb69bbd1dcf" from PIL import Image import matplotlib.pyplot as plt img = Image.open('snake.jpg') plt.imshow(img) # + [markdown] id="Dwiyx3_uPfi0" # # Pre-process Image # Center Crop, Convert To Tensor, Normalize, & Batch # # Pre-trained ImageNet models like AlexNet here expect input images to be normalized the same way as the NN was trained. See more info [here](https://pytorch.org/vision/stable/models.html). 
# + id="I3okP39rxznG" import torch from torchvision import transforms # + id="a6U-UwhIxzZ9" colab={"base_uri": "https://localhost:8080/"} outputId="7e2e67a4-86d3-49f7-a5d0-91b0dc301cd7" transform = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) img_t = transform(img); img_t.shape # + colab={"base_uri": "https://localhost:8080/"} id="H9jxys4u7wlx" outputId="2bdab3be-5d45-4858-a698-1d1e293a84f6" img_t_batch = img_t.unsqueeze(0) img_t_batch.shape # + [markdown] id="c95HA3oqVa94" # # Image Classifier NN (Inference) # + id="TA48mdcSx8Gd" from torchvision import models # + id="xAKaytM-yAS9" colab={"base_uri": "https://localhost:8080/", "height": 555, "referenced_widgets": ["6549c9b222334728b116ea6c7b974f62", "1e4409a214344c4b935a6cc409e1a111", "c2534c1bb9424cdab130c961d07f179a", "49c627d3b63a47f9a67db1dfbe46ce25", "92c6245b49224b3a9310a7d115d0eba1", "ea55f1dcdc19436d902e3256ef748ebe", "943979c1ebb747bebba9fe5bdf35260f", "<KEY>", "2f07e7f0be104f699facb36dc7bd2ef1", "fee7b003dab841528ec68495ce9fdb94", "2a363d3a45934d1390d9da642dd1f849"]} outputId="8d025793-f7eb-47c6-b0d7-1149cc543640" model = models.alexnet(pretrained=True) model.eval() # + [markdown] id="gQhmmKSWTnG1" # # Post-process NN Outputs # Find maximum label value and convert to string # + colab={"base_uri": "https://localhost:8080/"} id="hDjabL-J_XE0" outputId="a8978ddc-0956-44de-8762-d3cf1b9aca12" y = model(img_t_batch) print(y.shape) # + id="7iBmc_uCyFSt" colab={"base_uri": "https://localhost:8080/"} outputId="4791e98a-5632-45d2-b263-60533a2f1662" _, index = y.max(1) print(index) print(index.item()) # + id="uIhhCaxjyFKd" with open('imagenet_class_labels.txt', 'r') as f: labels = eval(f.read()) # + id="U8YTK8jFyFBd" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="51f5dd00-3d56-4c25-cb44-dd3188449f8e" labels[index.item()] # + [markdown] id="E0vu7p0rdSrX" # # Putting 
it all into one function ... # + id="HNt9QK28yF7V" import torch from torchvision import transforms, models from PIL import Image import matplotlib.pyplot as plt transform = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) model = models.alexnet(pretrained=True) model.eval() with open('imagenet_class_labels.txt', 'r') as f: labels = eval(f.read()) def classify_image(image_filename): img = Image.open(image_filename) plt.imshow(img) img_t = transform(img) img_t_batch = img_t.unsqueeze(0) y = model(img_t_batch) _, index = y.max(1) return labels[index.item()] # + id="yEKF0lMKyGpd" colab={"base_uri": "https://localhost:8080/", "height": 287} outputId="ac457bce-25d6-4909-ad1b-cc65105fbb27" classify_image('snake.jpg') # + [markdown] id="R7l0UOnDWy8z" # # Recap # You've learned how to: # - Use classes to create special data objects (e.g. transforms.ToTensor, models.alexnet) # - Create tensors and use tensor operations (unsqueeze, max, item) # - Create a collection of transforms for image processing using torchvision.transfroms # - Load a pretrained NN model from torchvision.models # # # + [markdown] id="FSllCG_oe3K3" # # Things to Try # - Test classify_image() with other images # - Read a folder of images and classify them as a batch # - Return to top 5 labels and their probabilities for each image # - Try another model (e.g. VGG, ResNet, SqueezeNet) # + id="xce0kFHBWsGv"
Exercise_1_v2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Calculate Jensen-Shannon Divergence Between Luke and John # The KL Divergence between two discrete distributions $P$ and $Q$ with pdfs $p$ and $q$, defined over the same sample space $X=\{x_0,x_1, \dots, x_N\}$, is given by # # \begin{equation} # KL(P||Q) = \sum_{i=0}^N p(x_i) \ln \Big( \frac{p(x_i)}{q(x_i)} \Big) # \end{equation} # # This divergence is not a metric because it is not symmetric, i.e. it is often the case that $KL(P||Q) \ne KL(Q||P)$. To address this, we will use the Jensen-Shannon Divergence which is a true metric and is defined as # # \begin{equation} # JSD(P||Q) = \frac{1}{2}KL(P||R) + \frac{1}{2}KL(Q||R) # \end{equation} # # where $R$ is defined as the average of the two distributions $R=\frac{P+Q}{2}$ # --- from PIL import Image import numpy as np luke = Image.open("/home/nathan/Downloads/Luke_Van_Poppering.jpeg") luke.thumbnail((300,300)) # Thanks for this, John.... john = Image.open("/home/nathan/Downloads/John_Abascal.jpg") john.thumbnail((300,300)) luke john # --- # If we assume that our histograms are exact, then we can trivally calculate the Jensen-Shannon Divergence between them by normalizing the histograms and summing up the terms on the RHS of the JSD equation... 
# +
def KL(p: np.ndarray, q: np.ndarray) -> float:
    """Kullback-Leibler divergence (in bits) between discrete pdfs p and q.

    Terms where p_i == 0 contribute nothing (lim x->0 of x*log x = 0) and are
    masked out; q must be nonzero wherever p is nonzero.
    """
    # BUG FIX: annotations used ``np.array`` (a function, not a type); the
    # correct annotation is ``np.ndarray``. The element-wise Python loop is
    # also replaced by an equivalent vectorized masked sum.
    mask = p != 0
    return float(np.sum(p[mask] * np.log2(p[mask] / q[mask])))


def JSD(p: np.ndarray, q: np.ndarray) -> float:
    """Jensen-Shannon divergence between discrete pdfs p and q.

    Symmetric and zero iff p == q. Computed against the mixture
    r = (p + q) / 2.
    """
    r = (p + q) / 2
    keep = r != 0  # If r_i is zero, then it is in neither p nor q and can be ignored
    p, q, r = p[keep], q[keep], r[keep]
    return 0.5 * (KL(p, r) + KL(q, r))


def hist_loss(im1, im2):
    """Mean per-channel JSD between the color histograms of two PIL images."""
    loss = []
    for im1_c, im2_c in zip(im1.split(), im2.split()):
        hist1 = np.array(im1_c.histogram())
        hist2 = np.array(im2_c.histogram())
        # normalize the histograms so each is a valid pdf before comparing
        loss.append(JSD(hist1 / hist1.sum(), hist2 / hist2.sum()))
    return np.mean(loss)
# -

# ---
# It is symmetric...

hist_loss(luke, john) == hist_loss(john, luke)

# and returns zero when operating on the same image...

hist_loss(john, john)

hist_loss(luke, luke)

# BUG FIX: the original cell called hist_loss(np.zeros(255), np.ones(255)),
# which raises AttributeError because hist_loss expects PIL images (ndarrays
# have no .split()); for raw arrays, call JSD on a normalized pdf directly.
JSD(np.zeros(255), np.ones(255) / 255)
JSD_Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: mlreview # language: python # name: mlreview # --- # # Notebook 12: Identifying Phases in the 2D Ising Model with TensorFlow # # ## Learning Goal # The goal of this notebook is to familiarize the reader with the nuts and bolts on using the TensorFlow package for building Deep Neural Networks. # # ## Overview # # In this notebook, we show how one can use deep neural nets to classify the states of the 2D Ising model according to their phase. This should be compared with the use of logistic-regression, Random Forests and XG Boost on the same dataset in the previous Notebooks 6 and 9. # # The Hamiltonian for the classical Ising model is given by # # $$ H = -J\sum_{\langle ij\rangle}S_{i}S_j,\qquad \qquad S_j\in\{\pm 1\} $$ # # where the lattice site indices $i,j$ run over all nearest neighbors of a 2D square lattice, and $J$ is some arbitrary interaction energy scale. We adopt periodic boundary conditions. Onsager proved that this model undergoes a phase transition in the thermodynamic limit from an ordered ferromagnet with all spins aligned to a disordered phase at the critical temperature $T_c/J=2/\log(1+\sqrt{2})\approx 2.26$. For any finite system size, this critical point is expanded to a critical region around $T_c$. # + # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import numpy as np seed=12 np.random.seed(seed) import sys, os, argparse import tensorflow as tf from tensorflow.python.framework import dtypes # suppress tflow compilation warnings os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' tf.set_random_seed(seed) # - # ## Structure of the Procedure # # Constructing a Deep Neural Network to solve ML problems is a multiple-stage process. 
Quite generally, one can identify the key steps as follows: # # * ***step 1:*** Load and process the data # * ***step 2:*** Define the model and its architecture # * ***step 3:*** Choose the optimizer and the cost function # * ***step 4:*** Train the model # * ***step 5:*** Evaluate the model performance on the *unseen* test data # * ***step 6:*** Modify the hyperparameters to optimise performance for the specific data set # # Below, we sometimes combine some of these steps together for convenience. # # Notice that we take a rather different approach, compared to the simpler MNIST Keras notebook. We first define a set of classes and functions and run the actual computation only in the very end. # ### Step 1: Load and Process the Data # # We begin by writing a `DataSet` class and two functions `read_data_sets` and `load_data` to process the 2D Ising data. # # The `DataSet` class performs checks on the data shape and casts the data into the correct data type for the calculation. It contains a function method called `next_batch` which shuffles the data and returns a mini-batch of a pre-defined size. This structure is particularly useful for the training procedure in TensorFlow. class DataSet(object): def __init__(self, data_X, data_Y, dtype=dtypes.float32): """Checks data and casts it into correct data type. 
""" dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError('Invalid dtype %r, expected uint8 or float32' % dtype) assert data_X.shape[0] == data_Y.shape[0], ('data_X.shape: %s data_Y.shape: %s' % (data_X.shape, data_Y.shape)) self.num_examples = data_X.shape[0] if dtype == dtypes.float32: data_X = data_X.astype(np.float32) self.data_X = data_X self.data_Y = data_Y self.epochs_completed = 0 self.index_in_epoch = 0 def next_batch(self, batch_size, seed=None): """Return the next `batch_size` examples from this data set.""" if seed: np.random.seed(seed) start = self.index_in_epoch self.index_in_epoch += batch_size if self.index_in_epoch > self.num_examples: # Finished epoch self.epochs_completed += 1 # Shuffle the data perm = np.arange(self.num_examples) np.random.shuffle(perm) self.data_X = self.data_X[perm] self.data_Y = self.data_Y[perm] # Start next epoch start = 0 self.index_in_epoch = batch_size assert batch_size <= self.num_examples end = self.index_in_epoch return self.data_X[start:end], self.data_Y[start:end] # The data itself are being processed in the function `read_data_sets`, which loads the Ising dataset, and splits it into three subsets: ordered, critical and disordered, depending on the temperature which sets the distribution they are drawn from. Once again, we use the ordered and disordered data to create a training and a test data set for the problem. Classifying the states in the critical region is expected to be harder and we only use this data to test the performance of our model in the end. 
# + import pickle, os os.environ['KMP_DUPLICATE_LIB_OK']='True' from urllib.request import urlopen def load_data(): # path to data directory (for testing) #path_to_data=os.path.expanduser('~')+'/Dropbox/MachineLearningReview/Datasets/isingMC/' url_main = 'https://physics.bu.edu/~pankajm/ML-Review-Datasets/isingMC/'; ######### LOAD DATA # The data consists of 16*10000 samples taken in T=np.arange(0.25,4.0001,0.25): data_file_name = "Ising2DFM_reSample_L40_T=All.pkl" # The labels are obtained from the following file: label_file_name = "Ising2DFM_reSample_L40_T=All_labels.pkl" #DATA data = pickle.load(urlopen(url_main + data_file_name)) # pickle reads the file and returns the Python object (1D array, compressed bits) data = np.unpackbits(data).reshape(-1, 1600) # Decompress array and reshape for convenience data=data.astype('int') data[np.where(data==0)]=-1 # map 0 state to -1 (Ising variable can take values +/-1) #LABELS (convention is 1 for ordered states and 0 for disordered states) labels = pickle.load(urlopen(url_main + label_file_name)) # pickle reads the file and returns the Python object (here just a 1D array with the binary labels) print("Finished loading data") return data, labels # + import pickle from sklearn.model_selection import train_test_split from keras.utils import to_categorical def prepare_data(data, labels, dtype=dtypes.float32, test_size=0.2, validation_size=5000): L=40 # linear system size # divide data into ordered, critical and disordered X_ordered=data[:70000,:] Y_ordered=labels[:70000] X_critical=data[70000:100000,:] Y_critical=labels[70000:100000] X_disordered=data[100000:,:] Y_disordered=labels[100000:] # define training and test data sets X=np.concatenate((X_ordered,X_disordered)) #np.concatenate((X_ordered,X_critical,X_disordered)) Y=np.concatenate((Y_ordered,Y_disordered)) #np.concatenate((Y_ordered,Y_critical,Y_disordered)) # pick random data points from ordered and disordered states to create the training and test sets X_train, 
X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, train_size=1.0-test_size) # make data categorical (i.e [0,1] or [1,0]) Y_train=to_categorical(Y_train) Y_test=to_categorical(Y_test) Y_critical=to_categorical(Y_critical) if not 0 <= validation_size <= len(X_train): raise ValueError('Validation size should be between 0 and {}. Received: {}.'.format(len(X_train), validation_size)) X_validation = X_train[:validation_size] Y_validation = Y_train[:validation_size] X_train = X_train[validation_size:] Y_train = Y_train[validation_size:] # create data sets dataset = { 'train':DataSet(X_train, Y_train), 'test':DataSet(X_test, Y_test), 'critical':DataSet(X_critical, Y_critical), 'validation':DataSet(X_validation, Y_validation) } return dataset # - # The `DataSet` class and the `read_data_sets` function are wrapped in another function: `load_data`. To call the latter, one specifies the sizes for the training, test and validation data sets. This function also contains the local path to the file with the Ising data. def prepare_Ising_DNN(): data, labels = load_data() return prepare_data(data, labels, test_size=0.2, validation_size=5000) # ### Steps 2+3: Define the Neural Net and its Architecture, Choose the Optimizer and the Cost Function # # We can now move on to construct our deep neural net using TensorFlow. To do this, we create a class called `model`. This class contains many useful function methods which break down the construction of the DNN. Unique for TensorFlow is creating placeholders for the variables of the model, such as the feed-in data `self.X` and `self.Y` or the dropout probability `self.dropout_keepprob` (which has to be set to unity explicitly during testing). Another peculiarity is using the `with` scope to give names to the most important operators. 
While we do not discuss this here, TensorFlow also allows one to visualise the computational graph for the model (see package documentation on [https://www.tensorflow.org/](https://www.tensorflow.org/)). # # To classify whether a given spin configuration is in the ordered or disordered phase, we construct a minimalistic model for a DNN with a single hidden layer containing $N_\mathrm{neurons}$ (which is kept variable so we can try out the performance of different sizes for the hidden layer). # # First, we define two private functions: `_weight_variable` and `_bias_variable`, which we use to set up the precise DNN architecture in the function `create_DNN`. The network architecture thus includes a ReLU-activated input layer, the hidden layer, and the softmax output layer. Notice that the softmax layer is _not_ part of the `create_DNN` function. # # Instead, the softmax layer is part of the function `create_loss` which, as the name suggests, defines the cross entropy loss function, predefined in TensorFlow's `nn` module. We minimize the cost function using the `SGD` optimizer from the `train` module in the function `create_optimiser`. The latter accepts a dictionary `opt_kwargs` with optimizer arguments to be set externally when defining the DNN. # # Last, the function `create_accuracy` evaluates the model performance. # # All these function are called in the `__init__` of our `model` class which sets up the DNN. It accepts the number of hidden neurons $N_\mathrm{neurons}$ and a dictionary with the optimizer arguments as input, as we shall study the performance of the DNN as a function of these parameters. # + class model(object): def __init__(self, N_neurons, opt_kwargs): """Builds the TFlow graph for the DNN. 
N_neurons: number of neurons in the hidden layer opt_kwargs: optimizer's arguments """ # define global step for checkpointing self.global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step') self.L=40 # system linear size self.n_feats=self.L**2 # 40x40 square lattice self.n_categories=2 # 2 Ising phases: ordered and disordered # create placeholders for input X and label Y self.create_placeholders() # create weight and bias, initialized to 0 and construct DNN to predict Y from X self.deep_layer_neurons=N_neurons self.create_DNN() # define loss function self.create_loss() # use gradient descent to minimize loss self.create_optimiser(opt_kwargs) # create accuracy self.create_accuracy() def create_placeholders(self): with tf.name_scope('data'): # input layer self.X=tf.placeholder(tf.float32, shape=(None, self.n_feats), name="X_data") # target self.Y=tf.placeholder(tf.float32, shape=(None, self.n_categories), name="Y_data") # p self.dropout_keepprob=tf.placeholder(tf.float32, name="keep_probability") def _weight_variable(self, shape, name='', dtype=tf.float32): """weight_variable generates a weight variable of a given shape.""" # weights are drawn from a normal distribution with std 0.1 and mean 0. 
initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial, dtype=dtype, name=name) def _bias_variable(self, shape, name='', dtype=tf.float32): """bias_variable generates a bias variable of a given shape.""" initial = tf.constant(0.1, shape=shape) return tf.Variable(initial, dtype=dtype, name=name) def create_DNN(self): with tf.name_scope('DNN'): # Fully connected layer W_fc1 = self._weight_variable([self.n_feats, self.deep_layer_neurons],name='fc1',dtype=tf.float32) b_fc1 = self._bias_variable([self.deep_layer_neurons],name='fc1',dtype=tf.float32) a_fc1 = tf.nn.relu(tf.matmul(self.X, W_fc1) + b_fc1) # Softmax layer (see loss function) W_fc2 = self._weight_variable([self.deep_layer_neurons, self.n_categories],name='fc2',dtype=tf.float32) b_fc2 = self._bias_variable([self.n_categories],name='fc2',dtype=tf.float32) self.Y_predicted = tf.matmul(a_fc1, W_fc2) + b_fc2 def create_loss(self): with tf.name_scope('loss'): self.loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.Y, logits=self.Y_predicted) ) # no need to use tf.stop_gradient() on labels because labels are placeholders and contain no params # to be optimized. Backprop will be applied only to the logits. def create_optimiser(self,opt_kwargs): with tf.name_scope('optimiser'): self.optimizer = tf.train.GradientDescentOptimizer(**opt_kwargs).minimize(self.loss,global_step=self.global_step) #self.optimizer = tf.train.AdamOptimizer(**kwargs).minimize(self.loss,global_step=self.global_step) def create_accuracy(self): with tf.name_scope('accuracy'): correct_prediction = tf.equal(tf.argmax(self.Y, 1), tf.argmax(self.Y_predicted, 1)) correct_prediction = tf.cast(correct_prediction, tf.float64) # change data type self.accuracy = tf.reduce_mean(correct_prediction) # - # ### Steps 4+5: Train the Model and Evaluate its Performance # # We want to evaluate the performance of our model over a set of different learning rates, and a set of different hidden neurons, i.e. 
we consider a variable size of the hidden layer. Therefore, we create a function `evaluate_model` which trains and evaluates the performance of our DNN for a fixed number of hidden `neurons` and a fixed SGD learning rate `lr`, and returns the final loss and accuracy for the three data sets of interest. # # Apart from the number of `neurons` and the learning rate `lr`, `evaluate_model` accepts the data `Ising_Data`. This is done for convenience: loading the data is computationally expensive and we only need to do this once. # # We train our DNN using mini-batches of size $100$ over a total of $100$ epochs, which we define first. We then set up the optimizer parameter dictionary `opt_params`, and use it to create a DNN model. # # Running TensorFlow requires opening up a `Session` which we abbreviate as `sess` for short. All operations are performed in this session by calling the `run` method. First, we initialize the global variables in TensorFlow's computational graph by running the `global_variables_initializer`. To train the DNN, we loop over the number of epochs. In each fix epoch, we use the `next_batch` function of the `DataSet` class we defined above to create a mini-batch. The forward and backward passes through the weights are performed by running the `DNN.loss` and `DNN.optimizer` methods. To pass the mini-batch as well as any other external parameters, we use the `feed_dict` dictionary. Similarly, we evaluate the model performance, by running the `DNN.accuracy` function on the same minibatch data. Note that the dropout probability for testing is set to unity. # # Once we have exhausted all training epochs, we test the final performance on the entire training, test and critical data sets. This is done in the same way as above. # # Last, we return the loss and accuracy for each of the training, test and critical data sets. 
def evaluate_model(neurons, lr, Ising_Data, verbose):
    """Train a DNN on the Ising classification problem and report its performance.

    neurons: number of hidden neurons
    lr: SGD learning rate
    Ising_Data: dict of DataSet objects with keys 'train', 'test' and 'critical'
    verbose (bool): toggles output during the calculation

    Returns the final (loss, accuracy) for the train, test and critical data
    sets, as a flat 6-tuple.
    """
    training_epochs=100
    batch_size=100

    # SGD learning params
    opt_params=dict(learning_rate=lr)

    # create DNN
    DNN=model(neurons,opt_params)

    with tf.Session() as sess:
        # initialize the necessary variables, in this case, w and b
        sess.run(tf.global_variables_initializer())
        # train the DNN
        for epoch in range(training_epochs):
            # NOTE(review): `seed` is read from the surrounding notebook scope —
            # it must be defined before this function is called.
            batch_X, batch_Y = Ising_Data['train'].next_batch(batch_size,seed=seed)
            # forward + backward pass; dropout is active (keep prob 0.5) only while training
            loss_batch, _ = sess.run([DNN.loss,DNN.optimizer],
                                     feed_dict={DNN.X: batch_X,
                                                DNN.Y: batch_Y,
                                                DNN.dropout_keepprob: 0.5} )
            # monitor accuracy on the same mini-batch with dropout switched off
            accuracy = sess.run(DNN.accuracy,
                                feed_dict={DNN.X: batch_X,
                                           DNN.Y: batch_Y,
                                           DNN.dropout_keepprob: 1.0} )
            # count training step
            step = sess.run(DNN.global_step)

        # test DNN performance on the entire train, test and critical data sets;
        # dropout must be disabled (keep prob 1.0) for every evaluation
        train_loss, train_accuracy = sess.run([DNN.loss, DNN.accuracy],
                                              feed_dict={DNN.X: Ising_Data['train'].data_X,
                                                         DNN.Y: Ising_Data['train'].data_Y,
                                                         # bug fix: was 0.5, which left dropout
                                                         # active and skewed the train metrics
                                                         DNN.dropout_keepprob: 1.0} )
        if verbose: print("train loss/accuracy:", train_loss, train_accuracy)

        test_loss, test_accuracy = sess.run([DNN.loss, DNN.accuracy],
                                            feed_dict={DNN.X: Ising_Data['test'].data_X,
                                                       DNN.Y: Ising_Data['test'].data_Y,
                                                       DNN.dropout_keepprob: 1.0} )
        if verbose: print("test loss/accuracy:", test_loss, test_accuracy)

        critical_loss, critical_accuracy = sess.run([DNN.loss, DNN.accuracy],
                                                    feed_dict={DNN.X: Ising_Data['critical'].data_X,
                                                               DNN.Y: Ising_Data['critical'].data_Y,
                                                               DNN.dropout_keepprob: 1.0} )
        # typo fix in the message: was "crtitical"
        if verbose: print("critical loss/accuracy:", critical_loss, critical_accuracy)

    return train_loss,train_accuracy,test_loss,test_accuracy,critical_loss,critical_accuracy

# ### Step 6: Modify the Hyperparameters to Optimize Performance of the Model
#
# To study the dependence of our DNN on some of the hyperparameters, we do
a grid search over the number of neurons in the hidden layer, and different SGD learning rates. As we explained in Sec. IX, these searches are best done over logarithmically-spaced points.
#
# Since we created the `evaluate_model` function with this in mind, below we simply loop over the grid values and call `evaluate_model`.

def grid_search(verbose):
    """Grid-search the DNN over SGD learning rates and hidden-layer sizes.

    verbose (bool): forwarded to evaluate_model to toggle per-run output.
    Produces one accuracy heat map each for the training, test and critical
    data sets.
    """
    # load Ising data
    Ising_Data = prepare_Ising_DNN()
    #Ising_Data=load_data()

    # perform grid search over learning rate and number of hidden neurons
    N_neurons=np.logspace(0,3,4).astype('int') # check number of neurons over multiple decades
    learning_rates=np.logspace(-6,-1,6)

    # pre-allocate variables to store accuracy and loss data
    train_loss=np.zeros((len(N_neurons),len(learning_rates)),dtype=np.float64)
    train_accuracy=np.zeros_like(train_loss)
    test_loss=np.zeros_like(train_loss)
    test_accuracy=np.zeros_like(train_loss)
    critical_loss=np.zeros_like(train_loss)
    critical_accuracy=np.zeros_like(train_loss)

    # do grid search: row i = neuron count, column j = learning rate
    for i, neurons in enumerate(N_neurons):
        for j, lr in enumerate(learning_rates):
            print("training DNN with %4d neurons and SGD lr=%0.6f." %(neurons,lr) )
            train_loss[i,j],train_accuracy[i,j],\
            test_loss[i,j],test_accuracy[i,j],\
            critical_loss[i,j],critical_accuracy[i,j] = evaluate_model(neurons,lr,Ising_Data,verbose)

    plot_data(learning_rates,N_neurons,train_accuracy, 'training')
    plot_data(learning_rates,N_neurons,test_accuracy, 'testing')
    plot_data(learning_rates,N_neurons,critical_accuracy, 'critical')

# To visualize the data, we used the function `plot_data`, defined below.
# + # %matplotlib notebook import matplotlib.pyplot as plt def plot_data(x,y,data,title=None): # plot results fontsize=16 fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(data, interpolation='nearest', vmin=0, vmax=1) cbar=fig.colorbar(cax) cbar.ax.set_ylabel('accuracy (%)',rotation=90,fontsize=fontsize) cbar.set_ticks([0,.2,.4,0.6,0.8,1.0]) cbar.set_ticklabels(['0%','20%','40%','60%','80%','100%']) # put text on matrix elements for i, x_val in enumerate(np.arange(len(x))): for j, y_val in enumerate(np.arange(len(y))): c = "${0:.1f}\\%$".format( 100*data[j,i]) ax.text(x_val, y_val, c, va='center', ha='center') # convert axis vaues to to string labels x=[str(i) for i in x] y=[str(i) for i in y] ax.set_xticklabels(['']+x) ax.set_yticklabels(['']+y) ax.set_xlabel('$\\mathrm{learning\\ rate}$',fontsize=fontsize) ax.set_ylabel('$\\mathrm{hidden\\ neurons}$',fontsize=fontsize) if title is not None: ax.set_title(title) plt.tight_layout() plt.show() # - # ## Run Code # # As we mentioned in the beginning of the notebook, all functions and classes discussed above only specify the procedure but do not actually perform any computations. This allows us to re-use them for different problems. # # Actually running the training and testing for every point in the grid search is done below. verbose=False grid_search(verbose) # ## Creating Convolutional Neural Nets with TensorFlow # # We have so far considered each 2D-Ising state as a $(40\times 40,)$-long 1D vector. This approach neglects any spatial structure of the spin configuration. On the other hand, we do know that in every one of the 2D-Ising states there are *local* spatial correlations between the spins, which we would like to take advantage of to improve the accuracy of our classification model. # # To this end, all we need to do is modify the class `model` to include convolutional layers. 
The code below is analogous to the DNN `model` class we discussed in detail above, except for the following noticeable discrepancies: # # * note the auxiliary variable `X_reshaped` in the function `create_CNN` which casts the 2D-Ising configuration data as a 2D array, in order to bring out the spatial correlations. # * new below are the `conv2d` and `max_pool_2x2` functions which help create the convolutional structures of the network. class model(object): # build the graph for the CNN def __init__(self,opt_kwargs): # define global step for checkpointing self.global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step') self.L=40 self.n_feats= self.L**2 self.n_categories=2 # create placeholders for input X and label Y self.create_placeholders() # create weight and bias, initialized to 0 and construct CNN to predict Y from X self.create_CNN() # define loss function self.create_loss() # use gradient descent to minimize loss self.create_optimiser(opt_kwargs) # create accuracy self.create_accuracy() print("finished creating CNN") def create_placeholders(self): with tf.name_scope('data'): self.X=tf.placeholder(tf.float32, shape=(None,self.n_feats), name="X_data") self.Y=tf.placeholder(tf.float32, shape=(None,self.n_categories), name="Y_data") self.dropout_keepprob=tf.placeholder(tf.float32, name="keep_probability") def create_CNN(self, N_filters=10): with tf.name_scope('CNN'): # conv layer 1, 5x5 kernel, 1 input 10 output channels W_conv1 = self.weight_variable([5, 5, 1, N_filters],name='conv1',dtype=tf.float32) b_conv1 = self.bias_variable([N_filters],name='conv1',dtype=tf.float32) X_reshaped = tf.reshape(self.X, [-1, self.L, self.L, 1]) h_conv1 = tf.nn.relu(self.conv2d(X_reshaped, W_conv1, name='conv1') + b_conv1) # Pooling layer - downsamples by 2X. 
h_pool1 = self.max_pool_2x2(h_conv1,name='pool1') # conv layer 2, 5x5 kernel, 10 input 20 output channels W_conv2 = self.weight_variable([5, 5, 10, 20],name='conv2',dtype=tf.float32) b_conv2 = self.bias_variable([20],name='conv2',dtype=tf.float32) h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2, name='conv2') + b_conv2) # Dropout - controls the complexity of the CNN, prevents co-adaptation of features. h_conv2_drop = tf.nn.dropout(h_conv2, self.dropout_keepprob,name='conv2_dropout') # Second pooling layer. h_pool2 = self.max_pool_2x2(h_conv2_drop,name='pool2') # Fully connected layer 1 -- after second round of downsampling, our 40x40 image # is down to 7x7x20 feature maps -- maps this to 50 features. h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*20]) W_fc1 = self.weight_variable([7*7*20, 50],name='fc1',dtype=tf.float32) b_fc1 = self.bias_variable([50],name='fc1',dtype=tf.float32) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout - controls the complexity of the CNN, prevents co-adaptation of features. 
h_fc1_drop = tf.nn.dropout(h_fc1, self.dropout_keepprob,name='fc1_dropout') # Map the 50 features to 2 classes, one for each phase W_fc2 = self.weight_variable([50, self.n_categories],name='fc12',dtype=tf.float32) b_fc2 = self.bias_variable([self.n_categories],name='fc12',dtype=tf.float32) self.Y_predicted = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 def weight_variable(self, shape, name='', dtype=tf.float32): """weight_variable generates a weight variable of a given shape.""" initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial,dtype=dtype,name=name) def bias_variable(self, shape, name='', dtype=tf.float32): """bias_variable generates a bias variable of a given shape.""" initial = tf.constant(0.1, shape=shape) return tf.Variable(initial,dtype=dtype,name=name) def conv2d(self, x, W, name=''): """conv2d returns a 2d convolution layer with full stride.""" return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID', name=name) def max_pool_2x2(self, x,name=''): """max_pool_2x2 downsamples a feature map by 2X.""" return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name=name ) def create_loss(self): with tf.name_scope('loss'): self.loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=self.Y,logits=self.Y_predicted) ) def create_optimiser(self,kwargs): with tf.name_scope('optimiser'): self.optimizer = tf.train.GradientDescentOptimizer(**kwargs).minimize(self.loss,global_step=self.global_step) #self.optimizer = tf.train.AdamOptimizer(**kwargs).minimize(self.loss,global_step=self.global_step) def create_accuracy(self): with tf.name_scope('accuracy'): correct_prediction = tf.equal(tf.argmax(self.Y, 1), tf.argmax(self.Y_predicted, 1)) correct_prediction = tf.cast(correct_prediction, tf.float64) self.accuracy = tf.reduce_mean(correct_prediction) # ## Exercises # # * TensorFlow allows one to visualize the model as a graph. 
Read the [TensorBoard](https://www.tensorflow.org/get_started/graph_viz) documentation and implement this in the code above. # * Read about using [TensorBoard](https://www.tensorflow.org/get_started/graph_viz) to monitor the training and test errors. # * Use the CNN `model` defined above to run the Ising classification analysis. Choose the optimal network architecture to improve on the results of the non-convolutional layer. To test the performance of the CNN, use the critical data set. # * Does using the `Adam` optimizer improve on the performance on the critical data? Modify the CNN `model` class to do a grid search over the parameters of the `Adam` optimizer.
jupyter_notebooks/notebooks/NB12_CIX-DNN_ising_TFlow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Common ENV (python 3)
#     language: python
#     name: commonenv
# ---

# +
# Create the txt index files ("<image path> <label>" per line) used for training.
# -

import os

# Sanity check: list the JSON files available under data/.
files = os.listdir('data/')
for f in files:
    if 'json' in f:
        print(f)

# +
import json

# labels.json maps image file name -> ground-truth transcription.
with open('data/output_hw/labels.json') as f:
    data = json.load(f)

# +
paths = []
labels = []
for key in data:
    paths.append('output_hw/' + key)
    labels.append(data[key])
# -

len(paths)

paths[0]

# #### blank

# Blank images all share the placeholder label '..'.
blank_paths = os.listdir('data/output_blank/')
blank_paths = ['output_blank/' + path for path in blank_paths]
len(blank_paths)

paths += blank_paths
# Bug fix: the number of labels must match the number of blank images actually
# found (this was hard-coded to 20000, which silently misaligns paths and
# labels whenever data/output_blank/ does not contain exactly 20000 files).
labels += ['..'] * len(blank_paths)

len(labels)

from sklearn.model_selection import train_test_split

# 90% train; the remaining 10% is split evenly into test and validation.
X_train, X_test, y_train, y_test = train_test_split(paths, labels, test_size=0.1)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5)

def write_file(filename, X, Y):
    """Write one '<path> <label>' pair per line to data/<filename>."""
    with open('data/' + filename, 'w') as f:
        for x, y in zip(X, Y):
            f.write(x + ' ' + y)
            f.write('\n')

write_file('train.txt', X_train, y_train)
write_file('test.txt', X_test, y_test)
write_file('val.txt', X_val, y_val)

ord('a')
preprocessing data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # LeetCode # # ## Link # # 1. https://leetcode.com/ # # 2. https://leetcode-cn.com/ # # ## Demo # # 1. https://www.onlinegdb.com/ # # ## Details # # LeetCode: 85, 152 # # LeetCode 85. Maximal Rectangle 最大矩形 # Given a rows x cols binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area. # # 给定一个仅包含 0 和 1 、大小为 rows x cols 的二维二进制矩阵,找出只包含 1 的最大矩形,并返回其面积。 # # # ![lc-85-p-example.png](attachment:lc-85-p-example.png) # # Example 1: # # ``` # Input: matrix = [["1","0","1","0","0"],["1","0","1","1","1"],["1","1","1","1","1"],["1","0","0","1","0"]] # Output: 6 # Explanation: The maximal rectangle is shown in the above picture. # ``` # # 最大矩形如上图所示。 # # Example 2: # # ``` # Input: matrix = [] # Output: 0 # ``` # # Example 3: # # ``` # Input: matrix = [["0"]] # Output: 0 # ``` # # Example 4: # # ``` # Input: matrix = [["1"]] # Output: 1 # ``` # # Example 5: # # ``` # Input: matrix = [["0","0"]] # Output: 0 # ``` # # Constraints: # # - rows == matrix.length # # - cols == matrix[i].length # # - 1 <= row, cols <= 200 # # - matrix[i][j] is '0' or '1'. 
# # from typing import List class Solution: def maximalRectangle(self, matrix: List[List[str]]) -> int: if not matrix or not matrix[0]: return 0 nums = [int(''.join(row), base=2) for row in matrix] # 先将每一行变成2进制的数字 ans, N = 0, len(nums) for i in range(N):# 遍历每一行,求以这一行为第一行的最大矩形 j, num = i, nums[i] while j < N: # 依次与下面的行进行与运算。 num = num & nums[j] # num 中为1的部分,说明上下两行该位置都是1,相当于求矩形的高,高度为j-i+1 # print('num=',bin(num)) if not num: # 没有1说明没有涉及第i到第j行的竖直矩形 break width, curnum = 0, num while curnum: # 将cursum与自己右移一位进行&操作。如果有两个1在一起,那么cursum才为1,相当于求矩形宽度 width += 1 curnum = curnum & (curnum >> 1) # print('curnum',bin(curnum)) ans = max(ans, width * (j-i+1)) # print('i','j','width',i,j,width) # print('ans=',ans) j += 1 return ans class Solution: def maximalRectangle(self, matrix) -> int: if len(matrix) == 0: return 0 res = 0 m, n = len(matrix), len(matrix[0]) heights = [0] * n for i in range(m): for j in range(n): if matrix[i][j] == '0': heights[j] = 0 else: heights[j] = heights[j] + 1 res = max(res, self.largestRectangleArea(heights)) return res def largestRectangleArea(self, heights): heights.append(0) stack = [] res = 0 for i in range(len(heights)): while stack and heights[i] < heights[stack[-1]]: s = stack.pop() res = max(res, heights[s] * ((i - stack[-1] - 1) if stack else i)) stack.append(i) return res # # LeetCode 152. Maximum Product Subarray 乘积最大子数组 # Given an integer array nums, find a contiguous non-empty subarray within the array that has the largest product, and return the product. # # The test cases are generated so that the answer will fit in a 32-bit integer. # # A subarray is a contiguous subsequence of the array. # # 给你一个整数数组 nums ,请你找出数组中乘积最大的非空连续子数组(该子数组中至少包含一个数字),并返回该子数组所对应的乘积。 # # 测试用例的答案是一个 32-位 整数。 # # 子数组 是数组的连续子序列。 # # # Example 1: # # ``` # Input: nums = [2,3,-2,4] # Output: 6 # Explanation: [2,3] has the largest product 6. 
# ```
#
# (Translation) The subarray [2,3] has the largest product, 6.
#
# Example 2:
#
# ```
# Input: nums = [-2,0,-1]
# Output: 0
# Explanation: The result cannot be 2, because [-2,-1] is not a subarray.
# ```
#
# (Translation) The result cannot be 2, because [-2,-1] is not a subarray.
#
# Constraints:
#
# - $1 <= nums.length <= 2 * 10^{4}$
#
# - -10 <= nums[i] <= 10
#
# - The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.
#
# (Translation) The product of any prefix or suffix of nums is guaranteed to be a 32-bit integer.
#
#
# ## Approach
#
# 1. Given an integer array nums, find the contiguous non-empty subarray (containing at least one number) with the largest product.
#
# 2. Given an array, find the maximum product of consecutive elements.
#
# 3. This is a DP problem. The transitions are: Max(f(n)) = Max( Max(f(n-1)) * n, Min(f(n-1)) * n) and Min(f(n)) = Min( Max(f(n-1)) * n, Min(f(n-1)) * n). Maintain both values: if the last number is negative, the maximum comes from negative * minimum; if it is positive, it comes from positive * maximum.

class Solution:
    def maxProduct(self, A):
        """Return the largest product over all contiguous non-empty subarrays of A.

        Uses the prefix/suffix running-product trick: every maximum-product
        subarray ends at some index of either the forward or the reversed
        array, and a zero resets the running product (the `or 1` below).
        Bug fix: the original mutated the caller's list in place; this
        version works on copies and leaves the input untouched.
        """
        forward = A[:]        # running prefix products (copy: do not mutate input)
        backward = A[::-1]    # running suffix products, via the reversed array
        for i in range(1, len(forward)):
            forward[i] *= forward[i - 1] or 1    # `or 1` restarts the product after a zero
            backward[i] *= backward[i - 1] or 1
        return max(max(forward), max(backward))
AATCC/lab-report/w7/practice-leetcode-labs-w7.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Object Oriented Programming
# ## Homework Assignment
#
# #### Problem 1
# Fill in the Line class methods to accept coordinates as a pair of tuples and return the slope and distance of the line.

import math

class Line:
    """A straight line segment defined by two (x, y) coordinate tuples."""

    def __init__(self,coor1,coor2):
        self.coor1 = coor1
        self.coor2 = coor2

    def distance(self):
        """Return the Euclidean length of the segment."""
        dx = self.coor2[0] - self.coor1[0]
        dy = self.coor2[1] - self.coor1[1]
        return math.sqrt(dx * dx + dy * dy)

    def slope(self):
        """Return the slope (rise over run) between the two endpoints."""
        rise = self.coor2[1] - self.coor1[1]
        run = self.coor2[0] - self.coor1[0]
        return rise / run

# +
coordinate1 = (2,1)
coordinate2 = (4,3)

li = Line(coordinate1,coordinate2)
# -

li.distance()

li.slope()

# +
# EXAMPLE OUTPUT

coordinate1 = (3,2)
coordinate2 = (8,10)

li = Line(coordinate1,coordinate2)
# -

li.distance()

li.slope()

# ________
# #### Problem 2
# Fill in the class

class Cylinder:
    """A right circular cylinder; pi is approximated by 22/7 as in the original exercise."""

    def __init__(self,height=1,radius=1):
        self.height = height
        self.radius = radius

    def volume(self):
        """Return the volume: pi * r^2 * h."""
        pi_approx = 22 / 7
        base_area = pi_approx * self.radius ** 2
        return base_area * self.height

    def surface_area(self):
        """Return the total surface area: 2*pi*r^2 (both caps) + 2*pi*r*h (side)."""
        pi_approx = 22 / 7
        caps = 2 * pi_approx * self.radius ** 2
        side = 2 * pi_approx * self.radius * self.height
        return caps + side

# EXAMPLE OUTPUT
c = Cylinder(2,3)

c.volume()

c.surface_area()
03-Object Oriented Programming Homework.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lecture 3 - Linear Models for Regression with Basis Functions # # # In our **Polynomial Curve Fitting** example, the function is linear with respect to the parameters we are estimating. Thus, it is considered a *linear regression model*. (However, it is *non-linear* with respect to the input variable, $x$) # # # <div class="alert alert-success"> # <b>Input Space</b> # # Suppose we are given a training set comprising of $N$ observations of $\mathbf{x}$, $\mathbf{x} = \left[x_1, x_2, \ldots, x_N \right]^T$, and its corresponding desired outputs $\mathbf{t} = \left[t_1, t_2, \ldots, t_N\right]^T$, where sample $x_i$ has the desired label $t_i$. The input space is defined by the domain of $\mathbf{x}$. # </div> # # * The polynomial curve fitting example can be rewritten as follows: # # \begin{eqnarray} # t \sim y(x,\mathbf{w}) &=& w_0 + \sum_{j=1}^{M} w_j x^j\\ # &=& \sum_{j=0}^{M} w_j \phi_j(x)\\ # \end{eqnarray} # where # $$\phi_j(x) = x^j$$ # # # * By modifying the function $\phi$ (known as a *basis function*), we can easily extend/modify the class of models being considered. 
# # # <div class="alert alert-success"> # <b>Linear Basis Model</b> # # The linear basis model for regression takes linear combinations of fixed nonlinear functions of the input variables # $$t \sim y(\mathbf{x},\mathbf{w}) = \sum_{j=0}^{M} w_j\phi_j(\mathbf{x})$$ # where $\mathbf{w} = \left[w_{0}, w_{1}, \ldots, w_{M}\right]^T$ and # $\mathbf{x} = \left[x_1, \ldots, x_D\right]^T$ # </div> # # # * For all data observations $\{x_i\}_{i=1}^N$ and using the basis mapping defined as $\boldsymbol{\phi}(x_i) = \left[\begin{array}{ccccc} x_{i}^{0} & x_{i}^{1} & x_{i}^{2} & \cdots & x_{i}^{M}\end{array}\right]^T$, we can write the input data in a *matrix* form as: # # $$\mathbf{X} = \left[\begin{array}{ccccc} # 1 & x_{1} & x_{1}^{2} & \cdots & x_{1}^{M}\\ # 1 & x_{2} & x_{2}^{2} & \cdots & x_{2}^{M}\\ # \vdots & \vdots & \vdots & \ddots & \vdots\\ # 1 & x_{N} & x_{N}^{2} & \cdots & x_{N}^{M} # \end{array}\right] = \left[\begin{array}{c} # \boldsymbol{\phi}^T(x_1)\\ \boldsymbol{\phi}^T(x_2) \\ \vdots \\ \boldsymbol{\phi}^T(x_N)\end{array}\right] \in \mathbb{R}^{N\times (M+1)}$$ # # where each row is a feature representation of a data point $x_i$. 
#
# Other **basis functions** include:
#
# * Radial Basis functions (D = 1): $\phi_j(x) = \exp\left\{-\frac{(x-\mu_j)^2}{2s^2}\right\}$ where $x \in R^1$
#
# * Radial Basis function (D > 1): $\phi_j(\mathbf{x}) = \exp\left\{-\frac{1}{2}(\mathbf{x}-\boldsymbol{\mu}_j)^T\boldsymbol{\Sigma}_j^{-1}(\mathbf{x}-\boldsymbol{\mu}_j)\right\}$ where $\mathbf{x} \in R^D$, $\boldsymbol{\mu}_j \in R^D$ and $\boldsymbol{\Sigma}_j \in R^{D\times D}$
#
# * Fourier Basis functions
#
# * Wavelets Basis Functions
#
# <div class="alert alert-success">
# <b>Feature Space</b>
#
# The domain of $\boldsymbol{\phi}(\mathbf{x})$ defines the **feature space**:
#
# \begin{align}
# \boldsymbol{\phi}: \mathbb{R}^D & \rightarrow \mathbb{R}^{M+1} \\
# \boldsymbol{\phi}(\mathbf{x}) & \rightarrow [1,\phi_1(\mathbf{x}), \phi_2(\mathbf{x}), ..., \phi_M(\mathbf{x})]
# \end{align}
# </div>
#
# * When we use linear regression with respect to a set of (non-linear) basis functions, the regression model is linear in the *feature space* but non-linear in the input space.
#
# <div class="alert alert-success">
# <b>Objective Function</b>
#
# We *fit* the polynomial regression model such that the *objective function* $E(\mathbf{w})$ is minimized:
# $$\arg_{\mathbf{w}}\min E(\mathbf{w})$$
# where $E(\mathbf{w}) = \frac{1}{2}\left\Vert \mathbf{\Phi}\mathbf{w} - \mathbf{t} \right\Vert^2_2$
# </div>
#
# <div><img src="figures/LeastSquares.png" width="300"></div>
#
# * This error function is minimizing the (Euclidean) *distance* of every point to the curve.
#
# We **optimize** $E(\mathbf{w})$ by finding the *optimal* set of parameters $\mathbf{w}^*$ that minimize the error function.
#
# To do that, we **take the derivative of $E(\mathbf{w})$ with respect to the parameters $\mathbf{w}$**.
# # $$\frac{\partial E(\mathbf{w})}{\partial \mathbf{w}} = \left[ \frac{\partial E(\mathbf{w})}{\partial w_0}, \frac{\partial E(\mathbf{w})}{\partial w_1}, \ldots, \frac{\partial E(\mathbf{w})}{\partial w_M} \right]^T$$ # # * If we rewrite the objective function as: # \begin{align} # E(\mathbf{w}) &= \frac{1}{2} \left( \mathbf{\Phi}\mathbf{w} - \mathbf{t}\right)^T\left( \mathbf{\Phi}\mathbf{w} - \mathbf{t}\right) \\ # & = \frac{1}{2} \left( \mathbf{w}^T\mathbf{\Phi}^T - \mathbf{t}^T\right)\left( \mathbf{\Phi}\mathbf{w} - \mathbf{t}\right) \\ # & = \frac{1}{2} \left(\mathbf{w}^T\mathbf{\Phi}^T\mathbf{\Phi}\mathbf{w} - \mathbf{w}^T\mathbf{\Phi}^T \mathbf{t} - \mathbf{t}^T\mathbf{\Phi}\mathbf{w} + \mathbf{t}^T\mathbf{t}\right) # \end{align} # # # * Solving for $\mathbf{w}$, we find: # # \begin{align} # \frac{\partial E(\mathbf{w})}{\partial \mathbf{w}} &= 0 \\ # \frac{\partial }{\partial \mathbf{w}} \left[\frac{1}{2} \left(\mathbf{w}^T\mathbf{\Phi}^T\mathbf{\Phi}\mathbf{w} - \mathbf{w}^T\mathbf{\Phi}^T \mathbf{t} - \mathbf{t}^T\mathbf{\Phi}\mathbf{w} + \mathbf{t}^T\mathbf{t}\right) \right] &= 0 \\ # \frac{\partial }{\partial \mathbf{w}} \left[ \left(\mathbf{w}^T\mathbf{\Phi}^T\mathbf{\Phi}\mathbf{w} - \mathbf{w}^T\mathbf{\Phi}^T \mathbf{t} - \mathbf{t}^T\mathbf{\Phi}\mathbf{w} + \mathbf{t}^T\mathbf{t}\right) \right] &= 0 \\ # (\mathbf{\Phi}^T\mathbf{\Phi}\mathbf{w})^T + \mathbf{w}^T\mathbf{\Phi}^T\mathbf{\Phi} - (\mathbf{\Phi}^T \mathbf{t})^T - \mathbf{t}^T\mathbf{\Phi} &=0 \\ # \mathbf{w}^T\mathbf{\Phi}^T\mathbf{\Phi} + \mathbf{w}^T\mathbf{\Phi}^T\mathbf{\Phi} - \mathbf{t}^T\mathbf{\Phi} - \mathbf{t}^T\mathbf{\Phi} &= 0\\ # 2 \mathbf{w}^T\mathbf{\Phi}^T\mathbf{\Phi} &= 2 \mathbf{t}^T\mathbf{\Phi} \\ # (\mathbf{w}^T\mathbf{\Phi}^T\mathbf{\Phi})^T &= (\mathbf{t}^T\mathbf{\Phi})^T\text{, apply transpose on both sides} \\ # \mathbf{\Phi}^T\mathbf{\Phi}\mathbf{w} &= \mathbf{\Phi}^T\mathbf{t} \\ # \mathbf{w} &= 
\left(\mathbf{\Phi}^T\mathbf{\Phi}\right)^{-1}\mathbf{\Phi}^T\mathbf{t}
# \end{align}

# ## Suggested Additional Reading Materials
#
# * From "Python Data Science Handbook" 2017 by Jake VanderPlas, read section "In Depth: Linear Regression", pages 390-405.
#
#
03_BasisFunctions/L03_BasisFunctions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys
sys.path.append('..')

from polynomial_program import PolynomialProgram

# +
import functools
from collections import OrderedDict

# from qiskit import IBMQ
# IBMQ.load_account()

import numpy as np

from qiskit import Aer
from qiskit.aqua import QuantumInstance, aqua_globals
from qiskit.aqua.algorithms import VQE, QAOA
from qiskit.aqua.components.optimizers import SPSA
from qiskit.circuit.library import RealAmplitudes

# +
# Problem data for a 3-task / 4-machine scheduling instance with deadline d.
# NOTE(review): the exact semantics of M/K/T are inferred from how
# get_time_matrix/get_cost_matrix use them (M: machine speeds, K: machine
# unit costs, T: task sizes) — confirm against the accompanying write-up.
M = [2, 6, 3, 6]
K = [1, 4, 2, 8]
T = [12, 6, 24]
d = 19

def get_time_matrix(M, T):
    """time_matrix[i][j]: time machine i needs for task j (task size / machine speed)."""
    r = []
    for i in M:
        tmp = []
        for j in T:
            tmp.append(j / i)
        r.append(tmp)
    return np.array(r)

def get_cost_matrix(time_matrix, K):
    """cost_matrix[i][j]: cost of running task j on machine i (unit cost * time)."""
    m = []
    for i in range(len(time_matrix)):
        tmp = []
        for j in time_matrix[i]:
            tmp.append(K[i] * j)
        m.append(tmp)
    return m

time_matrix = np.array(get_time_matrix(M, T))
cost_matrix = np.array(get_cost_matrix(time_matrix, K))
print("Time matrix:\n {}".format(time_matrix))
print("Cost matrix:\n {}".format(cost_matrix))

# +
def sample_most_likely(state_vector):
    # Return (bit vector, count) for the most frequently sampled state; the
    # bit string is reversed to undo Qiskit's little-endian ordering.
    if isinstance(state_vector, (OrderedDict, dict)):
        # get the binary string with the largest count
        binary_string = sorted(state_vector.items(), key=lambda kv: kv[1])
        repetitions = int(binary_string[-1][1])
        binary_string = binary_string[-1][0]
        x = np.asarray([int(y) for y in reversed(list(binary_string))])
        return x, repetitions
    return [], 0

# Bit string of the known optimal assignment, used to count optimal samples.
optimal_key = "0000001000"

def get_stats_for_result(dict_res):
    # Summarise a measurement-counts dict: shots that hit the optimum, shots
    # (and distinct configurations) that satisfied / violated the constraints.
    optimal = 0
    correct = 0
    incorrect = 0
    correct_config = 0
    incorrect_config = 0
    if optimal_key in dict_res:
        optimal = dict_res[optimal_key]
    for key, val in dict_res.items():
        key = key[::-1]  # undo little-endian bit order before decoding
        if is_correct(key):
            correct += val
            correct_config += 1
        else:
            incorrect += val
            incorrect_config += 1
    print('most likely solution: ', sample_most_likely(dict_res))
    print("optimal: ", optimal)
    print("correct solutions: ", correct)
    print("incorrect solutions: ", incorrect)
    print("correct configs: ", correct_config)
    print("incorrect configs: ", incorrect_config)

def is_correct(key):
    # A solution is correct when all machine fields decode and the total
    # schedule time (including slack) hits the deadline exactly.
    return solution_vector_correct(key) and execution_time(key) == d

# Two bits per task select one of the four machines.
correct_machines = ['00', '01', '10', '11']
machine_to_index = {'00': 0, '01': 1, '10': 2, '11': 3}

def solution_vector_correct(vector):
    # Bits 0-5 hold the 2-bit machine choice for each of the three tasks.
    task1_machine = vector[0:2]
    task2_machine = vector[2:4]
    task3_machine = vector[4:6]
    return task1_machine in correct_machines \
        and task2_machine in correct_machines \
        and task3_machine in correct_machines

def execution_time(k):
    # Total schedule time: per-task times on the chosen machines plus the
    # 4-bit slack value encoded in bits 6-9 (most significant bit first).
    task1_machine = machine_to_index.get(k[0:2])
    task2_machine = machine_to_index.get(k[2:4])
    task3_machine = machine_to_index.get(k[4:6])
    task1_time = time_matrix[task1_machine, 0] if task1_machine is not None else 0
    task2_time = time_matrix[task2_machine, 1] if task2_machine is not None else 0
    task3_time = time_matrix[task3_machine, 2] if task3_machine is not None else 0
    slack_sum = int(k[6]) * 8 + int(k[7]) * 4 + int(k[8]) * 2 + int(k[9]) * 1
    return task1_time + task2_time + task3_time + slack_sum

def execution_cost(k):
    # Total monetary cost of the decoded assignment (slack bits carry no cost).
    task1_machine = machine_to_index.get(k[0:2])
    task2_machine = machine_to_index.get(k[2:4])
    task3_machine = machine_to_index.get(k[4:6])
    task1_cost = cost_matrix[task1_machine, 0] if task1_machine is not None else 0
    task2_cost = cost_matrix[task2_machine, 1] if task2_machine is not None else 0
    task3_cost = cost_matrix[task3_machine, 2] if task3_machine is not None else 0
    return task1_cost + task2_cost + task3_cost

def incorrect_machine_count(k):
    # Number of tasks whose 2-bit machine field failed to decode.
    # NOTE(review): the three .get() lookups below are unused — left as in
    # the original.
    task1_machine = machine_to_index.get(k[0:2])
    task2_machine = machine_to_index.get(k[2:4])
    task3_machine = machine_to_index.get(k[4:6])
    return (0 if k[0:2] in correct_machines else 1) \
        + (0 if k[2:4] in correct_machines else 1) \
        + (0 if k[4:6] in correct_machines else 1)

# +
def get_cost_model(x):
    # QUBO cost term: for each task i, the four products act as indicator
    # functions of the two machine bits, selecting one row of cost_matrix.
    return sum([
        cost_matrix[0, i] * (1 - x[2 * i]) * (1 - x[2 * i + 1])
        + cost_matrix[1, i] * (1 - x[2 * i]) * x[2 * i + 1]
        + cost_matrix[2, i] * x[2 * i] * (1 - x[2 * i + 1])
        + cost_matrix[3, i] * x[2 * i] * x[2 * i + 1]
        for i in range(0, 3)
    ])

def get_machine_usage_model(x):
    # Placeholder: the machine-usage penalty is currently disabled.
    return 0

def get_deadline_model(x):
    # Quadratic penalty that vanishes exactly when total time + slack == d.
    time_sum = sum([
        time_matrix[0, i] * (1 - x[2 * i]) * (1 - x[2 * i + 1])
        + time_matrix[1, i] * (1 - x[2 * i]) * x[2 * i + 1]
        + time_matrix[2, i] * x[2 * i] * (1 - x[2 * i + 1])
        + time_matrix[3, i] * x[2 * i] * x[2 * i + 1]
        for i in range(0, 3)
    ])
    slack_sum = 8 * x[6] + 4 * x[7] + 2 * x[8] + x[9]
    time_constraint = (d - time_sum - slack_sum) ** 2
    return time_constraint
# -

def compute_eigenvalues(quibit_op):
    # Print the lowest eigenstates of the Hamiltonian using an exact solver.
    # NOTE(review): the parameter is spelled `quibit_op` but the body reads
    # the module-level `qubit_op`, so the argument is silently ignored; it
    # only works because the caller passes that same global. Worth fixing.
    from qiskit.aqua.algorithms import NumPyEigensolver
    count = 1024
    eigensolver = NumPyEigensolver(qubit_op, count)
    eigensolver_result = eigensolver.compute_eigenvalues()
    print('state\t\ttime\tcost\tmachine use\tcorrect\teigenvalue')
    for eigenstate, eigenvalue in zip(eigensolver_result.eigenstates, eigensolver_result.eigenvalues):
        eigenstate, = eigenstate.sample().keys()
        eigenstate = eigenstate[::-1]  # undo little-endian bit order
        eigenvalue = eigenvalue  # no-op, kept from the original
        print(f'{eigenstate}\t{execution_time(eigenstate)}\t{execution_cost(eigenstate)}', end='')
        print(f'\t{incorrect_machine_count(eigenstate)}\t\t{is_correct(eigenstate)}\t{eigenvalue}')

# Penalty weights: A scales the cost objective, C the deadline constraint.
A = 1
# B = 20
C = 35

# +
pp = PolynomialProgram(10)
pp.add_objective(get_cost_model(pp.x), A)
# pp.add_objective(get_machine_usage_model(pp.x), B)
pp.add_objective(get_deadline_model(pp.x), C)
# -

qubit_op, offset = pp.to_ising()

compute_eigenvalues(qubit_op)

# +
seed = 10598
aqua_globals.random_seed = seed

max_trials = 1000
shots = 1000
reps = 2
entanglement = 'full'

# +
# VQE with a RealAmplitudes ansatz, optimised by SPSA on the QASM simulator.
spsa = SPSA(maxiter=max_trials)
ry = RealAmplitudes(qubit_op.num_qubits, reps=reps, entanglement=entanglement)
vqe = VQE(qubit_op, ry, spsa)

# provider = IBMQ.get_provider('ibm-q')
backend = Aer.get_backend('qasm_simulator')

quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed, shots=shots)
result_vqe = vqe.run(quantum_instance)

state_vector_vqe = result_vqe['eigenstate']
get_stats_for_result(state_vector_vqe)

# +
# Same experiment with QAOA (p=1) for comparison.
spsa = SPSA(maxiter=max_trials)
qaoa = QAOA(qubit_op, optimizer=spsa, p=1)

# provider = IBMQ.get_provider('ibm-q')
backend = Aer.get_backend('qasm_simulator')

quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed, shots=shots)
result = qaoa.run(quantum_instance)

state_vector = result['eigenstate']
get_stats_for_result(state_vector)
binary-encoding/created-hamiltonian-binary-encoding-3x4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demo Collect Rook Usage import pandas as pd import hvplot.pandas # noqa from rooki.client import Rooki # Available hosts hosts = { 'demo': 'rook.dkrz.de', 'dkrz': 'rook3.cloud.dkrz.de', 'ceda': 'rook-wps1.ceda.ac.uk', } # Use cache cache_id = { 'ceda': '1f8181bc-d351-11eb-9402-005056aba41c', 'dkrz': '34369610-d351-11eb-8f86-fa163e466023', } # Collect usage from several nodes def collect_usage(sites, time=None, use_cache=True): df_wps_list = [] df_downloads_list = [] for site in sites: if use_cache: ref_wps = f"http://{hosts[site]}/outputs/rook/{cache_id[site]}/wps_requests.csv" ref_downloads = f"http://{hosts[site]}/outputs/rook/{cache_id[site]}/downloads.csv" else: url = f"http://{hosts[site]}/wps" rooki = Rooki(url, mode='sync') resp = rooki.usage(time=time) ref_wps = resp.response.processOutputs[0].reference print(ref_wps) ref_downloads = resp.response.processOutputs[1].reference print(ref_downloads) # load wps df_wps = pd.read_csv(ref_wps, parse_dates=[4, 5]) df_wps['node'] = site df_wps_list.append(df_wps) # load downloads df_downloads = pd.read_csv(ref_downloads, parse_dates=[2]) df_downloads['node'] = site df_downloads_list.append(df_downloads) df_wps_combined = pd.concat(df_wps_list, ignore_index=True) df_downloads_combined = pd.concat(df_downloads_list, ignore_index=True) return df_wps_combined, df_downloads_combined # ## collect usage df, df_downloads = collect_usage(['ceda', 'dkrz'], time='2021-03-23/', use_cache=False) df.head() df.nunique() # ## evaluate pywps stats df.operation.value_counts() df.loc[df['operation']=='execute'].loc[df['status']==4].identifier.value_counts() df.loc[df['operation']=='execute'].loc[df['status']==5].identifier.value_counts() 
df.loc[df['operation']=='execute'].loc[df['identifier']=='orchestrate'].status.value_counts() # ## duration df['duration'] = df['time_end'] - df['time_start'] df.duration = df.duration.dt.seconds df_skip_outlier = df.loc[df['operation']=='execute'].loc[df['identifier']=='orchestrate'].loc[df.duration<900] df_skip_outlier.duration.mean() df_skip_outlier.hvplot.hist(y='duration', logx=False, bins=100) # ## jobs over days days = (df.time_start.max() - df.time_start.min()).days days len(df)/days df.loc[df['operation']=='execute'].loc[df['identifier']=='orchestrate'].hvplot.hist(y='time_start', bins=days) # ## jobs over week days df['dayofweek'] = df['time_start'].dt.dayofweek df df.loc[df['operation']=='execute'].loc[df['identifier']=='orchestrate'].hvplot.hist(y='dayofweek', bins=7) # ## jobs over day time df['hour'] = df['time_start'].dt.hour df df.loc[df['operation']=='execute'].loc[df['identifier']=='orchestrate'].hvplot.hist(y='hour', bins=24) # ## concurrent jobs # https://stackoverflow.com/questions/57804145/combining-rows-with-overlapping-time-periods-in-a-pandas-dataframe edf = df.loc[df['operation']=='execute'].loc[df['identifier']=='orchestrate'].loc[df['status'].isin([4, 5])] startdf = pd.DataFrame({'time':edf['time_start'], 'what':1}) enddf = pd.DataFrame({'time':edf['time_end'], 'what':-1}) mergdf = pd.concat([startdf, enddf]).sort_values('time') mergdf['running'] = mergdf['what'].cumsum() mergdf mergdf.running.mean() max_running = mergdf.running.max() max_running mergdf.loc[mergdf['running']>0].hvplot.hist(y='running', bins=max_running) # ## concurrent jobs over days mergdf.loc[mergdf['running']>0].hvplot.scatter(y='running', x='time') tmpdf = mergdf.groupby(pd.Grouper(key="time", freq="1D")).max() tmpdf tmp2df = pd.DataFrame() tmp2df['time'] = tmpdf.index.values tmp2df['running'] = tmpdf.running.values tmp2df tmpdf.running.mean() tmp2df.hvplot.bar(x='time', y='running') # ## Errors per day df_errors = 
df.loc[df['operation']=='execute'].loc[df['identifier']=='orchestrate'].loc[df['status']==5] df_errors df_errors.hvplot.hist(y='time_start') # ## Error messages df_errors.message.value_counts() # ## Downloads df_downloads.head() df_downloads.nunique() # ### Downloads size df_downloads['size'].sum() / 1024 ** 3 def size_mb(size): return size / 1024 ** 2 df_downloads['size_mb'] = df_downloads['size'].apply(size_mb) df_downloads df_downloads.hvplot.hist(y='size_mb') # ### Download size per day downloads_per_day = df_downloads.groupby(df_downloads.datetime.dt.date)["size_mb"].sum() downloads_per_day downloads_per_day.mean() downloads_per_day.hvplot.bar() # ### Download requests per day days = (df_downloads.datetime.max() - df_downloads.datetime.min()).days days len(df_downloads)/days # + tags=[] df_downloads.hvplot.hist(y='datetime', bins=days) # - # ### Downloads by IP address df_downloads.remote_host_ip.value_counts() # ### Downloads GeoIP # https://pypi.org/project/geoip2nation/ # + from geoip import xgeoip r = xgeoip.GeoIp() r.load_memory() def lookup_ip(ip): return r.resolve(ip).country # - df_downloads['geoip'] = df_downloads.remote_host_ip.apply(lookup_ip) df_downloads df_downloads.geoip.value_counts().hvplot.bar() # ## GeoHealthCheck # https://geohealthcheck.cloud.dkrz.de import requests from io import StringIO ghc_url = "https://geohealthcheck.cloud.dkrz.de/resource/45/history/csv" req = requests.get(ghc_url, verify=False) df_ghc = pd.read_csv(StringIO(req.text), parse_dates=['checked_datetime']) df_ghc df_ghc.status.value_counts() def up(status): if status == True: return 1 return 0 df_ghc['up'] = df_ghc.status.apply(up) df_ghc.hvplot.line(x='checked_datetime', y='up')
notebooks/demo/rook-usage.ipynb
# --- # title: "Descriptive Statistics For pandas Dataframe" # author: "<NAME>" # date: 2017-12-20T11:53:49-07:00 # description: "Descriptive statistics for pandas dataframe." # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Import modules import pandas as pd # ### Create dataframe data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'], 'age': [42, 52, 36, 24, 73], 'preTestScore': [4, 24, 31, 2, 3], 'postTestScore': [25, 94, 57, 62, 70]} df = pd.DataFrame(data, columns = ['name', 'age', 'preTestScore', 'postTestScore']) df # ### The sum of all the ages df['age'].sum() # ### Mean preTestScore df['preTestScore'].mean() # ### Cumulative sum of preTestScores, moving from the rows from the top df['preTestScore'].cumsum() # ### Summary statistics on preTestScore df['preTestScore'].describe() # ### Count the number of non-NA values df['preTestScore'].count() # ### Minimum value of preTestScore df['preTestScore'].min() # ### Maximum value of preTestScore df['preTestScore'].max() # ### Median value of preTestScore df['preTestScore'].median() # ### Sample variance of preTestScore values df['preTestScore'].var() # ### Sample standard deviation of preTestScore values df['preTestScore'].std() # ### Skewness of preTestScore values df['preTestScore'].skew() # ### Kurtosis of preTestScore values df['preTestScore'].kurt() # ### Correlation Matrix Of Values df.corr() # ### Covariance Matrix Of Values df.cov()
content/python/data_wrangling/pandas_dataframe_descriptive_stats.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %system ls import nltk import re from wordcloud import WordCloud import matplotlib.pyplot as plt import PIL import random # %matplotlib inline from nltk.corpus import stopwords # Import the stop word list print (stopwords.words("english")) lines = open("a.txt").read() lines def clean_text( text, remove_sw ): # Function to convert a raw review to a string of words # The input is a single string (a raw movie review), and # the output is a single string (a preprocessed movie review) # # # 2. Remove non-letters letters_only = re.sub("[^a-zA-Z]", " ", text) # # 3. Convert to lower case, split into individual words words = letters_only.lower().split() # # 4. In Python, searching a set is much faster than searching # a list, so convert the stop words to a set # 5. remove stopwords meaningful_words = [] if remove_sw == True: stops = set(stopwords.words("english")) meaningful_words = [w for w in words if not w in stops] else: meaningful_words = words # # 6. Join the words back into one string separated by space, # and return the result. 
return( " ".join( meaningful_words )) clean_text(lines, False) wordcloud = WordCloud(width = 500, height=500, max_font_size=50, relative_scaling=.5, max_words=2000).generate(clean_text(lines, False)) plt.figure() plt.imshow(wordcloud) plt.axis("off") plt.show() plt.savefig('a.png') image = wordcloud.to_image() image.save('a.png') def gen_ngrams(words, n): """Takes in a list of words and generates ngrams""" if len(words) < n: return ngram = [] for i in range(len(words) - (n-1)): phrase = [] for j in range(n): phrase.append(words[i+j]) ngram.append(phrase) return ngram grams = gen_ngrams(clean_text(lines, False).split(), 10) len(grams) grams[0] cache = {} def database(grams): for gram in grams: key = (gram[0], gram[1]) if key in cache: cache[key].append(gram[2:]) else: cache[key] = [gram[2:]] # + active="" # # - database(grams) stops = set(stopwords.words("english")) def generate_markov_text(size=9): seedg = random.randint(0, len(grams)-1) seedw = random.randint(0,8) seed_word, next_word = grams[seedg][seedw], grams[seedg][seedw+1] w1, w2 = seed_word, next_word gen_words = [] gen_words.append(w1) gen_words.append(w2) for i in range(size): t_gram = [] if type(cache[(w1, w2)][0]) is str: t_gram = cache[(w1, w2)] else: t_gram = random.choice(cache[(w1, w2)]) w1, w2 = t_gram[len(t_gram)-2], t_gram[len(t_gram)-1] for w in t_gram: gen_words.append(w) if gen_words[-1] in stops: gen_words = gen_words[:-1] return ' '.join(gen_words) generate_markov_text()
en_textprocessing/entextprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/isb-cgc/Community-Notebooks/blob/Staging-Notebooks/RegulomeExplorer/Correlations_Protein_and_Gene_expression_CPTAC.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="9h1bEFTSoGyu" # # Compute correlations of protein and gene expression in CPTAC # # # ``` # Title: Correlations of protein and gene expression in CPTAC # Author: <NAME> # Created: 05-23-2021 # Purpose: Compute correlations between proteomic and gene expression available in the PDC # Notes: Runs in Google Colab # ``` # This notebook uses BigQuery to compute Pearson correlation between protein and gene expression for all the genes in the BigQuery tables of the PDC dataset. We used CCRCC as example; but this can be changed easily for other cancer types. # + [markdown] id="BCpeagmasFFs" # ## Modules # + id="losf8GRlZvcM" from google.cloud import bigquery from google.colab import auth import numpy as np import pandas as pd import seaborn as sns import pandas_gbq # + [markdown] id="CnzNWzE3zS0H" # ## Google Authentication # The first step is to authorize access to BigQuery and the Google Cloud. For more information see ['Quick Start Guide to ISB-CGC'](https://isb-cancer-genomics-cloud.readthedocs.io/en/latest/sections/HowToGetStartedonISB-CGC.html) and alternative authentication methods can be found [here](https://googleapis.dev/python/google-api-core/latest/auth.html). # # Moreover you need to [create a google cloud](https://cloud.google.com/resource-manager/docs/creating-managing-projects#console) project to be able to run BigQuery queries. 
# + id="2ySNqCskzONP" auth.authenticate_user() my_project_id = "" # write your project id here bqclient = bigquery.Client( my_project_id ) # + [markdown] id="v0y0FkrvBn4L" # ## Retrieve protein expression of CCRCC # The following query will retrieve protein expression and case IDs from CPTAC table `quant_proteome_CPTAC_CCRCC_discovery_study_pdc_current`. Moreover, to label samples as Tumor or Normal samples we join the table with metadata available in the table `aliquot_to_case_mapping_pdc_current` # + id="E2b6HN_cu-cn" prot = '''quant AS ( SELECT meta.sample_submitter_id, meta.sample_type, quant.case_id, quant.aliquot_id, quant.gene_symbol, CAST(quant.protein_abundance_log2ratio AS FLOAT64) AS protein_abundance_log2ratio FROM `isb-cgc-bq.CPTAC.quant_proteome_CPTAC_CCRCC_discovery_study_pdc_current` as quant JOIN `isb-cgc-bq.PDC_metadata.aliquot_to_case_mapping_current` as meta ON quant.case_id = meta.case_id AND quant.aliquot_id = meta.aliquot_id AND meta.sample_type IN ('Primary Tumor','Solid Tissue Normal') )''' # + [markdown] id="G_TjJAtgvtA2" # ## Retrieve gene expression of CCRCC # Next we retrieve gene expression data from the table `CPTAC.RNAseq_hg38_gdc_current` which contains RNA-seq data from all tumor types of CPTAC. Moreover we join the data with the metadata table `aliquot_to_case_mapping_pdc_current` to label samples to cancer or normal tissue # + id="eAaHSre1v2cV" gexp = '''gexp AS ( SELECT DISTINCT meta.sample_submitter_id, meta.sample_type, rnaseq.gene_name , LOG(rnaseq.HTSeq__FPKM + 1) as HTSeq__FPKM FROM `isb-cgc-bq.CPTAC.RNAseq_hg38_gdc_current` as rnaseq JOIN `isb-cgc-bq.PDC_metadata.aliquot_to_case_mapping_current` as meta ON meta.sample_submitter_id = rnaseq.sample_barcode )''' # + [markdown] id="4IW69UHBwu2s" # ## Compute Pearson correlation # The following query join the protein and gene expression data and compute correlation for each gene and semple type (normal or tumor). 
# + id="kDSJ47hbw28a" corr = '''correlation AS ( SELECT quant.gene_symbol, gexp.sample_type, COUNT(*) as n, CORR(protein_abundance_log2ratio,HTSeq__FPKM) as corr FROM quant JOIN gexp ON quant.sample_submitter_id = gexp.sample_submitter_id AND gexp.gene_name = quant.gene_symbol AND gexp.sample_type = quant.sample_type GROUP BY quant.gene_symbol, gexp.sample_type )''' # + [markdown] id="TKStdGWhxsYQ" # ## Compute p-values # + id="g3QdUOQqxz3X" pval = '''SELECT gene_symbol, sample_type, n, corr, `cgc-05-0042.functions.corr_pvalue`(corr, n) as p FROM correlation WHERE ABS(corr) <= 1.0''' # + [markdown] id="wGjeJAGuyqbA" # ## Adjust p-values # The following commands generate the final query which will be sent to Google to retrieve the final data that include the correlation for each gene. The query also includes a function (BHmultipletests) that adjusts the computed p values with the Benjamini-Hochberg method for multipletest correction. # + id="woduO0K9ywOM" mysql = '''DECLARE Nrows INT64; CREATE TEMP TABLE PearsonCorrelation AS WITH {0}, {1}, {2} {3} ; # Adjust pvalues for multiple tests SET Nrows = ( SELECT COUNT(*) FROM PearsonCorrelation ); CALL `cgc-05-0042.functions.BHmultipletests`( 'PearsonCorrelation', 'p', Nrows ) '''.format(prot, gexp, corr, pval) # + [markdown] id="BcNc2B_hMmfn" # ## Run the query to retrieve the analysis # + id="0RzNRC16Mv11" job_config = bigquery.QueryJobConfig() job_config.use_legacy_sql = False try: query_job = bqclient.query ( mysql, job_config=job_config ) except: print ( " FATAL ERROR: query execution failed " ) mydf = query_job.to_dataframe() # + [markdown] id="WUIdZMflFgTX" # The following command displays the results. # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="9840zbeiFMhG" outputId="805f0bb8-4f05-4968-a190-b1f724c5d130" mydf # + [markdown] id="Gz12y0AexsoO" # ## Histogram of correlations # The results above show the correlation between protein and gene expression for tumors and normal samples. 
Next we show two histograms of these correlations, one for tumor and the other for normal samples. Moreover we colored the bars by genes that have significant correlations (significant level = 0.01). # + id="ImOZyU1RnPvr" colab={"base_uri": "https://localhost:8080/", "height": 386} outputId="f1fad544-6ba9-454e-95a0-03aed770736a" s_level = 0.01 mydf['significant'] = np.where( mydf['p_adj'] <= s_level, True, False) sns.displot(data=mydf, x="corr", hue="significant", multiple="stack", binwidth=0.1, col='sample_type')
RegulomeExplorer/Correlations_Protein_and_Gene_expression_CPTAC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_10_1_timeseries.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # # T81-558: Applications of Deep Neural Networks # **Module 10: Time Series in Keras** # **Part 10.1: Time Series Data Encoding for Deep Learning** # * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) # * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # # Module 10 Material # # * **Part 10.1: Time Series Data Encoding for Deep Learning** [[Video]](https://www.youtube.com/watch?v=dMUmHsktl04&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_1_timeseries.ipynb) # * Part 10.2: Programming LSTM with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=wY0dyFgNCgY&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_2_lstm.ipynb) # * Part 10.3: Text Generation with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=6ORnRAz3gnA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_3_text_generation.ipynb) # * Part 10.4: Image Captioning with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=NmoW_AYWkb4&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_4_captioning.ipynb) # * Part 10.5: Temporal CNN in Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=i390g8acZwk&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_5_temporal_cnn.ipynb) # # Google CoLab Instructions # # The following code 
ensures that Google CoLab is running the correct version of TensorFlow. try: # %tensorflow_version 2.x COLAB = True print("Note: using Google CoLab") except: print("Note: not using Google CoLab") COLAB = False # # Part 10.1: Time Series Data Encoding # # In this chapter, we will examine time series encoding and recurrent networks, two topics that are logical to put together because they are both methods for dealing with data that spans over time. Time series encoding deals with representing events that occur over time to a neural network. There are many different methods to encode data that occur over time to a neural network. This encoding is necessary because a feedforward neural network will always produce the same output vector for a given input vector. Recurrent neural networks do not require encoding of time series data because they are able to handle data that occur over time automatically. # # The variation in temperature during the week is an example of time-series data. For instance, if we know that today’s temperature is 25 degrees, and tomorrow’s temperature is 27 degrees, the recurrent neural networks and time series encoding provide another option to predict the correct temperature for the week. Conversely, a traditional feedforward neural network will always respond with the same output for a given input. If we train a feedforward neural network to predict tomorrow’s temperature, it should return a value of 27 for 25. The fact that it will always output 27 when given 25 might be a hindrance to its predictions. Surely the temperature of 27 will not always follow 25. It would be better for the neural network to consider the temperatures for a series of days before the prediction. Perhaps the temperature over the last week might allow us to predict tomorrow’s temperature. Therefore, recurrent neural networks and time series encoding represent two different approaches to representing data over time to a neural network. 
# # Previously we trained neural networks with input ($x$) and expected output ($y$). $X$ was a matrix, the rows were training examples, and the columns were values to be predicted. The $x$ value will now contain sequences of data. The definition of the $y$ value will stay the same. # # Dimensions of the training set ($x$): # * Axis 1: Training set elements (sequences) (must be of the same size as $y$ size) # * Axis 2: Members of sequence # * Axis 3: Features in data (like input neurons) # # Previously, we might take as input a single stock price, to predict if we should buy (1), sell (-1), or hold (0). The following code illustrates this encoding. # + # x = [ [32], [41], [39], [20], [15] ] y = [ 1, -1, 0, -1, 1 ] print(x) print(y) # - # The following code builds a CSV file from scratch, to see it as a data frame, use the following: # + from IPython.display import display, HTML import pandas as pd import numpy as np x = np.array(x) print(x[:,0]) df = pd.DataFrame({'x':x[:,0], 'y':y}) display(df) # - # You might want to put volume in with the stock price. The following code shows how we can add an additional dimension to handle the volume. # + x = [ [32,1383], [41,2928], [39,8823], [20,1252], [15,1532] ] y = [ 1, -1, 0, -1, 1 ] print(x) print(y) # - # Again, very similar to what we did before. The following shows this as a data frame. # + from IPython.display import display, HTML import pandas as pd import numpy as np x = np.array(x) print(x[:,0]) df = pd.DataFrame({'price':x[:,0], 'volume':x[:,1], 'y':y}) display(df) # - # Now we get to sequence format. We want to predict something over a sequence, so the data format needs to add a dimension. A maximum sequence length must be specified, but the individual sequences can be of any length. 
# + x = [ [[32,1383],[41,2928],[39,8823],[20,1252],[15,1532]], [[35,8272],[32,1383],[41,2928],[39,8823],[20,1252]], [[37,2738],[35,8272],[32,1383],[41,2928],[39,8823]], [[34,2845],[37,2738],[35,8272],[32,1383],[41,2928]], [[32,2345],[34,2845],[37,2738],[35,8272],[32,1383]], ] y = [ 1, -1, 0, -1, 1 ] print(x) print(y) # - # Even if there is only one feature (price), the 3rd dimension must be used: # + x = [ [[32],[41],[39],[20],[15]], [[35],[32],[41],[39],[20]], [[37],[35],[32],[41],[39]], [[34],[37],[35],[32],[41]], [[32],[34],[37],[35],[32]], ] y = [ 1, -1, 0, -1, 1 ] print(x) print(y) # - # # Module 10 Assignment # # You can find the first assignment here: [assignment 10](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class10.ipynb)
Clase8-RNN/extras/.ipynb_checkpoints/1seriestemporales-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%%\n"} """EXERCISE 2 1. Generar un diccionario a partir de la columna country (country_id,country)""" # + pycharm={"name": "#%%\n"} """EXERCISE 2 2. Escribir el diccionario como CSV""" # + pycharm={"name": "#%%\n"} """EXERCISE 2 3. Leer el CSV generado y hacer inner join con demo.csv"""
code/EXERCISE 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 9 XOR # ## Lab09-2-xor-nn # > XOR 데이터를 neural network를 이용한 경우(using sigmoid fn) # > # > hidden layer를 1개 만듬으로 인해 accuracy=1 이 됨 import tensorflow as tf import numpy as np tf.set_random_seed(777) # for reproducibility learning_rate = 0.1 x_data = [[0, 0], [0, 1], [1, 0], [1, 1]] y_data = [[0], [1], [1], [0]] x_data = np.array(x_data, dtype=np.float32) y_data = np.array(y_data, dtype=np.float32) X = tf.placeholder(tf.float32, [None, 2]) Y = tf.placeholder(tf.float32, [None, 1]) # 1th hidden layer W1 = tf.Variable(tf.random_normal([2, 2]), name='weight1') b1 = tf.Variable(tf.random_normal([2]), name='bias1') layer1 = tf.sigmoid(tf.matmul(X, W1) + b1) # hidden variable은 2개!! W2 = tf.Variable(tf.random_normal([2, 1]), name='weight2') b2 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.sigmoid(tf.matmul(layer1, W2) + b2) # cost/loss function cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis)) # Optimizer train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) # Accuracy computation # True if hypothesis>0.5 else False predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32) accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32)) # + # Launch graph with tf.Session() as sess: # Initialize TensorFlow variables sess.run(tf.global_variables_initializer()) for step in range(10001): sess.run(train, feed_dict={X: x_data, Y: y_data}) if step % 100 == 0: print(step, sess.run(cost, feed_dict={ X: x_data, Y: y_data}), sess.run([W1, W2])) # Accuracy report h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict={X: x_data, Y: y_data}) print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a) ''' Hypothesis: [[ 0.01338218] [ 0.98166394] [ 0.98809403] 
[ 0.01135799]] Correct: [[ 0.] [ 1.] [ 1.] [ 0.]] Accuracy: 1.0 '''
Python/tensorflow/DeepLearningZeroToAll/Lab09-2-xor-nn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## fastText chABSA dataset slot 3 evaluation # # Tested single data split only, not cross validation applied. # # ### Polarity classification # # P@1 0.751 # R@1 0.751 # Number of examples: 265 # # ### Category classification # # P@1 0.464 # R@1 0.464 # Number of examples: 265 # ! git clone https://github.com/daisukelab/dl-cliche.git # ! cd dl-cliche && pip install . # ! rm -fr dl-cliche from dlcliche.utils import * from dlcliche.nlp_mecab import * # Download dataset & stop_words_ja.txt # ! wget https://s3-ap-northeast-1.amazonaws.com/dev.tech-sketch.jp/chakki/public/chABSA-dataset.zip # ! unzip -q chABSA-dataset.zip && rm chABSA-dataset.zip && rm -r __MACOSX # ! ls chABSA-dataset # ! cd chABSA-dataset && wget https://raw.githubusercontent.com/chakki-works/chABSA-dataset/master/notebooks/resource/stop_words_ja.txt # + DATA = Path('chABSA-dataset') def check_data_existence(folder): file_count = len(list(folder.glob("e*_ann.json"))) if file_count == 0: raise Exception("Processed Data does not exist.") else: print("{} files ready.".format(file_count)) check_data_existence(DATA) stop_words = [] with (DATA/"stop_words_ja.txt").open(encoding="utf-8") as f: stop_words = f.readlines() stop_words = [w.strip() for w in stop_words] print("{} stop words ready.".format(len(stop_words))) # - labels = [] # make labels (exclude NULL and OOD) for e in ["market", "company", "business", "product"]: for a in ["general", "sales", "profit", "amount", "price", "cost"]: labels.append(e + "#" + a) if e in ["market"]: break; print(labels) # + import json import numpy as np import pandas as pd from collections import Counter sentences = [] dataset = [] tokenizer = get_mecab_tokenizer(stop_words=stop_words, normalize=False) for f in DATA.glob("e*_ann.json"): with 
f.open(encoding="utf-8") as j: d = json.load(j) for s in d["sentences"]: tokenized = tokenizer.tokenize(s["sentence"].upper()) for o in s["opinions"]: if o["category"] in labels: # sentence index + category dataset.append((len(sentences), o["category"], o["polarity"])) sentences.append(tokenized) # - # ## Polarity classification # + from sklearn.model_selection import train_test_split Y = 2 dataset = np.array(dataset) Xtrn, Xval, ytrn, yval = train_test_split(dataset[:, 0], dataset[:, Y], test_size=0.1, random_state=0) def write_dataset(filename, X, y): with open(filename, 'w') as f: for _x, _y in zip(X, y): w = ['__label__'+_y] w += list(sentences[int(_x)]) f.write(' '.join(w)+'\n') write_dataset(DATA/'train.txt', Xtrn, ytrn) write_dataset(DATA/'valid.txt', Xval, yval) len(Xtrn), len(Xval), list(set(dataset[:, Y])) # - # ! fasttext supervised -input {DATA}/train.txt -output {DATA}/ft_supervised -dim 50 -epoch 10000 # ! fasttext test {DATA}/ft_supervised.bin {DATA}/valid.txt 1 # ## Category classification # + from sklearn.model_selection import train_test_split Y = 1 dataset = np.array(dataset) Xtrn, Xval, ytrn, yval = train_test_split(dataset[:, 0], dataset[:, Y], test_size=0.1, random_state=0) def write_dataset(filename, X, y): with open(filename, 'w') as f: for _x, _y in zip(X, y): w = ['__label__'+_y] w += list(sentences[int(_x)]) f.write(' '.join(w)+'\n') write_dataset(DATA/'train.txt', Xtrn, ytrn) write_dataset(DATA/'valid.txt', Xval, yval) len(Xtrn), len(Xval), list(set(dataset[:, Y])) # - # ! fasttext supervised -input {DATA}/train.txt -output {DATA}/ft_supervised -dim 50 -epoch 10000 # ! fasttext test {DATA}/ft_supervised.bin {DATA}/valid.txt 1
tests/chABSA-dataset_Slot3_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # T81-558: Applications of Deep Neural Networks # **Module 2: Python for Machine Learning** # * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) # * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # # Module Video Material # # Main video lecture: # # * [Part 2.1: Dealing with Data in Python with Pandas](https://www.youtube.com/watch?v=Bj2m6hvRoNk&index=6&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) # * [Part 2.2: Machine Learning Background for Deep Learning, Keras and Tensorflow](https://www.youtube.com/watch?v=WCXzchgxi9c&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) # * [Part 2.3: Pandas and Machine Learning](https://www.youtube.com/watch?v=eZGunTjrHyA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) # # Weekly video update: # # * *Will be posted week of this class* # # Helpful Functions # # You will see these at the top of every module. These are simply a set of reusable functions that we will make use of. Each of them will be explained as the semester progresses. They are explained in greater detail as the course progresses. Class 4 contains a complete overview of these functions. # + from sklearn import preprocessing import matplotlib.pyplot as plt import numpy as np import pandas as pd import shutil import os import requests import base64 # Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue) def encode_text_dummy(df, name): dummies = pd.get_dummies(df[name]) for x in dummies.columns: dummy_name = "{}-{}".format(name, x) df[dummy_name] = dummies[x] df.drop(name, axis=1, inplace=True) # Encode text values to a single dummy variable. 
The new columns (which do not replace the old) will have a 1 # at every location where the original column (name) matches each of the target_values. One column is added for # each target value. def encode_text_single_dummy(df, name, target_values): for tv in target_values: l = list(df[name].astype(str)) l = [1 if str(x) == str(tv) else 0 for x in l] name2 = "{}-{}".format(name, tv) df[name2] = l # Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue). def encode_text_index(df, name): le = preprocessing.LabelEncoder() df[name] = le.fit_transform(df[name]) return le.classes_ # Encode a numeric column as zscores def encode_numeric_zscore(df, name, mean=None, sd=None): if mean is None: mean = df[name].mean() if sd is None: sd = df[name].std() df[name] = (df[name] - mean) / sd # Convert all missing values in the specified column to the median def missing_median(df, name): med = df[name].median() df[name] = df[name].fillna(med) # Convert all missing values in the specified column to the default def missing_default(df, name, default_value): df[name] = df[name].fillna(default_value) # Convert a Pandas dataframe to the x,y inputs that TensorFlow needs def to_xy(df, target): result = [] for x in df.columns: if x != target: result.append(x) # find out the type of the target column. Is it really this hard? :( target_type = df[target].dtypes target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type # Encode to int for classification, float otherwise. TensorFlow likes 32 bits. 
if target_type in (np.int64, np.int32): # Classification dummies = pd.get_dummies(df[target]) return df.as_matrix(result).astype(np.float32), dummies.as_matrix().astype(np.float32) else: # Regression return df.as_matrix(result).astype(np.float32), df.as_matrix([target]).astype(np.float32) # Nicely formatted time string def hms_string(sec_elapsed): h = int(sec_elapsed / (60 * 60)) m = int((sec_elapsed % (60 * 60)) / 60) s = sec_elapsed % 60 return "{}:{:>02}:{:>05.2f}".format(h, m, s) # Regression chart. def chart_regression(pred,y,sort=True): t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()}) if sort: t.sort_values(by=['y'],inplace=True) a = plt.plot(t['y'].tolist(),label='expected') b = plt.plot(t['pred'].tolist(),label='prediction') plt.ylabel('output') plt.legend() plt.show() # Remove all rows where the specified column is +/- sd standard deviations def remove_outliers(df, name, sd): drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))] df.drop(drop_rows, axis=0, inplace=True) # Encode a column to a range between normalized_low and normalized_high. def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1, data_low=None, data_high=None): if data_low is None: data_low = min(df[name]) data_high = max(df[name]) df[name] = ((df[name] - data_low) / (data_high - data_low)) \ * (normalized_high - normalized_low) + normalized_low # This function submits an assignment. You can submit an assignment as much as you like, only the final # submission counts. The paramaters are as follows: # data - Pandas dataframe output. # key - Your student key that was emailed to you. # no - The assignment class number, should be 1 through 1. # source_file - The full path to your Python or IPYNB file. This must have "_class1" as part of its name. # . The number must match your assignment number. For example "_class2" for class assignment #2. 
def submit(data,key,no,source_file=None): if source_file is None and '__file__' not in globals(): raise Exception('Must specify a filename when a Jupyter notebook.') if source_file is None: source_file = __file__ suffix = '_class{}'.format(no) if suffix not in source_file: raise Exception('{} must be part of the filename.'.format(suffix)) with open(source_file, "rb") as image_file: encoded_python = base64.b64encode(image_file.read()).decode('ascii') ext = os.path.splitext(source_file)[-1].lower() if ext not in ['.ipynb','.py']: raise Exception("Source file is {} must be .py or .ipynb".format(ext)) r = requests.post("https://api.heatonresearch.com/assignment-submit", headers={'x-api-key':key}, json={'csv':base64.b64encode(data.to_csv(index=False).encode('ascii')).decode("ascii"), 'assignment': no, 'ext':ext, 'py':encoded_python}) if r.status_code == 200: print("Success: {}".format(r.text)) else: print("Failure: {}".format(r.text)) # - # Pandas # ====== # [Pandas](http://pandas.pydata.org/) is an open source library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language. It is based on the [dataframe](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html) concept found in the [R programming language](https://www.r-project.org/about.html). For this class, Pandas will be the primary means by which data is manipulated in conjunction with neural networks. # # The dataframe is a key component of Pandas. We will use it to access the [auto-mpg dataset](https://archive.ics.uci.edu/ml/datasets/Auto+MPG). This dataset can be found on the UCI machine learning repository. For this class we will use a version of the Auto MPG dataset where I added column headers. You can find my version [here](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/data/auto-mpg.csv). # # This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University. 
The dataset was used in the 1983 American Statistical Association Exposition. It contains data for 398 cars, including [mpg](https://en.wikipedia.org/wiki/Fuel_economy_in_automobiles), [cylinders](https://en.wikipedia.org/wiki/Cylinder_(engine)), [displacement](https://en.wikipedia.org/wiki/Engine_displacement), [horsepower](https://en.wikipedia.org/wiki/Horsepower) , weight, acceleration, model year, origin and the car's name. # # The following code loads the MPG dataset into a dataframe: # + # Simple dataframe import os import pandas as pd path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read) print(df[0:5]) # + # Perform basic statistics on a dataframe. import os import pandas as pd path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) # Strip non-numerics df = df.select_dtypes(include=['int', 'float']) headers = list(df.columns.values) fields = [] for field in headers: fields.append( { 'name' : field, 'mean': df[field].mean(), 'var': df[field].var(), 'sdev': df[field].std() }) for field in fields: print(field) # - # ## Sorting and Shuffling Dataframes # It is possable to sort and shuffle. # + import os import pandas as pd import numpy as np path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) #np.random.seed(42) # Uncomment this line to get the same shuffle each time df = df.reindex(np.random.permutation(df.index)) df.reset_index(inplace=True, drop=True) df # + import os import pandas as pd import numpy as np path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) df = df.sort_values(by='name',ascending=True) print("The first car is: {}".format(df['name'].iloc[0])) print(df[0:5]) # - # ## Saving a Dataframe # # Many of the assignments in this course will require that you save a dataframe to submit to the instructor. 
The following code performs a shuffle and then saves a new copy. # + import os import pandas as pd import numpy as np path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") filename_write = os.path.join(path,"auto-mpg-shuffle.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) df = df.reindex(np.random.permutation(df.index)) df.to_csv(filename_write,index=False) # Specify index = false to not write row numbers print("Done") # - # ## Dropping Fields # # Some fields are of no value to the neural network and can be dropped. The following code removes the name column from the MPG dataset. # + import os import pandas as pd import numpy as np path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) print("Before drop: {}".format(df.columns)) df.drop('name',1,inplace=True) print("After drop: {}".format(df.columns)) # - # ## Calculated Fields # # It is possible to add new fields to the dataframe that are calculated from the other fields. We can create a new column that gives the weight in kilograms. The equation to calculate a metric weight, given a weight in pounds is: # # $ m_{(kg)} = m_{(lb)} \times 0.45359237 $ # # This can be used with the following Python code: # + import os import pandas as pd import numpy as np path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) df.insert(1,'weight_kg',(df['weight']*0.45359237).astype(int)) df # - # # Field Transformation & Preprocessing # # The data fed into a machine learning model rarely bares much similarity to the data that the data scientist originally received. One common transformation is to normalize the inputs. A normalization allows numbers to be put in a standard form so that two values can easily be compared. Consider if a friend told you that he received a $10 discount. Is this a good deal? Maybe. But the value is not normalized. 
If your friend purchased a car, then the discount is not that good. If your friend purchased dinner, this is a very good discount! # # Percentages are a very common form of normalization. If your friend tells you they got 10% off, we know that this is a better discount than 5%. It does not matter how much the purchase price was. One very common machine learning normalization is the Z-Score: # # $z = {x- \mu \over \sigma} $ # # To calculate the Z-Score you need to also calculate the mean($\mu$) and the standard deviation ($\sigma$). The mean is calculated as follows: # # $\mu = \bar{x} = \frac{x_1+x_2+\cdots +x_n}{n}$ # # The standard deviation is calculated as follows: # # $\sigma = \sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - \mu)^2}, {\rm \ \ where\ \ } \mu = \frac{1}{N} \sum_{i=1}^N x_i$ # # The following Python code replaces the mpg with a z-score. Cars with average MPG will be near zero, above zero is above average, and below zero is below average. Z-Scores above/below -3/3 are very rare, these are outliers. # + import os import pandas as pd import numpy as np from scipy.stats import zscore path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) df['mpg'] = zscore(df['mpg']) df # - # ## Missing Values # # Missing values are a reality of machine learning. Ideally every row of data will have values for all columns. However, this is rarely the case. Most of the values are present in the MPG database. However, there are missing values in the horsepower column. A common practice is to replace missing values with the median value for that column. The median is calculated as described [here](https://www.mathsisfun.com/median.html). 
The following code replaces any NA values in horsepower with the median: # + import os import pandas as pd import numpy as np from scipy.stats import zscore path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) med = df['horsepower'].median() df['horsepower'] = df['horsepower'].fillna(med) # df = df.dropna() # you can also simply drop NA values print("horsepower has na? {}".format(pd.isnull(df['horsepower']).values.any())) # - # ## Concatenating Rows and Columns # Rows and columns can be concatenated together to form new data frames. # + # Create a new dataframe from name and horsepower import os import pandas as pd import numpy as np from scipy.stats import zscore path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) col_horsepower = df['horsepower'] col_name = df['name'] result = pd.concat([col_name,col_horsepower],axis=1) result # + # Create a new dataframe from name and horsepower, but this time by row import os import pandas as pd import numpy as np from scipy.stats import zscore path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) col_horsepower = df['horsepower'] col_name = df['name'] result = pd.concat([col_name,col_horsepower]) result # - # ## Training and Validation # # It is very important that we evaluate a machine learning model based on its ability to predict data that it has never seen before. Because of this we often divide the training data into a validation and training set. The machine learning model will learn from the training data, but ultimately be evaluated based on the validation data. # # * **Training Data** - **In Sample Data** - The data that the machine learning model was fit to/created from. # * **Validation Data** - **Out of Sample Data** - The data that the machine learning model is evaluated upon after it is fit to the training data. 
# # There are two predominant means of dealing with training and validation data: # # * **Training/Validation Split** - The data are split according to some ratio between a training and validation (hold-out) set. Common ratios are 80% training and 20% validation. # * **K-Fold Cross Validation** - The data are split into a number of folds and models. Because a number of models equal to the folds is created out-of-sample predictions can be generated for the entire dataset. # ### Training/Validation Split # # The code below performs a split of the MPG data into a training and validation set. The training set uses 80% of the data and the validation set uses 20%. # # The following image shows how a model is trained on 80% of the data and then validated against the remaining 20%. # # ![Training and Validation](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_1_train_val.png "Training and Validation") # # + path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) df = df.reindex(np.random.permutation(df.index)) # Usually a good idea to shuffle mask = np.random.rand(len(df)) < 0.8 trainDF = pd.DataFrame(df[mask]) validationDF = pd.DataFrame(df[~mask]) print("Training DF: {}".format(len(trainDF))) print("Validation DF: {}".format(len(validationDF))) # - # ### K-Fold Cross Validation # # There are several types of cross validation; however, k-fold is the most common. The value K specifies the number of folds. The two most common values for K are either 5 or 10. For this course we will always use a K value of 5, or a 5-fold cross validation. A 5-fold validation is illustrated by the following diagram: # # ![K-Fold Crossvalidation](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_1_kfold.png "K-Fold Crossvalidation") # # First, the data are split into 5 equal (or close to, due to rounding) folds. 
These folds are used to generate 5 training/validation set combinations. Each of the folds becomes the validation set once, and the remaining folds become the training sets. This allows the validated results to be appended together to produce a final out-of-sample prediction for the entire dataset. # # # The following code demonstrates a 5-fold cross validation: # + import os from sklearn.model_selection import KFold import pandas as pd import numpy as np path = "./data/" filename_read = os.path.join(path,"auto-mpg.csv") df = pd.read_csv(filename_read,na_values=['NA','?']) df = df.reindex(np.random.permutation(df.index)) kf = KFold(5) fold = 1 for train_index, validate_index in kf.split(df): trainDF = pd.DataFrame(df.ix[train_index,:]) validateDF = pd.DataFrame(df.ix[validate_index]) print("Fold #{}, Training Size: {}, Validation Size: {}".format(fold,len(trainDF),len(validateDF))) fold+=1 # - # Accessing Files Directly # ======================== # It is possible to access files directly, rather than using Pandas. For class assignments you should use Pandas; however, direct access is possible. Using the CSV package, you can read the files in, line-by-line and process them. Accessing a file line-by-line can allow you to process very large files that would not fit into memory. For the purposes of this class, all files will fit into memory, and you should use Pandas for all class assignments. # + # Read a raw text file (avoid this) import codecs import os path = "./data" # Always specify your encoding! There is no such thing as "its just a text file". # See... http://www.joelonsoftware.com/articles/Unicode.html # Also see... http://www.utf8everywhere.org/ encoding = 'utf-8' filename = os.path.join(path,"auto-mpg.csv") c = 0 with codecs.open(filename, "r", encoding) as fh: # Iterate over this line by line... 
for line in fh: c+=1 # Only the first 5 lines if c>5: break print(line.strip()) # + # Read a CSV file import codecs import os import csv encoding = 'utf-8' path = "./data/" filename = os.path.join(path,"auto-mpg.csv") c = 0 with codecs.open(filename, "r", encoding) as fh: reader = csv.reader(fh) for row in reader: c+=1 if c>5: break print(row) # + # Read a CSV, symbolic headers import codecs import os import csv path = "./data" encoding = 'utf-8' filename = os.path.join(path,"auto-mpg.csv") c = 0 with codecs.open(filename, "r", encoding) as fh: reader = csv.reader(fh) # Generate header index using comprehension. # Comprehension is cool, but not necessarily a beginners feature of Python. header_idx = {key: value for (value, key) in enumerate(next(reader))} for row in reader: c+=1 if c>5: break print( "Car Name: {}".format(row[header_idx['name']])) # + # Read a CSV, manual stats import codecs import os import csv import math path = "./data/" encoding = 'utf-8' filename_read = os.path.join(path,"auto-mpg.csv") filename_write = os.path.join(path,"auto-mpg-norm.csv") c = 0 with codecs.open(filename_read, "r", encoding) as fh: reader = csv.reader(fh) # Generate header index using comprehension. # Comprehension is cool, but not necessarily a beginners feature of Python. 
header_idx = {key: value for (value, key) in enumerate(next(reader))} headers = header_idx.keys() #print([(key,{'count':0}) for key in headers]) fields = {key: value for (key, value) in [(key,{'count':0,'sum':0,'variance':0}) for key in headers] } # Pass 1, means row_count = 0 for row in reader: row_count += 1 for name in headers: try: value = float(row[header_idx[name]]) field = fields[name] field['count'] += 1 field['sum'] += value except ValueError: pass # Calculate means, toss sums (part of pass 1) for field in fields.values(): # If 90% are not missing (or non-numeric) calculate a mean if (field['count']/row_count)>0.9: field['mean'] = field['sum'] / field['count'] del field['sum'] # Pass 2, standard deviation & variance fh.seek(0) for row in reader: for name in headers: try: value = float(row[header_idx[name]]) field = fields[name] # If we failed to calculate a mean, no variance. if 'mean' in field: field['variance'] += (value - field['mean'])**2 except ValueError: pass # Calculate standard deviation, keep variance (part of pass 2) for field in fields.values(): # If no variance, then no standard deviation if 'mean' in field: field['variance'] /= field['count'] field['sdev'] = math.sqrt(field['variance']) else: del field['variance'] # Print summary stats for key in sorted(fields.keys()): print("{}:{}".format(key,fields[key])) # - # # Module 2 Assignment # # You can find the first assignmeht here: [assignment 2](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class2.ipynb)
t81_558_class2_python_ml.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) # %matplotlib inline import matplotlib import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import seaborn as sns import os import numpy as np import pandas as pd import tensorflow as tf import atecml.data from contextlib import contextmanager from tqdm import tqdm from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from sklearn.externals import joblib from sklearn.linear_model import LogisticRegressionCV #build Models... from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier import lightgbm as lgb from xgboost import XGBClassifier from sklearn.model_selection import train_test_split from sklearn.externals import joblib from imblearn.over_sampling import SMOTE, ADASYN import random plt.style.use('ggplot') # + train_df = pd.read_pickle('./01_train.dat') predictors = [x for x in train_df.columns if x not in atecml.data.NOT_FEATURE_COLUMNS] DateFold={} DateFold[0] = set(atecml.data.filter_date(train_df,start_date='2017-09-05',end_date='2017-09-12').index) DateFold[1] = set(atecml.data.filter_date(train_df,start_date='2017-09-13',end_date='2017-09-20').index) DateFold[2] = set(atecml.data.filter_date(train_df,start_date='2017-09-21',end_date='2017-09-28').index) DateFold[3] = set(atecml.data.filter_date(train_df,start_date='2017-09-29',end_date='2017-10-06').index) DateFold[4] = set(atecml.data.filter_date(train_df,start_date='2017-10-07',end_date='2017-10-14').index) 
DateFold[5] = list(atecml.data.filter_date(train_df,start_date='2017-10-15',end_date='2017-11-24').index) all_list = set(train_df.index) - set(DateFold[5]) len(all_list),len(DateFold[5]) # - WOE_LIST = joblib.load('./woe_feature.dat') CATE_LIST = WOE_LIST + atecml.data.CATE_FEATURE_LIST categorical=[] for item in predictors: if (item in CATE_LIST): categorical.append(item) num_boosting_round = 3000 early_stop_round = 100 # + params = { 'objective': 'binary', 'metric': 'auc', 'use_missing' : True, #'is_unbalance': True, 'scale_pos_weight': 98, 'learning_rate': 0.05, 'num_leaves': 64, # we should let it be smaller than 2^(max_depth) 'max_depth': -1, # -1 means no limit 'min_child_samples': 600, # Minimum number of data need in a child(min_data_in_leaf) 'max_bin': 255, # Number of bucketed bin for feature values 'colsample_bytree': 0.7, 'subsample': 0.85, # Subsample ratio of the training instance. 'subsample_freq': 1, # frequence of subsample, <=0 means no enable 'min_child_weight': 0.05, # Minimum sum of instance weight(hessian) needed in a child(leaf) 'subsample_for_bin': 200000, # Number of samples for constructing bin 'min_split_gain': 0, # lambda_l1, lambda_l2 and min_gain_to_split to regularization 'reg_alpha': 0.01, # L1 regularization term on weights 'reg_lambda': 0.1, # L2 regularization term on weights 'nthread': 40, 'n_estimators': num_boosting_round, 'verbose': -1, } rf = { 'boosting_type': 'rf', } rf.update(params) dart = { 'boosting_type': 'dart', } dart.update(params) gbdt = { 'boosting_type': 'gbdt', } gbdt.update(params) param_list = [rf,gbdt,dart] # + model_list =[] for idx in tqdm(range(0,5)): Train_DataSet = train_df[train_df.index.isin(list(all_list - DateFold[idx]))].reset_index(drop=True) Normal_DF = Train_DataSet[Train_DataSet['label']==0] Fraud_DF = Train_DataSet[Train_DataSet['label']==1] number_record_fraud = len(Fraud_DF) number_record_normal = len(Normal_DF) #undersample random_normal_indices = 
np.array(np.random.choice(Normal_DF.index,number_record_fraud,replace=False))
    filter_list = list(random_normal_indices) + list(Fraud_DF.index)
    under_sample_train = Train_DataSet[Train_DataSet.index.isin(filter_list)].reset_index(drop=True)

    Val_DataSet = train_df[train_df.index.isin(DateFold[idx])].reset_index(drop=True)
    # NOTE(review): the undersampling above tests 'label', but training targets
    # use 'Fraud' -- presumably both columns exist in train_df; confirm upstream.
    X_train = under_sample_train[predictors]
    y_train = under_sample_train['Fraud']
    X_test = Val_DataSet[predictors]
    y_test = Val_DataSet['Fraud']
    # Train one model per boosting type (rf / gbdt / dart) on this date fold.
    for item_params in (param_list):
        gbm = lgb.LGBMClassifier(**item_params)
        print('starting fit model...')
        gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)],eval_metric='auc',early_stopping_rounds=early_stop_round,verbose=100,categorical_feature=categorical)
        model_list.append(gbm)
# -

# Hold-out period (DateFold[5]) split into a voting-fit half and a scoring half.
val_df = train_df[train_df.index.isin(DateFold[5])].reset_index(drop=True)
val_df1 = val_df.head(180000)
val_df2 = val_df[~val_df.index.isin(list(val_df1.index))]

# +
# Average the positive-class probability across all trained models.
val_r_neg=pd.DataFrame()
for idx in tqdm(range(0,len(model_list))):
    model_neg = model_list[idx]
    val_neg = model_neg.predict_proba(val_df2[predictors])
    val_r_neg[idx] = pd.DataFrame(val_neg)[1]
# -

#pos_mean = val_r_pos.mean(axis=1)
neg_mean = val_r_neg.mean(axis=1)
_,_,_ = atecml.data.accuracy_validation(val_df2['Fraud'],neg_mean)

# Wrap the fitted models for a soft-voting ensemble.
vote_list =[]
for idx in tqdm(range(0,len(model_list))):
    name = 'model_' + str(idx)
    clf = model_list[idx]
    vote_list.append((name,clf))

from sklearn.ensemble import VotingClassifier
voting_clf=VotingClassifier(estimators=vote_list,voting='soft',n_jobs=40)
voting_clf.fit(val_df1[predictors],val_df1['Fraud'])

foo = voting_clf.predict_proba(val_df2[predictors])
_,_,_ = atecml.data.accuracy_validation(val_df2['Fraud'],pd.DataFrame(foo)[1])

import joblib
#joblib.dump(pos_model_list,'./pos_model_list.dat')
#joblib.dump(score_posA,'./score_pos.dat')
# NOTE(review): neg_model_list and score_negA are never defined in this
# notebook (only model_list is) -- these dumps look like stale code from an
# earlier pos/neg two-model version and will raise NameError if run as-is.
joblib.dump(neg_model_list,'./neg_model_list.dat')
joblib.dump(score_negA,'./score_neg.dat')

# +
val_df = train_df[train_df.index.isin(DateFold[5])].reset_index(drop=True)

val_r_pos=pd.DataFrame()
val_r_neg=pd.DataFrame()
# NOTE(review): this cell also depends on the undefined neg_model_list, and
# calls .predict(..., num_iteration=...) which is the raw Booster API --
# the models trained above are sklearn LGBMClassifier wrappers; verify which
# model objects this cell was written against before running.
for idx in tqdm(range(0,len(neg_model_list))):
    #model_pos = pos_model_list[idx]
    model_neg = neg_model_list[idx]
    #val_pos = model_pos.predict(val_df[predictors],num_iteration=model_pos.best_iteration)
    val_neg = model_neg.predict(val_df[predictors],num_iteration=model_neg.best_iteration)
    #val_r_pos[idx] = 1 - val_pos
    val_r_neg[idx] = val_neg
# -

#pos_mean = val_r_pos.mean(axis=1)
neg_mean = val_r_neg.mean(axis=1)
_,_,_ = atecml.data.accuracy_validation(val_df['Fraud'],neg_mean)

from sklearn.ensemble import VotingClassifier
voting_clf=VotingClassifier(estimators=[('a1',neg_model_list[0]),('rf',neg_model_list[1]),('svc',neg_model_list[2])],voting='soft')
voting_clf.fit(val_df[predictors],val_df['Fraud'])

foo = neg_model_list[1]
foo.feature_importance()

test_df = pd.read_pickle('./01_test.dat')

# +
# Score the test set with the averaged model predictions.
test_r_pos=pd.DataFrame()
test_r_neg=pd.DataFrame()
# NOTE(review): same undefined pos_model_list / neg_model_list issue as above.
for idx in tqdm(range(0,len(pos_model_list))):
    #model_pos = pos_model_list[idx]
    model_neg = neg_model_list[idx]
    #test_pos = model_pos.predict(test_df[predictors],num_iteration=model_pos.best_iteration)
    test_neg = model_neg.predict(test_df[predictors],num_iteration=model_neg.best_iteration)
    #test_r_pos[idx] = 1 - test_pos
    test_r_neg[idx] = test_neg
# -

neg_mean = test_r_neg.mean(axis=1)

# Write the submission file: one averaged fraud score per test id.
result=pd.DataFrame()
result['id'] = test_df['id']
result['score'] = neg_mean
result[['id','score']].to_csv('./submit_2018_07_05_01.csv',index=False)

result
model/Single_Model/Single-Model-Training_WOE-VOTE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import json import psycopg2 import pandas as pd import geopandas as gpd from geopandas import GeoSeries, GeoDataFrame import folium import fiona from pyproj import Proj, transform import osmnx as ox import networkx as nx import matplotlib.colors as colors import matplotlib.cm as cm from shapely.ops import cascaded_union import geojson import operator nx.__version__ # + import matplotlib.cm as cmx import matplotlib.colors as colors def style_function(feature): color = feature['properties']['color'] return { 'fillOpacity': 0.5, 'weight': 0, 'fillColor': "rgb(0, 0, 255)" if color == 2 else "rgb(255, 0, 0)" } # - ox.config(log_file=True, log_console=False, use_cache=True) location = [47.3754, 8.5413]#[47.22662, 8.81834] tags = { "landuse_tags": [ "retail" ], "amenity_tags": [ "pub", "bar", "cafe", "restaurant", "pharmacy", "bank", "fast_food", "food_court", "ice_cream", "library", "ferry_terminal", "clinic", "doctors", "hospital", "pharmacy", "veterinary", "dentist", "arts_centre", "cinema", "community_centre", "casino", "fountain", "nightclub", "studio", "theatre", "dojo", "internet_cafe", "marketplace", "post_opffice", "townhall" ], "shop_tags": [ "mall", "bakery", "beverages", "butcher", "chocolate", "coffee", "confectionery", "deli", "frozen_food", "greengrocer", "healthfood", "ice_cream", "pasta", "pastry", "seafood", "spices", "tea", "department_store", "supermarket", "bag", "boutique", "clothes", "fashion", "jewelry", "leather", "shoes", "tailor", "watches", "chemist", "cosmetics", "hairdresser", "medical_supply", "electrical", "hardware", "electronics", "sports", "swimming_pool", "collector", "games", "music", "books", "gift", "stationery", "ticket", "laundry", "pet", "tobacco", "toys" ], "leisure_tags": [ "adult_gaming_centre", 
"amusement_arcade", "beach_resort", "fitness_centre", "garden", "ice_rink", "sports_centre", "water_park" ] } # + class AoiQueries(): def __init__(self, location=None, tags=[], dbscan_eps=50, dbscan_minpoints=3): self.location = location self.tags = tags self.dbscan_eps = dbscan_eps self.dbscan_minpoints = dbscan_minpoints def _polygons_query(self): return """ (SELECT way AS geometry FROM planet_osm_polygon WHERE (amenity = ANY(ARRAY{amenity_tags}) OR shop = ANY(ARRAY{shop_tags}) OR leisure = ANY(ARRAY{leisure_tags}) OR landuse = ANY(ARRAY{landuse_tags})) AND access IS DISTINCT FROM 'private' AND st_within(way, {bbox})) UNION ALL (SELECT polygon.way AS geometry FROM planet_osm_polygon AS polygon INNER JOIN planet_osm_point AS point ON st_within(point.way, polygon.way) WHERE (point.amenity = ANY(ARRAY{amenity_tags}) OR point.shop = ANY(ARRAY{shop_tags}) OR point.leisure = ANY(ARRAY{leisure_tags}) OR point.landuse = ANY(ARRAY{landuse_tags})) AND point.access IS DISTINCT FROM 'private' AND st_within(point.way, {bbox}) AND polygon.building IS NOT NULL) """.format(bbox=self._bbox_query(), **self.tags) def _clusters_query(self): return """ WITH polygons AS ({polygons_query}) SELECT polygon.geometry AS geometry, ST_ClusterDBSCAN(polygon.geometry, eps := {eps}, minpoints := {minpoints}) over () AS cid FROM polygons AS polygon """.format(polygons_query=self._polygons_query(), eps=self.dbscan_eps, minpoints=self.dbscan_minpoints) def _hulls_query(self): return """ WITH clusters AS ({clusters_query}) SELECT cid, ST_ConvexHull(ST_Union(geometry)) AS geometry FROM clusters WHERE cid IS NOT NULL GROUP BY cid """.format(clusters_query=self._clusters_query()) def _hulls_without_water_query(self): return """ WITH hulls AS ({hulls_query}) SELECT ST_Difference(hulls.geometry, coalesce(( SELECT ST_Union(way) AS geometry FROM planet_osm_polygon WHERE (water IS NOT NULL OR waterway IS NOT NULL) AND (tunnel IS NULL OR tunnel = 'no') AND st_intersects(way, hulls.geometry) ), 
'GEOMETRYCOLLECTION EMPTY'::geometry)) AS geometry FROM hulls """.format(hulls_query=self._hulls_query()) def _clusters_and_hulls_query(self): return """ WITH clusters AS ({clusters_query}), hulls AS ({hulls_query}) SELECT cid, geometry FROM clusters UNION ALL SELECT cid, geometry FROM hulls """.format(clusters_query=self._clusters_query(), hulls_query=self._hulls_query()) def _bbox_query(self): location_3857 = transform(Proj(init='epsg:4326'), Proj(init='epsg:3857'), self.location[1], self.location[0]) location_3857 = " ".join([str(coordinate) for coordinate in location_3857]) return """ (SELECT ST_Buffer(ST_GeomFromText('POINT({})', 3857), 1000) AS bbox) """.format(location_3857) # - hulls_query = AoiQueries(location=location, tags=tags, dbscan_eps=50)._hulls_query() hulls_without_water_query = AoiQueries(location=location, tags=tags, dbscan_eps=50)._hulls_without_water_query() print(hulls_query) with psycopg2.connect("") as conn: aois = gpd.read_postgis(hulls_query, conn, geom_col='geometry') aois.crs = fiona.crs.from_epsg(3857) aois_4326 = aois.to_crs(fiona.crs.from_epsg(4326)) # + m = folium.Map(location=location, zoom_start=16) folium.GeoJson(aois).add_to(m) m # + def bbox_query(): location_3857 = transform(Proj(init='epsg:4326'), Proj(init='epsg:3857'), location[1], location[0]) location_3857 = " ".join([str(coordinate) for coordinate in location_3857]) return """ (SELECT ST_Buffer(ST_GeomFromText('POINT({})', 3857), 1000) AS bbox) """.format(location_3857) water_query = """ SELECT ST_Union(way) AS geometry FROM planet_osm_polygon WHERE (water IS NOT NULL OR waterway IS NOT NULL) AND (tunnel IS NULL OR tunnel = 'no') AND st_intersects(way, {bbox}) """.format(bbox=bbox_query()) with psycopg2.connect("") as conn: aois = gpd.read_postgis(water_query, conn, geom_col='geometry') aois.crs = fiona.crs.from_epsg(3857) aois_4326 = aois.to_crs(fiona.crs.from_epsg(4326)) m = folium.Map(location=location, zoom_start=16) folium.GeoJson(aois).add_to(m) m # - with 
psycopg2.connect("") as conn: aois = gpd.read_postgis(hulls_without_water_query, conn, geom_col='geometry') aois.crs = fiona.crs.from_epsg(3857) aois_4326 = aois.to_crs(fiona.crs.from_epsg(4326)) # + m = folium.Map(location=location, zoom_start=16) folium.GeoJson(aois).add_to(m) m # -
notebooks/notebooks/11- Exclude Water.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from tqdm import tqdm

from src.nn.MDN import MDN, mdn_loss
from src.nn.MLP import MLP
# -


def generate_simple_data(n_samples):
    """Generate noisy samples of y = 7*sin(0.75*x) + 0.5*x.

    Parameters
    ----------
    n_samples : int
        Number of (x, y) pairs to draw.

    Returns
    -------
    tuple of np.ndarray
        x drawn uniformly from [-10.5, 10.5] and the noisy target y.
    """
    epsilon = np.random.normal(size=(n_samples))  # sample noise from Gaussian dist with mean 0, std = 1.0
    x = np.random.uniform(-10.5, 10.5, n_samples)
    y = 7 * np.sin(0.75 * x) + 0.5 * x + epsilon
    return x, y


# +
y, x = generate_simple_data(1000)  # just swap x,y to generate 'many-to-one' mapping

fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ax.scatter(x, y, alpha=0.2, label='random sampled data')
plt.legend()
plt.show()
# -

from src.nn.GaussianNet import GaussianNetwork, gaussianLL

# +
gaussianNet = GaussianNetwork(1, 100, 1)
opt = torch.optim.Adam(gaussianNet.parameters())

x_tensor = torch.tensor(x).reshape(-1, 1).float()
y_tensor = torch.tensor(y).reshape(-1, 1).float()
# -

# ## Visualizing the NOT trained model

# +
_x_tensor = torch.linspace(-10, 10, 100).reshape(-1, 1)
mu, std = gaussianNet(_x_tensor)

N_STD = 2
x_axis = np.squeeze(_x_tensor.detach().numpy())
upper = mu + N_STD * std
upper_np = np.squeeze(upper.detach().numpy())
lower = mu - N_STD * std
lower_np = np.squeeze(lower.detach().numpy())

fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ax.scatter(x, y, alpha=0.2, label='Sampled')
ax.plot(x_axis, mu.detach().numpy(), color='orange', label='prediction')
ax.fill_between(x_axis, upper_np, lower_np, alpha=0.5, label='=+/- {} std'.format(N_STD))
plt.legend()
plt.show()
# -

for i in tqdm(range(1500)):
    mu, std = gaussianNet(x_tensor)
    loss = gaussianLL(mu, std, y_tensor)
    # maximize the log-likelihood == minimize its negation
    loss = -loss.mean()

    opt.zero_grad()
    loss.backward()
    opt.step()

# ## Gaussian network is not capable of fitting multimodal outcomes
#

# +
_x_tensor = torch.linspace(-10, 10, 100).reshape(-1, 1)
mu, std = gaussianNet(_x_tensor)

N_STD = 2
x_axis = np.squeeze(_x_tensor.detach().numpy())
upper = mu + N_STD * std
upper_np = np.squeeze(upper.detach().numpy())
lower = mu - N_STD * std
lower_np = np.squeeze(lower.detach().numpy())

fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ax.scatter(x, y, alpha=0.2, label='Sampled')
ax.plot(x_axis, mu.detach().numpy(), color='orange', label='prediction')
ax.fill_between(x_axis, upper_np, lower_np, alpha=0.5, label='=+/- {} std'.format(N_STD))
plt.legend()
plt.show()
# -

# ## Quick fix: Mixture Density Network

# +
# Bug fix: fall back to CPU when CUDA is unavailable instead of
# crashing with "Torch not compiled with CUDA enabled" / invalid device.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

mdn = nn.Sequential(MLP(1, 64, hidden_act='LeakyReLU'),
                    MDN(64, 1, 5)).to(device)
opt = torch.optim.Adam(mdn.parameters())

# +
x_tensor = x_tensor.to(device)
y_tensor = y_tensor.to(device)

for i in range(30000):
    pi, mu, sigma = mdn(x_tensor)
    loss = mdn_loss(pi, mu, sigma, y_tensor)

    opt.zero_grad()
    loss.backward()
    opt.step()

    if i % 3000 == 0:
        print("[{} epoch] loss = {}".format(i, loss.item()))
# -

# +
mdn.to('cpu')

x_test_data = np.linspace(-15, 15, 1000)
x_test_tensor = torch.from_numpy(np.float32(x_test_data).reshape(1000, 1))
pi, mu, sigma = mdn(x_test_tensor)

pi_data = pi.data.numpy()
sigma_data = sigma.data.numpy()
mu_data = mu.data.numpy()
# -


def gumbel_sample(x, axis=1):
    """Sample component indices from categorical weights via the Gumbel-max trick.

    Parameters
    ----------
    x : np.ndarray
        Mixture weights (e.g. shape (n, k)); need not be log-space.
    axis : int
        Axis along which to pick the argmax (the component axis).

    Returns
    -------
    np.ndarray
        Sampled component index per row.
    """
    z = np.random.gumbel(loc=0, scale=1, size=x.shape)
    # 1e-10 guards log(0) for components with zero weight
    return (np.log(x + 1e-10) + z).argmax(axis=axis)


# +
n_samples = 1000
x_test_data = np.linspace(-10, 10, n_samples)
x_test_tensor = torch.from_numpy(np.float32(x_test_data).reshape(-1, 1))
pi, mu, sigma = mdn(x_test_tensor)

pi_data = np.squeeze(pi.data.numpy())
sigma_data = np.squeeze(sigma.data.numpy())
mu_data = np.squeeze(mu.data.numpy())
k = gumbel_sample(pi_data)

# +
indices = (np.arange(n_samples), k)
rn = np.random.randn(n_samples)
# reparameterized draw from the selected Gaussian component
sampled = rn * sigma_data[indices] + mu_data[indices]

plt.figure(figsize=(8, 8))
plt.scatter(x, y, alpha=0.2)
plt.scatter(x_test_data, sampled, alpha=0.2, color='red')
plt.show()
# -
[Lab] Day1-03 Mixture Density Network.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from collections import Counter

# ## Finding the occurrence of letters in a string
#
# - Suppose we have a string "Interview" the output should be like the below,
#
# i=2\
# n=1\
# t=1\
# e=2\
# r=1\
# v=1\
# w=1


# +
def find_occurence(word):
    """Return a dict mapping each character of *word* to its count.

    Counting is case-insensitive (the string is lower-cased first).

    Parameters
    ----------
    word : str
        Input string; may be empty.

    Returns
    -------
    dict
        Character -> number of occurrences, in first-seen order.
    """
    # Counter does the tallying in C and, being a dict subclass,
    # preserves first-seen order exactly like the original manual loop.
    return dict(Counter(word.lower()))


print(find_occurence("Interview"))
# -
Notebooks/Interview_Coding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import numpy as np import pandas as pd from alphamind.api import * from PyFin.api import * from matplotlib import pyplot as plt plt.style.use('ggplot') # # Parameter Setting # ---------------------- def _map_freq(freq): if freq == '1m': horizon = 21 elif freq == '1w': horizon = 4 elif freq == '2w': horizon = 8 elif freq == '3w': horizon = 12 elif freq == '1d': horizon = 0 else: raise ValueError("Unrecognized freq: {0}".format(freq)) return horizon # + alpha_factors = { 'eps': LAST('eps_q'), 'roe': LAST('roe_q'), 'bdto': LAST('BDTO'), 'cfinc1': LAST('CFinc1'), 'chv': LAST('CHV'), 'rvol': LAST('RVOL'), 'val': LAST('VAL'), 'grev': LAST('GREV'), 'droeafternonorecurring': LAST('DROEAfterNonRecurring') } engine = SqlEngine() universe = Universe('custom', ['zz500']) benchmark_code = 905 neutralize_risk = ['SIZE'] + industry_styles constraint_risk = ['SIZE'] + industry_styles start_date = '2012-01-01' end_date = '2017-11-02' industry_lower = 1. industry_upper = 1. 
freq = '2w' batch = 8 # - data_package = fetch_data_package(engine, alpha_factors=alpha_factors, start_date=start_date, end_date=end_date, frequency=freq, universe=universe, benchmark=benchmark_code, batch=batch, neutralized_risk=neutralize_risk, pre_process=[winsorize_normal], post_process=[winsorize_normal], warm_start=batch) # + train_x = data_package['train']['x'] train_y = data_package['train']['y'] predict_x = data_package['predict']['x'] predict_y = data_package['predict']['y'] settlement = data_package['settlement'] features = data_package['x_names'] # - # # Naive Executor Strategy # --------------------------------- # + dates = sorted(train_x.keys()) model_df = pd.Series() horizon = _map_freq(freq) rets = [] turn_overs = [] executor = NaiveExecutor() leverags = [] for i, ref_date in enumerate(dates): # Model Training and Prediction sample_train_x = train_x[ref_date] sample_train_y = train_y[ref_date].flatten() model = LinearRegression() model.fit(sample_train_x, sample_train_y) sample_test_x = predict_x[ref_date] sample_test_y = predict_y[ref_date].flatten() er = model.predict(sample_test_x) model_df.loc[ref_date] = model # Constraints Building # today_settlement = settlement[settlement.trade_date == ref_date] codes = today_settlement.code.tolist() dx_return = None risk_exp = today_settlement[neutralize_risk].values.astype(float) industry = today_settlement.industry.values benchmark_w = today_settlement.weight.values constraint_exp = today_settlement[constraint_risk].values risk_exp_expand = np.concatenate((constraint_exp, np.ones((len(risk_exp), 1))), axis=1).astype(float) risk_names = constraint_risk + ['total'] risk_target = risk_exp_expand.T @ benchmark_w lbound = np.zeros(len(today_settlement)) ubound = 0.01 + benchmark_w constraint = Constraints(risk_exp_expand, risk_names) for i, name in enumerate(risk_names): if name == 'total' or name == 'SIZE': constraint.set_constraints(name, lower_bound=risk_target[i], upper_bound=risk_target[i]) else: 
constraint.set_constraints(name, lower_bound=risk_target[i]*industry_lower, upper_bound=risk_target[i]*industry_upper) target_pos, _ = er_portfolio_analysis(er, industry, dx_return, constraint, False, benchmark_w) target_pos['code'] = today_settlement['code'].values turn_over, executed_pos = executor.execute(target_pos=target_pos) executed_codes = executed_pos.code.tolist() dx_retuns = engine.fetch_dx_return(ref_date.strftime('%Y-%m-%d'), executed_codes, horizon=horizon) result = pd.merge(executed_pos, today_settlement[['code', 'weight']], on=['code'], how='inner') result = pd.merge(result, dx_retuns, on=['code']) leverage = result.weight_x.abs().sum() ret = (result.weight_x - result.weight_y * leverage / result.weight_y.sum()).values @ result.dx.values rets.append(ret) executor.set_current(executed_pos) turn_overs.append(turn_over) leverags.append(leverage) print('{0} is finished'.format(ref_date)) # - ret_df1 = pd.DataFrame({'returns': rets, 'turn_over': turn_overs, 'leverage': leverage}, index=dates) ret_df1.loc[advanceDateByCalendar('china.sse', dates[-1], freq)] = 0. ret_df1 = ret_df1.shift(1) ret_df1.iloc[0] = 0. 
ret_df1['tc_cost'] = ret_df1.turn_over * 0.002 ret_df1[['returns', 'tc_cost']].cumsum().plot(figsize=(12, 6), title='Fixed frequency rebalanced: {0}'.format(freq), secondary_y='tc_cost') ret_atfer_tc = ret_df1.returns - ret_df1.tc_cost print("sharp: ", ret_atfer_tc.mean() / ret_atfer_tc.std() * np.sqrt(52)) ret_df1[['returns', 'leverage']].rolling(window=60).std().plot(figsize=(12, 6), title='rolling std', secondary_y='leverage') # # Threshold Turn Over + Strategy # ------------------------------------ freq = '1d' horizon = _map_freq(freq) dates = makeSchedule(start_date, end_date, tenor=freq, calendar='china.sse', dateGenerationRule=DateGeneration.Backward) all_data = engine.fetch_data_range(universe, alpha_factors, dates=dates, benchmark=905) factor_all_data = all_data['factor'] factor_groups = factor_all_data.groupby('trade_date') # + rets = [] turn_overs = [] turn_over_threshold = 0.90 executor = ThresholdExecutor(turn_over_threshold=turn_over_threshold) execution_pipeline = ExecutionPipeline(executors=[executor]) leverags = [] execution_dates = [] horizon = _map_freq(freq) for i, value in enumerate(factor_groups): date = value[0] data = value[1] # get the latest model models = model_df[model_df.index <= date] if models.empty: continue execution_dates.append(date) model = models[-1] codes = data.code.tolist() ref_date = date.strftime('%Y-%m-%d') total_data = data.dropna() dx_return = None risk_exp = total_data[neutralize_risk].values.astype(float) industry = total_data.industry.values benchmark_w = total_data.weight.values constraint_exp = total_data[constraint_risk].values risk_exp_expand = np.concatenate((constraint_exp, np.ones((len(risk_exp), 1))), axis=1).astype(float) risk_names = constraint_risk + ['total'] risk_target = risk_exp_expand.T @ benchmark_w lbound = np.zeros(len(total_data)) ubound = 0.01 + benchmark_w constraint = Constraints(risk_exp_expand, risk_names) for i, name in enumerate(risk_names): if name == 'total' or name == 'SIZE': 
constraint.set_constraints(name, lower_bound=risk_target[i], upper_bound=risk_target[i]) else: constraint.set_constraints(name, lower_bound=risk_target[i]*industry_lower, upper_bound=risk_target[i]*industry_upper) factors_values = factor_processing(total_data[features].values, pre_process=[winsorize_normal], post_process=[winsorize_normal]) er = model.predict(factors_values) target_pos, _ = er_portfolio_analysis(er, industry, dx_return, constraint, False, benchmark_w) target_pos['code'] = total_data['code'].values turn_over, executed_pos = execution_pipeline.execute(target_pos=target_pos) executed_codes = executed_pos.code.tolist() dx_retuns = engine.fetch_dx_return(date, executed_codes, horizon=horizon) result = pd.merge(executed_pos, total_data, on=['code'], how='inner') result = pd.merge(result, dx_retuns, on=['code']) leverage = result.weight_x.abs().sum() ret = (result.weight_x - result.weight_y * leverage / result.weight_y.sum()).values @ result.dx.values rets.append(ret) leverags.append(executed_pos.weight.abs().sum()) turn_overs.append(turn_over) print('{0} is finished: {1}'.format(date, turn_over)) # - ret_df2 = pd.DataFrame({'returns': rets, 'turn_over': turn_overs, 'leverage': leverags}, index=execution_dates) ret_df2.loc[advanceDateByCalendar('china.sse', dates[-1], freq)] = 0. ret_df2 = ret_df2.shift(1) ret_df2.iloc[0] = 0. 
ret_df2['tc_cost'] = ret_df2.turn_over * 0.002 ret_df2[['returns', 'tc_cost']].cumsum().plot(figsize=(12, 6), title='Threshold tc rebalanced: Monitored freq {0}, {1} tc'.format(freq, turn_over_threshold), secondary_y='tc_cost') ret_atfer_tc = ret_df2.returns - ret_df2.tc_cost print("sharp: ", ret_atfer_tc.mean() / ret_atfer_tc.std() * np.sqrt(252)) ret_df2[['returns', 'leverage']].rolling(window=60).std().plot(figsize=(12, 6), title='rolling std', secondary_y='leverage') # # Target Vol + Threshold Turn Over + Strategy # ------------------------ # + rets = [] turn_overs = [] target_vol = 0.002 turn_over_threshold = 0.70 window = 30 executor1 = TargetVolExecutor(window=window, target_vol=target_vol) executor2 = ThresholdExecutor(turn_over_threshold=turn_over_threshold, is_relative=False) execution_pipeline = ExecutionPipeline(executors=[executor1, executor2]) leverags = [] execution_dates = [] horizon = _map_freq(freq) for i, value in enumerate(factor_groups): date = value[0] data = value[1] # get the latest model models = model_df[model_df.index <= date] if models.empty: continue execution_dates.append(date) model = models[-1] codes = data.code.tolist() ref_date = date.strftime('%Y-%m-%d') total_data = data.dropna() dx_return = None risk_exp = total_data[neutralize_risk].values.astype(float) industry = total_data.industry.values benchmark_w = total_data.weight.values constraint_exp = total_data[constraint_risk].values risk_exp_expand = np.concatenate((constraint_exp, np.ones((len(risk_exp), 1))), axis=1).astype(float) risk_names = constraint_risk + ['total'] risk_target = risk_exp_expand.T @ benchmark_w lbound = np.zeros(len(total_data)) ubound = 0.01 + benchmark_w constraint = Constraints(risk_exp_expand, risk_names) for i, name in enumerate(risk_names): if name == 'total' or name == 'SIZE': constraint.set_constraints(name, lower_bound=risk_target[i], upper_bound=risk_target[i]) else: constraint.set_constraints(name, lower_bound=risk_target[i]*industry_lower, 
upper_bound=risk_target[i]*industry_upper) factors_values = factor_processing(total_data[features].values, pre_process=[winsorize_normal], post_process=[winsorize_normal]) er = model.predict(factors_values) target_pos, _ = er_portfolio_analysis(er, industry, dx_return, constraint, False, benchmark_w) target_pos['code'] = total_data['code'].values turn_over, executed_pos = execution_pipeline.execute(target_pos=target_pos) executed_codes = executed_pos.code.tolist() dx_retuns = engine.fetch_dx_return(date, executed_codes, horizon=horizon) result = pd.merge(executed_pos, total_data, on=['code'], how='inner') result = pd.merge(result, dx_retuns, on=['code']) leverage = result.weight_x.abs().sum() ret = (result.weight_x - result.weight_y * leverage / result.weight_y.sum()).values @ result.dx.values rets.append(ret) execution_pipeline.update({'return': ret}) turn_overs.append(turn_over) leverags.append(executed_pos.weight.abs().sum()) print('{0} is finished: turn_over: {1}, levegare: {2}'.format(date, turn_over, leverags[-1])) # - ret_df3 = pd.DataFrame({'returns': rets, 'turn_over': turn_overs, 'leverage': leverags}, index=execution_dates) ret_df3.loc[advanceDateByCalendar('china.sse', dates[-1], freq)] = 0. ret_df3 = ret_df3.shift(1) ret_df3.iloc[0] = 0. 
ret_df3['tc_cost'] = ret_df3.turn_over * 0.002 ret_df3[['returns', 'tc_cost']].cumsum().plot(figsize=(12, 6), title='Threshold tc + Target vol rebalanced: Monitored freq {0}, {1} tc, {2} vol target'.format(freq, turn_over_threshold, target_vol), secondary_y='tc_cost') ret_df3[['returns', 'leverage']].rolling(window=60).std().plot(figsize=(12, 6), title='rolling std', secondary_y='leverage') ret_atfer_tc = ret_df3.returns - ret_df3.tc_cost print("sharp: ", ret_atfer_tc.mean() / ret_atfer_tc.std() * np.sqrt(252)) ret_df3.tail() # # Target Turn Over + Strategy # ------------------------ # + rets = [] turn_overs = [] turn_over_target_base = 0.04 executor = NaiveExecutor() execution_pipeline = ExecutionPipeline(executors=[executor]) leverags = [] previous_pos = pd.DataFrame() execution_dates = [] horizon = _map_freq(freq) for i, value in enumerate(factor_groups): date = value[0] data = value[1] # get the latest model models = model_df[model_df.index <= date] if models.empty: continue execution_dates.append(date) model = models[-1] codes = data.code.tolist() ref_date = date.strftime('%Y-%m-%d') total_data = data.dropna() dx_return = None risk_exp = total_data[neutralize_risk].values.astype(float) industry = total_data.industry.values benchmark_w = total_data.weight.values constraint_exp = total_data[constraint_risk].values risk_exp_expand = np.concatenate((constraint_exp, np.ones((len(risk_exp), 1))), axis=1).astype(float) risk_names = constraint_risk + ['total'] risk_target = risk_exp_expand.T @ benchmark_w lbound = np.zeros(len(total_data)) ubound = 0.01 + benchmark_w constraint = Constraints(risk_exp_expand, risk_names) for i, name in enumerate(risk_names): if name == 'total' or name == 'SIZE': constraint.set_constraints(name, lower_bound=risk_target[i], upper_bound=risk_target[i]) else: constraint.set_constraints(name, lower_bound=risk_target[i]*industry_lower, upper_bound=risk_target[i]*industry_upper) factors_values = factor_processing(total_data[features].values, 
pre_process=[winsorize_normal], post_process=[winsorize_normal]) er = model.predict(factors_values) codes = total_data['code'].values if previous_pos.empty: current_position = None turn_over_target = None else: previous_pos.set_index('code', inplace=True) remained_pos = previous_pos.loc[codes] remained_pos.fillna(0., inplace=True) turn_over_target = turn_over_target_base current_position = remained_pos.weight.values try: target_pos, _ = er_portfolio_analysis(er, industry, dx_return, constraint, False, benchmark_w, current_position=current_position, turn_over_target=turn_over_target) except ValueError: print('{0} full rebalance'.format(date)) target_pos, _ = er_portfolio_analysis(er, industry, dx_return, constraint, False, benchmark_w) target_pos['code'] = codes turn_over, executed_pos = execution_pipeline.execute(target_pos=target_pos) executed_codes = executed_pos.code.tolist() dx_retuns = engine.fetch_dx_return(date, executed_codes, horizon=horizon) result = pd.merge(executed_pos, total_data, on=['code'], how='inner') result = pd.merge(result, dx_retuns, on=['code']) leverage = result.weight_x.abs().sum() ret = (result.weight_x - result.weight_y * leverage / result.weight_y.sum()).values @ result.dx.values rets.append(ret) leverags.append(executed_pos.weight.abs().sum()) turn_overs.append(turn_over) previous_pos = executed_pos print('{0} is finished: {1}'.format(date, turn_over)) # - ret_df4 = pd.DataFrame({'returns': rets, 'turn_over': turn_overs, 'leverage': leverags}, index=execution_dates) ret_df4.loc[advanceDateByCalendar('china.sse', dates[-1], freq)] = 0. ret_df4 = ret_df4.shift(1) ret_df4.iloc[0] = 0. 
ret_df4['tc_cost'] = ret_df4.turn_over * 0.002 ret_df4[['returns', 'tc_cost']].cumsum().plot(figsize=(12, 6), title='Target turn over rebalanced: Rebalance freq {0}, {1} turnover_target'.format(freq, turn_over_target_base), secondary_y='tc_cost') ret_atfer_tc = ret_df4.returns - ret_df4.tc_cost print("sharp: ", ret_atfer_tc.mean() / ret_atfer_tc.std() * np.sqrt(252)) ret_df4[['returns', 'leverage']].rolling(window=60).std().plot(figsize=(12, 6), title='rolling std', secondary_y='leverage')
notebooks/target_vol_executor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="nI3BCU_tMsXg" from tensorflow.keras.datasets.mnist import load_data # + colab={"base_uri": "https://localhost:8080/"} id="QIHWf5MEObAT" outputId="f46d7048-bfa9-4240-d30f-553d1d709dba" from matplotlib import pyplot #load dataset (trainX, trainY), (testX, testY) = load_data() #summerize loaded dataset print('Train: X=%s, Y=%s' %(trainX.shape, trainY.shape)) print('Test: X=%s, Y=%s' %(testX.shape, testY.shape)) # + colab={"base_uri": "https://localhost:8080/", "height": 269} id="IRh84E5rPkwi" outputId="68fcaa2e-8ba6-414b-cda0-d033a8d41b11" for i in range(25): #define subplot pyplot.subplot(5,5,i+1) #plot raw pixel data pyplot.imshow(trainX[i], cmap='gray') pyplot.show() # + id="yDZ4DFG2wqPa" from numpy import asarray, unique, argmax from tensorflow.keras import Sequential from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout import matplotlib.pyplot as plt # + colab={"base_uri": "https://localhost:8080/"} id="Lo4JygkCzhMR" outputId="bac4b3f0-ff1c-4192-ec45-5ee55f87955c" (x_train, y_train), (x_test,y_test) = load_data() # reshape data to have a single channel x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)) x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)) # determine the shape of the input images in_shape = x_train.shape[1:] # determine the number of classes n_classes = len(unique(y_train)) print(in_shape, n_classes) # + id="M9wE0jLc17f7" # normalize pixel values x_train = x_train.astype('float32') / 255.0 x_test = x_test.astype('float32') / 255.0 # + colab={"base_uri": "https://localhost:8080/"} id="hjqexUP142z6" outputId="15b9f10c-34c0-460a-c4fd-e0558c8d1067" #define CNN Model model = Sequential() model.add(Conv2D(32, (3,3), activation='relu', 
kernel_initializer='he_uniform', input_shape=in_shape)) model.add(MaxPool2D((2,2))) model.add(Flatten()) model.add(Dense(100, activation= 'relu', kernel_initializer='he_uniform')) model.add(Dropout(0.5)) model.add(Dense(n_classes, activation='softmax')) model.summary() # define loss and optimizer model.compile(optimizer= 'adam', loss = 'sparse_categorical_crossentropy', metrics=['accuracy']) # fit the model model.fit(x_train, y_train, validation_split=0.2, epochs=50, batch_size=128, verbose= 1) # + colab={"base_uri": "https://localhost:8080/"} id="6_u4ZE9C_zo_" outputId="741e0452-10dd-4ac2-e106-b8eb26c32d81" #evaluate the model loss, acc = model.evaluate(x_test, y_test, verbose=1) print('Accuracy: %.3f' %acc) #make a pridiction image = x_test[0] yhat = model.predict(asarray([image])) print('Predicted: Class =%d' %argmax(yhat)) # + id="5TPKW181I3s-"
Handwritten_Digit_Recognization_CNN_MNIST.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Implementing market basket analysis

# Loading necessary packages
import numpy as np
import pandas as pd
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules

# Reading data from the web (UCI Online Retail dataset).
# Bug fix: this load was commented out while the very next cell used
# `myretaildata`, so the whole notebook failed with NameError.
myretaildata = pd.read_excel('http://archive.ics.uci.edu/ml/machine-learning-databases/00352/Online%20Retail.xlsx')

myretaildata.head()

# # Data Preparation

# Data cleaning
myretaildata['Description'] = myretaildata['Description'].str.strip()  # removes spaces from beginning and end
myretaildata.dropna(axis=0, subset=['InvoiceNo'], inplace=True)  # drops rows with a missing invoice number
myretaildata['InvoiceNo'] = myretaildata['InvoiceNo'].astype('str')  # converting invoice number to be string
myretaildata = myretaildata[~myretaildata['InvoiceNo'].str.contains('C')]  # remove the credit transactions

myretaildata.head()

myretaildata['Country'].value_counts()
# myretaildata.shape

# Separating transactions for Germany: one row per invoice, one column
# per item description, cell = total quantity ordered on that invoice.
mybasket = (myretaildata[myretaildata['Country'] == "Germany"]
            .groupby(['InvoiceNo', 'Description'])['Quantity']
            .sum().unstack().reset_index().fillna(0)
            .set_index('InvoiceNo'))

# viewing transaction basket
mybasket.head()


# +
# converting all positive values to 1 and everything else to 0
def my_encode_units(x):
    """Encode a quantity as 1 when positive, else 0.

    Bug fix: the original returned None for fractional quantities in
    (0, 1) (it only handled x <= 0 and x >= 1), which corrupted the
    boolean basket matrix expected by apriori.
    """
    return 1 if x > 0 else 0


my_basket_sets = mybasket.applymap(my_encode_units)
my_basket_sets.drop('POSTAGE', inplace=True, axis=1)  # Remove "postage" as an item
# -

# # Training Model

# Generating frequent itemsets
my_frequent_itemsets = apriori(my_basket_sets, min_support=0.07, use_colnames=True)

# generating rules
my_rules = association_rules(my_frequent_itemsets, metric="lift", min_threshold=1)

# viewing top 100 rules
my_rules.head(100)

# # Making recommendations

my_basket_sets['ROUND SNACK BOXES SET OF4 WOODLAND'].sum()

my_basket_sets['SPACEBOY LUNCH BOX'].sum()

# Filtering rules based on condition
my_rules[(my_rules['lift'] >= 3) &
         (my_rules['confidence'] >= 0.3)]
Examples/Market Basket Analysis/Market Basket Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_tensorflow_p36) # language: python # name: conda_tensorflow_p36 # --- # ## Classification of text articles # Links # * [Reuters newswire dataset](https://keras.io/datasets/) # * [Reuters text classification](https://www.bonaccorso.eu/2016/08/02/reuters-21578-text-classification-with-gensim-and-keras/) # * [Keras Reuters MLP example](https://github.com/keras-team/keras/blob/master/examples/reuters_mlp.py) # + from keras.datasets import reuters import numpy as np from keras.preprocessing.text import Tokenizer import keras from keras.models import Sequential from keras.layers import Dense from keras.layers import Activation from keras.layers import Dropout from keras import backend as K # - # The Reuters dataset is made up of 11,228 newswires from Reuters, labeled over 46 topics. The word index used to encode the sequences is stored in the reuters_word_index.json file. 
word_index = reuters.get_word_index(path="reuters_word_index.json") print('There are', len(word_index), 'words used to encode.') max_words = 1000 batch_size = 32 epochs = 5 # + print('Loading Reuters data...') (x_train, y_train), (x_test, y_test) = reuters.load_data( num_words=max_words, test_split=0.2) print(len(x_train), 'train sequences') print(len(x_test), 'test sequences') num_classes = np.max(y_train) + 1 print(num_classes, 'classes') # - print('Vectorizing sequence data...') tokenizer = Tokenizer(num_words=max_words) x_train = tokenizer.sequences_to_matrix(x_train, mode='binary') x_test = tokenizer.sequences_to_matrix(x_test, mode='binary') print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('Convert class vector to binary class matrix ' '(for use with categorical_crossentropy)') y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) print('y_train shape:', y_train.shape) print('y_test shape:', y_test.shape) # + print('Building model...') K.clear_session() model = Sequential() model.add(Dense(512, input_shape=(max_words,))) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() # - history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.1) score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=1) print('Test loss:', score[0]) print('Test accuracy:', score[1])
code/notebooks/keras/07_Reuters_article_text_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---


# +
def calculadora():
    """Interactive console calculator (Portuguese UI).

    Prompts the user for an operation (1-4) and two integers on stdin,
    then prints the result. Runs once per call; all interaction goes
    through input()/print(), so there is no return value.
    """
    # Step 1 - print the banner
    print('******************* Python Calculator *******************')
    # Step 2 - ask the user to pick an operation
    print('Selecione o número da operação desejada:')
    # Step 3 - show the available operations
    print('1 - Soma \n2 - Subtração \n3 - Multiplicação \n4 - Divisão')
    # Step 4 - read the chosen operation
    operação = int(input('Digite sua opção (1/2/3/4): '))
    # Step 5 - read the first operand
    num1 = int(input('Digite o primeiro número: '))
    # Step 6 - read the second operand
    num2 = int(input('Digite o segundo número: '))
    # Step 7 - perform the chosen operation on the two operands
    if operação == 1:
        print('%s + %s = ' % (num1, num2), num1 + num2)
    elif operação == 2:
        print('%s - %s = ' % (num1, num2), num1 - num2)
    elif operação == 3:
        print('%s x %s = ' % (num1, num2), num1 * num2)
    elif operação == 4:
        # Bug fix: guard against an uncaught ZeroDivisionError.
        if num2 == 0:
            print('Divisão por zero não é permitida!')
        else:
            print('%s / %s = ' % (num1, num2), num1 / num2)
    else:
        # Bug fix: the original only rejected options > 4, so 0 and
        # negative choices were silently ignored; reject everything else.
        print('Opção inválida!')


calculadora()
# -

# ##
Cap03/Lab02/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # Problem 1: Variance in the coalescent # === # # A few imports # - import numpy as np import matplotlib.pyplot as plt from scipy.stats import expon, poisson # + [markdown] slideshow={"slide_type": "slide"} # Population parameters # + N = 10000 n = 100 mu = 0.003 loci = 10 S_empirical = 1000 # the number of samples we'll use for simulating distributions samples = 1000 # + [markdown] slideshow={"slide_type": "slide"} # Define a convenience function for the binomial coefficient $\binom{i}{2}$ # - def i_choose_2(i): return i * (i - 1) / 2 # + [markdown] slideshow={"slide_type": "slide"} # Generator yielding sequence of simulated intercoalescent time intervals # $\mathbb{E}[T_n], \mathbb{E}[T_{n-1}], \dots, \mathbb{E}[T_2]$ # - def intercoalescent_times(N, n): for i in range(2, n + 1): rate = i_choose_2(i) / (2 * N) yield expon.rvs(scale=1/rate) # + [markdown] slideshow={"slide_type": "fragment"} # For example # - sum(intercoalescent_times(N, n)) # + [markdown] slideshow={"slide_type": "slide"} # Similarly, a generator yeilding the expected times # - def intercoalescent_times_expected(N, n): for i in range(2, n + 1): yield 2 * N / i_choose_2(i) # + [markdown] slideshow={"slide_type": "fragment"} # For example # - sum(intercoalescent_times_expected(N, n)) # + [markdown] slideshow={"slide_type": "slide"} # Use these functions to simulate the distribution of TMRCA for one locus # - plt.hist([sum(intercoalescent_times(N, n)) for _ in range(samples)], bins=30) plt.axvline(sum(intercoalescent_times_expected(N, n)), color='r') plt.xlabel('TMRCA'); # + [markdown] slideshow={"slide_type": "slide"} # Function to simulate the number of segregating sites # - def S(N, n, mu): return sum(poisson.rvs(mu * i * Ti) for 
i, Ti in enumerate(intercoalescent_times(N, n), 2)) # + [markdown] slideshow={"slide_type": "fragment"} # For example # - S(N, n, mu) # + [markdown] slideshow={"slide_type": "slide"} # Similarly, a function giving the expected $S$ # - def S_expected(N, n, mu): return sum(mu * i * Ti for i, Ti in enumerate(intercoalescent_times_expected(N, n), 2)) # + [markdown] slideshow={"slide_type": "fragment"} # For example # - S_expected(N, n, mu) # + [markdown] slideshow={"slide_type": "slide"} # Use these functions to simulate the distribution of $S$ for one locus # - plt.hist([S(N, n, mu) for _ in range(samples)], bins=30) plt.axvline(S_expected(N, n, mu), color='r') plt.xlabel('$S$'); # + [markdown] slideshow={"slide_type": "slide"} # Plot the simulated distribution of the max $S$ value, and indicate its observe value # + # null simulation values Smax_null = np.array([max(S(N, n, mu) for _ in range(loci)) for _ in range(samples)]) plt.hist(Smax_null, bins=30) plt.axvline(S_empirical, color='r') plt.xlabel('$S_{\\max}$'); # + [markdown] slideshow={"slide_type": "slide"} # $p$-value for $S_{\rm max}$ observed accross loci. # - sum(Smax_null >= S_empirical) / samples # + [markdown] slideshow={"slide_type": "fragment"} # Are we impressed? No.
_teaching/2020-spring-gs541/hw.solution.problem1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.9 64-bit
#     language: python
#     name: python36964bit6486db22bd404319aca96f84d42312c3
# ---

from PIL import Image, ImageDraw, ImageFilter

# +
# urllib3 is not in the standard library; it is used (and vendored) by
# requests, so you probably already have it.
# urllib and urllib2 existed in the Python 2 days. In Python 3 there is only
# urllib, and even urllib's own docs recommend requests for high-level use.
# urllib.request.urlopen() uses HTTP/1.1 and includes a Connection: close
# header in its HTTP requests.
from io import BytesIO
import urllib
import requests
# -

# # create new blank image

# +
w = 600
h = 400

# RGBA: 4x8 bit true color with alpha
# RGB:  3x8 bit true color
# L:    1x8 bit black and white
im = Image.new(mode='RGBA', size=(w, h), color=(177, 2, 177, 250))
print(f'format: {im.format}, size: {im.size}, mode: {im.mode}')

# uncomment to launch in external tool
# im.show()
# -

# # Draw Line

# +
# **************************************** Draw a line
image = Image.new(mode='L', size=(600, 600), color=255)

# Draw a single vertical line down the middle of the canvas.
imd = ImageDraw.Draw(image)

x = image.width / 2
y_start = 0
y_end = image.height
line = ((x, y_start), (x, y_end))

# imd.line(xy, fill=None, width=0, joint=None)
# Docstring: Draw a line, or a connected sequence of line segments.
# fill is the stroke (fill) color.
imd.line(line, fill=16, width=10)

# del imd
# image.show()

# +
# **************************************** Draw multiple lines
image = Image.new(mode='L', size=(600, 600), color=255)

# Draw some evenly spaced vertical lines.
imd = ImageDraw.Draw(image)

y_start = 0
y_end = image.height
step_size = int(image.width / 10)

for x in range(0, image.width, step_size):
    line = ((x, y_start), (x, y_end))
    imd.line(line, fill=0, width=3)

del imd
# image.show()
# -

# # Download an image from a URL

# +
# **************************************** DL image using requests
url = 'https://avatars1.githubusercontent.com/u/24477926'

# -------------------- using requests
resp = requests.get(url)
# get the bytes, b'\x89PNG\r\n\x1a\n\x00 .....
remote_image_bytes = resp.content

# -------------------- using urlopen
# resp = urllib.request.urlopen(url)
# # get the bytes, b'\x89PNG\r\n\x1a\n\x00 .....
# remote_image_bytes = resp.read()

# turn it into a file-handle-like object using BytesIO
image_fh = BytesIO(remote_image_bytes)

im = Image.open(image_fh)
print(f'format: {im.format}, size: {im.size}, mode: {im.mode}')
# im.show()
# -

# # Apply some filters to an image

# +
# **************************************** Blur an image
image_fh = BytesIO(requests.get('https://avatars1.githubusercontent.com/u/24477926').content)
src_im = Image.open(image_fh)

im = src_im.filter(ImageFilter.BLUR)
# NOTE: im.format is None here -- filter() returns a new in-memory image that
# was never read from a file, so only size and mode are meaningful.
print(f'format: {im.format}, size: {im.size}, mode: {im.mode}')
# im.show()

# +
# **************************************** SHARPEN
image_fh = BytesIO(requests.get('https://avatars1.githubusercontent.com/u/24477926').content)
src_im = Image.open(image_fh)

im = src_im.filter(ImageFilter.SHARPEN)
print(f'format: {im.format}, size: {im.size}, mode: {im.mode}')
# im.show()
# -

# # The great PixelAccess

# +
image_fh = BytesIO(requests.get('https://avatars1.githubusercontent.com/u/24477926').content)
im = Image.open(image_fh)
print(f'format: {im.format}, size: {im.size}, mode: {im.mode}')

# FIX: the pixel loop below unpacks exactly three channel values per pixel.
# PNG avatars are frequently RGBA (or palette 'P') images, in which case
# `r, g, b = grid[i, j]` raises ValueError. Normalize to RGB first so the
# channel-swap below works for any downloaded image.
if im.mode != 'RGB':
    im = im.convert('RGB')

im.show()

# +
grid = im.load()
print(f'grid type: {type(grid)}')

# upper left is 0,0

# get any pixel
print(grid[10, 2])
print(grid[50, 120])

# iterate over pixels and do something cool.
im_w = im.size[0]
im_h = im.size[1]
# -

# +
# swap all blue and red values.
# -

for i in range(im_w):
    for j in range(im_h):
        # pixel is at grid[i, j]
        r, g, b = grid[i, j]
        grid[i, j] = b, g, r

im.show()
pillow_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- pip install twython # + from twython import Twython CONSUMER_KEY = 'Uj4evSaoPD0BeECqnW4QNJHQ5' CONSUMER_SECRET = '<KEY>' ACCESS_TOKEN = '<KEY>' ACCESS_TOKEN_SECRET = '<KEY> ' # - twitter = Twython(CONSUMER_KEY, CONSUMER_SECRET) help(twitter) for status in twitter.search(q='"data science"')["statuses"]: user = status["user"]["screen_name"] text = status["text"] print(user, ":", text) print() # + """ This isn't that interesting, largely because the Twitter Search API just shows you whatever hardful of recent results it feels like. When you're doint data science, more often you want a lot of tweets. This is where #STREAMING API is useful. It allows you to connect to (a sample of) the great Twitter firehose. To use it, you'll need to authenticate using your access tokens. In order to access the Streaming API with Twython, we need to define a class that inherits from TwythonStreamer and that overrides its on_success method (and possibly its on_error method): """ from twython import TwythonStreamer # appending data to a global variable is pretty poor form # but it makes the example much simpler tweets = [] # + class MyStreamer(TwythonStreamer): """our own subclass of TwythonStreamer that specifies how to interact with the stream""" def on_success(self, data): """what do we do when twitter sends us data? here data will be a Python dict representing a tweet""" # only want to collect English-language tweets if data['lang'] == 'en': tweets.append(data) print("received tweet #", len(tweets)) # stop when we've collected enough if len(tweets >= 1000): self.disconnect() def on_error(self, status_code, data): print(status_code, data) self.disconnect() # MyStreamer will connect to the Twitter stream and wait for Twitte to feed it # data. 
Each time it receives some data (here, a Tweet represented as a # Python object) it passes it to the on_success method, which appends it to # our tweets list if its language is English, and then disconnects the # streamer after it's collected 1000 tweets # - # All that's left is to initialize it and start it running: stream = MyStreamer(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET) # starts consuming public statuses that contain the keyword 'data' stream.statuses.filter(track='data') # if instead we wanted to start consuming a sample of "all" public statuses stream.statuses.sample() # + # This will run until it collects 1000 tweet(or until it encounters an error) # and stop, at which point you can start analyzing those tweets. For instance # you could find the most common hashtags with: from collections import Counter top_hashtags = Counter(hashtag['text'].lower() for tweet in tweets for hashtag in tweet["entities"]["hashtags"]) print(top_hashtags.most_common(5)) # - twitter_api_documentation = 'https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/tweet-object' # + # In a non-toy project you probably wouldn’t want to rely on an in-memory list for storing the tweets. # Instead you’d want to save them to a file or a database, so that you’d have them permanently. # + #page 121 # -
patch/Twython.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Lf7huAiYp-An" # ##### Copyright 2021 The TensorFlow Authors. # + cellView="form" id="YHz2D-oIqBWa" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="x44FFES-r6y0" # # Working with tff's ClientData. # + [markdown] id="iPFgLeZIsZ3Q" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/federated/tutorials/working_with_client_data"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/master/docs/tutorials/working_with_client_data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/federated/blob/master/docs/tutorials/working_with_client_data.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/working_with_client_data.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="8RVecD0EfXdb" # The notion of a 
dataset keyed by clients (e.g. users) is essential to federated computation as modeled in TFF. TFF provides the interface [`tff.simulation.datasets.ClientData`](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/ClientData) to abstract over this concept, and the datasets which TFF hosts ([stackoverflow](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/stackoverflow), [shakespeare](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/shakespeare), [emnist](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/emnist), [cifar100](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/cifar100), and [gldv2](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/gldv2)) all implement this interface. # # If you are working on federated learning with your own dataset, TFF strongly encourages you to either implement the `ClientData` interface or use one of TFF's helper functions to generate a `ClientData` which represents your data on disk, e.g. [`tff.simulation.datasets.ClientData.from_clients_and_fn`](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/ClientData?version=nightly#from_clients_and_fn). # # As most of TFF's end-to-end examples start with `ClientData` objects, implementing the `ClientData` interface with your custom dataset will make it easier to spelunk through existing code written with TFF. Further, the `tf.data.Datasets` which `ClientData` constructs can be iterated over directly to yield structures of `numpy` arrays, so `ClientData` objects can be used with any Python-based ML framework before moving to TFF. # # There are several patterns with which you can make your life easier if you intend to scale up your simulations to many machines or deploy them. 
Below we will walk through a few of the ways we can use `ClientData` and TFF to make our small-scale iteration-to large-scale experimentation-to production deployment experience as smooth as possible. # + [markdown] id="snsz06ESrGvL" # ## Which pattern should I use to pass ClientData into TFF? # # We will discuss two usages of TFF's `ClientData` in depth; if you fit in either of the two categories below, you will clearly prefer one over the other. If not, you may need a more detailed understanding of the pros and cons of each to make a more nuanced choice. # * I want to iterate as quickly as possible on a local machine; I don't need to be able to easily take advantage of TFF's distributed runtime. # * You want to pass `tf.data.Datasets` in to TFF directly. # * This allows you to program imperatively with `tf.data.Dataset` objects, and process them arbitrarily. # * It provides more flexibility than the option below; pushing logic to the clients requires that this logic be serializable. # # * I want to run my federated computation in TFF's remote runtime, or I plan to do so soon. # * In this case you want to map dataset construction and preprocessing to clients. # * This results in you passing simply a list of `client_ids` directly to your federated computation. # * Pushing dataset construction and preprocessing to the clients avoids bottlenecks in serialization, and significantly increases performance with hundreds-to-thousands of clients. # + id="KoCHeay4Rozd" #@title Set up open-source environment #@test {"skip": true} # tensorflow_federated_nightly also bring in tf_nightly, which # can causes a duplicate tensorboard install, leading to errors. 
# !pip uninstall --yes tensorboard tb-nightly

# !pip install --quiet --upgrade tensorflow_federated_nightly
# !pip install --quiet --upgrade nest_asyncio

import nest_asyncio
nest_asyncio.apply()

# + id="LNduVQsPNoH7"
#@title Import packages
import collections
import time

import tensorflow as tf
import tensorflow_federated as tff

# + [markdown] id="dNOfCerkfZh_"
# ## Manipulating a ClientData object
#
# Let's begin by loading and exploring TFF's EMNIST `ClientData`:
#
# + id="Rd8vaOOfbe5X"
client_data, _ = tff.simulation.datasets.emnist.load_data()

# + [markdown] id="a-46eXnKbmYP"
# Inspecting the first dataset can tell us what type of examples are in the `ClientData`.

# + id="N1JvJvDkbxDo"
first_client_id = client_data.client_ids[0]
first_client_dataset = client_data.create_tf_dataset_for_client(
    first_client_id)
print(first_client_dataset.element_spec)
# This information is also available as a `ClientData` property:
assert client_data.element_type_structure == first_client_dataset.element_spec

# + [markdown] id="7Z8l3uuYv8cD"
# Note that the dataset yields `collections.OrderedDict` objects that have `pixels` and `label` keys, where pixels is a tensor with shape `[28, 28]`. Suppose we wish to flatten our inputs out to shape `[784]`. One possible way we can do this would be to apply a pre-processing function to our `ClientData` object.

# + id="VyPqaw6Uv7Fu"
def preprocess_dataset(dataset):
  """Create batches of 5 examples, and limit to 5 batches.

  (FIX: the docstring previously said "limit to 3 batches", contradicting
  the `.take(5)` below.)
  """

  def map_fn(input):
    # Flatten the (28, 28) pixel grid to a length-784 vector, and give the
    # label a trailing dimension and an explicit int64 dtype.
    return collections.OrderedDict(
        x=tf.reshape(input['pixels'], shape=(-1, 784)),
        y=tf.cast(tf.reshape(input['label'], shape=(-1, 1)), tf.int64),
    )

  return dataset.batch(5).map(
      map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE).take(5)


preprocessed_client_data = client_data.preprocess(preprocess_dataset)

# Notice that we have both reshaped and renamed the elements of the ordered dict.
first_client_dataset = preprocessed_client_data.create_tf_dataset_for_client( first_client_id) print(first_client_dataset.element_spec) # + [markdown] id="NtpLRgdpl9Js" # We may want in addition to perform some more complex (and possibly stateful) preprocessing, for example shuffling. # + id="CtBVHcAmmKiu" def preprocess_and_shuffle(dataset): """Applies `preprocess_dataset` above and shuffles the result.""" preprocessed = preprocess_dataset(dataset) return preprocessed.shuffle(buffer_size=5) preprocessed_and_shuffled = client_data.preprocess(preprocess_and_shuffle) # The type signature will remain the same, but the batches will be shuffled. first_client_dataset = preprocessed_and_shuffled.create_tf_dataset_for_client( first_client_id) print(first_client_dataset.element_spec) # + [markdown] id="Ek7W3ZZHMr1k" # ## Interfacing with a `tff.Computation` # # Now that we can perform some basic manipulations with `ClientData` objects, we are ready to feed data to a `tff.Computation`. We define a [`tff.templates.IterativeProcess`](https://www.tensorflow.org/federated/api_docs/python/tff/templates/IterativeProcess) which implements [Federated Averaging](https://arxiv.org/abs/1602.05629), and explore different methods of passing it data. # + id="j41nKFYse8GC" def model_fn(): model = tf.keras.models.Sequential([ tf.keras.layers.Input(shape=(784,)), tf.keras.layers.Dense(10, kernel_initializer='zeros'), ]) return tff.learning.from_keras_model( model, # Note: input spec is the _batched_ shape, and includes the # label tensor which will be passed to the loss function. This model is # therefore configured to accept data _after_ it has been preprocessed. 
input_spec=collections.OrderedDict( x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32), y=tf.TensorSpec(shape=[None, 1], dtype=tf.int64)), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) trainer = tff.learning.build_federated_averaging_process( model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.01)) # + [markdown] id="ICJdME7-5lMx" # Before we begin working with this `IterativeProcess`, one comment on the semantics of `ClientData` is in order. A `ClientData` object represents the *entirety* of the population available for federated training, which in general is [not available to the execution environment of a production FL system](https://arxiv.org/abs/1902.01046) and is specific to simulation. `ClientData` indeed gives the user the capacity to bypass federated computing entirely and simply train a server-side model as usual via [`ClientData.create_tf_dataset_from_all_clients`](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/ClientData?hl=en&version=nightly#create_tf_dataset_from_all_clients). # # TFF's simulation environment puts the researcher in complete control of the outer loop. In particular this implies considerations of client availability, client dropout, etc, must be addressed by the user or Python driver script. One could for example model client dropout by adjusting the sampling distribution over your `ClientData's` `client_ids` such that users with more data (and correspondingly longer-running local computations) would be selected with lower probability. # # In a real federated system, however, clients cannot be selected explicitly by the model trainer; the selection of clients is delegated to the system which is executing the federated computation. 
# + [markdown] id="Zaoo661LOaCK" # ### Passing `tf.data.Datasets` directly to TFF # # One option we have for interfacing between a `ClientData` and an `IterativeProcess` is that of constructing `tf.data.Datasets` in Python, and passing these datasets to TFF. # # Notice that if we use our preprocessed `ClientData` the datasets we yield are of the appropriate type expected by our model defined above. # + id="U3R4cvZvPmxt" selected_client_ids = preprocessed_and_shuffled.client_ids[:10] preprocessed_data_for_clients = [ preprocessed_and_shuffled.create_tf_dataset_for_client( selected_client_ids[i]) for i in range(10) ] state = trainer.initialize() for _ in range(5): t1 = time.time() state, metrics = trainer.next(state, preprocessed_data_for_clients) t2 = time.time() print('loss {}, round time {}'.format(metrics['train']['loss'], t2 - t1)) # + [markdown] id="XFaFlB59nAVi" # If we take this route, however, we will be ***unable to trivially move to multimachine simulation***. The datasets we construct in the local TensorFlow runtime can *capture state from the surrounding python environment*, and fail in serialization or deserialization when they attempt to reference state which is no longer available to them. This can manifest for example in the inscrutable error from TensorFlow's `tensor_util.cc`: # ``` # Check failed: DT_VARIANT == input.dtype() (21 vs. 20) # ``` # + [markdown] id="Q5VKu7OLny5X" # ### Mapping construction and preprocessing over the clients # # To avoid this issue, TFF recommends its users to consider dataset instantiation and preprocessing as *something that happens locally on each client*, and to use TFF's helpers or `federated_map` to explicitly run this preprocessing code at each client. # # Conceptually, the reason for preferring this is clear: in TFF's local runtime, the clients only "accidentally" have access to the global Python environment due to the fact that the entire federated orchestration is happening on a single machine. 
It is worthwhile noting at this point that similar thinking gives rise to TFF's cross-platform, always-serializable, functional philosophy. # # TFF makes such a change simple via `ClientData's` attribute `dataset_computation`, a `tff.Computation` which takes a `client_id` and returns the associated `tf.data.Dataset`. # # Note that `preprocess` simply works with `dataset_computation`; the `dataset_computation` attribute of the preprocessed `ClientData` incorporates the entire preprocessing pipeline we just defined: # + id="yKiTjDj3pw4R" print('dataset computation without preprocessing:') print(client_data.dataset_computation.type_signature) print('\n') print('dataset computation with preprocessing:') print(preprocessed_and_shuffled.dataset_computation.type_signature) # + [markdown] id="oGcSqAjuqJau" # We could invoke `dataset_computation` and receive an eager dataset in the Python runtime, but the real power of this approach is exercised when we compose with an iterative process or another computation to avoid materializing these datasets in the global eager runtime at all. TFF provides a helper function [`tff.simulation.compose_dataset_computation_with_iterative_process`](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/compose_dataset_computation_with_iterative_process) which can be used to do exactly this. # + id="69vY85cmPsel" trainer_accepting_ids = tff.simulation.compose_dataset_computation_with_iterative_process( preprocessed_and_shuffled.dataset_computation, trainer) # + [markdown] id="Ixrmztq6SbRE" # Both this `tff.templates.IterativeProcesses` and the one above run the same way; but former accepts preprocessed client datasets, and the latter accepts strings representing client ids, handling both dataset construction and preprocessing in its body--in fact `state` can be passed between the two. 
# + id="ZcYPQxqlSapn" for _ in range(5): t1 = time.time() state, metrics = trainer_accepting_ids.next(state, selected_client_ids) t2 = time.time() print('loss {}, round time {}'.format(metrics['train']['loss'], t2 - t1)) # + [markdown] id="SeoQzU-5XeGz" # ### Scaling to large numbers of clients # # `trainer_accepting_ids` can immediately be used in TFF's multimachine runtime, and avoids materializing `tf.data.Datasets` and the controller (and therefore serializing them and sending them out to the workers). # # This significantly speeds up distributed simulations, especially with a large number of clients, and enables intermediate aggregation to avoid similar serialization/deserialization overhead. # # + [markdown] id="iSy1t2UZQWCy" # ### Optional deepdive: manually composing preprocessing logic in TFF # # TFF is designed for compositionality from the ground up; the kind of composition just performed by TFF's helper is fully within our control as users. We could have manually compose the preprocessing computation we just defined with the trainer's own `next` quite simply: # + id="yasFmYyIwTKY" selected_clients_type = tff.FederatedType(preprocessed_and_shuffled.dataset_computation.type_signature.parameter, tff.CLIENTS) @tff.federated_computation(trainer.next.type_signature.parameter[0], selected_clients_type) def new_next(server_state, selected_clients): preprocessed_data = tff.federated_map(preprocessed_and_shuffled.dataset_computation, selected_clients) return trainer.next(server_state, preprocessed_data) manual_trainer_with_preprocessing = tff.templates.IterativeProcess(initialize_fn=trainer.initialize, next_fn=new_next) # + [markdown] id="pHG0NXbWQuk7" # In fact, this is effectively what the helper we used is doing under the hood (plus performing appropriate type checking and manipulation). 
We could even have expressed the same logic slightly differently, by serializing `preprocess_and_shuffle` into a `tff.Computation`, and decomposing the `federated_map` into one step which constructs un-preprocessed datasets and another which runs `preprocess_and_shuffle` at each client. # # We can verify that this more-manual path results in computations with the same type signature as TFF's helper (modulo parameter names): # + id="C2sc5HkLPwkp" print(trainer_accepting_ids.next.type_signature) print(manual_trainer_with_preprocessing.next.type_signature)
docs/tutorials/working_with_client_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # FaceNet face recognition model
#
#
# ## Training and feature extraction with a CNN adapted from NN4
#
#
# **nn4.small2.v1** is a variant of the NN4 model described in the FaceNet paper;
# see [nn4.small2](https://cmusatyalab.github.io/openface/models-and-accuracies/#model-definitions)
# in the OpenFace model list for details.
#
# ![](model_comparison.png)
#
#
# ### Model list
#
# |Model | Number of Parameters |
# | ---- | ---- |
# | [nn4.small2](https://github.com/cmusatyalab/openface/blob/master/models/openface/nn4.small2.def.lua) | 3733968 |
# | [nn4.small1](https://github.com/cmusatyalab/openface/blob/master/models/openface/nn4.small1.def.lua) | 5579520 |
# | [nn4](https://github.com/cmusatyalab/openface/blob/master/models/openface/nn4.def.lua) | 6959088 |
# | [nn2](https://github.com/cmusatyalab/openface/blob/master/models/openface/nn2.def.lua) | 7472144 |
#
#
# This tutorial uses a [Keras implementation](https://github.com/krasserm/face-recognition) of the model;
# the model definition lives in [model.py](model.py) and a visualization is stored in
# [nn4_small2_model.png](nn4_small2_model.png).
#
# ### Workflow for retraining the face recognition model
#
# 1. Load the training dataset
# 1. Face detection, alignment and extraction (using OpenFace's AlignDlib utility)
# 1. Learn face embedding vectors (using the pre-trained nn4.small1.v1 model)
# 1. Face classification (using KNN or SVM)
#
# ## Load the training dataset
#
# ### Dataset layout
#
# - One directory per person, named after the person, e.g. "Fan\_Bingbing"
# - Each person's directory contains 10 images (ideally 1:1 aspect ratio),
#   named "name\_index", e.g. "Fan\_Bingbing\_0001.jpg".
#   Only .jpg and .jpeg formats are supported.

# +
import numpy as np
import cv2
import os.path


class IdentityMetadata():
    """Location of a single labeled face image on disk."""

    def __init__(self, base, name, file):
        self.base = base  # dataset root directory
        self.name = name  # subdirectory name (the person's name)
        self.file = file  # image file name

    def __repr__(self):
        return self.image_path()

    def image_path(self):
        return os.path.join(self.base, self.name, self.file)


def load_metadata(path):
    """Collect an IdentityMetadata entry for every .jpg/.jpeg under `path`."""
    metadata = []
    for i in os.listdir(path):
        for f in os.listdir(os.path.join(path, i)):
            # Check the file extension; only jpg and jpeg are supported.
            ext = os.path.splitext(f)[1]
            if ext == '.jpg' or ext == '.jpeg':
                metadata.append(IdentityMetadata(path, i, f))
    return np.array(metadata)


def load_image(path):
    img = cv2.imread(path, 1)
    # OpenCV loads images in BGR channel order by default; convert to RGB.
    return img[..., ::-1]
# -

metadata = load_metadata('images')
print(metadata)

# ## Face detection, alignment and extraction
#
# Extract a 96x96 RGB face crop from the original image. If the original is
# not 1:1, the extracted face will be stretched.

# +
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as patches

from align import AlignDlib

# Initialize OpenFace's face-alignment utility with Dlib's 68 landmarks.
alignment = AlignDlib('face_detection/landmarks.dat')

# Load one training image.
img = load_image(metadata[0].image_path())

# Detect the face and return its bounding box.
bb = alignment.getLargestFaceBoundingBox(img)

# Warp using the specified landmarks and crop a 96x96 face image.
aligned_img = alignment.align(96, img, bb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

# Plot the original image.
plt.subplot(131)
plt.imshow(img)
plt.xticks([])
plt.yticks([])

# Plot the original image with the face bounding box.
plt.subplot(132)
plt.imshow(img)
plt.gca().add_patch(patches.Rectangle((bb.left(), bb.top()), bb.width(), bb.height(), fill=False, color='red'))
plt.xticks([])
plt.yticks([])

# Plot the aligned 96x96 face crop.
plt.subplot(133)
plt.imshow(aligned_img)
plt.xticks([])
plt.yticks([])
# -

# ### Load the nn4.small2.v1 model
#
# ![](facenet_architect.png)

# +
from model import create_model

nn4_small2 = create_model()

# +
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Layer

# Inputs: anchor, positive and negative 96x96 RGB images.
in_a = Input(shape=(96, 96, 3))
in_p = Input(shape=(96, 96, 3))
in_n = Input(shape=(96, 96, 3))

# Outputs: the corresponding face embedding vectors.
emb_a = nn4_small2(in_a)
emb_p = nn4_small2(in_p)
emb_n = nn4_small2(in_n)
# -

# ## Triplet Loss Layer
#
# Training aims to learn a function $f(x)$ that embeds face images into a
# Euclidean feature space such that, for a given face $x$, the squared L2
# distance between embeddings of the same person is as small as possible while
# the distance between different people is as large as possible.
#
# The desired model is learned by minimizing the *triplet loss* $L$:
#
# $$L = \sum^{N}_{i=1} \large[ \small {\mid \mid f(x_{i}^{a}) - f(x_{i}^{p})) \mid \mid_2^2} - {\mid \mid f(x_{i}^{a}) - f(x_{i}^{n})) \mid \mid_2^2} + \alpha \large ] \small_+$$
#
# where $[z]_+$ means $max(z,0)$ and $N$ is the cardinality of the triplet set.
#
# **Below, the triplet loss is implemented as a custom Keras loss layer.**

# +
from tensorflow.keras import backend as K


class TripletLossLayer(Layer):
    def __init__(self, alpha, **kwargs):
        self.alpha = alpha  # margin between positive and negative distances
        super(TripletLossLayer, self).__init__(**kwargs)

    def triplet_loss(self, inputs):
        a, p, n = inputs
        p_dist = K.sum(K.square(a-p), axis=-1)
        n_dist = K.sum(K.square(a-n), axis=-1)
        return K.sum(K.maximum(p_dist - n_dist + self.alpha, 0), axis=0)

    def call(self, inputs):
        loss = self.triplet_loss(inputs)
        self.add_loss(loss)
        return loss


triplet_loss_layer = TripletLossLayer(alpha=0.2, name='triplet_loss_layer')([emb_a, emb_p, emb_n])
nn4_small2_train = Model([in_a, in_p, in_n], triplet_loss_layer)
# -

# ## Load the pre-trained model nn4.small2.v1
#
# We choose **nn4.small2.v1** from the
# [pre-trained models](https://cmusatyalab.github.io/openface/models-and-accuracies/#pre-trained-models)
# provided by OpenFace.
#
# These models were trained on the public [FaceScrub](http://vintage.winklerbros.net/facescrub.html)
# and [CASIA-WebFace](http://arxiv.org/abs/1411.7923) datasets. The Keras-OpenFace project converted
# them to [csv files](https://github.com/iwantooxxoox/Keras-OpenFace/tree/master/weights), which we
# then converted to the Keras h5 model file [nn4.small2.v1.h5](models/nn4.small2.v1.h5).
#
# ### Pre-trained models
#
# | Model | alignment `landmarkIndices` |
# | ---- | ---- |
# | nn4.v1 | `openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP` |
# | nn4.v2 | `openface.AlignDlib.OUTER_EYES_AND_NOSE` |
# | nn4.small1.v1 | `openface.AlignDlib.OUTER_EYES_AND_NOSE` |
# | nn4.small2.v1 | `openface.AlignDlib.OUTER_EYES_AND_NOSE` |

nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('models/nn4.small2.v1.h5')


def align_image(img):
    return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
                           landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)


# +
# The original dataset gives better test results
#metadata = load_metadata('images')
embedded = np.zeros((metadata.shape[0], 128))

for i, m in enumerate(metadata):
    img = load_image(m.image_path())
    img = align_image(img)
    # normalize pixel values to [0, 1]
    img = (img / 255.).astype(np.float32)
    # face embedding vector
    embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]


# +
# Squared L2 Distance
def distance(emb1, emb2):
    return np.sum(np.square(emb1 - emb2))


def show_pair(idx1, idx2):
    plt.figure(figsize=(8,3))
    plt.suptitle(f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}')
    plt.subplot(121)
    plt.imshow(load_image(metadata[idx1].image_path()))
    plt.xticks([])
    plt.yticks([])
    plt.subplot(122)
    plt.imshow(load_image(metadata[idx2].image_path()))
    plt.xticks([])
    plt.yticks([])


show_pair(2, 3)
show_pair(2, 12)
# -

# ## Face classification

# +
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score

targets = np.array([m.name for m in metadata])

encoder = LabelEncoder()
encoder.fit(targets)

# Numerical encoding of identities
y = encoder.transform(targets)

train_idx = np.arange(metadata.shape[0]) % 2 != 0
test_idx = np.arange(metadata.shape[0]) % 2 == 0

# 50 train examples of 10 identities (5 examples each)
X_train = embedded[train_idx]
# 50 test examples of 10 identities (5 examples each)
X_test = embedded[test_idx]

y_train = y[train_idx]
y_test = y[test_idx]

knn = KNeighborsClassifier(n_neighbors=1, metric='euclidean')
svc = LinearSVC()

knn.fit(X_train, y_train)
svc.fit(X_train, y_train)

acc_knn = accuracy_score(y_test, knn.predict(X_test))
acc_svc = accuracy_score(y_test, svc.predict(X_test))

print(f'KNN accuracy = {acc_knn}, SVM accuracy = {acc_svc}')

# +
import warnings
warnings.filterwarnings('ignore')

# try different examples
example_idx = 10

example_image = load_image(metadata[example_idx].image_path())
example_prediction = svc.predict([embedded[example_idx]])
example_identity = encoder.inverse_transform(example_prediction)[0]

plt.imshow(example_image)
plt.title(f'Recognized as {example_identity}');
plt.xticks([])
plt.yticks([])
# -

# ## Model evaluation and visual analysis

# +
from sklearn.metrics import f1_score

distances = []  # squared L2 distance between pairs
identical = []  # 1 if same identity, 0 otherwise

num = len(metadata)

for i in range(num - 1):
    # FIX: the inner loop was `for j in range(1, num)`, which double-counts
    # unordered pairs and also includes identical (i, i) pairs (distance 0,
    # labeled identical=1), skewing the F1/threshold analysis below.
    # Iterate j > i so every unordered pair is counted exactly once.
    for j in range(i + 1, num):
        distances.append(distance(embedded[i], embedded[j]))
        identical.append(1 if metadata[i].name == metadata[j].name else 0)

distances = np.array(distances)
identical = np.array(identical)

thresholds = np.arange(0.1, 1.0, 0.01)

f1_scores = [f1_score(identical, distances < t) for t in thresholds]
acc_scores = [accuracy_score(identical, distances < t) for t in thresholds]

opt_idx = np.argmax(f1_scores)
opt_tau = thresholds[opt_idx]  # threshold of the maximal F1 score
opt_acc = accuracy_score(identical, distances < opt_tau)  # accuracy at that threshold

# Plot F1 and accuracy as functions of the distance threshold.
plt.plot(thresholds, f1_scores, label='F1 score');
plt.plot(thresholds, acc_scores, label='Accuracy');
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title(f'Accuracy at threshold {opt_tau:.2f} = {opt_acc:.3f}');
plt.xlabel('Distance threshold')
plt.legend();

# +
dist_pos = distances[identical == 1]
dist_neg = distances[identical == 0]

plt.figure(figsize=(12,4))

plt.subplot(121)
plt.hist(dist_pos)
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title('Distances (positive. pairs)')
plt.legend();

plt.subplot(122)
plt.hist(dist_neg)
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title('Distances (negative. pairs)')
plt.legend();
# -

# ## Dimensionality reduction of the face embeddings

# +
from sklearn.manifold import TSNE

X_embedded = TSNE(n_components=2).fit_transform(embedded)

for i, t in enumerate(set(targets)):
    idx = targets == t
    plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t)

plt.legend(bbox_to_anchor=(1, 1));
plt.xticks([])
plt.yticks([])
# -
CTA2019-DL/tf_keras_FaceNet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:tf]
#     language: python
#     name: conda-env-tf-py
# ---

# # # Style Transfer
#
#
# ## Our Changes:
#
# Added code for saving the input content and style images. Also added code for saving the output mixed image

from IPython.display import Image, display
Image('images/15_style_transfer_flowchart.png')

# + [markdown] colab_type="text" id="xu2SVpFJjmJr"
# ## Imports
# -

# %matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import PIL.Image

# This was developed using Python 3.5.2 (Anaconda) and TensorFlow version:

tf.__version__

import vgg16

# The VGG-16 model is downloaded from the internet. This is the default directory where you want to save the data-files. The directory will be created if it does not exist.

# +
# vgg16.data_dir = 'vgg16/'
# -

vgg16.maybe_download()

# + [markdown] colab_type="text" id="Nv2JqNLBhy1j"
# ## Helper-functions for image manipulation
# -

# This function loads an image and returns it as a numpy array of floating-points. The image can be automatically resized so the largest of the height or width equals `max_size`.

def load_image(filename, max_size=None):
    """Load an image file as a float32 numpy array.

    If `max_size` is given, the image is rescaled (preserving the aspect
    ratio) so that its largest dimension equals `max_size`.
    """
    image = PIL.Image.open(filename)

    if max_size is not None:
        # Calculate the appropriate rescale-factor for
        # ensuring a max height and width, while keeping
        # the proportion between them.
        factor = max_size / np.max(image.size)

        # Scale the image's height and width.
        size = np.array(image.size) * factor

        # The size is now floating-point because it was scaled.
        # But PIL requires the size to be integers.
        size = size.astype(int)

        # Resize the image.
        image = image.resize(size, PIL.Image.LANCZOS)

    # FIX: removed a stray debug `print(image)` here that printed the PIL
    # Image repr to the console on every load.

    # Convert to numpy floating-point array.
    return np.float32(image)

# Save an image as a jpeg-file. The image is given as a numpy array with pixel-values between 0 and 255.
def save_image(image, filename):
    """Write *image* (numpy array, values 0-255) to *filename* as JPEG."""
    # Clamp to the valid pixel range, then convert to unsigned bytes.
    pixels = np.clip(image, 0.0, 255.0).astype(np.uint8)

    # Encode and write as JPEG.
    with open(filename, 'wb') as file:
        PIL.Image.fromarray(pixels).save(file, 'jpeg')


# This function plots a large image. The image is given as a numpy array
# with pixel-values between 0 and 255.
def plot_image_big(image):
    """Display *image* (numpy array, values 0-255) inline at full size."""
    # Clamp to the valid pixel range and convert pixels to bytes.
    pixels = np.clip(image, 0.0, 255.0).astype(np.uint8)

    # Convert to a PIL-image and display it.
    display(PIL.Image.fromarray(pixels))


def plot_images(content_image, style_image, mixed_image):
    """Plot the content-, mixed- and style-images side by side."""
    # One row with three sub-plots.
    fig, axes = plt.subplots(1, 3, figsize=(10, 10))
    fig.subplots_adjust(hspace=0.1, wspace=0.1)

    # Use interpolation to smooth pixels?
    smooth = True
    interpolation = 'sinc' if smooth else 'nearest'

    # Pixel-values are normalized to [0.0, 1.0] by dividing with 255.
    panels = [
        (content_image, "Content"),
        (mixed_image, "Mixed"),
        (style_image, "Style"),
    ]
    for ax, (img, label) in zip(axes.flat, panels):
        ax.imshow(img / 255.0, interpolation=interpolation)
        ax.set_xlabel(label)
        # Remove ticks so only the images and labels show.
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

# ## Loss Functions
#
# These helper-functions create the loss-functions that are used in
# optimization with TensorFlow.
def mean_squared_error(a, b):
    """TensorFlow op: Mean Squared Error between two tensors."""
    return tf.reduce_mean(tf.square(a - b))


def create_content_loss(session, model, content_image, layer_ids):
    """
    Create the loss-function for the content-image.

    Parameters:
    session: An open TensorFlow session for running the model's graph.
    model: The model, e.g. an instance of the VGG16-class.
    content_image: Numpy float array with the content-image.
    layer_ids: List of integer id's for the layers to use in the model.
    """
    # Create a feed-dict with the content-image.
    feed_dict = model.create_feed_dict(image=content_image)

    # Get references to the tensors for the given layers.
    layers = model.get_layer_tensors(layer_ids)

    # Calculate the output values of those layers when
    # feeding the content-image to the model.
    values = session.run(layers, feed_dict=feed_dict)

    # Set the model's graph as the default so we can add
    # computational nodes to it.
    with model.graph.as_default():
        # Initialize an empty list of loss-functions.
        layer_losses = []

        # For each layer and its corresponding values for the content-image.
        for value, layer in zip(values, layers):
            # Wrap the pre-computed activations as a constant so the MSE
            # is measured against a fixed target.
            value_const = tf.constant(value)

            # MSE between the layer-values for the content- and mixed-images.
            # The mixed-image is not calculated yet; we are merely creating
            # the operations for calculating the MSE between those two.
            loss = mean_squared_error(layer, value_const)

            layer_losses.append(loss)

        # The combined loss for all layers is just the average. The
        # loss-functions could be weighted differently for each layer.
        total_loss = tf.reduce_mean(layer_losses)

    return total_loss


def gram_matrix(tensor):
    """Gram-matrix (channel-by-channel dot products) of a 4-dim conv tensor."""
    shape = tensor.get_shape()

    # Number of feature channels; the input is assumed to be
    # from a convolutional layer with 4 dimensions.
    num_channels = int(shape[3])

    # Flatten each feature-channel into a column of a 2-dim matrix.
    matrix = tf.reshape(tensor, shape=[-1, num_channels])

    # Gram-matrix = matrix-product of the 2-dim matrix with itself:
    # dot-products of all combinations of the feature-channels.
    gram = tf.matmul(tf.transpose(matrix), matrix)

    return gram


def create_style_loss(session, model, style_image, layer_ids):
    """
    Create the loss-function for the style-image.

    Parameters:
    session: An open TensorFlow session for running the model's graph.
    model: The model, e.g. an instance of the VGG16-class.
    style_image: Numpy float array with the style-image.
    layer_ids: List of integer id's for the layers to use in the model.
    """
    # Create a feed-dict with the style-image.
    feed_dict = model.create_feed_dict(image=style_image)

    # Get references to the tensors for the given layers.
    layers = model.get_layer_tensors(layer_ids)

    # Diagnostic print: how many style-layers are being used.
    layerIdCount=len(layer_ids)
    print('count of layer ids:',layerIdCount)

    # Set the model's graph as the default so we can add
    # computational nodes to it.
    with model.graph.as_default():
        # Construct the TensorFlow-operations for calculating
        # the Gram-matrices for each of the layers.
        gram_layers = [gram_matrix(layer) for layer in layers]

        # Calculate the values of those Gram-matrices when
        # feeding the style-image to the model.
        values = session.run(gram_layers, feed_dict=feed_dict)

        layer_losses = []

        # For each Gram-matrix layer and its corresponding values.
        for value, gram_layer in zip(values, gram_layers):
            # Wrap the Gram-matrix values as a constant target.
            value_const = tf.constant(value)

            # MSE between the Gram-matrix values for the style- and
            # (yet-to-be-computed) mixed-images.
            loss = mean_squared_error(gram_layer, value_const)

            layer_losses.append(loss)

        # The combined loss for all layers is just the average.
        total_loss = tf.reduce_mean(layer_losses)

    return total_loss


def create_denoise_loss(model):
    """Total-variation denoising loss: sum of abs differences of
    neighbouring pixels in both image axes of the model input."""
    loss = tf.reduce_sum(tf.abs(model.input[:,1:,:,:] - model.input[:,:-1,:,:])) + \
           tf.reduce_sum(tf.abs(model.input[:,:,1:,:] - model.input[:,:,:-1,:]))

    return loss


def style_transfer(content_image, style_image,
                   content_layer_ids, style_layer_ids,
                   weight_content=1.5,
                   weight_style=10.0,
                   weight_denoise=0.3,
                   num_iterations=120,
                   step_size=10.0):
    """
    Use gradient descent to find an image that minimizes the
    loss-functions of the content-layers and style-layers. This
    should result in a mixed-image that resembles the contours of
    the content-image, and resembles the colours and textures of
    the style-image.

    Parameters:
    content_image: Numpy 3-dim float-array with the content-image.
    style_image: Numpy 3-dim float-array with the style-image.
    content_layer_ids: List of integers identifying the content-layers.
    style_layer_ids: List of integers identifying the style-layers.
    weight_content: Weight for the content-loss-function.
    weight_style: Weight for the style-loss-function.
    weight_denoise: Weight for the denoising-loss-function.
    num_iterations: Number of optimization iterations to perform.
    step_size: Step-size for the gradient in each iteration.
    """
    # Create an instance of the VGG16-model. This is done in each call of
    # this function, because we will add operations to the graph so it can
    # grow very large and run out of RAM if we keep using the same instance.
    model = vgg16.VGG16()

    # Create a TensorFlow-session bound to the model's graph.
    session = tf.InteractiveSession(graph=model.graph)

    # Print the names of the content-layers.
    print("Content layers:")
    print(model.get_layer_names(content_layer_ids))
    print('Content Layers:',content_layer_ids)
    print()

    # Print the names of the style-layers.
    print("Style layers:")
    print(model.get_layer_names(style_layer_ids))
    print('Style Layers:',style_layer_ids)
    print()

    # Print the input parameters to the function.
    print('Weight Content:',weight_content)
    print('Weight Style:',weight_style)
    print('Weight Denoise:',weight_denoise)
    print('Number of Iterations:',num_iterations)
    print('Step Size:',step_size)
    print()

    # Create the loss-function for the content-layers and -image.
    loss_content = create_content_loss(session=session,
                                       model=model,
                                       content_image=content_image,
                                       layer_ids=content_layer_ids)

    # Create the loss-function for the style-layers and -image.
    loss_style = create_style_loss(session=session,
                                   model=model,
                                   style_image=style_image,
                                   layer_ids=style_layer_ids)

    # Create the loss-function for the denoising of the mixed-image.
    loss_denoise = create_denoise_loss(model)

    # TensorFlow variables for adjusting the values of the loss-functions.
    adj_content = tf.Variable(1e-10, name='adj_content')
    adj_style = tf.Variable(1e-10, name='adj_style')
    adj_denoise = tf.Variable(1e-10, name='adj_denoise')

    # Initialize the adjustment values for the loss-functions.
    session.run([adj_content.initializer,
                 adj_style.initializer,
                 adj_denoise.initializer])

    # Updates for the adjustment values: reciprocals of the loss-functions,
    # with 1e-10 added to avoid division by zero.
    update_adj_content = adj_content.assign(1.0 / (loss_content + 1e-10))
    update_adj_style = adj_style.assign(1.0 / (loss_style + 1e-10))
    update_adj_denoise = adj_denoise.assign(1.0 / (loss_denoise + 1e-10))

    # Weighted loss-function to minimize. Multiplying each loss by its
    # reciprocal adjustment value lets us use relative weights that are
    # independent of the exact choice of style- and content-layers.
    loss_combined = weight_content * adj_content * loss_content + \
                    weight_style * adj_style * loss_style + \
                    weight_denoise * adj_denoise * loss_denoise

    # Gradient of the combined loss with regard to the input image.
    gradient = tf.gradients(loss_combined, model.input)

    # Tensors to run in each optimization iteration.
    run_list = [gradient, update_adj_content, update_adj_style,
                update_adj_denoise]

    # The mixed-image is initialized with random noise,
    # the same size as the content-image.
    mixed_image = np.random.rand(*content_image.shape) + 128

    for i in range(num_iterations):
        # Feed the current mixed-image to the model.
        feed_dict = model.create_feed_dict(image=mixed_image)

        # Calculate the gradient and update the adjustment values.
        grad, adj_content_val, adj_style_val, adj_denoise_val \
            = session.run(run_list, feed_dict=feed_dict)

        # Reduce the dimensionality of the gradient.
        grad = np.squeeze(grad)

        # Scale the step-size according to the gradient-values.
        step_size_scaled = step_size / (np.std(grad) + 1e-8)

        # Update the image by following the gradient (descent).
        mixed_image -= grad * step_size_scaled

        # Ensure the image has valid pixel-values between 0 and 255.
        mixed_image = np.clip(mixed_image, 0.0, 255.0)

        # Print a little progress-indicator.
        print(". ", end="")

        # Display status once every 10 iterations, and the last.
        if (i % 10 == 0) or (i == num_iterations - 1):
            print()
            print("Iteration:", i)

            # Print adjustment weights for loss-functions.
            msg = "Weight Adj. for Content: {0:.2e}, Style: {1:.2e}, Denoise: {2:.2e}"
            print(msg.format(adj_content_val, adj_style_val, adj_denoise_val))

            # Plot the content-, style- and mixed-images.
            plot_images(content_image=content_image,
                        style_image=style_image,
                        mixed_image=mixed_image)

            # Save the mixed image after every 10 iterations.
            filename='images/outputs_StyleTransfer/Mixed_Iteration' + str(i) +'.jpg'
            print(filename)
            save_image(mixed_image, filename)

    print()
    print("Final image:")
    plot_image_big(mixed_image)

    # Close the TensorFlow session to release its resources.
    session.close()

    # Return the mixed-image.
    return mixed_image
# ## Example
#
# Transfer the style of one image onto a portrait. First load the
# content-image, whose contours we want in the mixed-image.

content_filename = 'images/download.jpg'
content_image = load_image(content_filename, max_size=None)
# Save a copy of the input content-image for reference.
filenamecontent='images/outputs_StyleTransfer/Content.jpg'
print(filenamecontent)
save_image(content_image, filenamecontent)

# Load the style-image, whose colours and textures we want in the mixed-image.
style_filename = 'images/style4.jpg'
style_image = load_image(style_filename, max_size=None)
# Save a copy of the input style-image for reference.
filenamestyle='images/outputs_StyleTransfer/Style.jpg'
print(filenamestyle)
save_image(style_image, filenamestyle)

# Indices of the VGG16 layers used to match the content-image.
content_layer_ids = [4,6]

# The VGG16-model has 13 convolutional layers.
# This selects all those layers as the style-layers.
style_layer_ids = list(range(13))

# You can also select a sub-set of the layers, e.g. like this:
# style_layer_ids = [1, 2, 3, 4]

# Perform the style-transfer. This builds the loss-functions for the
# style- and content-layers and runs the optimization iterations.
# This can be very slow on a CPU!
# %%time
img = style_transfer(content_image=content_image,
                     style_image=style_image,
                     content_layer_ids=content_layer_ids,
                     style_layer_ids=style_layer_ids,
                     weight_content=1.5,
                     weight_style=10.0,
                     weight_denoise=0.3,
                     num_iterations=150,
                     step_size=10.0)

# Save the final output mixed-image.
filename='images/outputs_StyleTransfer/Mixed.jpg'
save_image(img, filename)

# ## License (MIT)
#
# Copyright (c) 2016 by [<NAME>](http://www.hvass-labs.org/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
Style_Transfer_Saving_Input_Output_Images.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Preprocess the Absenteeism dataset: one-hot encode the absence reasons,
# group them into 4 categories, extract month/weekday from the date,
# binarize education, and write the result to 'df_preprocessed.csv'.

import pandas as pd

df = pd.read_csv('Absenteeism_data.csv')
df

# Drop the row identifier — it carries no predictive information.
df = df.drop(['ID'], axis = 1)

# Overview the distinct reasons for absence.
sorted(df['Reason for Absence'].unique())

# Create dummy columns for 'Reason for Absence'.
reason_columns = pd.get_dummies(df['Reason for Absence'])
reason_columns

# Sanity check: each row should have exactly one '1' across the dummies.
reason_columns['check'] = reason_columns.sum(axis=1)
reason_columns.head()
reason_columns['check'].unique()
reason_columns['check'].sum(axis = 0)

# Re-create the dummies dropping the first column to avoid multicollinearity.
reason_columns = pd.get_dummies(df['Reason for Absence'], drop_first = True)

# Remove the original 'Reason for Absence' column from the dataframe.
df = df.drop(['Reason for Absence'], axis = 1 )

# Group the dummy variables into 4 categorical reason groups:
# 1-14 (disease), 15-17 (pregnancy), 18-21 (poisoning), 22+ (light reasons).
reason_type_1 = reason_columns.loc[:,1:14].max(axis =1)
reason_type_2 = reason_columns.loc[:,15:17].max(axis =1)
reason_type_3 = reason_columns.loc[:,18:21].max(axis =1)
reason_type_4 = reason_columns.loc[:,22:].max(axis =1)

# Concatenate the grouped reasons onto the original dataframe.
df = pd.concat([df, reason_type_1,reason_type_2,reason_type_3,reason_type_4], axis = 1)
df

# Rename the last four (unnamed) reason columns and reorder.
df.columns.values
column_new_names = ['Date', 'Transportation Expense', 'Distance to Work', 'Age',
                    'Daily Work Load Average', 'Body Mass Index', 'Education',
                    'Children', 'Pets', 'Absenteeism Time in Hours',
                    'Reason_1','Reason_2', 'Reason_3', 'Reason_4']
df.columns = column_new_names
df.head(3)

column_name_sort =['Reason_1','Reason_2', 'Reason_3', 'Reason_4','Date',
                   'Transportation Expense', 'Distance to Work', 'Age',
                   'Daily Work Load Average', 'Body Mass Index', 'Education',
                   'Children', 'Pets', 'Absenteeism Time in Hours']
df = df[column_name_sort]
df.head()

# Checkpoint: work on a copy from here on.
df_reason_mod = df.copy()

# Parse the date strings (day/month/year) into datetimes.
type(df_reason_mod['Date'])
df_reason_mod['Date']= pd.to_datetime(df_reason_mod["Date"], format = '%d/%m/%Y')
df_reason_mod.shape

# Extract the month into its own column.
# FIX: the original loop had a syntax error (missing closing parenthesis in
# `range(df_reason_mod.shape[0]`); the vectorized .dt.month accessor is the
# idiomatic equivalent and avoids positional-index assumptions entirely.
df_reason_mod['Month'] = df_reason_mod['Date'].dt.month
df_reason_mod

# Add a weekday column (Monday=0 ... Sunday=6).
def date_to_weekday(date_value):
    """Return the weekday number (0=Monday) of a datetime value."""
    return date_value.weekday()

df_reason_mod['Day of the Week'] = df_reason_mod['Date'].apply(date_to_weekday)
df_reason_mod.head()

# Transform Education: 1=high school, 2=graduate, 3=postgrad, 4=PhD
# into two categories: 1 -> 0, {2,3,4} -> 1.
df_reason_mod['Education'].unique()
df_reason_mod['Education'] = df_reason_mod['Education'].map({1:0,2:1,3:1,4:1})
df_reason_mod['Education'].value_counts()

# Final checkpoint and export.
df_preprocessed = df_reason_mod.copy()
df_preprocessed.to_csv('df_preprocessed.csv')
Data with Python/6.Absentieesm data process to MySQL/Absenteeism_data_transformation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # Diabetes prediction
#
# Exploratory analysis of 'diabetes_data.csv' followed by a decision-tree
# classifier on the binary 'class' target (Positive/Negative).

# # Libraries:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# # Dataset
df = pd.read_csv('diabetes_data.csv')
df.head()

# # Exploratory Data Analysis:
df.info()

# Counts of positive and negative cases.
df['class'].value_counts()

plt.figure(figsize=(5,5))
plt.title('Positive Cases vs Negative Cases', fontsize=20)
sns.countplot(data=df, x='class')

# Cases by gender.
plt.figure(figsize=(8,5))
sns.countplot(data=df, x='gender', hue='class')
plt.title('Cases by gender', fontsize=20)
plt.xlabel('Gender')

# Age distribution.
plt.figure(figsize=(10,5))
sns.histplot(data=df, x='age' , bins=30 , kde=True)
plt.title('Age distribution')
plt.xlabel('Age')

# Positive and negative cases by age.
plt.figure(figsize=(10,5))
sns.histplot(data=df, x='age' , hue='class' , bins=30 , kde=True)
plt.title('Positive and Negative Cases with Age' , fontsize=20)
plt.xlabel('Age')

# Map the 'class' labels directly to integers.
# FIX: the original mapped to the STRINGS '1'/'0' and then needed a separate
# astype('int') cell; mapping straight to ints gives the same final dtype.
df['class']= df['class'].map({'Positive':1, 'Negative':0})
df.info()
df

# Correlation analysis.
# NOTE(review): assumes all remaining non-'gender' columns are numeric; on
# pandas >= 2.0, df.corr() raises if object columns remain — verify.
df.corr()
corr_matrix= df.corr()

plt.figure(figsize=(10,8))
sns.heatmap(corr_matrix, annot=True)
plt.title('Correlation Heatmap', fontsize=20)

df.isnull().sum()
df.corr()['class'].sort_values(ascending=False)

# One-hot encode gender and drop the original column.
df_dummy=pd.get_dummies(df['gender'])
df=pd.concat([df, df_dummy], axis=1)
df
df_dummy=df.drop(['gender'], axis=1)
df_dummy
df=df.drop(['gender'], axis=1)
df

import pylab
import scipy.stats as stats

# Quantile-quantile plot to assess normality of the age distribution.
stats.probplot(df_dummy['age'], dist='norm', plot=pylab)
pylab.show()

# Normality test (Shapiro-Wilk).
from scipy.stats import shapiro
estadistico, p_value = shapiro(df['age'])
print('Estadístico=%.3f, p_value=%.3f' % (estadistico, p_value))
# FIX (comment only): the threshold is 0.05, not 0.5 — p_value > 0.05 would
# indicate normality; here the age distribution is NOT normal.

# Min-max normalization of age (illustration only; not fed to the model).
df_n=(df['age']-df['age'].min())/(df['age'].max()-df['age'].min())
df_n.describe()

# Decision tree classifier.
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split

# Features / target split.
X = df.drop('class', axis=1)
y = df['class']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model= DecisionTreeClassifier(max_depth=3)

# FIX: the original fit on the FULL X, y and reported model.score(X, y) —
# i.e. training accuracy, leaving the train/test split unused. Fit on the
# training set and report held-out accuracy instead.
model.fit(X_train, y_train)

# Accuracy on the held-out test set.
model_accuracy = round(model.score(X_test, y_test), 4)
print('Accuracy: %0.4f' % (model_accuracy))

# Visualize the fitted tree.
from sklearn. tree import plot_tree
plt.figure(figsize=(10,5))
plot_tree(decision_tree=model, filled=True);
Diabetes_prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Titanic survival classifier (no neural net): upsample the minority class,
# label/one-hot encode 5 features, train Logistic Regression and a Decision
# Tree, then write Kaggle-style predictions to FinalFile.csv.

import pandas as pd
import numpy as np
import csv
from google.colab import files
import io
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error,accuracy_score,classification_report
from sklearn.preprocessing import OneHotEncoder,LabelEncoder,StandardScaler,MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import resample
import matplotlib.pyplot as plt
#from keras.models import Sequential
#from keras.layers import Dense

# Upload train.csv and test.csv via the Colab file picker.
uploaded = files.upload()

train_values_df = pd.read_csv(io.BytesIO(uploaded['train.csv']))
submission_values_df = pd.read_csv(io.BytesIO(uploaded['test.csv']))

# Balancing Classes: upsample the minority class (Survived==1) with
# replacement to match the 549 majority rows, for reproducible results.
df_majority = train_values_df[train_values_df.Survived==0]
df_minority = train_values_df[train_values_df.Survived==1]

df_minority_upsampled = resample(df_minority,
                                 replace=True,     # sample with replacement
                                 n_samples=549,    # to match majority class
                                 random_state=123) # reproducible results

train_values_df = pd.concat([df_majority, df_minority_upsampled])

# Dividing Data: positional columns [2,4,5,6,7] of train are the features
# (presumably Pclass, Sex, Age, SibSp, Parch — verify against the CSV
# header), column 1 is the Survived target; test shifts by one because it
# has no Survived column.
data_X = train_values_df.iloc [:,[2,4,5,6,7]]
data_Y = train_values_df.iloc [:,1].to_numpy()
submission_X = submission_values_df.iloc[:,[1,3,4,5,6]]

# Checking for class imbalance after the upsampling.
counter_0,counter_1 = 0,0
for vals in data_Y:
    if vals == 0:
        counter_0+=1
    else:
        counter_1+=1
print(counter_0,counter_1)

scaler = MinMaxScaler()

# Label Encoding: encode every feature column to integer codes, then
# min-max scale the Age column back into [0, 1].
# NOTE(review): fit_transform is applied separately to train and test, so
# the integer codes may disagree between the two if category sets differ.
label_encoder = LabelEncoder()
data_X = data_X.apply(label_encoder.fit_transform)
scaler.fit(data_X[['Age']])
age_vector = scaler.transform(data_X[['Age']])
data_X['Age'] = age_vector

submission_X = submission_X.apply(label_encoder.fit_transform)
scaler.fit(submission_X[['Age']])
sub_age_vector = scaler.transform(submission_X[['Age']])
submission_X['Age'] = sub_age_vector

# Combined array so the one-hot encoder sees every category present in
# either train or test (avoids shape mismatches between the two).
label_encoder_fit_array = pd.concat([data_X,submission_X])
label_encoder_fit_array = label_encoder_fit_array.to_numpy()

# Converting individual frames to numpy arrays after combining.
data_X = data_X.to_numpy()
submission_X = submission_X.to_numpy()

# Updated One Hot Encoding: fit on the combined array, transform each split.
# NOTE(review): OneHotEncoder(categorical_features=...) was deprecated in
# scikit-learn 0.20 and REMOVED in 0.22 — this cell only runs on an old
# sklearn; modern code would use ColumnTransformer. Left unchanged because
# replacing it alters the encoded column layout.
my_encoder_X = OneHotEncoder(categorical_features = [0,1,3,4])
my_encoder_X.fit(label_encoder_fit_array)
data_X_transformed = my_encoder_X.transform(data_X).toarray()
submission_transformed = my_encoder_X.transform(submission_X).toarray()

# One Hot Encoding (previous per-split approach, kept for reference):
#my_encoder_X = OneHotEncoder(categorical_features = [0,1,3,4])
#data_X_transformed = my_encoder_X.fit_transform(data_X).toarray()
#submission_transformed = my_encoder_X.fit_transform(submission_X).toarray()

# One-hot version of the target (computed but unused below).
my_encoder_Y = OneHotEncoder()
data_Y_transformed = my_encoder_Y.fit_transform(data_Y.reshape(-1,1)).toarray()

# Train_Test_Split
X_train, X_test, Y_train, Y_test = train_test_split(data_X_transformed, data_Y, test_size=0.30, random_state=40)

#######ADDRESS THE ISSUE WITH DIFFERENT SIZES OF TRAIN SET AND SUBMISSION SET
# BECAUSE OF A MISSING DATA POINT WITH SPECIFIC CLASS VALUE IN ONE OF THE
# FEATURES ---> This causes difference in shape!
#######
print(data_X_transformed.shape)
#print(data_X_transformed)
print(submission_transformed.shape)
#print(submission_transformed)

# Logistic Regression
# NOTE(review): penalty="none" was renamed to penalty=None in sklearn 1.2+.
logistic_regression = LogisticRegression(solver='lbfgs', penalty="none", fit_intercept=False)
logistic_regression.fit(X_train,Y_train)
predictions = logistic_regression.predict(X_test)
score = accuracy_score(predictions,Y_test) * 100
print(score)
print(classification_report(predictions,Y_test))

# Decision Tree
decision_tree = DecisionTreeClassifier(random_state=0, criterion='gini',max_depth=5, class_weight = 'balanced')
decision_tree.fit(X_train,Y_train)
predictions = decision_tree.predict(X_test)
score = accuracy_score(predictions,Y_test) * 100
print(score)
print(classification_report(predictions,Y_test))

# Submission File Preparation:
# write the Logistic Regression predictions into the Submission CSV file.
logistic_regression = LogisticRegression(solver='lbfgs', penalty="none", fit_intercept=False)
logistic_regression.fit(X_train,Y_train)
predictions = logistic_regression.predict(submission_transformed)
#print(prob_pred)

# Read the existing submission template rows.
output_list = []
with open('SubmissionFile.csv') as csv_file:
    reader = csv.reader(csv_file)
    output_list = list(reader)
    # Redundant: the with-statement already closes the file on exit.
    csv_file.close()

pred_list = predictions.tolist()
print(pred_list)

# Append a 'Survived' header plus one prediction per data row.
output_list[0].append('Survived')
counter = 1
for items in pred_list:
    output_list[counter].append(items)
    counter+=1
print(output_list)

# Write the augmented rows to the final CSV file.
with open("FinalFile.csv",'w') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerows(output_list)
    # Redundant: the with-statement already closes the file on exit.
    csv_file.close()

#files.download("FinalFile.csv")
TitanicDisasterManagementClassifier/NoNeuralNet_TitanicClassificationProblem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import can import udsoncan # + bus = can.Bus(interface='socketcan', channel='can0', receive_own_messages=False) # send a message message = can.Message(arbitration_id=0x110, is_extended_id=False, data=[0x8f, 0x00, 0x00, 0x9e, 0x86, 0x00, 0x00, 0x00]) bus.send(message, timeout=0.2) # + message = can.Message(arbitration_id=0x110, is_extended_id=False, data=[0x80, 0x00, 0x00, 0x9e, 0x86, 0x00, 0x00, 0x00]) bus.send(message, timeout=0.2) # - import time time.sleep(0.1) for X in range(0x00, 0xFF): print(f"{X:X}") message = can.Message(arbitration_id=0x110, is_extended_id=False, data=[0X00, X, 0x00, 0x9e, 0x86, 0x00, 0x00, 0x00]) bus.send(message, timeout=0.2) time.sleep(0.25)
notebooks/MS_CAN_Bus_Tach.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #import various python libraries import requests import csv import bs4 import re import pandas as pd import seaborn import matplotlib.pyplot as plt from datetime import date today = date.today() # Load Data Files df = pd.read_csv('../data/CA_Coronovirus_Websites_by_county.csv') # Scraper helper function def regex_paragraph_1(list_level_1): ''' Makes a 'paragraph' of text from a list in order to perform REGEX.. Returns a text of combined strings for parsing. ''' paragraph = '' for x in list_level_1: paragraph += x + ' ' return(paragraph) # + # SAVE YESTERDAYS COLUMN NAMES AND TIMESTAMPS def save_yesterday_data (cases=None, deaths=None, tests=None, recovered_p=None): if tests: yesterday_tests = tests if cases: yesterday_cases = cases if deaths: yesterday_deaths = deaths if recovered_p: yesterday_recovered = recovered return(yesterday_cases,yesterday_deaths,yesterday_tests,yesterday_recovered) y_c, y_d, y_t, y_r = save_yesterday_data(cases=name, deaths=string_, tests=test_admin,recovered_p=recovered) # Initial Current TImestamp columns to yesterday values df[name] = df[y_c] df[string_] = df[y_d] df[recovered] = df[y_r] df[test_admin] = df[y_t] # - # Make Columns with CURRENT TIMESTAMP test_admin = str(today) +" Tests" name = str(today)+' Confirmed Cases' string_ = str(today) +' Deaths' recovered = str(today)+ ' Recovered' # + # Function that checks website accuracy and does initial soup work def initial_study(county_no): print(df.loc[county_no]) print(df.loc[county_no,'Website']) # Optional Soup Error Checking # soup_c = soups[county_no].find_all('div') # for x in soup_c: # print(x) # - initial_study(3) # A beautiful soup web scraping function def run_bs4(link, lxml=None): user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36' headers = {'User-Agent': user_agent} page = requests.get(link) html = page.content soup = bs4.BeautifulSoup(html, 'html.parser') return(soup) # SCRAPES COUNTY PUBLIC HEALTH WEBSITES soups= [] for x in df['Website']: s = run_bs4(x) soups.append(s) # ## BERKLEY # for x in soups[3].find_all('p'): if 'cases' in str(x): try: text = x.find('strong').find(text=True) except: pass df.loc[3, name]= text df.loc[3] # # Alameda print(df.loc[0, 'Website']) with open('../webpages/Coronavirus Disease (COVID-19) - ACPHD.mhtml', 'r') as htm: soups[0] = htm.read() # + cases = re.findall(r'Positive Cases: <em>([0-9]+)', soups[0])[0] cases # + deaths = re.findall(r'Deaths: <em>([0-9]+)', soups[0])[0] deaths # - df.loc[0, name]= cases df.loc[0, string_] = deaths # ## ALPINE df.iloc[1] df.iloc[1].fillna(0) # ## Amador # + #amador paragraph = '' for x in soups[2].find_all('span'): for y in x.find_all(text=True): paragraph += y.strip()+ ' ' # print(paragraph) cases = re.findall(r'Cases As of [A-Z][a-z]+ [0-9]+, [0-9]+: ([0-9]+)', paragraph)[0] # print(cases) df.loc[2, name] = cases df.loc[2, name] # - # ## BUTTE df.loc[4] paragraph = '' for no, x in enumerate(soups[4].find_all('td')): if 'Total' in str(x): paragraph += soups[4].find_all('td')[no+1].find(text=True).strip() + ' ' cases = re.findall(r'([0-9]+) ([0-9]+)', paragraph)[0] df.loc[4,name] = re.findall(r'([0-9]+) ([0-9]+)', paragraph)[0][0] df.loc[4, string_] = re.findall(r'([0-9]+) ([0-9]+)', paragraph)[0][1] # ## Calaveras # + for x in soups[5].find_all('span', class_= 'Head', id="dnn_ctr8879_dnnTITLE_titleLabel"): results = x.contents print(results) case = re.findall(r'Number of confirmed cases in Calaveras County: ([0-9]+)', results[0])[0] df.loc[5, name] = case # - case df.loc[5] # ## COLUSA for x in soups[6].find_all('div', class_="fr-view"): for row in x.find_all('strong'): # print(row) if 'Cases' in str(row): cases = row.find(text=True) cases = 
re.findall(r'Cases: ([0-9]+)', cases)[0] df.loc[6, name]=cases # # CONTRA COSTA paragraph = ' ' for x in soups[7].find_all('div', class_="txtNew"): for y in x.find_all('h1'): paragraph += y.find(text=True) + ' ' df.loc[7, string_] = re.findall(r'DEATHS ([0-9]+)', paragraph)[0] df.loc[7, name] = re.findall(r'TOTAL CASES ([0-9]+)', paragraph)[0] # ## <NAME> print(df.loc[8, 'Website']) # + paragraph = '' for x in soups[8].find_all('div'): for z in x.find_all(text=True): # print(z.strip()) paragraph += z.replace('\xa0', '').strip() + ' ' tests = re.findall(r'Total Number of Tests Administered ([0-9]+)', paragraph)[0] cases = re.findall(r'Number of Positive COVID-19 Cases ([0-9]+)', paragraph)[0] # - df.loc[8, name]= case df.loc[8, test_admin]= tests # ## <NAME> print(df.loc[9, "Website"]) import re # + paragraph_ = '' for x in soups[9].find_all('table'): text = x.find_all(text=True) # print(text) for no, y in enumerate(text): paragraph_ += y.strip().replace('\n', '').replace('\u200b', '') +' ' print(paragraph_) cases = re.findall(r'Positive Tests ([0-9]+)', paragraph_)[0] print(cases) deaths =re.findall(r'Deaths ([0-9]+)', paragraph_)[0] print(deaths) cases = cases tests = re.findall(r'Total Number of Tests\*\* ([0-9]+)', paragraph_)[0] df.loc[9, name] = cases df.loc[9, string_] = deaths df.loc[9, test_admin] = tests # - # ## FRESNO # + paragraph_case = '' paragraph_deaths = '' paragraph_tests = '' for no, x in enumerate(soups[10].find_all('li')): text = x.find(text=True) # print(text) if 'cases' in text: paragraph_case += soups[10].find_all('li')[no+1].find(text=True) + ' ' paragraph_case += soups[10].find_all('li')[no+2].find(text=True) + ' ' paragraph_case += soups[10].find_all('li')[no+3].find(text=True) + ' ' if 'deaths' in text: paragraph_deaths += text + ' ' if 'Tests' in text: paragraph_tests += text + ' ' paragraph_tests = paragraph_tests.replace(',', '') # print(paragraph_tests) cases = re.findall(r'([0-9]+) \(Travel-Related\) ([0-9]+) \(Person-to-Person\) 
([0-9]+) \(Community-Spread\)',\ paragraph_case) tests = re.findall(r'([0-9]+) \(Tests',paragraph_tests)[0] cases = sum([int(x) for x in cases[0]]) # print(tests, cases) df.loc[10, name] = cases df.loc[10, test_admin] = tests # - df.loc[10] # ## GLENN county = 11 print(df.loc[county, 'Website']) df.iloc[11] paragraph = '' for x in soups[11].find_all('strong'): for y in x.find_all(text=True): paragraph += y.strip() + ' ' paragraph df.loc[11, name] = re.findall(r'COVID-19 Cases ([0-9]+)', paragraph)[0] df.loc[11, test_admin] = 32 # ## Humboldt df.loc[12, 'Website'] = 'https://humboldtgov.org/2707/COVID-Test-Results' #'https://humboldtgov.org/CivicAlerts.aspx?AID=2657' base = 'https://humboldtgov.org' soups[12] = run_bs4(df.loc[12, 'Website']'') new_link = [] for x in soups[12].find_all('div', class_="widgetBody cpGrid cpGrid1"): if len(x)>0: try: for y in x.find('li', class_='widgetItem'): try: new_link.append(y.find('a').get('href')) except: pass except: pass new_link latest_link = base + new_link[0] positive = [] for x in soups[12].find_all('div', class_="outer col col24 first last"): # print(x) for y in x.find_all('div'): text =y.find(text=True) #print(text) positive.append(str(text).replace('\n', "").replace('\t', "").replace('\r', "")) paragraph = regex_paragraph_1(positive) no_case = 0 cases = re.findall(r'Total [new]* positive cases [confirmed]* on [A-Z][a-z]+ [0-9]+: ([0-9]+) |Total positive cases [confirmed]* on [A-Z][a-z]+ [ ]*[0-9]+: [ ]*([0-9]+)', paragraph) for x,y in zip(cases[:3], cases[3:]): print(x[0], y[1]) no_case += int(x[0]) + int(y[1]) df.loc[12, name] = no_case + 10 df.loc[12, test_admin] = 439 + 409 df.loc[12].fillna(0) # ## imperial # + print(df.loc[13, 'Website']) # + paragraph = '' for x in soups[13].find_all('div', class_="panel-body"): #print(x) for y in x.find_all('td'): text = y.find(text=True) if text: paragraph += text.replace('\xa0', "") +' ' cases = re.findall(r'\(Confirmed Cases\) ([0-9]+)', paragraph)[0] tests = re.findall(r'Total 
Tested ([0-9]+)', paragraph)[0] df.loc[13, name] = cases df.loc[13, test_admin]= tests # - paragraph # ## INYO try: paragraph = '' for no, x in enumerate(soups[14].find_all('meta')): if x: paragraph += str(x).strip() + ' ' paragraph = paragraph.replace("\n", "").replace("\xa0", "").replace("\t", "") deaths= re.findall(r'([0-9]+) deaths', paragraph)[0] cases= re.findall(r'([0-9]+) confirmed', paragraph)[0] df.loc[14, name] = cases df.loc[14, string_] = deaths except: pass print(df.loc[14, 'Website']) # + paragraph = '' for no, x in enumerate(soups[14].find_all('table')): for y in x.find_all('li'): for z in y.find_all(text=True): # print(z) paragraph += str(z).strip() + ' ' mono = '' for no, x in enumerate(soups[14].find_all('p')): for y in x.find_all('span'): for z in y.find_all(text=True): mono += z.strip() + ' ' paragraph = paragraph.replace("\n", "").replace("\xa0", "").replace("\t", "") deaths= re.findall(r'([0-9]+) deaths', paragraph)[0] cases= re.findall(r'([0-9]+) confirmed', paragraph)[0] tests = re.findall(r'Tests Administered: ([0-9]+)', paragraph)[0] df.loc[14, name] = cases df.loc[14, string_] = deaths df.loc[14, test_admin] = tests # - mono_results = re.findall(r'Mono County Cases: ([0-9]+) confirmed COVID-19 case \| ([0-9]+) deaths', mono) df.loc[27, name] = mono_results[0][0] df.loc[27, string_] = mono_results[0][1] # + ## KERN # + print(df.loc[15, 'Website']) with open('../webpages/2019 Novel Coronavirus - Kern County Public Health.mhtml', 'r') as htm: soups[15] = htm.read() soups[15] = bs4.BeautifulSoup(soups[15]) df.loc[15, name] = 155 df.loc[15, string_] = 1 df.loc[15, test_admin] = 4110 # - # ## Kings County paragraph ='' soups[16].find_all('b') for x in soups[16].find_all('b'): for y in x.find_all('h3'): paragraph += str(y) + ' ' df.loc[16, name] = re.findall(r'Confirmed Cases: ([0-9]+)', paragraph)[0] df.loc[16, test_admin] = re.findall(r'Samples Collected by Health Dept.: ([0-9]+)', paragraph)[0] paragraph re.findall(r'Confirmed Cases: 
([0-9]+)', paragraph)[0] re.findall(r'Samples Collected by Health Dept.: ([0-9]+)', paragraph)[0] # ## Lake county = 17 print(df.loc[17, 'Website']) df.loc[county, test_admin] = 110 # ## LASSEN print(df.loc[18, 'Website']) # # LONG BEACH # # # print(df.loc[19, 'Website']) # + paragraph = '' for x in soups[20].find_all('table'): for y in x.find_all(text=True): paragraph += y.strip().replace('\xa0', '') +' ' results= re.findall(r'- Long Beach ([0-9]+)', paragraph) # print(results) cases = results[0] deaths = results[1] # - df.loc[19, name] = cases df.loc[19, string_] = deaths # ## Los Angeles # data sometimes includes Long Beach and Pasadena Counties county = 20 print(df.loc[20, 'Website']) # + paragraph = '' for x in soups[20].find_all('table'): # print(x.find_all('th')) for te, num in zip(x.find_all('th'), x.find_all('td')): # print(te.find(text=True), num.find(text=True)) paragraph += num.find(text=True).strip().replace('\xa0', '') +' ' paragraph += te.find(text=True).strip().replace('\xa0', '') + ' ' cases= re.findall(r'Total Cases ([0-9]+)*', paragraph)[0] deaths = re.findall(r'Deaths \- Los Angeles County \(excl.LBandPas\) ([0-9]+)', paragraph) # - df.loc[20, name] = cases df.loc[20, string_] = deaths # ## MADERA county = 21 df.loc[county] print(df.loc[21, 'Website']) info = [] paragraph = '' for x in soups[21].find_all('div', id='widget_685_4225_1649'): text = x.find_all('td') for y in x.find_all('td'): paragraph += y.find(text=True).strip() + " " try: if int(y.find(text=True)): info.append(str(y.find(text=True))) except: pass paragraph for no, x in enumerate(info): x = x.replace('\xa0', "") info[no] = int(x) cases = max(info) deaths = re.findall(r'deceased', paragraph) recover = re.findall(r'recovered', paragraph) len(deaths), len(recover) df.loc[21, name] = cases df.loc[21, string_] = len(deaths) df.loc[21, recovered] = len(recover) # # ## MARIN county = 22 df.loc[county] print(df.loc[county, 'Website']) # + # soups[22] = run_bs4(df.loc[22, 'Website']) # - 
info=[] for x in soups[22].find_all('table', class_="table table-striped table-hover cols-1"): # print(x) for y in x.find_all('tbody'): # print(y.find_all(text=True)) for no, z in enumerate(y.find_all(text=True)): if len(z) >1: info.append(z.strip()) paragraph = regex_paragraph_1(info) paragraph # + paragraph = regex_paragraph_1(info) cases = re.findall(r'Total Cases ([0-9]+)', paragraph)[0] cases # - df.loc[22, name] = cases df.loc[22, string_] = 4 df.loc[22, test_admin] = 716 # ## MARIPOSA df.loc[23, 'Website'] info = [] for x in soups[23].find_all('div', class_="fr-view"): for y in x.find_all('strong'): for z in y.find_all(text=True): info.append(str(z).replace('\xa0', "")) paragraph = '' for x in info: paragraph += x + ' ' paragraph cases = re.search(r'Total Positive: [0-9]+', paragraph).group() cases = re.search(r'[0-9]+', cases).group() cases = re.findall(r'Total Positive: ([0-9]+)', paragraph)[0] tests = re.findall(r'Total Tested: ([0-9]+)', paragraph)[0] deaths = re.findall(r'Total Deaths: ([0-9]+)', paragraph)[0] df.loc[23, test_admin] = tests df.loc[23, name] = cases df.loc[23, string_] = deaths # ## MENDOCINO print(df.loc[24, 'Website']) # + paragraph = '' for x in soups[24].find_all('div', id="widget_313_6901_4337"): for y in x.find_all('li'): text = y.find_all(text=True) for z in text: paragraph += z.strip() +' ' pos = re.findall(r'Positive tests: ([0-9]+)', paragraph)[0] tests = re.findall(r'Total tests: ([0-9]+)', paragraph)[0] df.loc[24, name] = pos df.loc[24, test_admin] = tests # - # ## MERCED df.loc[25] # + info={} for x in soups[25].find_all('table'): print(x) for title, no in zip(x.find_all('th'), x.find_all('td')): info[title.find(text=True)] = int(no.find(text=True)) info # - df.loc[25, name]= info['Cases'] df.loc[25, string_]= info['Deaths'] # df.loc[25, recovered] = info['Recoveries'] # df.loc[25, test_admin] = info['Tests'] # ## MODOC df.loc[26, 'Website'] = 'https://www.modocsheriff.us/modoc-covid-19-incident-updates' print(df.loc[26, 
'Website']) soups[26] = run_bs4('https://www.modocsheriff.us/modoc-covid-19-incident-updates') soups[26] df.loc[26, test_admin] = 31 # ## MONO print(df.loc[27, 'Website']) with open('../webpages/Mono County Coronavirus Response.htm', 'r') as htm: print(htm.read()) # + df.loc[27, name]= 18 df.loc[27, string_]= 1 df.loc[27, test_admin] = 84 # - # ## MONTEREY print(df.loc[28, 'Website']) # + # soups[28] = run_bs4(df.loc[28, 'Website']) # + paragraph = '' for no_1, x in enumerate(soups[28].find_all('div')): for no, y in enumerate(x.find_all('p')): text = y.find_all(text=True) for z in text: if len(z) > 0: paragraph += str(z.strip()) + ' ' try: cases = re.findall(r'Total ([0-9]+) 100', paragraph)[0] tests = re.findall(r'Number of tests completed: ([0-9]+)', paragraph)[0] deaths = re.findall(r'Fatalities ([0-9]+)', paragraph)[0] except: cases = re.findall(r'Total ([0-9]+) 100', paragraph)[0] tests = re.findall(r' completado: ([0-9]+)', paragraph)[0] deaths = re.findall(r' mortales ([0-9]+)', paragraph)[0] # + cases, tests, deaths # - df.loc[28, name]= cases df.loc[28, string_]= deaths df.loc[28, test_admin] = tests # ## NAPA df.loc[29, 'Website'] = 'https://legacy.livestories.com/s/v2/coronavirus-report-for-napa-county-ca/9065d62d-f5a6-445f-b2a9-b7cf30b846dd/' print(df.loc[29, 'Website']) # !ls ../data/webpages with open('../webpages/Coronavirus Report for Napa County, CA - LiveStories.mhtml', 'r') as htm: text = htm.read() soups[29] = bs4.BeautifulSoup(text) # + paragraph = '' for x in soups[29].find_all('table'): for y in x.find_all('tr'): for z in y.find_all('td'): for a in z.find_all(text=True): paragraph += str(a.strip()).replace('\n', '').replace('=20', '').replace(' ', '') info_cases = re.findall(r'Napa County ([0-9]+) ([0-9]+)', paragraph) cases = info_cases[0][0] deaths = info_cases[0][1] tests = re.findall(r'TOTAL ([0-9]+)', paragraph) df.loc[29, name] = cases df.loc[29, string_] = deaths #df.loc[29, recovered] = recover_ tests = re.findall(r'TOTAL ([0-9]+)', 
paragraph) tests = tests[0] df.loc[29, test_admin] = tests # - # ## NEVADA df.loc[30, 'Website'] = 'https://www.mynevadacounty.com/2924/Coronavirus' # + # soups[30] = run_bs4(df.loc[30, 'Website']) # - paragraph += str(a.strip()).replace('\n', '').replace('=20', '').replace(' ', '') paragraph = '' for x in soups[30].find_all('table'): for y in x.find_all('tr'): for z in y.find_all('td'): for a in z.find_all(text=True): print(a) paragraph += str(a.strip()) + ' ' paragraph results = re.findall(r'Positive Tests ([0-9]+)',paragraph) cases = results[0] cases deaths= re.findall(r'Deaths ([0-9]+)', paragraph) deaths = deaths[0] df.loc[30, name] = cases df.loc[30, string_] = deaths # ## ORANGE COUNTY # + paragraph = '' for x in soups[31].find_all('div', class_="col-md-6 col-sm-6 col-xs-12"): for y, z in zip(x.find_all('h2'), x.find_all('h1')): paragraph += y.find(text=True) +' ' + z.find(text=True) +' ' df.loc[31, name]= re.findall(r'Cumulative Cases to Date ([0-9]+)', paragraph)[0] df.loc[31, string_]= re.findall(r'Cumulative Deaths to Date ([0-9]+)', paragraph)[0] # df.loc[31, test_admin] = info['test'] # - print(df.loc[31, 'Website']) # ## PASADENA df.loc[32, 'Website'] = 'https://www.cityofpasadena.net/public-health/news-announcements/information-on-covid-19/' # + info={} key=[] for x in soups[32].find_all('aside'): table = x.find('table') rows = table.find_all('tr') for no, y in enumerate(rows): count = 0 for a in y.find_all('th'): key.append(a.find(text=True)) for b in y.find_all('td'): print(key[count]) info[key[count]]=b.find(text=True) count+=1 df.loc[32, name]= int(info['Cases']) df.loc[32, string_]= int(info['Deaths']) # - info # ## PLACER df.loc[33, 'Website'] = 'https://www.placer.ca.gov/6448/Cases-in-Placer' # + paragraph = '' for x in soups[33].find_all('table'): for no, y in enumerate(x.find_all('td')): paragraph += y.find(text=True) + ' ' df.loc[33, name]= re.findall(r'Lab Confirmed Cases \(includes those who have died\) ([0-9]+)', paragraph)[0] df.loc[33, 
string_]= re.findall(r'Deaths ([0-9]+)', paragraph)[0] # - paragraph # ## PLUMAS df.loc[34, 'Website'] # + paragraph ='' key=[] for x in soups[34].find_all('table'): y = x.find('tbody') count = 0 for no, a in enumerate(y.find_all('td')): print(no, a.find(text=True)) text = a.find(text=True) if text: paragraph += y.find_all('td')[no].find(text=True) + ' ' # - paragraph results = re.findall(r'Positive Test Results ([0-9]+) People Tested ([0-9]+)', paragraph) results cases = results[0][0] tests = results[0][1] # + df.loc[34, name]= cases df.loc[34, test_admin] = tests # - # ## Riverside print(df.loc[35, 'Website']) paragraph = '' for x in soups[35].find_all('div'): # print(x) for y in x.find_all('p'): #print(y.find_all(text=True)) for no, z in enumerate(y.find_all(text=True)): paragraph += z.strip().replace('\xa0', '') + ' ' ''' cases = re.findall(r'Confirmed cases : ([0-9]+)', paragraph) cases = cases[0] deaths = re.findall(r'Deaths: ([0-9]+)', paragraph) deaths = deaths[0] deaths, cases ''' df.loc[35, name]= 493 df.loc[35, string_]= 14 # ## <NAME> # manually check dashboard print(df.loc[36, 'Website']) df.loc[36, name]= 23 df.loc[36, string_]= 1 df.loc[36, test_admin] = 289 df.loc[36, recovered] = 11 # ## <NAME> print(df.loc[37, 'Website']) # + paragraph = '' for x in soups[37].find_all('div', class_= "et_pb_text_inner"): for z, y in zip(x.find_all('h2'), x.find_all('span')): paragraph += z.find(text=True) + ' '+ y.find(text=True) + ' ' cases = re.findall(r'COVID-19 CASES IN ([0-9]+)', paragraph) deaths = re.findall(r'COVID-19 ASSOCIATED DEATHS IN ([0-9]+)', paragraph) cases, deaths # - df.loc[37, name]= re.findall(r'COVID-19 CASES IN ([0-9]+)', paragraph)[0] df.loc[37, string_]= re.findall(r'COVID-19 ASSOCIATED DEATHS IN ([0-9]+)', paragraph)[0] # ## <NAME> # # df.loc[38, 'Website'] # dictionary_outer keys : ['COVID-19 Case Summary', 'San Diego County Residents', # 'Non-SanDiego County Residents', 'Total'] # dictionary_inner sample : info ['Total'] = # {'Total 
Positives': '297','0-9 years': '2','10-19 years': '3', # '20-29 years': '65','40-49 years': '54', # '50-59 years': '40','60-69 years': '24', # '70-79 years': '20','80+ years': '12', # 'Age Unknown': '0','Gender': '\xa0', # 'Female': '112','Male': '185','Unknown': '0', # 'Hospitalizations': '59', 'Intensive Care': '29', # 'Deaths': '2'} ''' key = [] info = {} for x in soup.find_all('table'): for no_1, y in enumerate(x.find_all('tr')): text = y.find_all(text=True) if '\n' in text: key.append(text) # trying to undo some wonky formatting for no, item in enumerate(key[1]): key[1][no] = str(item).replace('\n', '').replace(" ", "") # trying to remove some unicode characters from strings for no, x in enumerate(key[3]): key[3][no]=key[3][no].replace('\xa0', '0') # setting up some initial key values for dictionary list_=[] error_ = [] info[key[1][1]] = {key[1][1] : list_} info[key[1][3]] = {key[2][1]:key[2][3]} info[key[1][5]] = {key[2][1]:key[2][5]} info[key[1][7]] = {key[2][1]:key[2][7]} for x in range(2,21): try: list_.append(key[x][1]) info[key[1][3]].update({key[x][1]: key[x][3]}) info[key[1][5]].update({key[x][1]: key[x][5]}) info[key[1][7]].update({key[x][1]: key[x][7]}) except: error_.append(x) cases = info['Total']['Total Positives'] deaths = info['Total']['Deaths'] return((cases, deaths, 0,0))''' # + key = [] key_2 = [] info = {} paragraph = '' for x in soups[38].find_all('table'): for no_1, y in enumerate(x.find_all('tr')): for z in y.find_all('td'): paragraph += z.find(text=True).strip().replace('\xa0', '').replace('\n', '') + ' ' # - paragraph cases= re.findall(r'Residents Total Positives ([0-9]+)', paragraph)[0] deaths= re.findall(r'Deaths ([0-9]+)', paragraph)[0] df.loc[38, name]= cases df.loc[38, string_]= deaths # ## <NAME> # # # # # # df.loc[39, 'Website'] paragraph = '' for x in soups[39].find_all('div', class_='box2'): for y in x.find_all('p'): paragraph += y.find(text=True) +' ' # + paragraph # - cases = re.findall(r'Total Positive Cases: ([0-9]+)', 
paragraph)[0] deaths = re.findall(r'Deaths: ([0-9]+)', paragraph)[0] deaths df.loc[39, name]= cases df.loc[39, string_]= deaths # ## <NAME> # # # # df.loc[40, 'Website'] paragraph = '' for x in soups[40].find_all('tr'): for y in x.find_all('strong'): paragraph += y.find(text=True) + ' ' paragraph cases = re.findall(r'Confirmed COVID-19 Cases ([0-9]+) ([0-9]+)', paragraph) cases df.loc[40, name]= cases[0][0] df.loc[40, string_]= cases[0][1] # ## <NAME> # print(df.loc[41, 'Website']) page = requests.get('https://e.infogram.com/f6d9f731-5772-4da5-b149-5e42cc1c3b89?parent_url=https%3A%2F%2Fwww.emergencyslo.org%2Fen%2Fpositive-case-details.aspx&src=embed#') slo = pd.read_csv('../data/data_slo.csv', header=None) slo # + for no, y in enumerate(slo.loc[:, 0]): if no == 0: cases = re.findall(r'>([0-9]+)<', y)[0] if no == 2: recovered_ = re.findall(r'40px">([0-9]+)<', y)[0] if no == 3: deaths = re.findall(r'40px">([0-9]+)<', y)[0] # - df.loc[41, name]= cases df.loc[41, string_]= deaths df.loc[41, recovered]= recovered_ soups[41]=run_bs4('https://e.infogram.com/f6d9f731-5772-4da5-b149-5e42cc1c3b89?parent_url=https%3A%2F%2Fwww.emergencyslo.org%2Fen%2Fpositive-case-details.aspx&src=embed#async_embed') # ## San Mateo print(df.loc[42, 'Website']) paragraph = '' for x in soups[42].find_all('table'): print(x) for y in x.find_all('td'): paragraph += y.find(text=True).strip() + ' ' paragraph # + # results = re.findall(r' ([0-9]+) Deaths ([0-9]+)', paragraph) cases = 309 deaths = 10 # - df.loc[42, name]= cases df.loc[42, string_]= deaths # ## Santa Barbara with open('../webpages/Status Reports – County of Santa Barbara.mhtml', 'r') as htm: soups[43] = htm.read() print(df.loc[43, 'Website']) work_around = bs4.BeautifulSoup(soups[43]) # + info =[] dates = [] for x in work_around.find_all('div'): for y,z in zip(x.find_all('td'), x.find_all('li')): nums = y.find_all(text=True) text = z.find_all(text=True) info.append(nums) if 'As of' in str(text): info.append(text) paragraph = '' for x in 
info: for y in x: paragraph += y + ' ' pos = re.findall(r'# Positive Results ([0-9]+)', paragraph)[0] rec = re.findall(r'Recovered ([0-9]+)', paragraph)[0] tests = re.findall(r'Total Tested ([0-9]+)', paragraph)[0] df.loc[43, name]= pos df.loc[43, test_admin]= tests df.loc[43, recovered] = rec # - pos # + rec # + tests # - # ## Santa Clara df.loc[44, 'Website'] = 'https://www.sccgov.org/sites/phd/DiseaseInformation/novel-coronavirus/Pages/home.aspx' print(df.loc[44, 'Website']) # + paragraph = '' for x in soups[44].find_all('table'): # print(x) for no, y in enumerate(x.find_all('td')): for z in y.find_all(text=True): paragraph += z.replace('\n', '') + ' ' #paragraph += # - info = re.findall(r"Total Confirmed Cases Hospitalized Deaths ([0-9]+) ([0-9]+) ([0-9]+)", paragraph) info df.loc[44, name] = 1019 df.loc[44, string_] = 36 df.loc[44, test_admin] = 9218 # ## Santa Cruz # df.loc[45, 'Website'] # + paragraph = '' for x in soups[45].find_all('div', class_='Normal'): for y in x.find_all('td'): text = y.find_all('p') for z in y.find_all('p'): paragraph += z.find(text=True).replace('\xa0', "") + ' ' results = re.findall(r'Cases/Deathsas of 0[0-9]/[0-9]+/20 [0-9]+:[0-9]+[ap]m ([0-9]+)/ ([0-9]+)', paragraph) results cases = results[0][0] deaths=results[0][1] # - paragraph df.loc[45, name]= cases df.loc[45, string_] = deaths # ## Shasta df.loc[46, 'Website'] = 'https://www.co.shasta.ca.us/covid-19/overview' info = [] for x in soups[46].find_all('table'): for y in x.find_all('td'): info.append(y.find(text=True)) paragraph = regex_paragraph_1(info) # + cases = re.search(r'Total Confirmed Cases [0-9]+', paragraph).group() cases = re.search(r'[0-9]+', cases).group() deaths = re.search(r'Deaths [0-9]+', paragraph).group() deaths = re.search(r'[0-9]+', deaths).group() # - info df.loc[46, name]= cases df.loc[46, string_]= deaths # ## SIERRA df.loc[47, 'Website'] = 'http://sierracounty.ca.gov/582/Coronavirus-COVID-19' soups[47] = 
run_bs4('http://sierracounty.ca.gov/582/Coronavirus-COVID-19') # + info = [] for x in soups[47].find_all('table'): for y in x.find_all('tr'): for z in y.find_all(text=True): if '\n' not in z: info.append(z) # - info paragraph = regex_paragraph_1(info) # + cases = re.search(r'# of Positive COVID-19 Cases [0-9]+', paragraph).group() cases = re.search(r' [0-9]+', cases).group() tests = re.search(r'# of Test Administered [0-9]+', paragraph).group() tests = re.search(r'[0-9]+', tests).group() tests, cases # - df.loc[47, name]= cases df.loc[47, string_]= cases df.loc[47, test_admin]= tests # ## Siskiyou print(df.loc[48, 'Website'] ) with open('../webpages/Public Health _ Siskiyou County California.mhtml', 'r') as htm: soups[48] = htm.read() soups[48] = bs4.BeautifulSoup(soups[48]) info = [] for x in soups[48].find_all('table'): for y in x.find_all('td'): for z in y.find_all(text=True): if z != '\n': info.append(z) paragraph = regex_paragraph_1(info[:12]) paragraph # + cases = re.findall(r'([0-9]+) TOTAL CONFIRMED POSITIVE', paragraph)[0] tests = re.findall(r'([0-9]+) TOTAL TEST', paragraph)[0] deaths = re.findall(r'([0-9]+) TOTAL DEATHS', paragraph)[0] df.loc[48, name]= cases df.loc[48, string_]= deaths df.loc[48, test_admin]= tests # - # ## Solano df.loc[49, 'Website'] = 'http://www.solanocounty.com/depts/ph/ncov.asp' print(df.loc[49, 'Website']) run_bs4('https://admin.solanocounty.com:4433/civicax/filebank/blobdload.aspx?BlobID=31871'); df.loc[49, name] = 61 df.loc[49, string_] = 1 # ## Sonoma print(df.loc[50, 'Website']) with open('../webpages/Novel Coronavirus - Sonoma County Emergency and Preparedness Information.mhtml', 'r') as htm: soups[50] = htm.read() soups[50] = bs4.BeautifulSoup(soups[50]) info=[] for x in soups[50].find_all('div'): for y in x.find_all('p'): for z in y.find_all(text=True): if len(z) >0: info.append(z) # + paragraph = regex_paragraph_1(info[1:11]) cases = re.findall(r'Total Cases ([0-9]+)', paragraph)[0] tests = re.findall(r'Tests ([0-9]+)', 
paragraph)[0] deaths = re.findall(r'Deaths ([0-9]+)', paragraph)[0] recovered_ = re.findall(r'Recovered ([0-9]+)', paragraph)[0] cases, deaths, tests, recovered_ # - df.loc[50, name]= cases df.loc[50, string_]= deaths df.loc[50, test_admin]= tests df.loc[50, recovered]= recovered_ # ## Stanislaus df.loc[51, 'Website'] = 'http://www.schsa.org/PublicHealth/pages/corona-virus/' # + # soups[51]= run_bs4('http://www.schsa.org/PublicHealth/pages/corona-virus/') # + paragraph = '' for z in soups[51].find_all('p'): for b in z.find_all('strong'): if 'Cases' in str(b): paragraph += (b.find(text=True))+ " " if 'Tests' in str(b): paragraph += (b.find(text=True))+ " " if 'Deaths' in str(b): paragraph += (b.find(text=True))+ " " for x in soups[51].find_all('div', class_='counter'): for y in x.find_all(text=True): print(y.strip()) if len(y) >1: paragraph += y.strip()+ " " no +=1 results = re.findall(r'Positive Cases Negative Tests Related Deaths ([0-9]+) ([0-9]+) ([0-9])+', paragraph) cases = results[0][0] tests = int(results[0][1]) + int(results[0][0]) deaths = results[0][2] # - df.loc[51, name]= cases df.loc[51, string_]= deaths df.loc[51, test_admin]= tests # ## Sutter print(df.loc[52, 'Website']) # + # soups[52] = run_bs4(df.loc[52, 'Website']) # + paragraph = ' ' for x in soups[52].find_all('table'): for y in x.find_all(text=True): y = y.replace('\n', "") if len(y) > 0: paragraph += y.replace('\n', "") + ' ' for x in soups[52].find_all('p'): if 'tests' in str(x): paragraph += x.find(text=True) + ' ' tests = re.findall(r'reported ([0-9]+) COVID-19 tests', paragraph) tests = tests[0] results = re.findall(r'Confirmed ([0-9]+) Deceased ([0-9]+)', paragraph) tests, results # - paragraph cases = results[0][0] deaths = results[0][1] df.loc[52, name] = cases df.loc[52, test_admin] = tests df.loc[59, name] = results[0][1] # ## Tehama print(df.loc[53, 'Website']) # ## Trinty print(df.loc[54, 'Website']) # ## Tulare print(df.loc[55, 'Website']) soups[55] = 
run_bs4('https://tchhsa.org/eng/index.cfm/public-health/covid-19-updates-novel-coronavirus/') with open('../webpages/English - COVID-19 (Novel Coronavirus).mhtml', 'r') as htm: soups[55] = htm.read() soups[55] = bs4.BeautifulSoup(soups[55]) paragraph = "" for x in soups[55].find_all('div', class_= '3D"col-6'): for y in x.find_all(text=True): if len(y) > 0: paragraph += y.strip().replace('=\n', '') + " " paragraph cases = re.findall(r'([0-9]+) - Total positive cases', paragraph)[0] deaths = re.findall(r'([0-9]+) - Deaths', paragraph)[0] recovered_ = re.findall(r'([0-9]+) - Recovered Cases', paragraph)[0] # + df.loc[55, name]= cases df.loc[55, string_] = deaths df.loc[55, recovered] = recovered_ # - # ## Tuolumne print(df.loc[56, 'Website']) paragraph = '' for x in soups[56].find_all('table'): # print(x) for y, z in zip(x.find_all('th'), x.find_all('td')): for a in z.find_all(text=True): paragraph += a.strip() +' ' for b in y.find_all(text=True): paragraph += b.strip() +' ' paragraph # + cases = re.findall(r'([0-9]+) TOTAL POSITIVE', paragraph)[0] tests = re.findall(r'([0-9]+) TOTAL TESTED', paragraph)[0] deaths = re.findall(r'([0-9]+) TOTAL DEATHS', paragraph)[0] cases, deaths, tests # - df.loc[56, name]= cases df.loc[56, string_]= deaths df.loc[56, test_admin]= tests # ## VENTURA df.loc[57, 'Website'] = 'https://www.vcemergency.com/' # + # soups[57] = run_bs4('https://www.vcemergency.com/') # - info = [] for x in soups[57].find_all('table'): for y in x.find_all('td'): info.append(y.find(text=True).replace(',', '')) paragraph = regex_paragraph_1(info) paragraph df.loc[57, name]= re.findall(r'TOTAL CASES ([0-9]+)', paragraph)[0] df.loc[57, string_]= re.findall(r'DEATHS ([0-9]+)', paragraph)[0] df.loc[57, recovered]= re.findall(r'Recovered Cases ([0-9]+)', paragraph)[0] df.loc[57, test_admin]= re.findall(r'Tested as of [A-Z][a-z]+ [0-9]+[a-z]+ ([0-9]+)', paragraph)[0] # ## YOLO print(df.loc[58, 'Website'] ) df.loc[58, name] = 24 df.loc[58, string_] = 1 # ## Yuba 
df.loc[59, 'Website'] = 'https://www.yuba.org/coronavirus/'
soups[59] = run_bs4('https://www.yuba.org/coronavirus/')

# +
# NOTE(review): this cell scrapes soups[52] (the Sutter County page), not
# soups[59]. Sutter and Yuba appear to share a bi-county health page that
# reports both counties -- the Sutter cell above also assigns df.loc[59, ...]
# from the same soup -- and results[1] (the second Confirmed/Deceased pair)
# is taken as Yuba's numbers. Confirm this is intentional, since the Yuba
# soup fetched above is never used here.
paragraph = ' '
for x in soups[52].find_all('table'):
    for y in x.find_all(text=True):
        y = y.replace('\n', "")
        if len(y) > 0:
            paragraph += y.replace('\n', "") + ' '
for x in soups[52].find_all('p'):
    if 'tests' in str(x):
        paragraph += x.find(text=True) + ' '

tests = re.findall(r'reported ([0-9]+) COVID-19 tests', paragraph)
tests = tests[0]
results = re.findall(r'Confirmed ([0-9]+) Deceased ([0-9]+)', paragraph)
cases = results[1][0]
deaths = results[1][1]
# -

df.loc[59, name] = cases
df.loc[59, string_] = deaths
df.loc[59, test_admin] = tests

# GOOD LINK TO DOUBLE CHECK DATA COLLECTED
'https://www.latimes.com/projects/california-coronavirus-cases-tracking-outbreak/'

# Persist today's scrape. `today` is a datetime.date, and str + date raises
# TypeError, so it must be converted explicitly before concatenation.
df.to_csv("../output_files/CA_COUNTY_COVID" + str(today) + '.csv', index=False)

# ! ls ../data

# Cross-check our counts against the LA Times tally for the same day.
df_la = pd.read_csv('../data/la_times_Ap_2.csv', delimiter='\t')
la_count = df_la.merge(df[['County', name, string_]], how='right', right_on='County', left_on='County')
la_count

df[name]

df[name].fillna(0).astype(int).sum()
notebooks/California Counties.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # cyBERT: a flexible log parser based on the BERT language model # # ## Table of Contents # * Introduction # * Generating Labeled Logs # * Subword Tokenization # * Data Loading # * Fine-tuning pretrained BERT # * Model Evaluation # * Parsing with cyBERT # # ## Introduction # # One of the most arduous tasks of any security operation (and equally as time consuming for a data scientist) is ETL and parsing. This notebook illustrates how to train a BERT language model using a toy dataset of just 1000 previously parsed apache server logs as a labeled data. We will fine-tune a pretrained BERT model from [HuggingFace](https://github.com/huggingface) with a classification layer for Named Entity Recognition. from os import path import s3fs import torch import torch.nn as nn import torch.nn.functional as F from torch.optim import Adam from torch.utils.data import TensorDataset, DataLoader from torch.utils.data.dataset import random_split from torch.utils.dlpack import from_dlpack from seqeval.metrics import classification_report,accuracy_score,f1_score from transformers import BertForTokenClassification from tqdm import tqdm,trange from collections import defaultdict import pandas as pd import numpy as np import cupy import cudf # ## Generating Labels For Our Training Dataset # # To train our model we begin with a dataframe containing parsed logs and additional `raw` column containing the whole raw log as a string. We will use the column names as our labels. 
# +
# Fetch the sample log data from S3 if it is not already on disk.
APACHE_SAMPLE_CSV = "apache_sample_1k.csv"
S3_BASE_PATH = "rapidsai-data/cyber/clx"

if not path.exists(APACHE_SAMPLE_CSV):
    fs = s3fs.S3FileSystem(anon=True)
    fs.get(S3_BASE_PATH + "/" + APACHE_SAMPLE_CSV, APACHE_SAMPLE_CSV)
# -

logs_df = cudf.read_csv(APACHE_SAMPLE_CSV)

# sample parsed log
logs_df.sample(1)

# sample raw log
print(logs_df.raw.loc[10])


def labeler(index_no, cols):
    """Label each word of raw log ``index_no`` with its parsed-column name.

    Words that do not belong to any parsed column keep the 'O' (other) tag;
    the first word of a matched column value is tagged 'B-<col>' and the
    remaining words of that value 'I-<col>' (BIO tagging).
    Reads the module-level ``logs_df`` dataframe.
    """
    tokens = logs_df.raw_preprocess[index_no].split()
    # Words in raw but not in the parsed log stay labeled as 'other'.
    tags = ['O'] * len(tokens)
    # For each parsed column, find where its sequence of words occurs in the raw log.
    for col in cols:
        value = str(logs_df[col][index_no])
        if value in {'', '-', 'None', 'NaN'}:
            continue
        value_tokens = value.split()
        width = len(value_tokens)
        # Tag only the first occurrence whose span is still fully untagged.
        for start in (i for i, tok in enumerate(tokens) if tok == value_tokens[0]):
            span_untagged = tags[start:start + width] == ['O'] * width
            if span_untagged and tokens[start:start + width] == value_tokens:
                tags[start] = 'B-' + col
                tags[start + 1:start + width] = ['I-' + col] * (width - 1)
                break
    return tags


# +
# Strip double quotes so tokens in the raw log line up with parsed values.
logs_df['raw_preprocess'] = logs_df.raw.str.replace('"', '')

# column names to use as labels
cols = logs_df.columns.values.tolist()

# do not use the raw columns themselves as labels
cols.remove('raw')
cols.remove('raw_preprocess')

# Using a plain Python loop for the labeling function until string UDF
# capability lands in RAPIDS -- this is currently slow.
labels = []
for indx in range(len(logs_df)):
    labels.append(labeler(indx, cols))
# -

print(labels[10])

# ## Subword Labeling
# We are using the `bert-base-cased` tokenizer vocabulary. This tokenizer
# splits our whitespace separated words further into in-dictionary
# sub-word pieces.
# The model eventually uses the label from the first piece of a word as the
# sole label for the word, so we do not care about the model's ability to
# predict individual labels for the sub-word pieces. For training, the label
# used for these pieces is `X`. To learn more see the
# [BERT paper](https://arxiv.org/abs/1810.04805)


def subword_labeler(log_list, label_list):
    """Label every sub-word piece in each tokenized log with an 'X'.

    Parameters
    ----------
    log_list : list of str
        Raw (preprocessed) log lines.
    label_list : list of list of str
        Per-word BIO labels for each log line.

    Returns
    -------
    list of list of str
        Labels expanded so each word keeps its own tag and every extra
        word piece after the first gets the placeholder tag 'X'.
    """
    subword_labels = []
    for log, tags in zip(log_list, label_list):
        temp_tags = []
        words = cudf.Series(log.split())
        words_size = len(words)
        # Third output of subword_tokenize, reshaped per word; column 2 is
        # presumably the per-word piece count -- TODO confirm against the
        # cudf subword_tokenize docs for this cudf version.
        subword_counts = words.str.subword_tokenize("resources/bert-base-cased-hash.txt",
                                                    10000, 10000,
                                                    max_rows_tensor=words_size,
                                                    do_lower=False,
                                                    do_truncate=False)[2].reshape(words_size, 3)[:, 2]
        for i, tag in enumerate(tags):
            temp_tags.append(tag)
            # One 'X' per extra word piece. A list literal is clearer (and
            # safer) than extending with the string 'X' * n, which relied on
            # string iteration producing single characters.
            temp_tags.extend(['X'] * subword_counts[i].item())
        subword_labels.append(temp_tags)
    return subword_labels


subword_labels = subword_labeler(logs_df.raw_preprocess.to_arrow().to_pylist(), labels)

print(subword_labels[10])

# We create a set list of all labels from our dataset, add `X` for wordpiece
# tokens we will not have tags for and `[PAD]` for logs shorter than the
# length of the model's embedding.

# +
# set of labels
label_values = list(set(x for l in labels for x in l))
label_values.insert(0, '[PAD]')

# Map each tag name to an integer id; '[PAD]' gets id 0.
label2id = {t: i for i, t in enumerate(label_values)}
# -100 is PyTorch's default ignore_index for the token-classification loss,
# so 'X' word pieces never contribute to training.
label2id.update({'X': -100})
# -

print(label2id)


def pad(l, content, width):
    """Right-pad list ``l`` in place with ``content`` up to ``width`` items; returns ``l``."""
    l.extend([content] * (width - len(l)))
    return l


# Truncate/pad every label sequence to the model length (256) and map to ids.
padded_labels = [pad(x[:256], '[PAD]', 256) for x in subword_labels]
int_labels = [[label2id.get(l) for l in lab] for lab in padded_labels]
label_tensor = torch.tensor(int_labels).to('cuda')

# # Training and Validation Datasets
# For training and validation our datasets need three features.
# (1) `input_ids` subword tokens as integers padded to the specific length of the model
# (2) `attention_mask` a binary mask that allows the model to ignore padding
# (3) `labels` corresponding labels for tokens as integers.


def bert_cased_tokenizer(strings):
    """Convert a cudf.Series of strings to token-id and attention-mask torch tensors.

    Tokenizes with the cased BERT vocabulary, padding/truncating each row to
    256 tokens, and moves the result GPU-to-GPU via dlpack (no host copy).
    """
    num_strings = len(strings)
    # NOTE: the original also computed strings.str.byte_count().sum() here,
    # but the value was never used, so the dead statement was removed.
    token_ids, mask = strings.str.subword_tokenize("resources/bert-base-cased-hash.txt",
                                                   256, 256,
                                                   max_rows_tensor=num_strings,
                                                   do_lower=False,
                                                   do_truncate=True)[:2]
    # convert from cupy to torch tensor using dlpack
    input_ids = from_dlpack(token_ids.reshape(num_strings, 256).astype(cupy.float).toDlpack())
    attention_mask = from_dlpack(mask.reshape(num_strings, 256).astype(cupy.float).toDlpack())
    return input_ids.type(torch.long), attention_mask.type(torch.long)


input_ids, attention_masks = bert_cased_tokenizer(logs_df.raw_preprocess)

# create dataset
dataset = TensorDataset(input_ids, attention_masks, label_tensor)

# use pytorch random_split to create training and validation data subsets
dataset_size = len(input_ids)
training_dataset, validation_dataset = random_split(
    dataset, (int(dataset_size * .8), int(dataset_size * .2)))

# create dataloaders
train_dataloader = DataLoader(dataset=training_dataset, shuffle=True, batch_size=32)
val_dataloader = DataLoader(dataset=validation_dataset, shuffle=False, batch_size=1)

# # Fine-tuning pretrained BERT
# Download pretrained model from HuggingFace and move to GPU

# +
model = BertForTokenClassification.from_pretrained("bert-base-cased",
                                                   num_labels=len(label2id))

# model to gpu
model.cuda()

# use multi-gpu if available
model = nn.DataParallel(model)
# -

# Define optimizer and learning rate for training

FULL_FINETUNING = True
if FULL_FINETUNING:
    # fine tune all layer parameters; no weight decay on bias/norm params
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)],
         'weight_decay_rate': 0.01},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)],
         'weight_decay_rate': 0.0},
    ]
else:
    # only fine tune classifier parameters
    param_optimizer = list(model.classifier.named_parameters())
    optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]

optimizer = Adam(optimizer_grouped_parameters, lr=3e-5)

# +
# %%time
# using 2 epochs to avoid overfitting
epochs = 2
max_grad_norm = 1.0

for _ in trange(epochs, desc="Epoch"):
    # TRAIN loop
    model.train()
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0
    for step, batch in enumerate(train_dataloader):
        b_input_ids, b_input_mask, b_labels = batch
        # forward pass
        loss = model(b_input_ids, token_type_ids=None,
                     attention_mask=b_input_mask, labels=b_labels)[0]
        # backward pass (.sum() reduces the per-replica losses from DataParallel)
        loss.sum().backward()
        # track train loss
        tr_loss += loss.sum().item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1
        # gradient clipping
        torch.nn.utils.clip_grad_norm_(parameters=model.parameters(),
                                       max_norm=max_grad_norm)
        # update parameters
        optimizer.step()
        model.zero_grad()
    # print train loss per epoch
    print("Train loss: {}".format(tr_loss / nb_tr_steps))
# -

# ## Model Evaluation

# no dropout or batch norm during eval
model.eval();

# +
# Mapping id back to label name
id2label = {i: t for t, i in label2id.items()}

# NOTE: the original initialized eval_loss/eval_accuracy and
# nb_eval_steps/nb_eval_examples here but never used them; removed as dead code.
y_true = []
y_pred = []

for step, batch in enumerate(val_dataloader):
    input_ids, input_mask, label_ids = batch
    with torch.no_grad():
        outputs = model(input_ids, token_type_ids=None, attention_mask=input_mask,)
        # In eval mode the first element of outputs is the logits
        logits = outputs[0]

    # Get NER predicted result
    logits = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
    logits = logits.detach().cpu().numpy()

    # Get NER true result
    label_ids = label_ids.detach().cpu().numpy()
    # Only compare against the ground truth; mask=0 positions are skipped
    input_mask = input_mask.detach().cpu().numpy()

    # Compare the valid (non-padded) predictions
    for i, mask in enumerate(input_mask):
        # ground truth
        temp_1 = []
        # prediction
        temp_2 = []
        for j, m in enumerate(mask):
            # Mask=0 is PAD, do not compare
            if m:
                # Exclude the X and [PAD] labels
                if id2label[label_ids[i][j]] != "X" and id2label[label_ids[i][j]] != "[PAD]":
                    temp_1.append(id2label[label_ids[i][j]])
                    temp_2.append(id2label[logits[i][j]])
            else:
                break
        y_true.append(temp_1)
        y_pred.append(temp_2)

print("f1 score: %f" % (f1_score(y_true, y_pred)))
print("Accuracy score: %f" % (accuracy_score(y_true, y_pred)))

# Get precision, recall, F1 result report
print(classification_report(y_true, y_pred, digits=3))
# -

# ## Saving model files for future parsing with cyBERT

model.module.config.id2label = id2label
model.module.config.label2id = label2id

#torch.save(model.state_dict(), 'path/to/save.pth')
notebooks/cybert/cybert_example_training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Key points of this example
# * Understand how to reset the TensorFlow graph
# * Learn how to train models with different hyper-parameters in a loop
# * Learn how to plot the results in a loop

# +
import os
import keras

# This example does not need a GPU; hide all GPU devices.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# -

# Load the CIFAR-10 train / test datasets bundled with Keras.
train, test = keras.datasets.cifar10.load_data()


# +
## Data preprocessing
def preproc_x(x, flatten=True):
    """Scale pixel values to [0, 1] and optionally flatten each image to a vector."""
    x = x / 255.
    if flatten:
        x = x.reshape((len(x), -1))
    return x


def preproc_y(y, num_classes=10):
    """One-hot encode integer class labels (no-op if already encoded)."""
    if y.shape[-1] == 1:
        y = keras.utils.to_categorical(y, num_classes)
    return y
# -

# +
x_train, y_train = train
x_test, y_test = test

# Preprocess X: normalize
x_train = preproc_x(x_train)
x_test = preproc_x(x_test)

# Preprocess Y: one-hot encode
y_train = preproc_y(y_train)
y_test = preproc_y(y_test)
# -


def build_mlp(input_shape, output_units=10, num_neurons=(512, 256, 128)):
    """Build a fully-connected softmax classifier.

    ``num_neurons`` gives the width of each hidden ReLU layer. It defaults
    to a tuple (not a list) to avoid the shared-mutable-default-argument
    pitfall; any sequence of ints is accepted.
    """
    input_layer = keras.layers.Input(input_shape)
    # Seed the chain with the input layer so the loop needs no special case
    # for the first hidden layer.
    x = input_layer
    for i, n_units in enumerate(num_neurons):
        x = keras.layers.Dense(units=n_units,
                               activation="relu",
                               name="hidden_layer" + str(i + 1))(x)
    out = keras.layers.Dense(units=output_units, activation="softmax", name="output")(x)
    return keras.models.Model(inputs=[input_layer], outputs=[out])


## Hyper-parameters
LEARNING_RATE = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
EPOCHS = 50
BATCH_SIZE = 256
MOMENTUM = 0.95

results = {}
"""
Train one model per learning rate in a loop.
"""
for lr in LEARNING_RATE:
    keras.backend.clear_session()  # drop the previous session's graph
    print("Experiment with LR = %.6f" % (lr))
    model = build_mlp(input_shape=x_train.shape[1:])
    model.summary()
    optimizer = keras.optimizers.SGD(lr=lr, nesterov=True, momentum=MOMENTUM)
    model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer)
    model.fit(x_train, y_train,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              validation_data=(x_test, y_test),
              shuffle=True)

    # Collect the per-epoch training curves for this learning rate
    train_loss = model.history.history["loss"]
    valid_loss = model.history.history["val_loss"]
    train_acc = model.history.history["acc"]
    valid_acc = model.history.history["val_acc"]

    exp_name_tag = "exp-lr-%s" % str(lr)
    results[exp_name_tag] = {'train-loss': train_loss,
                             'valid-loss': valid_loss,
                             'train-acc': train_acc,
                             'valid-acc': valid_acc}

# +
import matplotlib.pyplot as plt
# %matplotlib inline

color_bar = ["r", "g", "b", "y", "m", "k"]

plt.figure(figsize=(8, 6))
for i, cond in enumerate(results.keys()):
    plt.plot(range(len(results[cond]['train-loss'])), results[cond]['train-loss'],
             '-', label=cond, color=color_bar[i])
    plt.plot(range(len(results[cond]['valid-loss'])), results[cond]['valid-loss'],
             '--', label=cond, color=color_bar[i])
plt.title("Loss")
plt.legend()
plt.show()

plt.figure(figsize=(8, 6))
for i, cond in enumerate(results.keys()):
    plt.plot(range(len(results[cond]['train-acc'])), results[cond]['train-acc'],
             '-', label=cond, color=color_bar[i])
    plt.plot(range(len(results[cond]['valid-acc'])), results[cond]['valid-acc'],
             '--', label=cond, color=color_bar[i])
plt.title("Accuracy")
plt.legend()
plt.show()
# -

# ## Work
# 1. Compare the SGD optimizer with different momentum values, with and
#    without Nesterov momentum.
homeworks/D079/Day079_LearningRateEffect.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # QCoDeS Example with R&S HMC 8043 Power Supply

import qcodes as qc
import qcodes_contrib_drivers.drivers.RohdeSchwarz.HMC8043 as hmc8043

# Create the instrument (here an HMC8043 reachable over ethernet at 10.0.1.1)
ps = hmc8043.RohdeSchwarzHMC8043('ps-1', 'TCPIP0::10.0.1.1::inst0::INSTR')

# Voltage and/or current can be set on any channel
ps.ch1.set_voltage(1)
ps.ch1.set_current(0.2)
ps.ch2.set_voltage(10)

# The channel(s) must be enabled, as well as the master on/off
ps.ch1.state('ON')
ps.state('ON')

# Voltage, current and power can then be measured back
print('V1=', ps.ch1.voltage())
print('I1=', ps.ch1.current())
print('P1=', ps.ch1.power())

# And finally everything is switched off again
ps.ch1.state('OFF')
ps.state('OFF')
docs/examples/Rohde_Schwarx_HMC8043.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: VPython
#     language: python
#     name: vpython
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # Introduction: Rosenbrock
#
# Welcome to the first AeroSandbox tutorial!
#
# AeroSandbox is a tool for solving design optimization problems for large,
# multidisciplinary engineered systems. The most important part of AeroSandbox
# is its `Opti()` stack, which allows you formulate and solve an optimization
# problem in natural mathematical syntax.
#
# The `Opti` class extends the `Opti` class of CasADi (the library AeroSandbox
# uses for automatic differentiation), adding many new features tailored
# specifically for engineering design. We'll explore more of these advanced
# features later!
#
# For now, let's solve the "Hello World!" of optimization problems:
# [the Rosenbrock problem](https://en.wikipedia.org/wiki/Rosenbrock_function).
# Mathematically, it is stated as:
#
# $$ \underset{x, y}{\text{minimize }}(1-x)^2 + 100(y-x^2)^2 $$
#
# In code:

# + pycharm={"name": "#%%\n"}
def rosenbrock(x, y):
    """Evaluate the Rosenbrock function at (x, y); its global minimum is 0 at (1, 1)."""
    return (1 - x) ** 2 + 100 * (y - x * x) ** 2
# -

# It's a good test case, because the minimum lies at the bottom of a shallow,
# curving valley:

# + pycharm={"name": "#%%\n"}
### Don't worry about this code block; this is just here to visualize the Rosenbrock function.
from matplotlib import pyplot as plt, colors
import numpy as np

# Surface plot of the Rosenbrock valley on a log color scale.
fig, ax = plt.subplots(figsize=(4, 4), dpi=200, subplot_kw={"projection": "3d"})
X, Y = np.meshgrid(np.linspace(-2, 2, 150), np.linspace(-1, 3, 150))
ax.plot_surface(X, Y, rosenbrock(X, Y),
                cmap=plt.cm.rainbow,
                linewidth=0,
                norm=colors.LogNorm(vmin=0.1, vmax=2500),
                rstride=1, cstride=1)
ax.view_init(35, 255)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.title("The Rosenbrock Function")
plt.tight_layout()
plt.show()
# -

# As it turns out, a good number of engineering design optimization problems
# are pretty mathematically similar to the Rosenbrock problem (of course,
# there are plenty of exceptions). Specifically, many engineering design
# problems are:
#
# * Continuous: all design variables are continuous inputs (as opposed to
#   being discrete).
# * [Nonlinear](https://en.wikipedia.org/wiki/Nonlinear_system): the
#   sensitivity of performance with respect to inputs changes throughout the
#   design space.
# * Nonconvex: doesn't satisfy the
#   [convex inequality](https://en.wikipedia.org/wiki/Convex_function#Definition).
#   Speaking loosely, convexity means the objective function is always
#   "curving up" and that the boundary of the feasible design space doesn't
#   have any concave regions.
# * Poorly-scaled, i.e. Hessian has a large condition number (e.g. for
#   Rosenbrock, $\text{cond}(H)\approx 2500$ at the optimum)
# * [Constrained](https://en.wikipedia.org/wiki/Constrained_optimization):
#   most engineering problems are constrained. The Rosenbrock problem is
#   unconstrained out-of-the-box, but we'll add a constraint in the next
#   tutorial.
#
# For now, let's optimize and find the minimum of the Rosenbrock function!
# First, we set up the problem:

# + pycharm={"name": "#%%\n"}
import aerosandbox as asb  # This is the standard AeroSandbox import convention

opti = asb.Opti()  # Initialize a new optimization environment; convention is to name it `opti`.
### Define your optimization variables
x = opti.variable(init_guess=0)  # Initial guesses are required.
y = opti.variable(init_guess=0)

### Define your objective
f = (1 - x) ** 2 + 100 * (y - x ** 2) ** 2  # Nonlinear expressions of variables are allowed...
opti.minimize(f)                            # ...and can then be minimized.
# -

# Then, we solve the problem. The solver will spit out lots of helpful info
# as it solves; we can ignore this for now (and later we'll learn how to
# suppress it if desired).

# + pycharm={"name": "#%%\n"}
### Optimize
sol = opti.solve()  # Conventional syntax to solve the optimization problem.
# -

# Finally, we can look at the optimal values of our optimization variables:

# + pycharm={"name": "#%%\n"}
### Extract values at the optimum
x_opt = sol.value(x)  # Evaluates x at the point where the solver converged.
y_opt = sol.value(y)

### Print values
print(f"x = {x_opt}")
print(f"y = {y_opt}")

# + [markdown] pycharm={"name": "#%% md\n"}
# The solution is found to be $(1, 1)$, which can be proven to be the optimal
# value via hand calculations.
tutorial/01 - Optimization and Math/01 - 2D Rosenbrock.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Streak Interpretations for Image Prediction (Transfer Learning) # # (Adapted from LIME Keras tutorial # https://github.com/marcotcr/lime/blob/master/doc/notebooks/Tutorial%20-%20Image%20Classification%20Keras.ipynb) import os import matplotlib.pyplot as plt import keras from keras.applications import inception_v3 as inc_net from keras.preprocessing import image from keras.applications.imagenet_utils import decode_predictions from skimage.io import imread from lime.explanation import id_generator import numpy as np from time import time from skimage import io import load_networks import lime import lime_image_streak from skimage.segmentation import mark_boundaries from tf_predict import * # from tensorflow.examples import label_image # import label_image print('using keras:', keras.__version__) # ## Inception Transfer Learning # # Retrain the last layer of the InceptionV3 pretrained model, and interpret the predictions of new, preprocessed images #keras preprocessing function def transform_img_fn(path_list): out = [] for img_path in path_list: img = image.load_img(img_path, target_size=(299, 299)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = inc_net.preprocess_input(x) out.append(x) return np.vstack(out) # + #tensorflow preprocessing functions def import_tf_imgs(path_list): out = [] for img_path in path_list: # tmp = label_image.read_tensor_from_image_file(img_path, input_height=299, input_width=299) tmp = io.imread(img_path) out.append(tmp) # return np.vstack(out) return out def import_tf_img(img_path): # return label_image.read_tensor_from_image_file(img_path, input_height=299, input_width=299) return np.expand_dims(io.imread(img_path),axis=0) # + #setup transfer learning code from keras import applications from 
keras.preprocessing.image import ImageDataGenerator from keras import optimizers from keras.models import Sequential, Model from keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D, Conv2D, MaxPooling2D from keras import backend as k from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping from keras.utils.np_utils import to_categorical # from tf.examples.image_retraining import retrain import retrain from shutil import copy2 # #preprocessing, separate full dataset into training, validation, and testing # data_dir = '~/flower_photos' # dest_dir = '~/flower_photos_retrain' # image_lists = retrain.create_image_lists(data_dir,testing_percentage=10,validation_percentage=10) # print sum([len(image_lists[label]['training']) for label in image_lists.keys()]) # print sum([len(image_lists[label]['validation']) for label in image_lists.keys()]) # print image_lists['tulips'].keys() # for label in image_lists.keys(): # for set_name in ['training','validation','testing']: # for file_name in image_lists[label][set_name]: # copy2(os.path.join(data_dir,label,file_name),os.path.join(dest_dir,set_name,label,file_name)) img_width, img_height = 299, 299 train_data_dir = "~/flower_photos_retrain/training" validation_data_dir = "~/flower_photos_retrain/validation" nb_train_samples = 3056 nb_validation_samples = 451 batch_size = 16 epochs = 50 bottleneck_size = 2048 model = inc_net.InceptionV3(weights = "imagenet", include_top=False, input_shape = (img_width, img_height, 3), pooling=None) # + #save 2nd to last layer of inceptionv3 as numpy features #then train a fully connected layer and save #then load this fc layer and append to inceptionV3 datagen = ImageDataGenerator(rescale=1. 
/ 255) generator = datagen.flow_from_directory( train_data_dir, target_size=(img_width, img_height), batch_size=1, class_mode=None, # this means our generator will only yield batches of data, no labels shuffle=False) # our data will be in order, so all first 1000 images will be cats, then 1000 dogs bottleneck_features_train = model.predict_generator(generator, nb_train_samples, verbose=1) # save the output as a Numpy array np.save(open('bottleneck_features_train.npy', 'w'), bottleneck_features_train) generator2 = datagen.flow_from_directory( validation_data_dir, target_size=(img_width, img_height), batch_size=1, class_mode=None, shuffle=False) bottleneck_features_validation = model.predict_generator(generator2, nb_validation_samples, verbose=1) # save the output as a Numpy array np.save(open('bottleneck_features_validation.npy', 'w'), bottleneck_features_validation) # + #retrain from keras.utils.np_utils import to_categorical train_data = np.load(open('bottleneck_features_train.npy')) validation_data = np.load(open('bottleneck_features_validation.npy')) classes = ['daisy','dandelion','roses','sunflowers','tulips'] train_sizes = [len([name for name in os.listdir(os.path.join(train_data_dir,c)) ]) for c in classes] validation_sizes = [len([name for name in os.listdir(os.path.join(validation_data_dir,c)) ]) for c in classes] # print train_data.shape,validation_data.shape train_labels = [] validation_labels = [] for l,label in enumerate(classes): train_labels.extend([l]*train_sizes[l]) validation_labels.extend([l]*validation_sizes[l]) train_labels = to_categorical(train_labels) validation_labels = to_categorical(validation_labels) #retrain the top layer top_model = Sequential() top_model.add(Flatten(input_shape=model.output_shape[1:])) top_model.add(Dense(len(classes), activation='sigmoid')) top_model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy']) top_model.fit(train_data, train_labels, epochs=50, 
batch_size=batch_size, validation_data=(validation_data, validation_labels)) #save the weights of the top layer, 2.6MB top_model.save_weights('retrained/bottleneck_fc_model.h5') # - # ## Look at predictions for a few images images_list = [os.path.join('daisy','705422469_ffa28c566d.jpg'), 'flowers_etsy.jpg', #contains both sunflowers and daisies, good for 2 top_labels os.path.join('daisy','3445110406_0c1616d2e3_n.jpg'), os.path.join('daisy','176375506_201859bb92_m.jpg'), os.path.join('sunflowers','2979297519_17a08b37f6_m.jpg'), os.path.join('daisy','301964511_fab84ea1c1.jpg'), os.path.join('sunflowers','19504937128_a4ae90fcbd_m.jpg') ] classes = ['daisy','dandelion','roses','sunflowers','tulips'] # + #import keras transfer learning model inet_model = load_networks.load_keras_inception_transfer() images = transform_img_fn(images_list) #print predictions preds = inet_model.predict(images) # print preds for ii,pr in enumerate(preds): print images_list[ii] sortedClasses = np.argsort(preds[ii])[-5:][::-1] #the indices that lime uses in explanation for s in sortedClasses: # print (classes[s],preds[ii][s]) print '(%s, %.5f)' % (classes[s],preds[ii][s]) predict = inet_model.predict # + # #keras (original) inception model # inet_model = load_networks.load_keras_inception_imagenet() # images = transform_img_fn(images_list) # preds = inet_model.predict(images) # for ii,pr in enumerate(preds): # print images_list[ii] # for x in decode_predictions(preds)[ii]: # print '(%s, %.5f)' % (x[1],x[2]) # predict = inet_model.predict # + # #tensorflow transfer learning # g = load_networks.load_tf_transfer() # images = import_tf_imgs(images_list) # preds = tf_predict(images,0,5,g) # print preds # for ii,pr in enumerate(preds): # print images_list[ii] # sortedClasses = np.argsort(preds[ii])[-5:][::-1] #the indices that lime uses in explanation # for s in sortedClasses: # # print (classes[s],preds[ii][s]) # print '(%s, %.5f)' % (classes[s],preds[ii][s]) # predict = lambda x: 
tf_predict(x,0,5,g) # - #explanation plotting function def explanation_plotting(explanation,label_idx,fs_string): label = explanation.top_labels[label_idx] temp, mask = explanation.get_image_and_mask(label, positive_only=True, num_features=5, hide_rest=True) plt.imshow(mark_boundaries(temp / 2 + 0.5, mask)) print "%s, %s" % (classes[label],fs_string) plt.show() #then plot image with positive segments marked in green and negative segments marked in red temp, mask = explanation.get_image_and_mask(label, positive_only=False, num_features=5, hide_rest=False) plt.imshow(mark_boundaries(temp / 2 + 0.5, mask)) plt.show() #plot original image, explantation agains black background with segments fig = plt.figure() ax1 = fig.add_subplot(121) ax1.imshow(explanation.image/2 + 0.5) ax1.set_xticklabels("") ax1.set_yticklabels("") ax2 = fig.add_subplot(122) finalMask = explanation.segments finalTemp = lime_image_streak.SegmentedImage.add_image_segments(-1*np.ones_like(explanation.image), explanation.image,explanation.segments,[x[0] for x in explanation.local_exp[label]][:5]) ax2.imshow(mark_boundaries(finalTemp/2 + 0.5,finalMask)) ax2.set_xticklabels("") ax2.set_yticklabels("") plt.show() # **Get an explanation** #new explainer class that can support streak feature_selection query_image = images[0] #Method described in Section 6.2 of the paper explainer = lime_image_streak.LimeImageExplainer(feature_selection='greedy_likelihood') # %%time explanation = explainer.explain_instance(query_image, classifier_fn=predict, # top_labels=2, num_features=5, top_labels=1, num_features=5, qs_kernel_size=6, hide_color=0, num_samples=1000) # **Explanations for the top class** # # (for explanation of the 2nd top class, change top_labels to 2) explanation_plotting(explanation,0,'greedy_likelihood') # explanation_plotting(explanation,1,'greedy_likelihood') #now try another selection method #LIME baseline method explainerFS = lime_image_streak.LimeImageExplainer(feature_selection='forward_selection') 
# %%time explanationFS = explainerFS.explain_instance(query_image, classifier_fn=predict, top_labels=2, num_features=5, qs_kernel_size=6, hide_color=0, num_samples=1000) explanation_plotting(explanationFS,0,'forward_selection') explanation_plotting(explanationFS,1,'forward_selection') #now try another selection method #Method described in Section A.8 of the paper explainerSG = lime_image_streak.LimeImageExplainer(feature_selection='streaming_greedy') # %%time explanationSG = explainerSG.explain_instance(query_image, classifier_fn=predict, top_labels=2, num_features=5, qs_kernel_size=6, hide_color=0, num_samples=1000) explanation_plotting(explanationSG,0,'streaming_greedy') explanation_plotting(explanationSG,1,'streaming_greedy')
StreakImageRetraining.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
# -

data = pd.read_csv('gene_high_throughput_sequencing.csv', engine='python')
data


def _welch_pvalues(frame, group_a, group_b):
    """Welch's t-test p-value for every gene column between two diagnosis groups.

    The first two columns of ``frame`` are assumed to be non-gene metadata
    (patient id and Diagnosis), matching the i + 2 offset of the original.
    """
    a = frame[frame['Diagnosis'] == group_a]
    b = frame[frame['Diagnosis'] == group_b]
    pvals = []
    for i in range(frame.shape[1] - 2):
        pvals.append(stats.ttest_ind(a.iloc[:, i + 2], b.iloc[:, i + 2],
                                     equal_var=False)[1])
    return pvals


def _write_answer(path, value):
    """Write a single grading answer; the with-block guarantees the file is closed."""
    with open(path, 'w') as fh:
        fh.write('{}'.format(value))


def _practical_significance_count(frame, rejected, group_a, group_b, threshold=1.5):
    """Count rejected genes whose mean fold change between groups exceeds ``threshold``.

    Mirrors the original check: ratio of group means in either direction
    must exceed the threshold.
    """
    a = frame[frame['Diagnosis'] == group_a]
    b = frame[frame['Diagnosis'] == group_b]
    count = 0
    for i in range(len(rejected)):
        if rejected[i]:
            ratio = np.mean(a.iloc[:, i + 2]) / np.mean(b.iloc[:, i + 2])
            if abs(ratio) > threshold or abs(1 / ratio) > threshold:
                count += 1
    return count


# +
# Uncorrected significant-gene counts at alpha = 0.05 for both comparisons.
pvals1 = _welch_pvalues(data, 'normal', 'early neoplasia')
count1 = sum(1 for p in pvals1 if p < 0.05)

pvals2 = _welch_pvalues(data, 'early neoplasia', 'cancer')
count2 = sum(1 for p in pvals2 if p < 0.05)
# -

# +
_write_answer('answer_bio_1.txt', count1)
_write_answer('answer_bio_2.txt', count2)
# -

# Benjamini-Hochberg FDR correction; alpha is halved to 0.025 because two
# comparisons are performed.
b1, _, _, _ = multipletests(pvals1, alpha=0.025, method='fdr_bh')
b2, _, _, _ = multipletests(pvals2, alpha=0.025, method='fdr_bh')

# +
count2 = _practical_significance_count(data, b2, 'early neoplasia', 'cancer')
# -

# +
count1 = _practical_significance_count(data, b1, 'normal', 'early neoplasia')
# -

# +
_write_answer('answer_bio_5.txt', count1)
_write_answer('answer_bio_6.txt', count2)
# -
Yandex data science/4/Week 4/Bioinformatics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="-oWn2GD_cr8T" from google.colab import drive # + colab={"base_uri": "https://localhost:8080/"} id="BVi7c4pLdCZs" outputId="df6f712f-dbf4-4c45-9834-efeab3504f16" drive.mount('/content/drive') # + id="lPmxlvA6dFp-" import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib.style # + id="DKwBvSzrdR1A" pd.set_option('display.max_columns',None) pd.set_option('display.max_rows',None) # + colab={"base_uri": "https://localhost:8080/", "height": 236} id="1mIS8GZTdVvm" outputId="caaa1d03-a232-4c0e-dcb7-5fce35ff85dd" df=pd.read_csv('/content/drive/MyDrive/27 feb - breast cancer/WineQT.csv') df.head(4) # + colab={"base_uri": "https://localhost:8080/"} id="AqPEi-qCdsUK" outputId="6c950f65-abc5-46e6-a9a1-26ce9d384912" df.shape # + colab={"base_uri": "https://localhost:8080/"} id="K8RwMYUjdvbY" outputId="de72d86f-212c-473d-fdf7-69b93bd04873" df.info() # + colab={"base_uri": "https://localhost:8080/"} id="Eq3-lJlceD7k" outputId="88014886-e618-4f4c-c4a7-af8c4ca0618f" df.isnull().sum().sum() # + colab={"base_uri": "https://localhost:8080/", "height": 352} id="9jCyFPWpL7IB" outputId="4b0d3761-1b36-423f-dbb0-ee5264fc9f83" sns.countplot(df['quality']) # + colab={"base_uri": "https://localhost:8080/"} id="aYJpjtC34lMv" outputId="e4cb785f-adc4-4b83-e56e-816a7fc06453" df['Id'].nunique() # + id="tZZWXdg74sQw" #hence id is not of any us, so drop it df.drop('Id',axis=1,inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 205} id="O7a_ZKHD4-Dd" outputId="91abac23-f142-4cad-e540-7618411ea8d8" df.head(3) # + colab={"base_uri": "https://localhost:8080/"} id="nA_CxB8G5bWH" outputId="5a7ffc82-694e-488a-8d5e-52578b374ab5" df.shape # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="jw9bCmZ_6yAi" 
outputId="27d39551-7e85-481d-932d-4ceb49d5d674"
figure = plt.figure(figsize=(12, 16))
sns.heatmap(df.corr(), annot=True, cmap=plt.cm.cool)

# + colab={"base_uri": "https://localhost:8080/", "height": 425} id="dtxuTEAg79P3" outputId="d1f5d7a2-27ab-4adf-952d-43ed5096676c"
df.describe().T

# + colab={"base_uri": "https://localhost:8080/"} id="tDNWCiju-Ta-" outputId="a7bc5f60-34aa-47dc-cf2f-381be1ceb2c7"
df.duplicated().sum()

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="3RUKRM6rFIgp" outputId="1838a70c-07d0-4ae7-ec2b-e492ebfa28b1"
# Columns whose outliers we want to inspect (and later remove).
cols = ['free sulfur dioxide', 'total sulfur dioxide', 'pH', 'density', 'chlorides', 'volatile acidity']
for i in cols:
    sns.boxplot(df[i])
    plt.show();

# + colab={"base_uri": "https://localhost:8080/"} id="kPkfqBU_FJhr" outputId="dfa54b97-6868-46f7-98e7-51764a5781ad"
# BUG FIX: `df2 = df` only created an alias, so the in-place drops below also
# mutated `df` and the later `df.shape` / `df2.shape` comparison was
# meaningless. Work on an explicit copy instead.
df2 = df.copy()
df2.shape

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="EJfey30dFJwM" outputId="bf855b24-c873-4ede-e8a1-fa2e700fc816"
# Iteratively remove outliers per column using the 1.5*IQR (Tukey) fences.
# Quartiles are recomputed on df2 each pass, matching the original behavior
# (where df and df2 were the same object and shrank together).
for i in cols:
    q1 = np.percentile(df2[i], 25)
    q3 = np.percentile(df2[i], 75)
    iqr = q3 - q1
    # BUG FIX: the original wrapped both fences in np.abs(). A negative lower
    # fence (common whenever q1 < 1.5*iqr) was flipped positive, which made
    # perfectly in-range rows look like outliers. Tukey fences need no abs().
    lower = q1 - (1.5 * iqr)
    upper = q3 + (1.5 * iqr)
    # print(lower, upper)
    outliers = df2[(df2[i] > upper) | (df2[i] < lower)]
    df2.drop(outliers.index, axis=0, inplace=True)
    plt.figure(figsize=(8, 3))
    sns.boxplot(df2[i])
    plt.show()

# + colab={"base_uri": "https://localhost:8080/"} id="soXxTM25-wCW" outputId="a69b4457-f723-4332-a744-a5613096e30f"
df2.shape

# + colab={"base_uri": "https://localhost:8080/"} id="qV2R9H0OKbDH" outputId="4d6fa9d7-cba5-4228-d6b2-fdd9938fac0a"
df.shape

# + id="kzmde2kr8G6k"
# Copy all the predictor variables into X dataframe
X = df2.drop('quality', axis=1)
y = df2['quality']

# + id="y-ewP0x78T6V"
from sklearn.model_selection import train_test_split
# NOTE(review): the original comment said "65:35 ratio" but test_size=0.3 is a
# 70:30 split; the code (70:30) is kept as the source of truth.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=10)

# + colab={"base_uri":
"https://localhost:8080/"} id="v46iiCOt8j2x" outputId="32ae8a56-7cb4-4bef-ddab-fb1bd0959910" from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(random_state = 10) classifier.fit(X_train, y_train) # + id="1lzEQRFa8wCj" y_pred = classifier.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="FzXaeKKp8zzT" outputId="6ef43dee-1952-4989-a201-090a4b76a0bb" from sklearn.metrics import classification_report, confusion_matrix cm = confusion_matrix(y_test, y_pred) print ("Confusion Matrix : \n", cm) # + colab={"base_uri": "https://localhost:8080/"} id="SZeXXNuN82zC" outputId="6a646c96-c17f-4c67-eeb9-8d13af0cd985" print (classification_report(y_test, y_pred)) # + colab={"base_uri": "https://localhost:8080/"} id="aZZhBHy-86cb" outputId="1371c5da-a410-4898-e624-2deb39978aaa" from sklearn.metrics import accuracy_score print ("Accuracy : ", accuracy_score(y_test, y_pred)) # + colab={"base_uri": "https://localhost:8080/"} id="MCkdbYI59BCB" outputId="0ff719b5-7938-4c6f-d3b2-834a9ad9c8e0" # Import necessary modules from sklearn.neighbors import KNeighborsClassifier # Split into training and test set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size = 0.2, random_state=42) knn = KNeighborsClassifier(n_neighbors=7) knn.fit(X_train, y_train) # Calculate the accuracy of the model print(knn.score(X_test, y_test)) # + id="q7_WzR109Hlo" from sklearn.tree import DecisionTreeClassifier # Splitting the dataset into train and test X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 100) # Function to perform training with giniIndex. def train_using_gini(X_train, X_test, y_train): # Creating the classifier object clf_gini = DecisionTreeClassifier(criterion = "gini", random_state = 100,max_depth=3, min_samples_leaf=5) # Performing training clf_gini.fit(X_train, y_train) return clf_gini # Function to perform training with entropy. 
def tarin_using_entropy(X_train, X_test, y_train):
    """Train a DecisionTreeClassifier using the entropy criterion.

    Note: the name is a typo for "train_using_entropy" (a correctly spelled
    alias is defined below); the original name is kept so existing calls keep
    working. `X_test` is accepted only for signature symmetry with
    `train_using_gini` and is not used.
    """
    clf_entropy = DecisionTreeClassifier(criterion="entropy", random_state=100,
                                         max_depth=3, min_samples_leaf=5)
    clf_entropy.fit(X_train, y_train)
    return clf_entropy


# Backward-compatible, correctly spelled alias for the typo'd name above.
train_using_entropy = tarin_using_entropy


def prediction(X_test, clf_object):
    """Predict labels for X_test with a fitted classifier, print and return them."""
    y_pred = clf_object.predict(X_test)
    print("Predicted values:")
    print(y_pred)
    return y_pred


def cal_accuracy(y_test, y_pred):
    """Print confusion matrix, accuracy (as a percentage) and the classification report."""
    print("Confusion Matrix: ", confusion_matrix(y_test, y_pred))
    print ("Accuracy : ", accuracy_score(y_test,y_pred)*100)
    print("Report : ", classification_report(y_test, y_pred))


# + id="UzZK9lUv9LsH"
clf_gini = train_using_gini(X_train, X_test, y_train)

# + id="B0HKBj-y9PwN"
clf_entropy = tarin_using_entropy(X_train, X_test, y_train)

# + colab={"base_uri": "https://localhost:8080/"} id="TQdPQXrN9Syb" outputId="d2dbc13b-9373-4893-cbb0-ddec540fd976"
# Operational Phase
print("Results Using Gini Index:")
# Prediction using gini
y_pred_gini = prediction(X_test, clf_gini)
cal_accuracy(y_test, y_pred_gini)

# + colab={"base_uri": "https://localhost:8080/"} id="NGYka1qh9V34" outputId="d4e49fa7-be15-4dec-e7dc-34a7547526d6"
print("Results Using Entropy:")
# Prediction using entropy
y_pred_entropy = prediction(X_test, clf_entropy)
cal_accuracy(y_test, y_pred_entropy)

# + colab={"base_uri": "https://localhost:8080/"} id="kNtFzvS-9bfK" outputId="9182286d-d8ae-4b76-f9e1-12addc7096e1"
from sklearn.ensemble import RandomForestClassifier
# creating a RF classifier
clf = RandomForestClassifier(n_estimators = 100)
# Training the model on the training dataset
clf.fit(X_train, y_train)
# performing predictions on the test dataset
y_pred = clf.predict(X_test)
# metrics are used to find accuracy or error
from sklearn import metrics
print()
# using metrics module for accuracy calculation
print("ACCURACY OF THE MODEL: ", metrics.accuracy_score(y_test, y_pred)) # + colab={"base_uri": "https://localhost:8080/"} id="itdoNE_x9g1N" outputId="9b83475e-77e6-42bb-a1b7-25144d6bf924" rfc = RandomForestClassifier() rfc.fit(X_train, y_train) rfc_pred = rfc.predict(X_test) rfc_acc = rfc.score(X_test, y_test) print("The training accuracy for Random Forest is:", rfc.score(X_train, y_train)*100, "%") print("The testing accuracy for Random Forest is:", rfc_acc * 100, "%") # + colab={"base_uri": "https://localhost:8080/"} id="DaCAwkA02QMD" outputId="b4d2872f-a6c0-46d4-fa11-454cea00162d" from xgboost import XGBClassifier xgb = XGBClassifier(verbosity=0) xgb.fit(X_train, y_train) xgb_pred = xgb.predict(X_test) xgb_acc = xgb.score(X_test, y_test) print("The training accuracy for XGB is:", xgb.score(X_train, y_train)*100, "%") print("The testing accuracy for XGB is:", xgb_acc * 100, "%")
Copy_of_2_wine_QT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: base # language: python # name: base # --- import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss import torch.optim as optim from transformers import DistilBertTokenizer,AdamW,DistilBertPreTrainedModel, DistilBertModel, DistilBertConfig import numpy as np import os from tqdm import tqdm, trange from seqeval.metrics import f1_score, precision_score, recall_score, classification_report from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler torch.manual_seed(1) tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased') device = 'cuda' print(tokenizer.tokenize('shubham')) print(tokenizer.vocab_size) # + class InputExample(object): """A single training/test example for token classification.""" def __init__(self, guid, words, labels): """Constructs a InputExample. Args: guid: Unique id for the example. words: list. The words of the sequence. labels: (Optional) list. The labels for each word of the sequence. This should be specified for train and dev examples, but not for test examples. 
""" self.guid = guid self.words = words self.labels = labels class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_ids): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_ids = label_ids def read_examples_from_file(data_dir, mode): file_path = os.path.join(data_dir, "{}.txt".format(mode)) guid_index = 1 examples = [] with open(file_path, encoding="utf-8") as f: words = [] labels = [] for line in f: if line.startswith("-DOCSTART-") or line == "" or line == "\n": if words: examples.append(InputExample(guid="{}-{}".format(mode, guid_index), words=words, labels=labels)) guid_index += 1 words = [] labels = [] else: splits = line.split(" ") words.append(splits[0]) if len(splits) > 1: lab = splits[-3].replace("\n", "") #print('#####################:',lab,splits[-4].replace("\n", "")) if '$' in lab and len(list(lab))>1: labels.append(lab[:]) elif not lab.isalnum(): labels.append("PUNC") else: labels.append(lab[:]) else: # Examples could have no label for mode = "test" labels.append("PUNC") #print(labels) if words: examples.append(InputExample(guid="%s-%d".format(mode, guid_index), words=words, labels=labels)) return examples # + train_obj = read_examples_from_file('/home/shubham/Project/pos_tag/data/ner','train') lab_list = ['NNS', 'CD', 'TO', 'VBD', 'WP$', 'LS', 'RP', 'SYM', 'VBN', 'NNPS', 'RBR', 'JJS', 'VBP', 'MD', 'JJ', 'CC', 'VBG', 'IN', 'WP', 'PRP', 'PUNC', 'POS', 'FW', 'JJR', 'EX', 'WRB', 'DT', 'UH', 'VB', 'VBZ', 'RB', 'RBS', 'NN', 'WDT', 'NNP', 'PRP$', 'PDT'] label_map = {label:i for i, label in enumerate(lab_list)} ix_to_tag = {i:label for i, label in enumerate(lab_list)} def input_to_features(words,labels,pad_token_label_id=-1): tokens = [] label_ids = [] for word, label in zip(words, labels): #print(word) word_tokens = tokenizer.tokenize(word) #print(word_tokens) tokens.extend(word_tokens) # Use the real label id for the first token of the 
# word, and padding ids for the remaining tokens
        # NOTE(review): despite the comment above, every sub-token of a word
        # gets the real label id here; `pad_token_label_id` is never used.
        label_ids.extend([label_map[label]] + [label_map[label]] * (len(word_tokens) - 1))
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Teacher input: same ids wrapped in [CLS] ... [SEP].
    dbert_input_ids = tokenizer.encode("[CLS]",add_special_tokens=False) + input_ids + tokenizer.encode("[SEP]",add_special_tokens=False)
    # Returns (teacher input ids with specials, student input ids, label ids),
    # all as long tensors on `device`.
    return torch.tensor(dbert_input_ids, dtype=torch.long,device=device),torch.tensor(input_ids, dtype=torch.long,device=device),torch.tensor(label_ids, dtype=torch.long,device=device)


# +
#train_obj = read_examples_from_file('/home/shubham/Project/pos_tag/data/ner','train')
#lab_list = ['NNS', 'CD', 'TO', 'VBD', 'WP$', 'LS', 'RP', 'SYM', 'VBN', 'NNPS', 'RBR', 'JJS', 'VBP', 'MD', 'JJ', 'CC', 'VBG', 'IN', 'WP', 'PRP', 'PUNC', 'POS', 'FW', 'JJR', 'EX', 'WRB', 'DT', 'UH', 'VB', 'VBZ', 'RB', 'RBS', 'NN', 'WDT', 'NNP', 'PRP$', 'PDT']
# Build (words, labels) pairs and sanity-check the feature conversion on the
# first training sentence.
training_data =[]
for i in train_obj:
    training_data.append((i.words, i.labels,))
_,a,b = input_to_features(training_data[0][0],training_data[0][1])
print(a,b)
print(training_data[0][0])
print(tokenizer.encode(training_data[0][0],add_special_tokens=False))
# -


class DistilBertForTokenClassificationCustom(DistilBertPreTrainedModel):
    # Token-classification head on top of DistilBERT; used as the teacher
    # model for distillation. Returns raw (unnormalized) logits.
    def __init__(self, config):
        super(DistilBertForTokenClassificationCustom, self).__init__(config)
        self.num_labels = config.num_labels
        self.distilbert = DistilBertModel(config)
        self.dropout = nn.Dropout(config.dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(self, input_ids, attention_mask=None, head_mask=None, inputs_embeds=None, labels=None):
        # NOTE(review): attention_mask/head_mask/inputs_embeds parameters are
        # accepted but None is hard-coded in the call below, so caller-supplied
        # masks are silently ignored.
        outputs = self.distilbert(input_ids, attention_mask=None, head_mask=None, inputs_embeds=None)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        # NOTE(review): (logits) is NOT a one-element tuple — it is just the
        # tensor. The training loop relies on this, indexing the batch
        # dimension via dbert_logits[0][1:-1]; do not "fix" it to (logits,).
        outputs = (logits)
        return outputs


# +
config = DistilBertConfig(num_labels=37)
teacher_model = DistilBertForTokenClassificationCustom(config)
state_dict =
torch.load("/home/shubham/Project/pos_tag/models/distil/pytorch_model.bin",map_location=device)
# Load the pretrained teacher weights and freeze it in eval mode.
teacher_model.load_state_dict(state_dict)
teacher_model.to(device)
teacher_model.eval()

# +
EMBEDDING_DIM = 256
HIDDEN_DIM = 512
learning_rate = 5e-5
adam_epsilon = 1e-8
weight_decay = 0.0


class LSTMTagger(nn.Module):
    # Student model: embedding -> 5-layer bidirectional LSTM -> linear tag head.
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim,num_layers=5,bidirectional=True)
        # The linear layer that maps from hidden state space to tag space
        # (hidden_dim*2 because the LSTM is bidirectional).
        self.hidden2tag = nn.Linear(hidden_dim*2, tagset_size)

    def forward(self, sentence):
        # `sentence` is a 1-D tensor of token ids; batch size is fixed at 1.
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        # Returns (log-probabilities, raw logits) — the logits are needed for
        # the MSE term of the distillation loss.
        return tag_scores,tag_space
# -


def evaluation(model):
    # Evaluate the student on the dev split; print and return the seqeval F1.
    model.eval()
    val_obj = read_examples_from_file('/home/shubham/Project/pos_tag/data/ner','dev')
    validation_data =[]
    for i in val_obj:
        validation_data.append((i.words, i.labels,))
    out_list = []
    pred_list = []
    for sentence, tags in (validation_data[:]):
        dbert_input_ids,sentence_in,targets = input_to_features(sentence,tags)
        # Gold labels mapped back to tag strings for seqeval.
        targets = [ix_to_tag[i] for i in targets.tolist()]
        out_list.append(targets)
        with torch.no_grad():
            tag_scores,_ = model(sentence_in)
        # Greedy decode: argmax over the tag dimension per token.
        pred = []
        for i in tag_scores:
            pred.append(ix_to_tag[int(np.argmax(i.cpu().detach().numpy()))])
        pred_list.append(pred)
    #print((pred_list))
    sc = f1_score(out_list,pred_list)
    print(sc)
    print(classification_report(out_list,pred_list))
    return sc


# +
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, tokenizer.vocab_size, len(label_map))
#model.load_state_dict(torch.load("/home/shubham/Project/pos_tag/code/distilation_experiments/lstm_models/model_0.8884873404025104.pt")) model.to(device) #loss_function = nn.NLLLoss() #loss_function = CrossEntropyLoss() def custom_loss(lstm_prob, bert_prob, real_label): a = 0.8 criterion_mse = nn.MSELoss() #criterion_ce = nn.NLLLoss() criterion_ce = CrossEntropyLoss() return a*criterion_ce(lstm_prob, real_label) + (1-a)*criterion_mse(lstm_prob, bert_prob) #optimizer = optim.SGD(model.parameters(), lr=0.1) optimizer = torch.optim.Adam(model.parameters()) """ no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_epsilon) """ for epoch in tqdm(range(100)): model.train() for sentence, tags in (training_data[:]): model.zero_grad() dbert_input_ids,sentence_in,targets = input_to_features(sentence,tags) #print(targets.shape) with torch.no_grad(): dbert_logits = teacher_model(dbert_input_ids.unsqueeze(0)) tag_scores,logits = model(sentence_in) #print('logits score:', logits) #print('tag score:', tag_scores) #print('dbert_logits',dbert_logits[0][1:-1]) #loss = loss_function(tag_scores, targets) #print(tag_scores.shape) loss = custom_loss(logits, dbert_logits[0][1:-1],targets) loss.backward() optimizer.step() print('loss',loss) f1 = evaluation(model) if f1>0.89: torch.save(model.state_dict(),'/home/shubham/Project/pos_tag/code/distilation_experiments/lstm_models/model_'+str(f1)+'.pt') # - """# See what the scores are after training with torch.no_grad(): #inputs = prepare_sequence(training_data[0][0], word_to_ix) inputs = torch.tensor(tokenizer.encode(training_data[0][0],add_special_tokens=False), dtype=torch.long) print('input ',inputs) tag_scores = 
model(inputs) # The sentence is "the dog ate the apple". i,j corresponds to score for tag j # for word i. The predicted tag is the maximum scoring tag. # Here, we can see the predicted sequence below is 0 1 2 0 1 # since 0 is index of the maximum value of row 1, # 1 is the index of maximum value of row 2, etc. # Which is DET NOUN VERB DET NOUN, the correct sequence! for i in tag_scores: print(int(np.argmax(i))) print(tag_scores.shape)""" """out_list = [] for i in tag_scores: out_list.append(ix_to_tag[int(np.argmax(i))]) print((training_data[0][0],training_data[0][1])) print(out_list) sc = f1_score(out_list,training_data[0][1]) print(sc)""" evaluation(model)
pos_tagger/bi-lstm/Bi-lstm-disbert-distillation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="fbEucLMi69gf" # ## IMPORT LIBRARY # # + id="PIl1WkJVM9EM" # import libraries import math import pandas_datareader as web import numpy as np import pandas as pd import matplotlib.pyplot as plt import cufflinks as cf from datetime import datetime from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression, Ridge, Lasso from sklearn.preprocessing import MinMaxScaler # + [markdown] id="nJfbV8Yb7BG-" # ## PREPARATION FOR MAIN DATA # + id="qswXCaVTSM7z" outputId="3c98ca2e-9a43-43c0-b942-bbdcb2d70a91" colab={"base_uri": "https://localhost:8080/", "height": 450} # INPUT STOCK DATA ticker='BBCA.JK' sampling=60 training_len=0.4 #start_date='1980-01-01' end_date=datetime.today().strftime('%Y-%m-%d') #end_date='2020-05-26' df=web.DataReader(ticker,data_source='yahoo', end=end_date) df['SMA1']=df['Close'].rolling(window=5).mean() df=df[4:] df # + id="_b8RWTJcS3zF" #df['Close'].iplot(asFigure=True, title=ticker + ' CLOSE PRICE',yTitle="HARGA PENUTUPAN (Rp)", xTitle="TANGGAL") # + [markdown] id="cv4FnHSx7Muz" # ## PREDIKSI MOVING AVERAGE # # + id="PfOPtEsGxXzX" # convert the data inti numpy array data=df.filter(['SMA1']) dataset=data.values # get the number of rows to train the model on training_data_len=math.ceil(len(dataset)* training_len) scaler = MinMaxScaler(feature_range=(0,1)) scaled_data = scaler.fit_transform(dataset) # SPLIT DATA INTO x_train and y_train train_data=scaled_data[0:training_data_len] x_train=[] y_train=[] for i in range(sampling,len(train_data)): x_train.append(train_data[i-sampling:i,0]) y_train.append(train_data[i,0]) x_train, y_train=np.array(x_train), np.array(y_train) # SPLIT DATA INTO x_test and y_test test_data=scaled_data[training_data_len-sampling:] x_test=[] 
# BUG FIX: predictions from x_test below align with dataset[training_data_len:]
# (the first test window ends at index training_data_len-1 and predicts the
# value at training_data_len). The original slice [training_data_len+1:] was
# off by one and one element short. y_test is only referenced by the
# commented-out test-score line, so this is safe for the active code path.
y_test = dataset[training_data_len:]
for i in range(sampling, len(test_data)):
    x_test.append(test_data[i-sampling:i, 0])

# + id="xRyyMXMndZxb" outputId="16789361-0f98-46b3-9f78-20fecc8f4b0a" colab={"base_uri": "https://localhost:8080/", "height": 221}
# Linear regression MODEL FIT
lr = LinearRegression()
lr.fit(x_train, y_train)
print("lr.coef_: {}".format(lr.coef_))
print("lr.intercept_: {}".format(lr.intercept_))
print('Training set score: {:.4f}'.format(lr.score(x_train, y_train)))
#print('Test set score: {:.4f}'.format(lr.score(x_test,y_test)))

# + id="<KEY>"
# Predict on the test windows and map back to price scale.
predictions = lr.predict(x_test)
predictions = np.reshape(predictions, (predictions.shape[0], 1))
predictions = scaler.inverse_transform(predictions)
#len(predictions)
#predictions

# + id="Nz-y6jg2kor8" outputId="dd2e0deb-05ce-4e36-8ab7-0f8782f032bb" colab={"base_uri": "https://localhost:8080/", "height": 760}
df_baru = df[training_data_len:]
df_baru['prediction'] = predictions
df_baru[['SMA1','prediction']].iplot(asFigure=True, title=ticker, legend='top').show()
df_baru.tail(5)

# + id="kqspX1gHctVn" outputId="f43cb46c-5f14-43b7-cddd-3c244f708d0a" colab={"base_uri": "https://localhost:8080/", "height": 34}
close = df_baru.filter(['SMA1']).values
predict = df_baru.filter(['prediction']).values
# NOTE(review): this is the RMSE in price units, not a percentage, despite the
# "PERSEN ERROR ... %" label in the print below — message kept as-is.
rmse = np.sqrt(np.mean(((close - predict) ** 2)))
print('PERSEN ERROR PREDIKSI SMA = {:.2f}%'.format(rmse))

# + id="6vnf2o2ufsfj" outputId="412357a1-ab54-49f5-d5a3-4cb1edde9b58" colab={"base_uri": "https://localhost:8080/", "height": 34}
# One-step-ahead forecast from the most recent `sampling` SMA values.
new_df = df.filter(['SMA1'])
last_60_days = new_df[-sampling:].values
last_data = scaler.transform(last_60_days)
last_data = np.reshape(last_data, (1, len(last_data)))
last_prediction = lr.predict(last_data)
last_prediction = [last_prediction]
last_prediction = scaler.inverse_transform(last_prediction)
print('PREDIKSI SMA ({}) BESOK = {}'.format(end_date, last_prediction[0][0]))

# + [markdown] id="8shJF-MhbDjf"
# ## CLOSE PRICE KESELURUHAN
# PAKE NILAI PREDIKSI
# + id="m2vIqcdSbNwM"
outputId="f2c74cb7-69c5-4b85-e606-9fbd3a6dfbe3" colab={"base_uri": "https://localhost:8080/", "height": 760} df_close=df_baru[['Close','SMA1','prediction']] df_close['y']=df_close['Close'].rolling(window=4).sum().shift() df_close['% error sma pred']=(df_close['SMA1']-df_close['prediction'])/df_close['SMA1']*100 df_close=df_close[4:] error=df_close['% error sma pred'].mean() df_close['close prediction']=5*(df_close['prediction'])-df_close['y'] df_close[['Close','close prediction']].iplot(asFigure=True,title='PREDIKSI HARGA CLOSE '+ticker,legend='top').show() df_close.tail() # + id="K3tMs9WLeQQP" outputId="124e7678-3e3c-4cba-efd3-0c2a9eb6dca4" colab={"base_uri": "https://localhost:8080/", "height": 34} harga_close=df_close.filter(['Close']).values predict_close=df_close.filter(['close prediction']).values rmse_close_price=np.sqrt(np.mean(((harga_close-predict_close)**2))) print('PERSEN ERROR PREDIKSI HARGA CLOSE = {:.2f}%'.format(rmse_close_price)) # + [markdown] id="0VReUisgmL4R" # ## PREDIKSI CLOSE PRICE BESOK # + id="n2RROeWqmtyp" outputId="282765e6-4d77-4ef0-9294-dc3eb825628f" colab={"base_uri": "https://localhost:8080/", "height": 34} y_besok=df_close['Close'][-4:].sum() close_price_besok=5*(last_prediction[0][0])-y_besok close_price_besok
CLOSE_PRICE_PREDICTION_[SMA]_[LINEAR_REGRESSION]_VER_4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np from precommit_analysis.keras_mnist_example import prepare_data, create_mnist_cnn_model from precommit_analysis.generators import eval_generator, eval_precommit_generator, sparse_mnist_generator_nonzero, eval_precommit_adversarial_generator, eval_optimal_adversary_generator batch_size = 128 num_classes = 10 epochs = 12 # - x_train, y_train, x_test, y_test, input_shape = prepare_data(num_classes) val_data_generator = sparse_mnist_generator_nonzero( x_test, y_test, batch_size=x_test.shape[0], sparsity=6, shuffle=False ) # # Evaluate judge alone # # - judge is a sparse MNIST classifier # - 6 non-zero pixels are randomly sampled from an input image # ### Judge - 5k batches judge = create_mnist_cnn_model(num_classes, input_shape) judge.load_weights('models/model_sparse_mnist_generator_nonzero_5k.h5py') # judge samples 6 pixels on random -> we need to see more runs and look at mean and variance accuracies = eval_generator(val_data_generator, judge, num_repetitions=10) print('accuracy: %.2f%%' % (100 * np.mean(accuracies))) print('variance: %E' % np.var(accuracies)) # ### Better judge - 30k batches judge = create_mnist_cnn_model(num_classes, input_shape) judge.load_weights('models/model_sparse_mnist_generator_nonzero_30k.h5py') accuracies = eval_generator(val_data_generator, judge, num_repetitions=10) print('accuracy: %.2f%%' % (100 * np.mean(accuracies))) print('variance: %E' % np.var(accuracies)) # # Random pre-commit # ### Judge - 5k batches accuracies = eval_precommit_generator(val_data_generator, judge, num_classes, num_repetitions=10) print('accuracy: %.2f%%' % (100 * np.mean(accuracies))) print('variance: %E' % np.var(accuracies)) # ### Better judge - 30k batches accuracies = eval_precommit_generator(val_data_generator, 
judge, num_classes, num_repetitions=10) print('accuracy: %.2f%%' % (100 * np.mean(accuracies))) print('variance: %E' % np.var(accuracies)) # # Adversarial precommit # # - evaluate the best adversary, which was found in train_adversary.ipynb adversary = create_mnist_cnn_model(num_classes, input_shape) adversary.load_weights('models/model_mnist_1epoch_adam1e-5.h5py') # ### Judge - 5k batches accuracies = eval_precommit_adversarial_generator(x_test, val_data_generator, judge, adversary, num_repetitions=10) print('accuracy: %.2f%%' % (100 * np.mean(accuracies))) print('variance: %E' % np.var(accuracies)) # ### Better judge - 30k batches accuracies = eval_precommit_adversarial_generator(x_test, val_data_generator, judge, adversary, num_repetitions=10) print('accuracy: %.2f%%' % (100 * np.mean(accuracies))) print('variance: %E' % np.var(accuracies)) # # Optimal adversary - perfect knowledge of judge # # - with perfect knowledge of the judge it's trivial to find an optimal adversarial pre-commit class # - choose judge's predicted categories as long as they are not true # - otherwise take the 2nd most probable class according to the judge and hope for a tie, which is a loose in our setting # ### Judge - 5k batches accuracies = eval_optimal_adversary_generator(val_data_generator, judge, num_repetitions=10) print('accuracy: %.2f%%' % (100 * np.mean(accuracies))) print('variance: %E' % np.var(accuracies)) # ### Better judge - 30k batches accuracies = eval_optimal_adversary_generator(val_data_generator, judge, num_repetitions=10) print('accuracy: %.2f%%' % (100 * np.mean(accuracies))) print('variance: %E' % np.var(accuracies)) # # Results # # - accuracy for different choices of pre-commit # - highlighted are the highest accuracy for the judge with random 2nd class pre-commit # - and the best adversarial pre-commit # | pre-commit type | judge 5k | judge 30k | # |-----------------------|----------|----------| # | **random** | **87.35%** | **88.31%** | # | adversarial_top | 
79.43% | 80.77% | # | adversarial_30k | 77.72% | 80.60% | # | adversarial_15k | 76.04% | 77.42% | # | adversarial_10k | 75.31% | 76.82% | # | adversarial_7.5k | 76.23% | 77.24% | # | adversarial_5k | 78.31% | 80.12% | # | adversarial_500 | 84.33% | 85.61% | # |-----------------------|----------|----------| # | adversarial_adam 1e-6 | 83.32% | 84.98% | # | adversarial_adam 5e-5 | 75.23% | 76.28% # | **adversarial_adam 1e-5** | **73.87%** | **75.41%** | # | adversarial_adam 1e-4| 75.06% | 76.50% | # |-----------------------|----------|----------| # | perfect knowledge | 52.89% | 55.62% | # |-----------------------|----------|----------| # | none / baseline | 52.99% | 55.51% | # # Conclusion # - much of the gain in judge's accuracy can be explained with the pre-commit only, without the actual debate between the 2 agents # - adversarial precommit indeed managed to decrease the judge's accuracy compared to random precommit # - the game of debate seems to me as a good tool for finding the candidate solutions (values for pre-commit) via agents of superior capabilities, and for mitigating the negative effect of the adversary. # # Future work # # It's a question whether we can talk about honest and adversarial agents in the context of AI safety. Either we accept that agents may generally want to deceive us, then we can't assume even one honest agent. Or we can decide to assume both of the agents are honest. But even if both agents are acting in a good faith there will be cases of disagreement. This is a known problem usually solved by ensembling methods. Could the game of debate be used as an addition to the existing ensembling methods? If you want to see some preliminary exploration of this idea, take a look at the future_work.ipynb notebook.
results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Application of batch-mode regression to cross-validation # # botorch provides a helper function `gen_loo_cv_folds` to easily perform leave-one-out (LOO) cross-validation (CV) by taking advantage of batch-mode regression and evaluation in GPyTorch. This tutorial illustrates the process on a noisy sinusoidal function, similar to the example from the batch-mode GP regression [tutorial](https://github.com/cornellius-gp/gpytorch/blob/master/examples/01_Simple_GP_Regression/Simple_Batch_Mode_GP_Regression.ipynb) from GPyTorch: # # $$y = \sin(2\pi x) + \epsilon, ~\epsilon \sim \mathcal N(0, 0.2).$$ # # Note: this tutorial aims to introduce batch-mode regression and evaluation in GPyTorch with CV as an example application. For alternative, more user-friendly functions to perform CV in Ax, see [ax.modelbridge.cross_validation](https://github.com/facebook/Ax/blob/master/ax/modelbridge/cross_validation.py). However, for larger CV tasks, it may be useful to exploit GPyTorch batch-mode, as shown in this tutorial. # + import torch import math device = torch.device("cpu") dtype = torch.float torch.manual_seed(3); # - # ### Initialize the CV dataset # # For our training data, we take 20 regularly spaced points on the interval $[0, 1]$ and generate noisy evaluations with an observed noise variance of 0.2. sigma = math.sqrt(0.2) train_X = torch.linspace(0, 1, 20, dtype=dtype, device=device).view(-1, 1) train_Y_noiseless = torch.sin(train_X * (2 * math.pi)).view(-1) train_Y = train_Y_noiseless + sigma * torch.randn_like(train_Y_noiseless) train_Yvar = torch.full_like(train_Y, 0.2) # The botorch function `gen_loo_cv_folds` takes our observed data `train_X`, `train_Y`, `train_Yvar` as input and returns the LOO CV folds in a `CVFolds` object. 
# + from botorch.cross_validation import gen_loo_cv_folds cv_folds = gen_loo_cv_folds(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar) # - # The `cv_folds` object contains the data, stored as tensors of appropriate batch shape, necessary to perform 20 CVs of 19 training points and 1 test point. For example, we can check that the shapes of the training inputs and training targets are `b x n x d = 20 x 19 x 1` and `b x n x o = 20 x 19 x 1` respectively, where `o` is the number of outputs. cv_folds.train_X.shape, cv_folds.train_Y.shape cv_folds.test_X.shape, cv_folds.test_Y.shape # Note that in a situation where the dataset is large, one may not want to perform LOO; in that case, a similar process can be used to perform $k$-fold CV. # ### Perform LOOCV # # We can the `batch_cross_validation` function to perform LOOCV using batching (meaning that the `b = 20` sets of training data can be fit as `b = 20` separate GP models with separate hyperparameters in parallel through GPyTorch) and return a CVResult tuple with the batched `GPyTorchPosterior` object over the LOOCV test points and the observed targets. The `batch_cross_validation` requires a model class (`model_cls`) and a marginal log likelihood class (`mll_cls`). Since we have an observed and constant noise level, we will use the FixedNoiseGP as the `model_cls` and an ExactMarginalLogLikelihood as the `mll_cls`. # + from botorch.cross_validation import batch_cross_validation from botorch.models import FixedNoiseGP from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood # instantiate and fit model cv_results = batch_cross_validation( model_cls=FixedNoiseGP, mll_cls=ExactMarginalLogLikelihood, cv_folds=cv_folds, ) # - # #### Compute the cross-validation error and generate plots # To compute the cross-validation error, we first evaluate the test points by computing the posterior in batch mode. 
Next, we compute the squared errors for each test point from the prediction and take an average across all cross-validation folds. # + from matplotlib import pyplot as plt # %matplotlib inline posterior = cv_results.posterior mean = posterior.mean cv_error = ((cv_folds.test_Y.squeeze() - mean.squeeze()) ** 2).mean() print(f"Cross-validation error: {cv_error : 4.2}") # get lower and upper confidence bounds lower, upper = posterior.mvn.confidence_region() # scatterplot of predicted versus test _, axes = plt.subplots(1, 1, figsize=(6, 4)) plt.plot([-1.5, 1.5], [-1.5, 1.5], 'k', label="true objective", linewidth=2) axes.set_xlabel("Actual") axes.set_ylabel("Predicted") axes.errorbar( x=cv_folds.test_Y.numpy(), y=mean.numpy(), xerr=1.96*sigma, yerr=((upper-lower)/2).numpy(), fmt='*' ) # - # Finally, we can visualize the fitted models. To do this, we again take advantage of batch-mode evaluation to obtain predictions, including lower and upper confidence regions, from each of the 20 models. model = cv_results.model with torch.no_grad(): # evaluate the models at a series of points for plotting plot_x = torch.linspace(0, 1, 101).view(1, -1, 1).repeat(cv_folds.train_X.shape[0], 1, 1) posterior = model.posterior(plot_x) mean = posterior.mean # get lower and upper confidence bounds lower, upper = posterior.mvn.confidence_region() plot_x.squeeze_() # The code snippet below plots the result for the 12th CV fold (by setting `num = 12`), but note that we have computed the results for all folds above (other plots can be obtained by iterating `num` from 1 to 20). 
# + _, axes = plt.subplots(1, 1, figsize=(6, 4)) # plot the 12th CV fold num = 12 # plot the training data in black axes.plot( cv_folds.train_X[num - 1].detach().numpy(), cv_folds.train_Y[num - 1].detach().numpy(), 'k*' ) # plot the test data in red axes.plot( cv_folds.test_X[num - 1].detach().numpy(), cv_folds.test_Y[num - 1].detach().numpy(), 'r*' ) # plot posterior means as blue line axes.plot(plot_x[num - 1].numpy(), mean[num-1].numpy(), 'b') # shade between the lower and upper confidence bounds axes.fill_between( plot_x[num - 1].numpy(), lower[num - 1].numpy(), upper[num - 1].numpy(), alpha=0.5 )
tutorials/batch_mode_cross_validation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="https://www.quantrocket.com/assets/img/notebook-header-logo.png"> # # <a href="https://www.quantrocket.com/disclaimer/">Disclaimer</a> # # Strategy code # # The strategy code is simple and is located in [dual_moving_average.py](dual_moving_average.py). A few highlights are shown below. # # To use our historical data in backtrader, we download a CSV of AAPL prices and create our backtrader data feed from it: # # ```python # import backtrader.feeds as btfeeds # from quantrocket.history import download_history_file # # # Create data feed using QuantRocket data and add to backtrader # # (Put files in /tmp to have QuantRocket automatically clean them out after # # a few hours) # download_history_file( # 'usstock-free-1d', # sids=['FIBBG000B9XRY4'], # filepath_or_buffer='/tmp/backtrader-demo-1d.csv', # fields=['Sid','Date','Open','Close','High','Low','Volume']) # # data = btfeeds.GenericCSVData( # dataname='/tmp/backtrader-demo-1d.csv', # dtformat=('%Y-%m-%d'), # datetime=1, # open=2, # close=3, # high=4, # low=5, # volume=6 # ) # cerebro.adddata(data) # ``` # # A backtest commonly ends by plotting a performance chart, but since our code will be running in a headless Docker container, we should save the plot to a file (which we'll tell QuantRocket to return to us when we run the backtest): # # ```python # # Save the plot to PDF so the satellite service can return it # cerebro.plot(savefig=True, figfilename='/tmp/backtrader-plot.pdf') # ``` # *** # # ## *Next Up* # # Part 4: [Run backtest](Part4-Run-Backtest.ipynb)
backtrader_dma/Part3-Strategy-Code.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Baseline price model for the dw_matrix_car dataset: a DummyRegressor sanity
# check, then a DecisionTreeRegressor on factorized categorical features, with
# permutation importance to inspect which features matter.

# + id="7vqqFwNfuLij" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 457} outputId="70a57d6d-bce1-4039-8132-a8d48cb17b42" executionInfo={"status": "ok", "timestamp": 1583361963050, "user_tz": -60, "elapsed": 9269, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
# !pip install --upgrade tables
# !pip install eli5

# + id="Xkuq91wyug9p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2d65da36-7cbe-48f4-8518-2bd115967f7c" executionInfo={"status": "ok", "timestamp": 1583362030137, "user_tz": -60, "elapsed": 501, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
# cd "drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car"

# + id="i-hIvYOOuzfi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f713880d-0fb0-43a5-c631-7b9a527f913a" executionInfo={"status": "ok", "timestamp": 1583362037260, "user_tz": -60, "elapsed": 1934, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
# ls

# + id="Z1rnvNi-u04r" colab_type="code" colab={}
import pandas as pd
import numpy as np

from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor

from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score

import eli5
from eli5.sklearn import PermutationImportance

# + id="m5QEE9M4vRtC" colab_type="code" colab={}
# Load the car offers (HDF5; needs the 'tables' package installed above).
df = pd.read_hdf('data/car.h5')

# + id="tsr_9-MWv9H_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="182b5830-0d56-44e3-b304-70dd91ed1e5f" executionInfo={"status": "ok", "timestamp": 1583362351757, "user_tz": -60, "elapsed": 631, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
df.shape

# + id="cQ4F8cNRwB4J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="fac2bb83-0307-4e19-9795-b747da0c2a2c" executionInfo={"status": "ok", "timestamp": 1583362374164, "user_tz": -60, "elapsed": 581, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
df.columns

# + id="FgvbQSuOwFBc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="50f2f9e2-fb87-4aad-a3cf-9823f243055a" executionInfo={"status": "ok", "timestamp": 1583362715676, "user_tz": -60, "elapsed": 557, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
# Baseline: DummyRegressor predicts the mean price regardless of the input
# ('car_id' carries no signal); its MAE is the score every real model must beat.
feats = ['car_id']
X = df[ feats ].values
y = df['price_value'].values

model = DummyRegressor()
model.fit(X, y)
y_pred = model.predict(X)

mae(y, y_pred)

# + id="Asou06NDxQ1Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c82d8cc7-d190-4533-8c9d-e62798ea825a" executionInfo={"status": "ok", "timestamp": 1583362822406, "user_tz": -60, "elapsed": 461, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
[ x for x in df.columns if 'price' in x]

# + id="yruqNwvRx07B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="437f7c4e-e94f-497e-c05f-5675b43ff768" executionInfo={"status": "ok", "timestamp": 1583362972262, "user_tz": -60, "elapsed": 623, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
# Drop EUR-priced offers so all target values share one currency.
df = df[ df['price_currency'] != 'EUR' ]
df.shape

# + id="cMYJIXUgzCL_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f85469d9-58da-42f3-d457-6212debe0c59" executionInfo={"status": "ok", "timestamp": 1583363188629, "user_tz": -60, "elapsed": 537, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
np.unique(df['param_color'].factorize()[0])

# + id="qAW1qWAEx7DG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 138} outputId="5b2be742-624f-4e0b-c232-643886b62313" executionInfo={"status": "ok", "timestamp": 1583363491311, "user_tz": -60, "elapsed": 2422, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
# Factorize every column into integer codes under '<name>__cat'; list-valued
# columns cannot be factorized and are skipped.
SUFFIX_CAT = '__cat'
for feat in df.columns:
    if isinstance(df[feat][0], list): continue

    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        df[feat + SUFFIX_CAT] = factorized_values

# + id="vVX9DYsTyf3u" colab_type="code" colab={}
# Use all factorized features except anything price-related (target leakage).
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]

# + id="l8a80Qub0sOo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cce451cb-d9ac-4743-dfa1-e8dfb6137661" executionInfo={"status": "ok", "timestamp": 1583363629240, "user_tz": -60, "elapsed": 340, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
len(cat_feats)

# + id="NyG5718-0tPZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="050b4426-ad57-4b95-ef9f-0b0cce9f6b83" executionInfo={"status": "ok", "timestamp": 1583363766556, "user_tz": -60, "elapsed": 4994, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
# 3-fold CV with negative MAE (sklearn convention: higher is better).
X = df[cat_feats].values
y = df['price_value'].values

model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')
np.mean(scores)

# + id="2pju_ml51aU-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 386} outputId="ffdab14e-b7f3-4f27-9c7f-37943b0ca66a" executionInfo={"status": "ok", "timestamp": 1583364027628, "user_tz": -60, "elapsed": 54438, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07993273702187826689"}}
# Refit on the full data and rank features by permutation importance.
m = DecisionTreeRegressor(max_depth=5)
m.fit(X,y)

imp = PermutationImportance(m).fit(X, y)
eli5.show_weights(imp, feature_names=cat_feats)

# + id="Z3gRaM7X17rf" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "Hubert"

# + id="03sqvbts3W_f" colab_type="code" colab={}
# Fix: the original ran a bare 'git add' with no pathspec, which stages nothing
# ("Nothing specified, nothing added"), so the commit recorded no changes.
# Stage the notebook explicitly before committing.
# !git add day3_simple_model.ipynb
# !git commit -m "add simple model"
day3_simple_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Cleansing with PostgreSQL

# ### Merging data from different files onto a single table

# By checking the same [Mexican Government Data Official Sources](https://datos.gob.mx/) I found two different sources related to economical activities: <ul>
# <li>Essential</li>
# <li>Non - Essential</li>
# </ul>
# But these were separated CSV files, so we will first analyse the separate CSVs within Jupyter notebooks and, if the data fields match, will create a unique database to be accessed on PostgreSQL.

import pandas as pd
import matplotlib.pyplot as plt

act_es = 'C:/Users/werlix/Downloads/Techo_Cristal/Data/Esenciales/th_denue_act_esenciales_ent01_09.csv'
# Fix: the original called pd.read_csv(esenciales1, ...) but 'esenciales1' is
# never defined (the path variable is named 'act_es'), so the cell raised a
# NameError before the CSV was even opened. Also, sep=""",""" is just a
# triple-quoted ',' — written plainly here.
e1 = pd.read_csv(act_es, sep=',')

# Due to the below error, will try to view the dataset in Excel directly to check if the structure of 45 fields is correct; this is because the dataset is not encoded in UTF-8 and is probably missing some commas or quotation marks for distinguishing string datatypes.

# #### Non Essential Activities data sets

# <img src="image1.png">

# #### Essential Activities data sets

# <img src="image2.png">

# As per the above images, the data is set up in four different files segregated by Mexican entities; exploring the data in Excel, we can see it's missing commas and quotation marks, therefore the data is detected neither with Jupyter nor PostgreSQL. <br />
# The process I'll take is to manually check and, if needed, add commas or quotation marks so the tables can be loaded onto PostgreSQL for further cleaning and analysis.

from IPython.display import HTML

# #### Manual detection for missing commas or quotations

# %%HTML
# <div align="left">
# <video width="500" height="340" autoplay muted>
# <source src="video1.mp4" type="video/mp4">
# Manual Cleaning process in Excel for esential and non-esential data.
# </video>
# </div>

# After cleaning, will create a local database on PostgreSQL to have an easier way to analyse my data:<br>
# For this case will have the following details to get a quick overview of the data:

# #### Table Creation

# <img src="image3.png">

# #### Append for Data (for each of the 4 non essential and 4 essential tables)

# <img src="image4.png">

# While appending data, identified that the essential and non essential data shared the same fields, distinguished by the _es_esencial_ field.<br />
# Therefore, before appending the non essential data, I had to standardize all the values on _es_esencial_ to **1** to have them properly distinguished before appending.<br />
# <img src="image5.png">
# <br />
# So by the end of the data join for the essential activities table, we could properly have identified each of the items:
# <br />
# <img src="image6.png">
#
# But for the purpose of this analysis, these items are not necessary since the database is not segregated by gender; still, we can analyze this to get an overview of how essential and non essential businesses sustain the Mexican economy.
# <br />
# For this I will create a PostgreSQL connection:

import psycopg2 as pg2

# Set the connection:
# NOTE(review): credentials are hard-coded; acceptable for a local demo DB,
# but move them to environment variables before sharing this notebook.
conn = pg2.connect(database='Platzi_PT',user='postgres',password='password')

# Count rows per essential/non-essential flag across the merged table.
df_esential = pd.read_sql(''' SELECT es_esencial, COUNT(*) FROM act_esenciales GROUP BY es_esencial ''', conn)

# To show the amount of a single table made from 8 different CSVs for essential and non essential economical activities.

df_esential

# Since my analysis involves differences based on gender, let me show you why this is not useful with an `.info()`

# Let's see the count of the table to set up a sample:

df_esential_count = pd.read_sql(''' SELECT COUNT(*) FROM act_esenciales ''', conn)
df_esential_count

# We select our sample:

df_esential_sample = pd.read_sql(''' SELECT * FROM act_esenciales ORDER BY random() limit 10000; ''', conn)
df_esential_sample.info(verbose=True)

# We need to set id to be the index, convert fecha_alta to date and es_esencial to int in order to manipulate our sample:

df_esential_sample=df_esential_sample.set_index('id')
df_esential_sample.head()

# Setting 'es_esencial' to int.

# Fix: Series.astype returns a NEW Series — the original discarded the result,
# so the column silently kept its old dtype; assign the converted column back.
df_esential_sample['es_esencial'] = df_esential_sample['es_esencial'].astype(int)
df_esential_sample.dtypes

# And fecha_alta as date, but as we can see, this is in yyyy-mm format, so we'll need to take another approach:

df_esential_sample['fecha_alta']=pd.to_datetime(df_esential_sample['fecha_alta'], format='%Y-%m')
df_esential_sample.head()

# Now that we cleaned our data, let's see that brief analysis per entity and essentiality:

df_esential_sample.groupby(['entidad'])['es_esencial'].sum().sort_values(ascending=False).plot(kind='bar', figsize=(15,15))

# With this we can see that the top three entities with more essential businesses are Mexico City, Mexico State and Jalisco.<br />
# Let's see if the municipalities coincide.

df_esential_sample.groupby(['municipio'])['es_esencial'].sum().nlargest(n=5).sort_values(ascending=False).plot(kind='bar', figsize=(8,8))

# Contrary to what we saw on the entities table, the top 1 municipality with more essential businesses is not part of the top 3 entities, since Puebla is from Puebla state.

# Still, since our analysis is based on gender; again, we can consider this source as a valuable input to evidence the glass ceiling that Mexican women currently face.
.ipynb_checkpoints/DataCleansing_PostgreSQl-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Multiple Regression
#
# In this notebook we use a different data set to practice what we have learned so far:
# * Upload and preprocess the data
# * Write a function to compute the Multiple Regression weights
# * Write a function to make predictions of the output given the input feature
# * Compare different models for predicting the output
#
# Look at the Multiple Regression notebook on insurance data!

# # Import all required libraries

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# %matplotlib inline

# # Upload, preprocess and check the data
#
# Dataset on insurance data (insurance.csv) is obtained from the Machine Learning course website (Spring 2017) from Professor <NAME> at http://www.sci.csueastbay.edu/~esuess/stat6620/#week-6.

# +
# Import dataset
data = pd.read_csv('insurance.csv')

# Look at the table to check potential features
data[:10]
# -

# Check if the dataset contains NaN values
data.isnull().any()

# Check some statistics of the data
data.describe()

# Plot Scatter Matrix for relevant variables
plot_data_new = data[['expenses', 'age', 'bmi', 'children']]
from pandas.plotting import scatter_matrix
sm = scatter_matrix(plot_data_new, figsize = (10,10))

# Plot some feature relations
data.plot(x='age', y='expenses', style='o')
plt.title('expenses vs. age')
plt.xlabel('age')
plt.ylabel('expenses')
plt.show()

# Plot some feature relations
data.plot(x='children', y='expenses', style='o')
plt.title('expenses vs. children')
plt.xlabel('children')
plt.ylabel('expenses')
plt.show()

# Plot some feature relations
data.plot(x='bmi', y='expenses', style='o')
plt.title('expenses vs. bmi')
plt.xlabel('bmi')
plt.ylabel('expenses')
plt.show()

# Divide the data into some 'attributes' (X) and 'labels' (y).
X = data[['bmi','children', 'age']]
y = data['expenses']

# # Split data into training and testing

# Split data set into 80% train and 20% test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Look at the shape to check the split ratio
X_train.shape, X_test.shape, y_train.shape, y_test.shape

# # Use a pre-built multiple regression function
#
#

# Train a Sklearn built-in function
reg = LinearRegression()
reg.fit(X_train, y_train)

X.columns

# +
# See intercept and coefficients chosen by the model
print('Intercept:', reg.intercept_)
coeff_df = pd.DataFrame({'Features': X.columns, 'Coefficients': reg.coef_}).set_index('Features')
coeff_df
# -

# Do prediction on test data
y_pred = reg.predict(X_test)

# Check differences between actual and predicted value
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred, 'Difference': y_pred - y_test}, columns=['Actual', 'Predicted', 'Difference']).astype(int)
df.head()

# Evaluate the performance
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))

# # Check for overfitting

# +
# If r2 and RMSE in train and test differ dramatically => Overfitting!
# => Compare r2 and RMSE in Test and Train!

# "Prework" needed to do the comparison
y_pred_train = reg.predict(X_train)
y_pred_test = reg.predict(X_test)

from sklearn.metrics import r2_score

# => Compare r2 and RMSE in Test and Train!
# Fix: output labels said 'RSME' — the metric is RMSE (root mean squared error).
print('RMSE Train:', np.sqrt(metrics.mean_squared_error(y_train, y_pred_train)))
print('RMSE Test: ', np.sqrt(metrics.mean_squared_error(y_test, y_pred_test)))
print('R-2 Train:', r2_score(y_train, y_pred_train))
print('R-2 Test: ', r2_score(y_test, y_pred_test))
# -

# # Additional task: What is the best combination of input features showing the least MSE
# ## (1) bmi + age + children
# ## (2) bmi + age
# ## (3) bmi + children
# ## (4) age + children

# # For more information on performance evaluation see also
# * https://en.wikipedia.org/wiki/Mean_absolute_error
# * https://en.wikipedia.org/wiki/Mean_squared_error
# * https://en.wikipedia.org/wiki/Root-mean-square_deviation
Data-Analysis/5_Linear-regression/4c_multiple-linear-regression_insurance_solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Linear variational method for the particle in a box (length l): builds the
# Hamiltonian (H) and overlap (S) matrices over a polynomial basis F, solves
# the secular determinant |H - W*S| = 0 for the energy approximations W, then
# recovers, normalizes and plots the trial wavefunctions.

# +
# %%time
import time
for _ in range(1000):
    time.sleep(0.01)# sleep for 0.01 seconds (timing demo for %%time, ~10 s total)

from sympy import *
from sympy import init_printing; init_printing(use_latex = 'mathjax')
from sympy.plotting import plot

# Number of energy levels to approximate (interactive prompt, Spanish).
# NOTE(review): the basis list F below is hard-coded with exactly 4 functions,
# so entering n > 4 raises IndexError — confirm intended usage is n <= 4.
n = int(input('Qué número de valores de energía desea aproximar?'))

# l: box length, m: mass, hbar: reduced Planck constant, k: normalization constant.
l, m, hbar, k = symbols('l m hbar k', real = True, constant = True)
var('x,W')

# H: Hamiltonian matrix, S: overlap matrix, U: secular matrix H - W*S,
# CC: coefficient equations evaluated at each root W.
H = ones(n,n)
S = ones(n,n)
U = ones(n,n)
CC = ones(n,n)

#F = [sympify(input('Ingrese la función {0}: '.format(i+1))) for i in range(n)]
# Polynomial trial basis satisfying the boundary conditions psi(0) = psi(l) = 0.
F = [x*(l - x),(x**2)*((l - x)**2),x*(l - x)*((l/2)-x),(x**2)*((l - x)**2)*((l/2)-x)]

# Symbolic placeholders: c[i,j] are expansion coefficients, fi the basis labels.
fi = zeros(n)
c = ones(n,n)
for i in range(n):
    for j in range(n):
        c[i,j] = sympify('c%d%d' %(j+1,i+1))
        fi[j] = sympify('phi%d' %(j+1))

for j in range(1,n+1): # loop to fill the H matrix (and S, U alongside)
    for i in range(1,n+1):
        # Matrix elements: H[j,i] = <F_j| -hbar^2/(2m) d^2/dx^2 |F_i>,
        # S[j,i] = <F_j|F_i>, both integrated over the box [0, l].
        I = ((-hbar**2)/(2*m))
        integrando = I*(F[j-1])*diff(F[i-1], x, 2)
        A = integrate(integrando, (x, 0, l))
        integrandos = (F[j-1])*(F[i-1])
        B = integrate(integrandos, (x, 0, l))
        H[j-1,i-1] *= A
        S[j-1,i-1] *= B
        U[j-1,i-1] *= (H[j-1,i-1] -W*S[j-1,i-1])

# Secular equation: det(H - W*S) = 0; its roots EE are the energy estimates.
E = U.det()
EE = solve(E,W)

a = 1/EE[0] # trick to sort the W values: rescale, sort, then undo the scaling
for i in range(n):
    EE[i] = EE[i]*a
EE.sort()
for j in range(n):
    EE[j] = EE[j]*(1/a)

# Substitute each energy root back into the secular system to get the
# coefficient equations for that state.
cc = Matrix(c)
for j in range(n):
    for i in range(n):
        C = U*cc.col(j)
        CC[i,j] *= C[i].subs(W, EE[j])

# Solve each column's homogeneous system for the coefficients.
G = []
for i in range(n):
    D = solve(CC.col(i),cc)
    G.append(list(D.items()))
G = Matrix(G)

# Keep the nonzero coefficient relations.
J = []
for i in range(len(G)):
    if G[i][1] != 0:
        J.append(factor(G[i]))

# Partition the coefficient symbols: 'ceros' are forced to zero,
# 'param' are expressed in terms of the free parameter, 'kas' stay free.
ceros = []
param = []
for i in range(len(G)):
    if G[i][1] != 0:
        param.append(G[i][0])
    elif G[i][1] == 0:
        ceros.append(G[i][0])
kas = [x for x in cc if x not in (ceros+param)]

# Rebuild the coefficient matrix with k as the single free (normalization)
# parameter per state.
finale = ones(n,n)
for j in range(n):
    for i in range(n):
        if sympify('c'+str(i+1)+str(j+1)) not in (ceros+param):
            finale[i,j] *= k
        elif sympify('c'+str(i+1)+str(j+1)) not in (kas+ceros):
            # NOTE(review): 'str(j+1+1)' substitutes column j+2 here, unlike the
            # 'j+1' used everywhere else — possible typo, confirm intent.
            finale[i,j] *= J[i][1].subs(sympify('c'+str(i+1)+str(j+1+1)),k)
        else:
            finale[i,j] *= 0

# Trial wavefunctions Psi_i = sum_j finale[j,i] * F_j.
Psi = factor(finale*Matrix(F))

# Normalization: integrate Psi_i^2 over the box...
integrand = []
Psis = []
for i in range(n):
    integrand.append(Psi[i]**2)
    Psis.append(integrate(integrand[i], (x, 0, l)))

# ...and solve <Psi|Psi> = 1 for k^2.
normaliz = []
for i in range(n):
    normaliz.append(factor(Psis[i])*(1/k**2)-(1/k**2))

KKK = []
Figaro = []
for i in range(n):
    KKK.append(solve(normaliz[i],k**2))
    Figaro.append(Psi[i]**2)
    Figaro[i] = Figaro[i].subs(k**2,KKK[i][0])

# Plot the normalized probability densities |Psi_i|^2 on a unit box (l = 1).
for i in range(n):
    plot(Figaro[i].subs(l, 1), (x, 0,1))
Huckel_M0/Variational+Theory+beta.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jacob-honig/venture_funding_with_deep_learning/blob/main/GC_venture_funding_with_deep_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="JmEcaJzVURr_" # # Venture Funding with Deep Learning # # You work as a risk management associate at Alphabet Soup, a venture capital firm. Alphabet Soup’s business team receives many funding applications from startups every day. This team has asked you to help them create a model that predicts whether applicants will be successful if funded by Alphabet Soup. # # The business team has given you a CSV containing more than 34,000 organizations that have received funding from Alphabet Soup over the years. With your knowledge of machine learning and neural networks, you decide to use the features in the provided dataset to create a binary classifier model that will predict whether an applicant will become a successful business. The CSV file contains a variety of information about these businesses, including whether or not they ultimately became successful. # # ## Instructions: # # The steps for this challenge are broken out into the following sections: # # * Prepare the data for use on a neural network model. # # * Compile and evaluate a binary classification model using a neural network. # # * Optimize the neural network model. # # ### Prepare the Data for Use on a Neural Network Model # # Using your knowledge of Pandas and scikit-learn’s `StandardScaler()`, preprocess the dataset so that you can use it to compile and evaluate the neural network model later. 
# # Open the starter code file, and complete the following data preparation steps: # # 1. Read the `applicants_data.csv` file into a Pandas DataFrame. Review the DataFrame, looking for categorical variables that will need to be encoded, as well as columns that could eventually define your features and target variables. # # 2. Drop the “EIN” (Employer Identification Number) and “NAME” columns from the DataFrame, because they are not relevant to the binary classification model. # # 3. Encode the dataset’s categorical variables using `OneHotEncoder`, and then place the encoded variables into a new DataFrame. # # 4. Add the original DataFrame’s numerical variables to the DataFrame containing the encoded variables. # # > **Note** To complete this step, you will employ the Pandas `concat()` function that was introduced earlier in this course. # # 5. Using the preprocessed data, create the features (`X`) and target (`y`) datasets. The target dataset should be defined by the preprocessed DataFrame column “IS_SUCCESSFUL”. The remaining columns should define the features dataset. # # 6. Split the features and target sets into training and testing datasets. # # 7. Use scikit-learn's `StandardScaler` to scale the features data. # # ### Compile and Evaluate a Binary Classification Model Using a Neural Network # # Use your knowledge of TensorFlow to design a binary classification deep neural network model. This model should use the dataset’s features to predict whether an Alphabet Soup&ndash;funded startup will be successful based on the features in the dataset. Consider the number of inputs before determining the number of layers that your model will contain or the number of neurons on each layer. Then, compile and fit your model. Finally, evaluate your binary classification model to calculate the model’s loss and accuracy. # # To do so, complete the following steps: # # 1. 
Create a deep neural network by assigning the number of input features, the number of layers, and the number of neurons on each layer using Tensorflow’s Keras. # # > **Hint** You can start with a two-layer deep neural network model that uses the `relu` activation function for both layers. # # 2. Compile and fit the model using the `binary_crossentropy` loss function, the `adam` optimizer, and the `accuracy` evaluation metric. # # > **Hint** When fitting the model, start with a small number of epochs, such as 20, 50, or 100. # # 3. Evaluate the model using the test data to determine the model’s loss and accuracy. # # 4. Save and export your model to an HDF5 file, and name the file `AlphabetSoup.h5`. # # ### Optimize the Neural Network Model # # Using your knowledge of TensorFlow and Keras, optimize your model to improve the model's accuracy. Even if you do not successfully achieve a better accuracy, you'll need to demonstrate at least two attempts to optimize the model. You can include these attempts in your existing notebook. Or, you can make copies of the starter notebook in the same folder, rename them, and code each model optimization in a new notebook. # # > **Note** You will not lose points if your model does not achieve a high accuracy, as long as you make at least two attempts to optimize the model. # # To do so, complete the following steps: # # 1. Define at least three new deep neural network models (the original plus 2 optimization attempts). With each, try to improve on your first model’s predictive accuracy. # # > **Rewind** Recall that perfect accuracy has a value of 1, so accuracy improves as its value moves closer to 1. To optimize your model for a predictive accuracy as close to 1 as possible, you can use any or all of the following techniques: # > # > * Adjust the input data by dropping different features columns to ensure that no variables or outliers confuse the model. # > # > * Add more neurons (nodes) to a hidden layer. 
# > # > * Add more hidden layers. # > # > * Use different activation functions for the hidden layers. # > # > * Add to or reduce the number of epochs in the training regimen. # # 2. After finishing your models, display the accuracy scores achieved by each model, and compare the results. # # 3. Save each of your models as an HDF5 file. # # + id="6XiL4BeyURsD" # Imports import pandas as pd import tensorflow as tf from tensorflow.keras.layers import Dense from tensorflow.keras.models import Sequential from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler,OneHotEncoder # + [markdown] id="JzvfMq--URsD" # --- # # ## Prepare the data to be used on a neural network model # + [markdown] id="pJWhWy1zURsE" # ### Step 1: Read the `applicants_data.csv` file into a Pandas DataFrame. Review the DataFrame, looking for categorical variables that will need to be encoded, as well as columns that could eventually define your features and target variables. # # + id="5GsFRRuGU9r-" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} outputId="18367fe6-ba73-4ce6-e4f9-1ba77937f1e6" # Upload credit_card_transactions.csv to Colab from google.colab import files csv_file = files.upload() # + id="gSTFDSzYURsE" colab={"base_uri": "https://localhost:8080/", "height": 921} outputId="406e5ad8-fafd-4064-cc6e-22bcd8d557fd" # Read the applicants_data.csv file from the Resources folder into a Pandas DataFrame applicant_data_df = pd.read_csv( "applicants_data.csv" ) # Review the DataFrame applicant_data_df # + id="C-WB0yc4URsE" colab={"base_uri": "https://localhost:8080/"} outputId="077abec4-c22b-4dad-a9c7-06ba7caa4cb5" # Review the data types associated with the columns applicant_data_df.dtypes # + [markdown] id="XCwg3ahTURsE" # ### Step 2: Drop the 
“EIN” (Employer Identification Number) and “NAME” columns from the DataFrame, because they are not relevant to the binary classification model. # + id="dj7yoYaAURsE" colab={"base_uri": "https://localhost:8080/", "height": 522} outputId="74b262a9-5e21-497e-c1fd-2d75de1c7978" # Drop the 'EIN' and 'NAME' columns from the DataFrame applicant_data_df = applicant_data_df.drop(columns=["EIN", "NAME"]) # Review the DataFrame applicant_data_df # + [markdown] id="WFF35NwZURsF" # ### Step 3: Encode the dataset’s categorical variables using `OneHotEncoder`, and then place the encoded variables into a new DataFrame. # + id="aHv_okM7URsF" colab={"base_uri": "https://localhost:8080/"} outputId="86b3c81b-5a19-4b00-9870-d6b84c88f363" # Create a list of categorical variables categorical_variables = list(applicant_data_df.dtypes[applicant_data_df.dtypes == "object"].index) # Display the categorical variables list categorical_variables # + id="IizC83_xURsF" # Create a OneHotEncoder instance enc = OneHotEncoder(sparse=False) # + id="fLUlCo48URsF" # Encode the categorcal variables using OneHotEncoder encoded_data = enc.fit_transform(applicant_data_df[categorical_variables]) # + id="Bj-Hc08qURsF" colab={"base_uri": "https://localhost:8080/", "height": 352} outputId="8a86482a-0799-4873-bddd-31b679203aad" # Create a DataFrame with the encoded variables encoded_df = pd.DataFrame( encoded_data, columns = enc.get_feature_names(categorical_variables) ) # Review the DataFrame encoded_df.head() # + [markdown] id="A8Bd0AQTURsF" # ### Step 4: Add the original DataFrame’s numerical variables to the DataFrame containing the encoded variables. # # > **Note** To complete this step, you will employ the Pandas `concat()` function that was introduced earlier in this course. 
# + id="r43_PQpFURsG" colab={"base_uri": "https://localhost:8080/", "height": 505} outputId="2a8ad523-09f4-4410-be96-7c46bda2156b" # Add the numerical variables from the original DataFrame to the one-hot encoding DataFrame numerical_variables_df = applicant_data_df.drop(columns = categorical_variables) combined_df = pd.concat([numerical_variables_df, encoded_df], axis=1) # Review the Dataframe combined_df # + [markdown] id="RJHzn3uyURsG" # ### Step 5: Using the preprocessed data, create the features (`X`) and target (`y`) datasets. The target dataset should be defined by the preprocessed DataFrame column “IS_SUCCESSFUL”. The remaining columns should define the features dataset. # # # + id="XpmWKy35URsG" colab={"base_uri": "https://localhost:8080/"} outputId="96380442-1e8d-43bd-8892-d14f23e03fc5" # Define the target set y using the IS_SUCCESSFUL column y = combined_df["IS_SUCCESSFUL"] # Display a sample of y y # + id="SzVEfRlaURsG" colab={"base_uri": "https://localhost:8080/", "height": 505} outputId="56043264-c413-4caf-8233-760ca11ec2f6" # Define features set X by selecting all columns but IS_SUCCESSFUL X = combined_df.drop(columns=["IS_SUCCESSFUL"]) # Review the features DataFrame X # + [markdown] id="Wgb0PFDKURsG" # ### Step 6: Split the features and target sets into training and testing datasets. # # + id="M_vGfjV4URsG" # Split the preprocessed data into a training and testing dataset # Assign the function a random_state equal to 1 X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) # + [markdown] id="hDcNY8TGURsG" # ### Step 7: Use scikit-learn's `StandardScaler` to scale the features data. 
# + id="MWOH8vl-URsG" # Create a StandardScaler instance scaler = StandardScaler() # Fit the scaler to the features training dataset X_scaler = scaler.fit(X_train) # Fit the scaler to the features training dataset X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test) # + [markdown] id="RvE_rJAAURsH" # --- # # ## Compile and Evaluate a Binary Classification Model Using a Neural Network # + [markdown] id="mLD8vtTqURsH" # ### Step 1: Create a deep neural network by assigning the number of input features, the number of layers, and the number of neurons on each layer using Tensorflow’s Keras. # # > **Hint** You can start with a two-layer deep neural network model that uses the `relu` activation function for both layers. # # + id="K1Vgwz04URsH" colab={"base_uri": "https://localhost:8080/"} outputId="c69e7443-a752-41ab-edbd-c3659769d9af" # Define the the number of inputs (features) to the model number_input_features = len(X_train.iloc[0]) # Review the number of features number_input_features # + id="0mcgkl8SURsH" # Define the number of neurons in the output layer number_output_neurons = 2 # + id="i0Q3iY3wURsH" colab={"base_uri": "https://localhost:8080/"} outputId="2b86a723-fc72-48f3-98dc-fac68412c4b2" # Define the number of hidden nodes for the first hidden layer hidden_nodes_layer1 = (number_input_features + 1) // 2 # Review the number hidden nodes in the first layer hidden_nodes_layer1 # + id="Ux_lAsrvURsH" colab={"base_uri": "https://localhost:8080/"} outputId="0364e403-a4bb-4df1-e688-665b5d45d235" # Define the number of hidden nodes for the second hidden layer hidden_nodes_layer2 = (hidden_nodes_layer1 + 1) // 2 # Review the number hidden nodes in the second layer hidden_nodes_layer2 # + id="1kq9QWhLURsH" # Create the Sequential model instance nn = Sequential() # + id="NcVrWCD_URsI" # Add the first hidden layer nn.add(Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation="relu")) # + id="Ugxr6lisURsI" # Add the 
second hidden layer nn.add(Dense(units=hidden_nodes_layer2, activation="relu")) # + id="QxcGEyNoURsI" # Add the output layer to the model specifying the number of output neurons and activation function nn.add(Dense(units=1, activation="sigmoid")) # + id="1L2ZVcXgURsI" colab={"base_uri": "https://localhost:8080/"} outputId="1c866992-6693-4230-883d-3b1287219456" # Display the Sequential model summary nn.summary() # + [markdown] id="pzoKsvI3URsI" # ### Step 2: Compile and fit the model using the `binary_crossentropy` loss function, the `adam` optimizer, and the `accuracy` evaluation metric. # # + id="PR8r_cL1URsI" # Compile the Sequential model nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) # + id="5_z-FPHJURsI" colab={"base_uri": "https://localhost:8080/"} outputId="34813851-0798-497f-fe82-5fe3dc72a6d9" # Fit the model using 50 epochs and the training data fit_model = nn.fit(X_train_scaled, y_train, epochs=50) # + [markdown] id="JIbFSQ-FURsI" # ### Step 3: Evaluate the model using the test data to determine the model’s loss and accuracy. # # + id="rWaHGdGrURsI" colab={"base_uri": "https://localhost:8080/"} outputId="edf2076f-b180-41a3-f788-08d56f89fe65" # Evaluate the model loss and accuracy metrics using the evaluate method and the test data model_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2) # Display the model loss and accuracy results print(f"Loss: {model_loss}, Accuracy: {model_accuracy}") # + [markdown] id="ppT2iIP5URsJ" # ### Step 4: Save and export your model to an HDF5 file, and name the file `AlphabetSoup.h5`. # # + id="iOuA28RQURsJ" # Set the model's file path file_path = "AlphabetSoup.h5" # Export your model to a HDF5 file nn.save(file_path) # + [markdown] id="xlN5E4WfURsJ" # --- # # ## Optimize the neural network model # # + [markdown] id="0q7pN2XKURsJ" # ### Step 1: Define at least three new deep neural network models (resulting in the original plus 3 optimization attempts). 
With each, try to improve on your first model’s predictive accuracy. # # > **Rewind** Recall that perfect accuracy has a value of 1, so accuracy improves as its value moves closer to 1. To optimize your model for a predictive accuracy as close to 1 as possible, you can use any or all of the following techniques: # > # > * Adjust the input data by dropping different features columns to ensure that no variables or outliers confuse the model. # > # > * Add more neurons (nodes) to a hidden layer. # > # > * Add more hidden layers. # > # > * Use different activation functions for the hidden layers. # > # > * Add to or reduce the number of epochs in the training regimen. # # + [markdown] id="slhZ-iS1URsJ" # ### Alternative Model 1 # + id="zqbrMOgCURsJ" colab={"base_uri": "https://localhost:8080/"} outputId="9497304b-efe3-46f0-ff7d-8461402f3e68" # Define the the number of inputs (features) to the model number_input_features = len(X_train.iloc[0]) # Review the number of features number_input_features # + id="U3rrh76sURsJ" # Define the number of neurons in the output layer number_output_neurons_A1 = 2 # + id="lMOrWGBKURsJ" # Define the number of hidden nodes for the hidden layers hidden_nodes_layer1_A1 = (number_input_features +1)//2 hidden_nodes_layer2_A1 = (hidden_nodes_layer1_A1 +1)//2 hidden_nodes_layer3_A1 = (hidden_nodes_layer2_A1 +1)//2 # + id="efNsx7TfURsK" # Create the Sequential model instance nn_A1 = Sequential() # + id="0uMhdXQWURsK" colab={"base_uri": "https://localhost:8080/"} outputId="f40765d0-704e-47b7-a138-e3a22885fd6d" # First hidden layer nn_A1.add(Dense(units=hidden_nodes_layer1_A1,input_dim=number_input_features,activation="relu")) nn_A1.add(Dense(units=hidden_nodes_layer2_A1,activation="relu")) nn_A1.add(Dense(units=hidden_nodes_layer3_A1,activation="relu")) # Output layer nn_A1.add(Dense(1, activation="sigmoid")) # Check the structure of the model nn_A1.summary() # + id="2gDpRhG7URsK" # Compile the Sequential model 
nn_A1.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

# + id="zE4UD4NyURsK" colab={"base_uri": "https://localhost:8080/"} outputId="0f85ae58-6b1b-458e-c6cc-73d87d6b3514"
# Fit the model using 50 epochs and the training data
fit_model_A1 = nn_A1.fit(X_train_scaled, y_train, epochs=50)

# + [markdown] id="aMZAmHrlURsK"
# #### Alternative Model 2

# + id="Mw0cxkvOURsK" colab={"base_uri": "https://localhost:8080/"} outputId="faac89ba-180c-4806-d83a-95d8fd7e0ee0"
# Define the the number of inputs (features) to the model
number_input_features = len(X_train.iloc[0])

# Review the number of features
number_input_features

# + id="qnLSrAKJURsK"
# Define the number of neurons in the output layer
number_output_neurons_A2 = 2

# + id="myK4iGVBURsK"
# Define the number of hidden nodes for the hidden layers.
# Each layer is roughly half the width of the layer before it.
hidden_nodes_layer1_A2 = (number_input_features + 1) // 2
hidden_nodes_layer2_A2 = (hidden_nodes_layer1_A2 + 1) // 2
hidden_nodes_layer3_A2 = (hidden_nodes_layer2_A2 + 1) // 2
# BUG FIX: layer 4 must be derived from THIS model's layer 3
# (hidden_nodes_layer3_A2); the original code referenced Alternative
# Model 1's hidden_nodes_layer3_A1, silently coupling the two models.
hidden_nodes_layer4_A2 = (hidden_nodes_layer3_A2 + 1) // 2

# + id="lB6J98OSURsL"
# Create the Sequential model instance
nn_A2 = Sequential()

# + id="EX5HLuh9URsL" colab={"base_uri": "https://localhost:8080/"} outputId="8da24db7-8034-4f80-c9b4-85a0b25bc869"
# First hidden layer
nn_A2.add(Dense(units=hidden_nodes_layer1_A2, input_dim=number_input_features, activation="relu"))
nn_A2.add(Dense(units=hidden_nodes_layer2_A2, activation="relu"))
nn_A2.add(Dense(units=hidden_nodes_layer3_A2, activation="relu"))
nn_A2.add(Dense(units=hidden_nodes_layer4_A2, activation="relu"))

# Output layer (single sigmoid unit for binary classification)
nn_A2.add(Dense(1, activation="sigmoid"))

# Check the structure of the model
nn_A2.summary()

# + id="JrI6zrIRURsL"
# Compile the model
nn_A2.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

# + id="8M3fXVn_URsL" colab={"base_uri": "https://localhost:8080/"} outputId="f8ac258c-2b22-4cbd-e70b-e9e7f4e0f5ca"
# Fit the model
fit_model_A2 = nn_A2.fit(X_train_scaled, y_train, epochs=75)

# + [markdown]
id="JkA48Z4hURsL"
# ### Step 2: After finishing your models, display the accuracy scores achieved by each model, and compare the results.

# + id="RXhF6K8SURsL" colab={"base_uri": "https://localhost:8080/"} outputId="78fa0227-8b24-403c-8a64-46b1e424d47c"
print("Original Model Results")

# Evaluate the model loss and accuracy metrics using the evaluate method and the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled, y_test, verbose=2)

# Display the model loss and accuracy results
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")

# + id="W52oclvkURsL" colab={"base_uri": "https://localhost:8080/"} outputId="e9cf47b5-8160-4ad1-e960-107342e8c40c"
print("Alternative Model 1 Results")

# Evaluate the model loss and accuracy metrics using the evaluate method and the test data
model_loss, model_accuracy = nn_A1.evaluate(X_test_scaled, y_test, verbose=2)

# Display the model loss and accuracy results
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")

# + id="4yPtU8_nURsL" colab={"base_uri": "https://localhost:8080/"} outputId="67eee2ab-8b15-4108-ebea-bf200a99de35"
print("Alternative Model 2 Results")

# Evaluate the model loss and accuracy metrics using the evaluate method and the test data
model_loss, model_accuracy = nn_A2.evaluate(X_test_scaled, y_test, verbose=2)

# Display the model loss and accuracy results
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")

# + [markdown] id="eQ3b2y7OURsM"
# ### Step 3: Save each of your alternative models as an HDF5 file.
#

# + id="a1tXiNBZURsM"
# Set the file path for the first alternative model
file_path = "alternativemodel1.h5"

# Export your model to a HDF5 file
# BUG FIX: save the FIRST ALTERNATIVE model (nn_A1); the original code
# called nn.save(...), exporting the original model under both names.
nn_A1.save(file_path)

# + id="W9KTZW35URsM"
# Set the file path for the second alternative model
file_path = "alternativemodel2.h5"

# Export your model to a HDF5 file
# BUG FIX: save the SECOND ALTERNATIVE model (nn_A2), not `nn`.
nn_A2.save(file_path)
GC_venture_funding_with_deep_learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/lionelsamrat10/machine-learning-a-to-z/blob/main/Classification%20Template%20for%20large%20datasets/Logistic%20Regression/logistic_regression_template_sam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="0MRC0e0KhQ0S" # # Logistic Regression # + [markdown] id="LWd1UlMnhT2s" # ## Importing the libraries # + id="YvGPUQaHhXfL" import numpy as np import matplotlib.pyplot as plt import pandas as pd # + [markdown] id="K1VMqkGvhc3-" # ## Importing the dataset # + id="M52QDmyzhh9s" dataset = pd.read_csv('Data.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values # + [markdown] id="YvxIPVyMhmKp" # ## Splitting the dataset into the Training set and Test set # + id="AVzJWAXIhxoC" from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # + [markdown] id="kW3c7UYih0hT" # ## Feature Scaling # + id="9fQlDPKCh8sc" from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # + [markdown] id="bb6jCOCQiAmP" # ## Training the Logistic Regression model on the Training set # + id="e0pFVAmciHQs" colab={"base_uri": "https://localhost:8080/"} outputId="f1f80339-6d7d-4edc-ae38-111d20c5f789" from sklearn.linear_model import LogisticRegression classifier = LogisticRegression(random_state = 0) classifier.fit(X_train, y_train) # + [markdown] id="h4Hwj34ziWQW" # ## Making the Confusion Matrix # + id="D6bpZwUiiXic" colab={"base_uri": "https://localhost:8080/"} outputId="28e0c69b-ea40-4f4a-cae2-d9cd8c1be777" from sklearn.metrics 
import confusion_matrix, accuracy_score y_pred = classifier.predict(X_test) cm = confusion_matrix(y_test, y_pred) print(cm) accuracy_score(y_test, y_pred)
Classification Template for large datasets/Logistic Regression/logistic_regression_template_sam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/agemagician/CodeTrans/blob/main/prediction/multitask/fine-tuning/api%20generation/small_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="c9eStCoLX0pZ" # **<h3>Generate the api based on the description using codeTrans multitask finetuning model</h3>** # <h4>You can make free prediction online through this # <a href="https://huggingface.co/SEBIS/code_trans_t5_small_api_generation_multitask_finetune">Link</a></h4> (When using the prediction online, you need to parse and tokenize the code first.) # + [markdown] id="6YPrvwDIHdBe" # **1. Load necessry libraries including huggingface transformers** # + colab={"base_uri": "https://localhost:8080/"} id="6FAVWAN1UOJ4" outputId="24aeb4c5-3977-4fc2-f91a-bad2dd1ae2fc" # !pip install -q transformers sentencepiece # + id="53TAO7mmUOyI" from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline # + [markdown] id="xq9v-guFWXHy" # **2. 
Load the token classification pipeline and load it into the GPU if avilabile** # + colab={"base_uri": "https://localhost:8080/", "height": 316, "referenced_widgets": ["17a15b1e4ce548ee872b01d9de4ae16d", "8e7ff00303bc4827ae75f3acdf05defa", "5613b7e358684ab8aea6a4f14c921afc", "473ea73a24fe4ce3a659a2007221a6cc", "23628af2ea5347d39a49d78bab018728", "cfa02af6c4e64f62bde5413f0d66fac3", "8f4290b8f2384e33bbe56142d8ef7270", "884579e3e2ea4bbaac8a23e682a81d0c", "94fcf35f44fd44f7ab174beda7d993ed", "f59ef59463cb45fbad3f2003220490ea", "d42fabefe4b74b0cb283799596369ade", "233b6703241a43f3b548e663671b6eea", "3d8d44f7ca3741e69cadc07666072824", "c8d8d495650f452ba7fd0be1e97272b4", "<KEY>", "a0719041022d43688468c616fa84d7ee", "925c56c03e024d5a8b5fc6ac1bcb29ba", "0762be1a1fd449abae9ced40d71f175e", "<KEY>", "fb0a16d6ce0441b78cea910de2c304fb", "<KEY>", "ef6920661f5842aca9d9c20c62147a22", "04915a6309c14af8b65153ec64f4d307", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "166e14cc59eb418594e4d76048c17965", "<KEY>", "5ef0aef6f5de45978f88bbbe342a7995", "<KEY>", "7348412d0229454a9ce5268021b00ca7", "<KEY>", "8c01921f26284076a0bde3af4faaee7a", "<KEY>", "e2f98285b5644eed8d11d18f79b0a2c5", "c42be322a3444d419c23e2d2a3d75261", "<KEY>", "c99ba0b6e8ab4f98845713400fee4635", "<KEY>"]} id="5ybX8hZ3UcK2" outputId="03ad8480-48f1-4397-8e84-b264d5678c7f" pipeline = SummarizationPipeline( model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_small_api_generation_multitask_finetune"), tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_small_api_generation_multitask_finetune", skip_special_tokens=True), device=0 ) # + [markdown] id="hkynwKIcEvHh" # **3 Give the description for generating api, parse and tokenize it** # + id="nld-UUmII-2e" description = "parse the uses licence node of this package, if any, and returns the license definition if theres" #@param {type:"raw"} # + id="hqACvTcjtwYK" colab={"base_uri": "https://localhost:8080/"} outputId="b8e26683-7e78-4653-9d0b-1be9ee78a146" import nltk 
nltk.download('punkt')
from nltk.tokenize import word_tokenize

# + colab={"base_uri": "https://localhost:8080/"} id="fvnBXtkJBKGJ" outputId="f8153bb9-c10e-47e6-d3d4-15d276958948"
def englishTokenizer(sentence):
    """Tokenize an English sentence with NLTK, drop any token longer than
    50 characters, and return the surviving tokens joined by single spaces."""
    kept = [tok for tok in word_tokenize(sentence) if len(tok) <= 50]
    return ' '.join(kept)

tokenized_description = englishTokenizer(description)
print("tokenized description: " + tokenized_description)

# + [markdown] id="sVBz9jHNW1PI"
# **4. Make Prediction**

# + colab={"base_uri": "https://localhost:8080/"} id="KAItQ9U9UwqW" outputId="17794e48-47d9-4e88-af95-ec21c9fd3c6c"
pipeline([tokenized_description])
prediction/multitask/fine-tuning/api generation/small_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] origin_pos=0 # # Training on Multiple GPUs # :label:`sec_multi_gpu` # # So far we discussed how to train models efficiently on CPUs and GPUs. We even showed how deep learning frameworks allow one to parallelize computation and communication automatically between them in :numref:`sec_auto_para`. We also showed in :numref:`sec_use_gpu` how to list all the available GPUs on a computer using the `nvidia-smi` command. # What we did *not* discuss is how to actually parallelize deep learning training. # Instead, we implied in passing that one would somehow split the data across multiple devices and make it work. The present section fills in the details and shows how to train a network in parallel when starting from scratch. Details on how to take advantage of functionality in high-level APIs is relegated to :numref:`sec_multi_gpu_concise`. # We assume that you are familiar with minibatch stochastic gradient descent algorithms such as the ones described in :numref:`sec_minibatch_sgd`. # # # ## Splitting the Problem # # Let us start with a simple computer vision problem and a slightly archaic network, e.g., with multiple layers of convolutions, pooling, and possibly a few fully-connected layers in the end. # That is, let us start with a network that looks quite similar to LeNet :cite:`LeCun.Bottou.Bengio.ea.1998` or AlexNet :cite:`Krizhevsky.Sutskever.Hinton.2012`. # Given multiple GPUs (2 if it is a desktop server, 4 on an AWS g4dn.12xlarge instance, 8 on a p3.16xlarge, or 16 on a p2.16xlarge), we want to partition training in a manner as to achieve good speedup while simultaneously benefitting from simple and reproducible design choices. Multiple GPUs, after all, increase both *memory* and *computation* ability. 
In a nutshell, we have the following choices, given a minibatch of training data that we want to classify. # # First, we could partition the network across multiple GPUs. That is, each GPU takes as input the data flowing into a particular layer, processes data across a number of subsequent layers and then sends the data to the next GPU. # This allows us to process data with larger networks when compared with what a single GPU could handle. # Besides, # memory footprint per GPU can be well controlled (it is a fraction of the total network footprint). # # However, the interface between layers (and thus GPUs) requires tight synchronization. This can be tricky, in particular if the computational workloads are not properly matched between layers. The problem is exacerbated for large numbers of GPUs. # The interface between layers also # requires large amounts of data transfer, # such as activations and gradients. # This may overwhelm the bandwidth of the GPU buses. # Moreover, compute-intensive, yet sequential operations are nontrivial to partition. See e.g., :cite:`Mirhoseini.Pham.Le.ea.2017` for a best effort in this regard. It remains a difficult problem and it is unclear whether it is possible to achieve good (linear) scaling on nontrivial problems. We do not recommend it unless there is excellent framework or operating system support for chaining together multiple GPUs. # # # Second, we could split the work layerwise. For instance, rather than computing 64 channels on a single GPU we could split up the problem across 4 GPUs, each of which generates data for 16 channels. # Likewise, for a fully-connected layer we could split the number of output units. # :numref:`fig_alexnet_original` (taken from :cite:`Krizhevsky.Sutskever.Hinton.2012`) # illustrates this design, where this strategy was used to deal with GPUs that had a very small memory footprint (2 GB at the time). 
# This allows for good scaling in terms of computation, provided that the number of channels (or units) is not too small. # Besides, # multiple GPUs can process increasingly larger networks since the available memory scales linearly. # # ![Model parallelism in the original AlexNet design due to limited GPU memory.](../img/alexnet-original.svg) # :label:`fig_alexnet_original` # # However, # we need a *very large* number of synchronization or barrier operations since each layer depends on the results from all the other layers. # Moreover, the amount of data that needs to be transferred is potentially even larger than when distributing layers across GPUs. Thus, we do not recommend this approach due to its bandwidth cost and complexity. # # Last, we could partition data across multiple GPUs. This way all GPUs perform the same type of work, albeit on different observations. Gradients are aggregated across GPUs after each minibatch of training data. # This is the simplest approach and it can be applied in any situation. # We only need to synchronize after each minibatch. That said, it is highly desirable to start exchanging gradients parameters already while others are still being computed. # Moreover, larger numbers of GPUs lead to larger minibatch sizes, thus increasing training efficiency. # However, adding more GPUs does not allow us to train larger models. # # # ![Parallelization on multiple GPUs. From left to right: original problem, network partitioning, layerwise partitioning, data parallelism.](../img/splitting.svg) # :label:`fig_splitting` # # # A comparison of different ways of parallelization on multiple GPUs is depicted in :numref:`fig_splitting`. # By and large, data parallelism is the most convenient way to proceed, provided that we have access to GPUs with sufficiently large memory. See also :cite:`Li.Andersen.Park.ea.2014` for a detailed description of partitioning for distributed training. 
GPU memory used to be a problem in the early days of deep learning. By now this issue has been resolved for all but the most unusual cases. We focus on data parallelism in what follows. # # ## Data Parallelism # # Assume that there are $k$ GPUs on a machine. Given the model to be trained, each GPU will maintain a complete set of model parameters independently though parameter values across the GPUs are identical and synchronized. # As an example, # :numref:`fig_data_parallel` illustrates # training with # data parallelism when $k=2$. # # # ![Calculation of minibatch stochastic gradient descent using data parallelism on two GPUs.](../img/data-parallel.svg) # :label:`fig_data_parallel` # # In general, the training proceeds as follows: # # * In any iteration of training, given a random minibatch, we split the examples in the batch into $k$ portions and distribute them evenly across the GPUs. # * Each GPU calculates loss and gradient of the model parameters based on the minibatch subset it was assigned. # * The local gradients of each of the $k$ GPUs are aggregated to obtain the current minibatch stochastic gradient. # * The aggregate gradient is re-distributed to each GPU. # * Each GPU uses this minibatch stochastic gradient to update the complete set of model parameters that it maintains. # # # # # Note that in practice we *increase* the minibatch size $k$-fold when training on $k$ GPUs such that each GPU has the same amount of work to do as if we were training on a single GPU only. On a 16-GPU server this can increase the minibatch size considerably and we may have to increase the learning rate accordingly. # Also note that batch normalization in :numref:`sec_batch_norm` needs to be adjusted, e.g., by keeping a separate batch normalization coefficient per GPU. # In what follows we will use a toy network to illustrate multi-GPU training. 
# # + origin_pos=2 tab=["pytorch"] # %matplotlib inline import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l # + [markdown] origin_pos=3 # ## [**A Toy Network**] # # We use LeNet as introduced in :numref:`sec_lenet` (with slight modifications). We define it from scratch to illustrate parameter exchange and synchronization in detail. # # + origin_pos=5 tab=["pytorch"] # Initialize model parameters scale = 0.01 W1 = torch.randn(size=(20, 1, 3, 3)) * scale b1 = torch.zeros(20) W2 = torch.randn(size=(50, 20, 5, 5)) * scale b2 = torch.zeros(50) W3 = torch.randn(size=(800, 128)) * scale b3 = torch.zeros(128) W4 = torch.randn(size=(128, 10)) * scale b4 = torch.zeros(10) params = [W1, b1, W2, b2, W3, b3, W4, b4] # Define the model def lenet(X, params): h1_conv = F.conv2d(input=X, weight=params[0], bias=params[1]) h1_activation = F.relu(h1_conv) h1 = F.avg_pool2d(input=h1_activation, kernel_size=(2, 2), stride=(2, 2)) h2_conv = F.conv2d(input=h1, weight=params[2], bias=params[3]) h2_activation = F.relu(h2_conv) h2 = F.avg_pool2d(input=h2_activation, kernel_size=(2, 2), stride=(2, 2)) h2 = h2.reshape(h2.shape[0], -1) h3_linear = torch.mm(h2, params[4]) + params[5] h3 = F.relu(h3_linear) y_hat = torch.mm(h3, params[6]) + params[7] return y_hat # Cross-entropy loss function loss = nn.CrossEntropyLoss(reduction='none') # + [markdown] origin_pos=6 # ## Data Synchronization # # For efficient multi-GPU training we need two basic operations. # First we need to have the ability to [**distribute a list of parameters to multiple devices**] and to attach gradients (`get_params`). Without parameters it is impossible to evaluate the network on a GPU. # Second, we need the ability to sum parameters across multiple devices, i.e., we need an `allreduce` function. 
# # + origin_pos=8 tab=["pytorch"] def get_params(params, device): new_params = [p.to(device) for p in params] for p in new_params: p.requires_grad_() return new_params # + [markdown] origin_pos=9 # Let us try it out by copying the model parameters to one GPU. # # + origin_pos=10 tab=["pytorch"] new_params = get_params(params, d2l.try_gpu(0)) print('b1 weight:', new_params[1]) print('b1 grad:', new_params[1].grad) # + [markdown] origin_pos=11 # Since we did not perform any computation yet, the gradient with regard to the bias parameter is still zero. # Now let us assume that we have a vector distributed across multiple GPUs. The following [**`allreduce` function adds up all vectors and broadcasts the result back to all GPUs**]. Note that for this to work we need to copy the data to the device accumulating the results. # # + origin_pos=13 tab=["pytorch"] def allreduce(data): for i in range(1, len(data)): data[0][:] += data[i].to(data[0].device) for i in range(1, len(data)): data[i][:] = data[0].to(data[i].device) # + [markdown] origin_pos=14 # Let us test this by creating vectors with different values on different devices and aggregate them. # # + origin_pos=16 tab=["pytorch"] data = [torch.ones((1, 2), device=d2l.try_gpu(i)) * (i + 1) for i in range(2)] print('before allreduce:\n', data[0], '\n', data[1]) allreduce(data) print('after allreduce:\n', data[0], '\n', data[1]) # + [markdown] origin_pos=17 # ## Distributing Data # # We need a simple utility function to [**distribute a minibatch evenly across multiple GPUs**]. For instance, on two GPUs we would like to have half of the data to be copied to either of the GPUs. # Since it is more convenient and more concise, we use the built-in function from the deep learning framework to try it out on a $4 \times 5$ matrix. 
# # + origin_pos=19 tab=["pytorch"] data = torch.arange(20).reshape(4, 5) devices = [torch.device('cuda:0'), torch.device('cuda:1')] split = nn.parallel.scatter(data, devices) print('input :', data) print('load into', devices) print('output:', split) # + [markdown] origin_pos=20 # For later reuse we define a `split_batch` function that splits both data and labels. # # + origin_pos=22 tab=["pytorch"] #@save def split_batch(X, y, devices): """Split `X` and `y` into multiple devices.""" assert X.shape[0] == y.shape[0] return (nn.parallel.scatter(X, devices), nn.parallel.scatter(y, devices)) # + [markdown] origin_pos=23 # ## Training # # Now we can implement [**multi-GPU training on a single minibatch**]. Its implementation is primarily based on the data parallelism approach described in this section. We will use the auxiliary functions we just discussed, `allreduce` and `split_and_load`, to synchronize the data among multiple GPUs. Note that we do not need to write any specific code to achieve parallelism. Since the computational graph does not have any dependencies across devices within a minibatch, it is executed in parallel *automatically*. # # + origin_pos=25 tab=["pytorch"] def train_batch(X, y, device_params, devices, lr): X_shards, y_shards = split_batch(X, y, devices) # Loss is calculated separately on each GPU ls = [loss(lenet(X_shard, device_W), y_shard).sum() for X_shard, y_shard, device_W in zip( X_shards, y_shards, device_params)] for l in ls: # Backpropagation is performed separately on each GPU l.backward() # Sum all gradients from each GPU and broadcast them to all GPUs with torch.no_grad(): for i in range(len(device_params[0])): allreduce([device_params[c][i].grad for c in range(len(devices))]) # The model parameters are updated separately on each GPU for param in device_params: d2l.sgd(param, lr, X.shape[0]) # Here, we use a full-size batch # + [markdown] origin_pos=26 # Now, we can define [**the training function**]. 
It is slightly different from the ones used in the previous chapters: we need to allocate the GPUs and copy all the model parameters to all the devices. # Obviously each batch is processed using the `train_batch` function to deal with multiple GPUs. For convenience (and conciseness of code) we compute the accuracy on a single GPU, though this is *inefficient* since the other GPUs are idle. # # + origin_pos=28 tab=["pytorch"] def train(num_gpus, batch_size, lr): train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) devices = [d2l.try_gpu(i) for i in range(num_gpus)] # Copy model parameters to `num_gpus` GPUs device_params = [get_params(params, d) for d in devices] num_epochs = 10 animator = d2l.Animator('epoch', 'test acc', xlim=[1, num_epochs]) timer = d2l.Timer() for epoch in range(num_epochs): timer.start() for X, y in train_iter: # Perform multi-GPU training for a single minibatch train_batch(X, y, device_params, devices, lr) torch.cuda.synchronize() timer.stop() # Evaluate the model on GPU 0 animator.add(epoch + 1, (d2l.evaluate_accuracy_gpu( lambda x: lenet(x, device_params[0]), test_iter, devices[0]),)) print(f'test acc: {animator.Y[0][-1]:.2f}, {timer.avg():.1f} sec/epoch ' f'on {str(devices)}') # + [markdown] origin_pos=29 # Let us see how well this works [**on a single GPU**]. # We first use a batch size of 256 and a learning rate of 0.2. # # + origin_pos=30 tab=["pytorch"] train(num_gpus=1, batch_size=256, lr=0.2) # + [markdown] origin_pos=31 # By keeping the batch size and learning rate unchanged and [**increasing the number of GPUs to 2**], we can see that the test accuracy roughly stays the same compared with # the previous experiment. # In terms of the optimization algorithms, they are identical. 
Unfortunately there is no meaningful speedup to be gained here: the model is simply too small; moreover we only have a small dataset, where our slightly unsophisticated approach to implementing multi-GPU training suffered from significant Python overhead. We will encounter more complex models and more sophisticated ways of parallelization going forward. # Let us see what happens nonetheless for Fashion-MNIST. # # + origin_pos=32 tab=["pytorch"] train(num_gpus=2, batch_size=256, lr=0.2) # + [markdown] origin_pos=33 # ## Summary # # * There are multiple ways to split deep network training over multiple GPUs. We could split them between layers, across layers, or across data. The former two require tightly choreographed data transfers. Data parallelism is the simplest strategy. # * Data parallel training is straightforward. However, it increases the effective minibatch size to be efficient. # * In data parallelism, data are split across multiple GPUs, where each GPU executes its own forward and backward operation and subsequently gradients are aggregated and results are broadcast back to the GPUs. # * We may use slightly increased learning rates for larger minibatches. # # ## Exercises # # 1. When training on $k$ GPUs, change the minibatch size from $b$ to $k \cdot b$, i.e., scale it up by the number of GPUs. # 1. Compare accuracy for different learning rates. How does it scale with the number of GPUs? # 1. Implement a more efficient `allreduce` function that aggregates different parameters on different GPUs? Why is it more efficient? # 1. Implement multi-GPU test accuracy computation. # # + [markdown] origin_pos=35 tab=["pytorch"] # [Discussions](https://discuss.d2l.ai/t/1669) #
python/d2l-en/pytorch/chapter_computational-performance/multiple-gpus.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.2 64-bit
#     name: python38264bitb71451c982004edfaaad112ea3569916
# ---

# +
# TODO: check you don't run out of resources
# -

# Basic simulation inputs (all read interactively from the user).
time = int(input('How Many years do you want your Buisnes to run? '))
location = str(input('What coutry do you live in? '))
money = int(input('How much money do you have? '))

# + tags=[]
# Let the user pick an existing job, or add a new one by typing '+'.
jobs = ['h', 'jfgdy', 'gul']
print('Jobs:', * jobs)
job = str(input('Choose a Job? (Type + to add new one) '))
while job not in jobs:
    if job != '+':
        print('This is not a job')
        job = str(input('Choose a Job? (Type + to add new one)'))
    else:
        job = str(input('What is your job? '))
        jobs.append(job)
print(job)
# find out wage for job
# -

# Ask whether to rent (1) or buy (2) a workplace, re-asking until valid.
# BUG FIX: the original condition `while place != (1 or 2)` evaluates to
# `while place != 1` because `(1 or 2)` short-circuits to 1, so answering
# 2 looped forever.  `not in (1, 2)` accepts both valid answers.
place = int(input('Are you wanting to rent a place to work(1) or buy(2)? '))
while place not in (1, 2):
    place = int(input('Are you wanting to rent a place to work(1) or buy(2)? '))

if place == 1:
    # check how many months in the time period
    # add amount of money to take away: money - 50
    # pay the money
    pass  # renting not implemented yet; `pass` keeps the block syntactically valid
if place == 2:
    # check size of place
    # calculate price
    # BUG FIX: pay by subtracting the cost; the original `money = -50`
    # overwrote the user's balance with -50 instead of charging them.
    money -= 50
    # find material used
    # add to materials

# +
# choose where to get materials from the environmental factors and what
# each material needs to be made (use NASA data)
# check the biodiversity there
# check what each organism needs to survive
# take what the resources need away from the environment
# buy materials

# see result (?how?)
# -
TakeAction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/nmningmei/Deep_learning_fMRI_EEG/blob/master/5_6_Extract_CV_representation_of_experiment_stimuli_words.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="rW16Zws8D5np" colab_type="text" # # just have to do this if the data is in google drive # + id="Zv6KAchI_5Iv" colab_type="code" outputId="255638bd-4d7a-473b-b1b5-614f1bbbfa2a" colab={"base_uri": "https://localhost:8080/", "height": 52} # Install the PyDrive wrapper & import libraries. # This only needs to be done once per notebook. # !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials # Authenticate and create the PyDrive client. # This only needs to be done once per notebook. 
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# + [markdown] id="SOmm7YN7D9TO" colab_type="text"
# # get the data from google drive

# + id="XaPIMtVNAQUo" colab_type="code" colab={}
# Download the zipped stimulus images from Google Drive by file id.
image_id = 'https://drive.google.com/open?id=1OUb5x8HMtVzVQqgbLSDnzRS0c9BcDapl'.split('id=')[-1]
downloaded = drive.CreateFile({'id': image_id})
downloaded.GetContentFile('metasema_img.zip')

# + id="3G5F_HYVA9f4" colab_type="code" colab={}
# Download the numpy array holding the stimulus word list.
word_id = 'https://drive.google.com/open?id=18nfVy-o0GWX-QKEWrKK0EKLLAltpFy4U'.split('id=')[-1]
downloaded = drive.CreateFile({'id': word_id})
downloaded.GetContentFile('word.npy')

# + [markdown] id="uJy2VpNYD_0F" colab_type="text"
# ## unzip the images

# + id="g7zl2r6IAbUY" colab_type="code" colab={}
import zipfile
zip_ref = zipfile.ZipFile('metasema_img.zip', 'r')
zip_ref.extractall('img2vec')
zip_ref.close()

# + id="ZHpiq_9iBvF5" colab_type="code" outputId="b45cd31d-e219-4b16-da5e-944ccafbe7d2" colab={"base_uri": "https://localhost:8080/", "height": 35}
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import seaborn as sns
import keras.backend as K
from glob import glob
from PIL import Image
from matplotlib import pyplot as plt
from scipy.spatial import distance
from tensorflow.keras import models,layers,regularizers,optimizers,losses,applications
from sklearn.utils import shuffle

# + id="eNM7FeWFAvDw" colab_type="code" colab={}
# Experiment constants: image folder layout is img2vec/*/*/<word>/<file>.
image_dir = 'img2vec'
words = np.load('word.npy').astype(str)
images = glob(os.path.join(image_dir, '*', '*', '*', '*.*'))
image_size = 128
batch_size = 16
image_length = len(images)
IMG_SHAPE = (image_size, image_size, 3)
label_map = {'Living_Things': [0, 1], 'Nonliving_Things': [1, 0]}

# + [markdown] id="8VdO5MB_EDBN" colab_type="text"
# # define candidate models

# + id="4lSKc6n3AwLX" colab_type="code" colab={}
candidates = [applications.VGG19, applications.DenseNet121, applications.MobileNetV2]

# + [markdown] id="C7eoyztAEE7L" colab_type="text"
# # the full process

# + id="vduIBCX1CXbM" colab_type="code" outputId="52be16f5-8fb3-4091-f9f7-085f0f5e2fc7" colab={"base_uri": "https://localhost:8080/", "height": 2895}
for model_ in candidates:
    # Feature extractor: ImageNet-pretrained conv base, global max pooling,
    # classifier head removed; frozen since we only extract features.
    base_model = model_(
        weights = 'imagenet',
        input_shape = IMG_SHAPE,
        include_top = False,
        pooling = 'max')
    base_model.trainable = False
    data = {}
    labels = []
    # NOTE: iterate with a fresh name instead of re-using `image_dir`,
    # which the original shadowed (it is the module-level folder name).
    for image_path in images:
        image_name = image_path.split('/')[-2]
        # BUG FIX: `data` is keyed by the lower-cased word, so membership
        # must be tested with the lower-cased key as well.  The original
        # tested the mixed-case `image_name`, which never matched, so the
        # list was reset on every image and only the last image per word
        # survived the averaging below.
        if image_name.lower() not in data:
            data[image_name.lower()] = []
            labels.append(image_name)
        original = Image.open(image_path)
        original = original.convert("RGB")
        # NOTE(review): Image.ANTIALIAS is deprecated in Pillow >= 9 (use
        # Image.LANCZOS); kept here for the pinned Colab environment.
        resized = np.array(original.resize((image_size, image_size), Image.ANTIALIAS))
        # BUG FIX: 8-bit pixel values span 0..255, so normalize by 255
        # (the original divided by 225).
        resized = resized / 255.
        pred = base_model.predict(resized[np.newaxis])
        data[image_name.lower()].append(np.squeeze(pred))
    # Average the feature vectors over all images of each word.
    for name in words:
        data[name] = np.mean(data[name], 0)
    preds_mean = np.array([data[name] for name in words])
    # RDM: pairwise cosine distance of the row-demeaned feature vectors.
    corr = distance.squareform(
        distance.pdist(
            preds_mean - preds_mean.mean(1).reshape(-1, 1),
            metric = 'cosine',))
    np.fill_diagonal(corr, np.nan)
    fig, ax = plt.subplots(figsize = (16, 16))
    im = ax.imshow(
        corr,
        origin = 'lower',
        cmap = plt.cm.coolwarm,
        vmin = 0,
        vmax = 1.,
        alpha = 0.9,
        )
    ax.set(xticks = np.arange(36),
           yticks = np.arange(36),
           title = f'Metasema, image2vec RDM\n10 images per word\nimage resize to {image_size} by {image_size}, pretrained model: {base_model.name}')
    ax.set_xticklabels(words, rotation = 45)
    ax.set_yticklabels(words)
    ax.axhline(35/2, linestyle='--', alpha=1., color='black')
    ax.axvline(35/2, linestyle='--', alpha=1., color='black')
    plt.colorbar(im)

# + id="Xo_qGdc3DCfR" colab_type="code" colab={}
5_6_Extract_CV_representation_of_experiment_stimuli_words.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# https://www.kaggle.com/riteshkrjha/riiid-quick-tabnet-vs-xgboost

# +
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score, roc_curve, classification_report
import tensorflow as tf

from LorisBallsBasedModel import *
# -

# Load only the columns needed for training; compact dtypes keep the large
# riiid train file in memory.  `prior_question_had_explanation` is read as
# pandas' nullable 'boolean' dtype (True / False / <NA>).
train = pd.read_csv('./data/riid/train.csv',
                    usecols=[1, 2, 3, 4, 5, 7, 8, 9],
                    dtype={'timestamp': 'int64',
                           'user_id': 'int32',
                           'content_id': 'int16',
                           'content_type_id': 'int8',
                           'task_container_id': 'int16',
                           'answered_correctly': 'int8',
                           'prior_question_elapsed_time': 'float32',
                           'prior_question_had_explanation': 'boolean'}
                    )

# +
# Remove lectures and additional processing
train = train[train.content_type_id == False]
train = train.sort_values(['timestamp'], ascending=True)
train.drop(['timestamp', 'content_type_id'], axis=1, inplace=True)
# -

# Read Questions and Lectures
questions = pd.read_csv('./data/riid/questions.csv')
lectures = pd.read_csv('./data/riid/lectures.csv')

# Merge train with Questions
train = pd.merge(train, questions, left_on='content_id', right_on='question_id', how='left')

# Indicator for first question in a batch
train['firstQindicator'] = np.where(train['prior_question_elapsed_time'].isnull(), 1, 0)
train['prior_question_elapsed_time'] = np.where(train['prior_question_elapsed_time'].isnull(), 0, train['prior_question_elapsed_time'])

train.head()

# Remove unused columns
del train['question_id']
del train['bundle_id']
del train['correct_answer']
del train['tags']

import gc
gc.collect()

# BUG FIX: the column was read with the nullable 'boolean' dtype, so it
# holds the *booleans* True/False (and <NA>), never the string 'True'.
# The original `astype(object)` + `== 'True'` comparison was therefore
# always False and silently zeroed the whole feature.  Map True -> 1 and
# False/<NA> -> 0 directly on the boolean column instead.
train.prior_question_had_explanation = (
    train.prior_question_had_explanation.fillna(False).astype('int8'))

# Sample 5M records
train = train.sample(n=5000000) # train test split xtrain, xvalid, ytrain, yvalid = train_test_split(train.drop(['answered_correctly'], axis=1), train['answered_correctly'], random_state=42, test_size=0.2, shuffle=True) # + # Train LorisBallsBasedModel LBBM = LorisBallsBasedModel(nbr_steps=5, first_step_args={'attentive_transformer': FirstAttentiveTransformer, 'attentive_transformer_params_dict': {'dropout_rate': 0., 'regularizer': tf.keras.regularizers.L1(0.), 'entropy_weight': 0., }, 'features_outputs_units': 16, 'features_pass_next_step_units': 4, }, step_args={'attentive_transformer': AttentiveTransformer, 'attentive_transformer_params_dict': {'gamma': 1., 'dropout_rate': 0., 'regularizer': tf.keras.regularizers.L1(0.), 'entropy_weight': 0., }, 'features_outputs_units': 16, 'features_pass_next_step_units': 4, 'prior_outputs_units': 4, }, output_layer=tf.keras.layers.Dense(1, 'sigmoid'), input_processing_layer=tf.keras.layers.BatchNormalization(), ) LBBM.compile(loss=tf.keras.losses.binary_crossentropy, optimizer=tf.keras.optimizers.Adam(), metrics=['acc']) train_tensor = tf.data.Dataset.from_tensor_slices((xtrain.values, ytrain.values)).batch(10000) import math def step_decay(epoch): initial_lrate = 0.02 drop = 0.5 epochs_drop = 2. lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop)) return lrate lr_scheduler = tf.keras.callbacks.LearningRateScheduler(step_decay) history = LBBM.fit(train_tensor, epochs=12, callbacks=[lr_scheduler], verbose=1) # - LBBM.masks_explain(tf.convert_to_tensor(xvalid.values[:1])) p = LBBM.predict(xvalid.values).flatten().round().astype(int) print('\t\t\tCLASSIFICATIION METRICS: LBBM\n') print(classification_report(yvalid, p)) score = roc_auc_score(yvalid, p) print('ROC value is: {}'.format(score))
test_LorisBallsBasedModel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sparse Relaxations to Binary Random Variables # In this notebook we present sparse relaxations to binary random variables. These are based on relaxing random outcomes from $\{0, 1\}$ to $[0, 1]$ while giving $\{0\}$ and $\{1\}$ discrete treatment. This enables a differentiable reparameterisation that is an unbiased alternative to the biased straight-through estimator. # %load_ext autoreload # %autoreload 2 # Helper code to plot # * samples # * pdf # * cdf # for distributions over $(\text{lower}, \text{upper}) \supset (0, 1)$ def plot(dist, n=10000, bins=40, step=0.001, lower=-0.2, upper=1.2, masses=[], vlines=[0., 1.], color='blue', title='summary'): plt.subplots_adjust(wspace=0.5, hspace=0.8) plt.suptitle(title) # samples plt.subplot(1, 3, 1) x = dist.sample(sample_shape=torch.Size([n])).reshape(-1).numpy() _ = plt.hist(x, bins, density=False, color=color) for value in vlines: plt.axvline(value, color='black', linestyle='dashed', linewidth=1) plt.xlim(left=lower) plt.xlim(right=upper) plt.title("samples") # pdf plt.subplot(1, 3, 2) x = np.arange(lower + step, upper, step) y = np.exp(dist.log_prob(torch.from_numpy(x).float()).numpy()) plt.plot(x, y, color=color, linestyle='', marker='.', markersize=0.2) for value in vlines: plt.axvline(value, color='black', linestyle='dashed', linewidth=1) plt.xlim(left=lower) plt.xlim(right=upper) if masses: y_masses = np.exp(dist.log_prob(torch.from_numpy(np.array(masses)).float()).numpy()) plt.bar(masses, y_masses, width=0.05, align='center', color=color) # plt.bar(masses, [dist.p0.numpy(), dist.p1.numpy()], width=0.05, align='center', color=color) plt.title("pdf") # cdf plt.subplot(1, 3, 3) y = dist.cdf(torch.from_numpy(x).float()).numpy() plt.plot(x, y, color=color, linestyle='', marker='.', 
markersize=0.2) for value in vlines: plt.axvline(value, color='black', linestyle='dashed', linewidth=1) plt.xlim(left=lower) plt.xlim(right=upper) if masses: y_masses = dist.cdf(torch.from_numpy(np.array(masses)).float()).numpy() plt.plot(masses, y_masses, color=color, linestyle='', marker='x', markersize=5) plt.title("cdf") import numpy as np import torch from matplotlib import pyplot as plt from torch.distributions import Uniform from probabll.distributions import Kumaraswamy, Stretched, Truncated01, Rectified01, MixtureD01C01 # # Stretch and Rectify # # Let's start with a Kumaraswamy distribution a, b = 0.5, 0.5 K = Kumaraswamy(a=torch.full([1], a), b=torch.full([1], b)) plot(K, title=f"Kuma({a}, {b})") # We can then stretch it to a support larger than $(0, 1)$ SK = Stretched(K) plot(SK, title=f"Stretched({a}, {b})") # We can also truncate this stretched distribution back to $(0, 1)$, this requires renormalising the truncated curve to obtain a proper pdf, which we do. TSK = Truncated01(SK) plot(TSK, title=f"Truncated({a}, {b})") # Instead of truncating, we can *rectify* the stretched distribution using a hard-sigmoid. This is quite different. Truncation simply disregards samples outside $(0, 1)$, whereas rectification maps those samples to either $0$ or $1$. The result is a distribution function that mixes a delta function at $0$, another at $1$, and a pdf over $(0, 1)$. The weight of $\delta(x)$ is given by how much of the stretched curve covers the negative support. Similarly, the weight of $\delta(1-x)$ is given by how much of the stretched curve covers the support beyond $1$. # Finally, the continuous distribution in the middle, i.e. over $(0, 1)$, is a density obtained by renormalising the truncated curve (see why we needed that?!). RSK = Rectified01(SK) plot(RSK, masses=[0., 1.], title=f"Rectified01({a}, {b})") # This abstract is very powerful! 
For as long as we know the cdf and the inverse cdf of a distribution that includes $(0, 1)$ in its support, we can obtain rectified distributions as presented above. # # Torch has efficient and differentiable algorithms for the Gaussian cdf and inverse cdf, thus we can use a standard Normal, for example, as base for rectification. from torch.distributions import Normal # If we truncated to $(0, 1)$ a Gaussian centered at $0.5$ with a large variance, we will see that the pdf is close to uniform. plot(Truncated01(Normal(torch.full([1], 0.5), torch.full([1], 10))), title="T-Normal(0.5, 10)") # Let's rectify $\mathcal N(0.5, 10)$ and see what happens. RN = Rectified01(Normal(torch.full([1], 0.5), torch.full([1], 10))) plot(RN, masses=[0., 1.], title="Rectified-Gaussian(0.5, 10)") # This is very close to assigning $0.5$ to each point mass with some negligible mass roughly uniformly spread over $(0,1)$. That looks like a cool prior, doesn't it? # Here is another way to get similar distribution, without the complicated Gaussian cdf and icdf. RU = Rectified01(Uniform(torch.full([1], -1.), torch.full([1], 2.))) plot(RU, masses=[0., 1.], title="Rectified-Uniform(-1, 2.)") # and note that in this case the point masses depend on how far we push the support of the Uniform. We could even have the masses differ in importance. # # Direct Parameterisation # # Another way to obtain sparse relaxations is to directly parameterise a mixture. Something like: # # \begin{align} # p(x|a, f) &= a_0 \delta(x) + a_1 \delta(1-x) + \underbrace{(1 - a_0 - a_1)}_{\overset{\text{def}}{=} a_c} f(x) \mathbb 1_{(0,1)}(x) # \end{align} # # where # # * $f(x)$ is a *properly normalised* density over $(0, 1)$ # * $\mathbb 1_{(0,1)}(x)$ is the indicator function # # Note that *rectified distributions* are members of this family too, howerver, in that case $a_0$, $a_1$, $a_c$, and $f(x)$ all depend on how mass distributes over some support $(l, u) \supset (0, 1)$ as governed by a base density $\pi(x)$. 
In that case we have # # \begin{align} # a_0 &= \int_{-\infty}^0 \pi(x) \text{d}x \\ # a_1 &= \int_{1}^\infty \pi(x) \text{d}x \\ # a_c &= \int_{0}^1 \pi(x) \text{d}x \\ # f(x) &= \frac{\pi(x)\mathbb 1_{(0,1)}(x)}{a_c} # \end{align} # Let's start by mixing deltas with uniform and controlling the weights of the deltas to get the shape we want: MU = MixtureD01C01( cont=Uniform(torch.zeros(1), torch.ones(1)), probs=torch.from_numpy(np.array([0.2, 0.3, 0.5])).float() ) plot(MU, masses=[0., 1.], title="M-Uniform") # We can also mix with a Kumaraswamy. One big difference here is that the Kumaraswamy has shapes that promote getting arbitrarily close to $0$ or $1$ without ever touching those values (note how the pdf will attain large values close to $0$ and $1$ in the example below). MK = MixtureD01C01( cont=Kumaraswamy(a=torch.full([1], 0.5), b=torch.full([1], 0.5)), probs=torch.from_numpy(np.array([0.2, 0.3, 0.5])).float() ) plot(MK, masses=[0., 1.], title="M-Kuma") # # KL Divergence # # Let's consider two distributions in this family of sparse relaxations to binary distributions # # \begin{align} # p(x|a, f) &= a_0 \delta(x) + a_1 \delta(1-x) + \underbrace{(1 - a_0 - a_1)}_{\overset{\text{def}}{=} a_c} f(x) \mathbb 1_{(0,1)}(x) \\ # q(x|b, g) &= b_0 \delta(x) + b_1 \delta(1-x) + \underbrace{(1 - b_0 - b_1)}_{\overset{\text{def}}{=} b_c} g(x) \mathbb 1_{(0, 1)}(x) # \end{align} # # and compute the KL between them. # # # Then # \begin{align} # &\text{KL}(p(x|a, f) || q(x|b, g)) \\ # &= a_0 \log \frac{a_0}{b_0} \\ # &+ a_1 \log \frac{a_1}{b_1} \\ # &+ a_c \log \frac{a_c}{b_c} \\ # &+ a_c \int_0^1 f(x) \log \frac{f(x)}{g(x)} \text{d}x # \end{align} # # We have some helper code to debug KL/entropy implementations. The idea is to compare to simpler MC estimates. from probabll.distributions.utils import mc_kl # When training our models, we generally estimate KL with a single sample, but we do so in mini-batches full of training instances and over many epochs. 
Thus I am going to get a sample mean of the single-sample estimate based on $1000$ simulations. mc_kl(MK, MU, 100000), np.mean([MK.kl(MU).numpy() for _ in range(1000)]) # Also, we could have exploited the fact that we know the Kumaraswamy entropy: mc_kl(MK, MU, 100000), np.mean([MK.kl(MU, exact_entropy=True).numpy() for _ in range(1000)]) # Recall I pointed out that rectified distributions are in the same family, we can estimate KL for them too: mc_kl(RSK, RN, 100000), np.mean([RSK.kl(RN).numpy() for _ in range(1000)]) mc_kl(RSK, RU, 100000), np.mean([RSK.kl(RU).numpy() for _ in range(1000)]) # And we can even mix and match ;) mc_kl(RSK, MK, 100000), np.mean([RSK.kl(MK).numpy() for _ in range(1000)]) mc_kl(RSK, MU, 100000), np.mean([RSK.kl(MU).numpy() for _ in range(1000)])
notebooks/Sparse Binary Relaxations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/tutorials/W2D2_ModernConvnets/student/W2D2_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # - # # (Bonus) Tutorial 2: Facial recognition using modern convnets # # **Week 2, Day 2: Modern Convnets** # # **By Neuromatch Academy** # # __Content creators:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (based on an initial version by <NAME>) # # __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # __Content editors:__ <NAME>, <NAME> # # __Production editors:__ <NAME>, <NAME>, <NAME>, <NAME> # **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** # # <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> # --- # # Tutorial Objectives # # In this tutorial you will learn about: # # 1. An application of modern CNNs in facial recognition. # 2. Ethical aspects of facial recognition. 
# + cellView="form" # @title Tutorial slides # @markdown These are the slides for the videos in this tutorial # @markdown If you want to download locally the slides, click [here](https://osf.io/4r2dp/download) from IPython.display import IFrame IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/4r2dp/?direct%26mode=render%26action=download%26mode=render", width=854, height=480) # - # --- # # Setup # + cellView="form" # @title Install dependencies # @markdown Install facenet - a model used to do facial recognition # !pip install facenet-pytorch --quiet # !pip install Pillow --quiet # + # Imports import glob import torch import numpy as np import sklearn.decomposition import matplotlib.pyplot as plt from PIL import Image from torchvision import transforms from torchvision.utils import make_grid from torchvision.datasets import ImageFolder from facenet_pytorch import MTCNN, InceptionResnetV1 # + cellView="form" # @title Set random seed # @markdown Executing `set_seed(seed=seed)` you are setting the seed # for DL its critical to set the random seed so that students can have a # baseline to compare their results to expected results. # Read more here: https://pytorch.org/docs/stable/notes/randomness.html # Call `set_seed` function in the exercises to ensure reproducibility. import random import torch def set_seed(seed=None, seed_torch=True): if seed is None: seed = np.random.choice(2 ** 32) random.seed(seed) np.random.seed(seed) if seed_torch: torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True print(f'Random seed {seed} has been set.') # In case that `DataLoader` is used def seed_worker(worker_id): worker_seed = torch.initial_seed() % 2**32 np.random.seed(worker_seed) random.seed(worker_seed) # + cellView="form" # @title Set device (GPU or CPU). Execute `set_device()` # especially if torch modules used. 
# inform the user if the notebook uses GPU or CPU. def set_device(): device = "cuda" if torch.cuda.is_available() else "cpu" if device != "cuda": print("WARNING: For this notebook to perform best, " "if possible, in the menu under `Runtime` -> " "`Change runtime type.` select `GPU` ") else: print("GPU is enabled in this notebook.") return device # - SEED = 2021 set_seed(seed=SEED) DEVICE = set_device() # --- # # Section 1: Face Recognition # # *Time estimate: ~12mins* # ## Section 1.1: Download and prepare the data # + cellView="form" # @title Download Data import requests, zipfile, io, os # original link: https://github.com/ben-heil/cis_522_data.git url = 'https://osf.io/2kyfb/download' fname = 'faces' if not os.path.exists(fname+'zip'): print("Data is being downloaded...") r = requests.get(url, stream=True) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall() print("The download has been completed.") else: print("Data has already been downloaded.") # + cellView="form" # @title Video 1: Face Recognition using CNNs from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f"BV17B4y1K7WV", width=854, height=480, fs=1) print("Video available at https://www.bilibili.com/video/{0}".format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f"jJqEv8hpRa4", width=854, height=480, fs=1, rel=0) print("Video available at https://youtube.com/watch?v=" + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # One application of large CNNs is **facial recognition**. 
The problem formulation in facial recognition is a little different from the image classification we've seen so far. In facial recognition, we don't want to have a fixed number of individuals that the model can learn. If that were the case then to learn a new person it would be necessary to modify the output portion of the architecture and retrain to account for the new person. # # Instead, we train a model to learn an **embedding** where images from the same individual are close to each other in an embedded space, and images corresponding to different people are far apart. When the model is trained, it takes as input an image and outputs an embedding vector corresponding to the image. # # To achieve this, facial recognitions typically use a **triplet loss** that compares two images from the same individual (i.e., "anchor" and "positive" images) and a negative image from a different individual (i.e., "negative" image). The loss requires the distance between the anchor and negative points to be greater than a margin $\alpha$ + the distance between the anchor and positive points. # ## Section 1.2: View and transform the data # # A well-trained facial recognition system should be able to map different images of the same individual relatively close together. We will load 15 images of three individuals (maybe you know them - then you can see that your brain is quite well in facial recognition). # # After viewing the images, we will transform them: MTCNN ([github repo](https://github.com/ipazc/mtcnn)) detects the face and crops the image around the face. Then we stack all the images together in a tensor. 
# + cellView="form"
# @title Display Images
# @markdown Here are the source images of <NAME>, <NAME>, and <NAME>
train_transform = transforms.Compose((transforms.Resize((256, 256)),
                                      transforms.ToTensor()))

face_dataset = ImageFolder('faces', transform=train_transform)
image_count = len(face_dataset)
face_loader = torch.utils.data.DataLoader(face_dataset, batch_size=45,
                                          shuffle=False)
dataiter = iter(face_loader)
# BUG FIX: use the builtin next() -- the `.next()` method was removed from
# PyTorch DataLoader iterators (Python 3 iterators define __next__, and
# the private `next` alias is gone in recent torch releases).
images, labels = next(dataiter)

# show images
plt.figure(figsize=(15, 15))
plt.imshow(make_grid(images, nrow=15).permute(1, 2, 0))
plt.axis('off')
plt.show()

# + cellView="form"
# @title Image Preprocessing Function

def process_images(image_dir: str, size: int = 256):
    """
    This function returns two tensors for the given image dir: one usable
    for inputting into the facenet model, and one that is [0,1] scaled for
    visualizing

    Parameters:
        image_dir: The glob corresponding to images in a directory
        size: Side length in pixels of the square face crop MTCNN produces.

    Returns:
        model_tensor: A image_count x channels x height x width tensor
            scaled to between -1 and 1, with the faces detected and
            cropped to the center using mtcnn
        display_tensor: A transformed version of the model tensor scaled
            to between 0 and 1
    """
    mtcnn = MTCNN(image_size=size, margin=32)

    images = []
    for img_path in glob.glob(image_dir):
        img = Image.open(img_path)
        # Detect the face and return a normalized, cropped tensor.
        img_cropped = mtcnn(img)
        images.append(img_cropped)

    model_tensor = torch.stack(images)
    # Shift the roughly [-1, 1] model tensor into [0, 1] for plotting.
    display_tensor = model_tensor / (model_tensor.max() * 2)
    display_tensor += .5

    return model_tensor, display_tensor


# -

# Now that we have our images loaded, we need to preprocess them. To make the images easier for the network to learn, we crop them to include just faces.
# + bruce_tensor, bruce_display = process_images('faces/bruce/*.jpg') neil_tensor, neil_display = process_images('faces/neil/*.jpg') pam_tensor, pam_display = process_images('faces/pam/*.jpg') tensor_to_display = torch.cat((bruce_display, neil_display, pam_display)) plt.figure(figsize=(15, 15)) plt.imshow(make_grid(tensor_to_display, nrow=15).permute(1, 2, 0)) plt.axis('off') plt.show() # - # ## Section 1.3: Embedding with a pretrained network # # We load a pretrained facial recognition model called [FaceNet](https://github.com/timesler/facenet-pytorch). It was trained on the [VGGFace2](https://github.com/ox-vgg/vgg_face2) dataset which contains 3.31 million images of 9131 individuals. # # We use the pretrained model to calculate embeddings for all of our input images. resnet = InceptionResnetV1(pretrained='vggface2').eval().to(DEVICE) # Calculate embedding resnet.classify = False bruce_embeddings = resnet(bruce_tensor.to(DEVICE)) neil_embeddings = resnet(neil_tensor.to(DEVICE)) pam_embeddings = resnet(pam_tensor.to(DEVICE)) # ### Think! 1.3: Embedding vectors # # We want to understand what happens when the model receives an image and returns the corresponding embedding vector. # # - What are the height, width and number of channels of one input image? # - What are the dimensions of one stack of images (e.g. bruce_tensor)? # - What are the dimensions of the corresponding embedding (e.g. bruce_embeddings)? # - What would be the dimensions of the embedding of one input image? # # # **Hints:** # - You can double click on a variable name and hover over it to see the dimensions of tensors. # - You do not have to answer the questions in the order they are asked. 
# + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W2D2_ModernConvnets/solutions/W2D2_Tutorial2_Solution_bbe072ff.py) # # # - # We cannot show 512-dimensional vectors visually, but using **Principal Component Analysis (PCA)** we can project the 512 dimensions onto a 2-dimensional space while preserving the maximum amount of data variation possible. This is just a visual aid for us to understand the concept. Note that if you would like to do any calculation, like distances between two images, this would be done with the whole 512-dimensional embedding vectors. # + embedding_tensor = torch.cat((bruce_embeddings, neil_embeddings, pam_embeddings)).to(device='cpu') pca = sklearn.decomposition.PCA(n_components=2) pca_tensor = pca.fit_transform(embedding_tensor.detach().cpu().numpy()) # - num = 15 categs = 3 colors = ['blue', 'orange', 'magenta'] labels = ['<NAME>', '<NAME>', '<NAME>'] markers = ['o', 'x', 's'] plt.figure(figsize=(8, 8)) for i in range(categs): plt.scatter(pca_tensor[i*num:(i+1)*num, 0], pca_tensor[i*num:(i+1)*num, 1], c=colors[i], marker=markers[i], label=labels[i]) plt.legend() plt.title('PCA Representation of the Image Embeddings') plt.xlabel('PC 1') plt.ylabel('PC 2') plt.show() # Great! The images corresponding to each individual are separated from each other in the embedding space! # # If <NAME> wants to unlock his phone with facial recognition, the phone takes the image from the camera, calculates the embedding and checks if it is close to the registered embeddings corresponding to <NAME>. # --- # # Section 2: Ethics – bias/discrimination due to pre-training datasets # # *Time estimate: ~19mins* # Popular facial recognition datasets like VGGFace2 and CASIA-WebFace consist primarily of caucasian faces. 
# As a result, even state of the art facial recognition models [substantially underperform](https://openaccess.thecvf.com/content_ICCV_2019/papers/Wang_Racial_Faces_in_the_Wild_Reducing_Racial_Bias_by_Information_ICCV_2019_paper.pdf) when attempting to recognize faces of other races.
#
# Given the implications that poor model performance can have in fields like security and criminal justice, it's very important to be aware of these limitations if you're going to be building facial recognition systems.
#
# In this example we will work with a small subset from the [UTKFace](https://susanqq.github.io/UTKFace/) dataset with 49 pictures of black women and 49 picture of white women. We will use the same pretrained model as in Section 8 of Tutorial 1, see and discuss the consequences of the model being trained on an imbalanced dataset.

# + cellView="form"
# @title Video 2: Ethical aspects
from ipywidgets import widgets

out2 = widgets.Output()
with out2:
  from IPython.display import IFrame

  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili embedded-player URL
    # from a video id and page number.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id = id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)

  video = BiliVideo(id=f"BV1Jo4y1Q7K3", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)

out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo

  video = YouTubeVideo(id=f"vYilJV3PqUM", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)

out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')

display(out)
# -

# ## Section 2.1: Download the Data

# + cellView="form"
# @title Run this cell to get the data

# original link: https://github.com/richardvogg/face_sample.git
url = 'https://osf.io/36wyh/download'
fname = 'face_sample2'

# BUG FIX: the original condition was `os.path.exists(fname + 'zip')`, i.e. it
# looked for a file literally named 'face_sample2zip'. No such file is ever
# written — the archive is extracted straight from an in-memory buffer — so
# the data was re-downloaded on every run. Checking for the extracted folder
# itself restores the intended download-once behaviour.
if not os.path.exists(fname):
  print("Data is being downloaded...")
  r = requests.get(url, stream=True)
  z = zipfile.ZipFile(io.BytesIO(r.content))
  z.extractall()
  print("The download has been completed.")
else:
  print("Data has already been downloaded.")
# -

# ## Section 2.2: Load, view and transform the data

# Glob patterns follow the UTKFace naming scheme; `process_images` is a
# project helper defined earlier in this notebook.
black_female_tensor, black_female_display = process_images('face_sample2/??_1_1_*.jpg', size=150)
white_female_tensor, white_female_display = process_images('face_sample2/??_1_0_*.jpg', size=150)

# We can check the dimensions of these tensors and see that for each group we have images of size $150 \times 150$ and three channels (RGB) of 49 individuals.
#
# **Note:** Originally, the size of images was $200 \times 200$, but due to RAM resources, we have reduced it. You can change it back, i.e., `size=200`.

print(white_female_tensor.shape)
print(black_female_tensor.shape)

# + cellView="form"
# @title Visualize some example faces
tensor_to_display = torch.cat((white_female_display[:15], black_female_display[:15]))

plt.figure(figsize=(12, 12))
plt.imshow(make_grid(tensor_to_display, nrow = 15).permute(1, 2, 0))
plt.axis('off')
plt.show()
# -

# ## Section 2.3: Calculate embeddings
#
# We use the same pretrained facial recognition network as in section 8 to calculate embeddings. If you have memory issues running this part, go to `Edit > Notebook settings` and check if GPU is selected as `Hardware accelerator`. If this does not help you can restart the notebook, go to `Runtime -> Restart runtime`.

# Switch the network to embedding mode (no classification head) and embed
# both groups; `resnet` and `DEVICE` are defined earlier in the notebook.
resnet.classify = False
black_female_embeddings = resnet(black_female_tensor.to(DEVICE))
white_female_embeddings = resnet(white_female_tensor.to(DEVICE))

# We will use the embeddings to show that the model was trained on an imbalanced dataset. For this, we are going to calculate a distance matrix of all combinations of images, like in this small example with $n=3$ (in our case $n=98$).
# # <img height=500 src=https://raw.githubusercontent.com/richardvogg/face_sample/main/04_DistanceMatrix.png> # # Calculate the distance between each pair of image embeddings in our tensor and visualize all the distances. Remember that two embeddings are vectors and the distance between two vectors is the Euclidean distance. # + cellView="form" # @title Function to calculate pairwise distances # @markdown [`torch.cdist`](https://pytorch.org/docs/stable/generated/torch.cdist.html) is used def calculate_pairwise_distances(embedding_tensor): """ This function calculates the distance between each pair of image embeddings in a tensor using the `torch.cdist`. Parameters: embedding_tensor : torch.Tensor A num_images x embedding_dimension tensor Returns: distances : torch.Tensor A num_images x num_images tensor containing the pairwise distances between each to image embedding """ distances = torch.cdist(embedding_tensor, embedding_tensor) return distances # + cellView="form" # @title Visualize the distances embedding_tensor = torch.cat((black_female_embeddings, white_female_embeddings)).to(device='cpu') distances = calculate_pairwise_distances(embedding_tensor) plt.figure(figsize=(8, 8)) plt.imshow(distances.detach().cpu().numpy()) plt.annotate('Black female', (2, -0.5), fontsize=20, va='bottom') plt.annotate('White female', (52, -0.5), fontsize=20, va='bottom') plt.annotate('Black female', (-0.5, 45), fontsize=20, rotation=90, ha='right') plt.annotate('White female', (-0.5, 90), fontsize=20, rotation=90, ha='right') cbar = plt.colorbar() cbar.set_label('Distance', fontsize=16) plt.axis('off') plt.show() # - # ## Exercise 2.1 # # What do you observe? The faces of which group are more similar to each other for the Face Detection algorithm? 
# + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W2D2_ModernConvnets/solutions/W2D2_Tutorial2_Solution_866d0881.py) # # # - # ## Exercise 2.2 # - What does it mean in real life applications that the distance is smaller between the embeddings of one group? # - Can you come up with example situations/applications where this has a negative impact? # - What could you do to avoid these problems? # + [markdown] colab_type="text" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W2D2_ModernConvnets/solutions/W2D2_Tutorial2_Solution_9a053438.py) # # # - # Lastly, to show the importance of the dataset which you use to pretrain your model, look at how much space white men and women take in different embeddings. FairFace is a dataset which is specifically created with completely balanced classes. The blue dots in all visualizations are white male and white female. # <img src=https://i.imgur.com/hCdCBOa.png> # # Adopted from [Kärkkäinen and Joo, 2019, arXiv](https://arxiv.org/abs/1908.04913) # --- # # Section 3: Within Sum of Squares # # *Time estimate: ~10mins* # # We can try to put this observation in numbers. For this we work with the embeddings. # We want to calculate the centroid of each group, which is the average of the 49 embeddings of the group. As each embedding vector has a dimension of 512, the centroid will also have this dimension. # # Now we can calculate how far away the observations $x$ of each group $S_i$ are from the centroid $\mu_i$. This concept is known as Within Sum of Squares (WSS) from cluster analysis. # # \begin{equation} # \text{WSS} = \sum_{x\in S_i} ||x - \mu_i||^2 # \end{equation} # # where $|| \cdot ||$ is the Euclidean norm. # # The Within Sum of Squares (WSS) is a number which measures this variability of a group in the embedding space. 
If all embeddings of one group were very close to each other, the WSS would be very small. In our case we see that the WSS for the black females is much smaller than for the white females. This means that it is much harder for the model to distinguish two black females than to distinguish two white females. The WSS complements the observation from the distance matrix, where we observed overall smaller pairwise distances between black females.

# + cellView="form"
# @title Function to calculate WSS

def wss(group):
  """
  Return the Within Sum of Squares (WSS): the sum of squared Euclidean
  distances of the N row vectors of a group tensor (N x K) to their
  centroid (1 x K).

  Args:
    group: A image_count x embedding_size tensor

  Returns:
    sum_sq: A scalar tensor with the sum of squared distances.

  Hints:
    - to calculate the centroid, torch.mean() will be of use.
    - We need the mean of the N=49 observations. If our input tensor is of
      size N x K, we expect the centroid to be of dimensions 1 x K. Use the
      axis argument within torch.mean
  """
  # Mean over the N rows -> one K-dimensional centroid.
  centroid = torch.mean(group, axis=0)
  # Euclidean distance of every row to the centroid (view broadcasts 1 x K).
  distance = torch.linalg.norm(group - centroid.view(1, -1), axis=1)
  # WSS is the sum of the squared per-row distances.
  sum_sq = torch.sum(distance**2)

  return sum_sq


# + cellView="form"
# @markdown Let's calculate the WSS for the two groups of our example.
print(f"Black female embedding WSS: {np.round(wss(black_female_embeddings).item(), 2)}")
print(f"White female embedding WSS: {np.round(wss(white_female_embeddings).item(), 2)}")
# -

# ---
# # Summary
#
# In this tutorial we have learned how to apply a modern convnet in a real application such as facial recognition. However, as the state-of-the-art tools for facial recognition are trained mostly with caucasian faces, they fail or they perform much worse when they have to deal with faces from other races.
tutorials/W2D2_ModernConvnets/student/W2D2_Tutorial2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo notebook for os.path / shelve / pprint basics (comments translated
# from Japanese; all runtime strings are kept unchanged).

# +
# Get the notebook's current working directory.
import os  # import the os module
os.getcwd()  # return the current directory

# +
# Change the current working directory.
import os
os.chdir('C:/Windows/System32')  # move to "Windows" -> "System32" on drive C
os.getcwd()

# +
# Restore the original working directory
# (the string below is a placeholder the reader is meant to fill in).
import os
os.chdir('元のカレントディレクトリのパスをここに入力')
os.getcwd()

# +
# Create a new folder hierarchy.
import os
os.makedirs('C:/test/sample/my')  # create test -> sample -> my under drive C

# +
# Check the current directory.
import os
# absolute path of the current directory
os.path.abspath('.')

# +
# Absolute/relative path check:
# pass the current directory obtained with os.path.abspath() as the argument.
os.path.isabs(os.path.abspath('.'))

# +
# Get a relative path.
import os
# relative path from the root of drive C to the Windows folder
os.path.relpath('C:/Windows', 'C:/')
# -

# relative path from C:\Program Files to C:\Windows
os.path.relpath('C:/Windows', 'C:/Program Files')

# relative path from the current directory to C:\Windows
os.path.relpath('C:/Windows', '.')

# +
# Get the directory part and base name of a path.
import os
# path of the Notepad executable
path = 'C:/Windows/System32/notepad.exe'
# directory part
os.path.dirname(path)
# -

# base name
os.path.basename(path)

# +
# Pack the directory part and base name into a tuple.
path = 'C:/Windows/System32/notepad.exe'
(os.path.dirname(path), os.path.basename(path))

# +
# Same tuple via os.path.split().
os.path.split(path)

# +
# Path separator character used by the OS.
import os
os.sep

# +
# Split a path into a list of components.
path = 'C:\\Windows\\System32\\notepad.exe'
# split the path on os.sep
path.split(os.sep)

# +
# Split the path on an explicit separator.
path.split('\\')

# +
# Size of the Notepad executable in bytes.
import os
os.path.getsize('C:/Windows/System32/notepad.exe')

# +
# List the files and folders inside System32.
import os
os.listdir('C:/Windows/System32')

# +
# Function returning the combined size of the files in a folder.
import os

def fileSize(path):
    """Print the total size (bytes) of all files directly inside *path*."""
    # running total of file sizes
    size = 0
    # loop over every entry name in the folder
    for filename in os.listdir(path):
        # get each file's size and add it to the total
        size = size + os.path.getsize(
            # join the directory path and file name into a full path
            os.path.join(path, filename)
        )
    # print the total size
    print(size)
# -

# run fileSize() on a chosen directory
fileSize('C:/Windows/System32')

# +
# Build a path with os.path.join() from folder and file names.
os.path.join('user', 'temp', 'document.txt')

# +
# Check that a path exists.
os.path.exists('C:/Windows')  # does a "Windows" directory exist on drive C?
# -

os.path.isfile('C:/Windows')  # is C:\Windows a file?

os.path.isdir('C:/Windows')  # is C:\Windows a folder?

os.path.exists('D:/')  # does drive D exist?

# +
# Save a list to a file as a Shelve object.
import shelve

def saveByShelve(fname, key, list):
    """Store *list* in shelve file *fname* under *key*."""
    shelve_file = shelve.open(fname)  # open (or create) the file fname
    shelve_file[key] = list           # store the list under the given key
    shelve_file.close()               # close the file
    print('ファイルを保存しました。')   # confirmation message (optional)

# +
# Build the data to save and run saveByShelve().
fname = 'shelve'                          # file name
key = 'd1'                                # key
lst = ['秀和太郎', '秀和花子', '築地次郎']  # data to save
saveByShelve(fname, key, lst)

# +
# Open the shelve file and read the stored data back (read only).
shelve_file = shelve.open(fname)  # open the file
print(shelve_file[key])           # fetch and print the saved data
shelve_file.close()               # close the file

# +
# Add data to the existing shelve file and list its keys and values.
fname = 'shelve'           # file name
key = 'd2'                 # key
lst = ['A1', 'B2', 'A2']   # data to save
saveByShelve(fname, key, lst)  # save the data

shelve_file = shelve.open(fname)     # open the file
keys = list(shelve_file.keys())      # list of registered keys
print('keys = ', keys)
# NOTE: the original notebook fetched the values twice in a row
# (an accidental copy-paste duplicate); the redundant call was removed.
values = list(shelve_file.values())  # list of registered values
print('values = ', values)
shelve_file.close()                  # close the file
# -

# Pretty-print a list of dicts with pprint.pprint().
name_id = [{'name':'秀和太郎', 'id':'A101'},{'name':'秀和花子', 'id':'B101'},
           {'name':'築地次郎', 'id':'A102'}]
import pprint
pprint.pprint(name_id)

# Format name_id with pprint.pformat().
pprint.pformat(name_id)

# +
# Function that writes a list-definition statement into another source file.
import pprint  # import pprint

def saveListDef(fname, lst_name, lst):
    """Write ``lst_name = <pretty-formatted lst>`` into source file *fname*."""
    # open the source file in write mode
    file = open(fname,              # file name including extension
                'w',                # write mode
                encoding = 'utf-8'  # use the UTF-8 character encoding
                )
    # build the list-definition code and write it to the file
    file.write(lst_name + ' = ' + pprint.pformat(lst) + '\n')
    # close the file
    file.close()

# +
# Define a list and run saveListDef().
# file name
fname = 'customer.py'
# variable name
lst_name = 'name_id'
# build a list of dicts
lst = [{'name':'秀和太郎', 'id':'A101'},
       {'name':'秀和花子', 'id':'B101'},
       {'name':'築地次郎', 'id':'A102'}]

saveListDef(fname, lst_name, lst)

# +
# Load the variable (list) saved in the separate file.
# import customer.py
import customer

# print the dict list stored in the customer module
print(customer.name_id)
sample/Python_GOKUI/Python_GOKUI/chap04/sec01/FileOperations.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from scipy.spatial import distance from imutils import face_utils import imutils import dlib import cv2 def eye_aspect_ratio(eye): A = distance.euclidean(eye[1], eye[5]) B = distance.euclidean(eye[2], eye[4]) C = distance.euclidean(eye[0], eye[3]) ear = (A + B) / (2.0 * C) return ear thresh = 0.20 frame_check = 20 detect = dlib.get_frontal_face_detector() predict = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat") (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"] (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"] cap=cv2.VideoCapture(0) flag=0 while True: ret, frame=cap.read() frame = imutils.resize(frame, width=450) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) subjects = detect(gray, 0) for subject in subjects: shape = predict(gray, subject) shape = face_utils.shape_to_np(shape)#converting to NumPy Array leftEye = shape[lStart:lEnd] rightEye = shape[rStart:rEnd] leftEAR = eye_aspect_ratio(leftEye) rightEAR = eye_aspect_ratio(rightEye) ear = (leftEAR + rightEAR) / 2.0 leftEyeHull = cv2.convexHull(leftEye) rightEyeHull = cv2.convexHull(rightEye) cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1) cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1) if ear < thresh: flag += 1 #print (flag) if flag >= frame_check: cv2.putText(frame, "****************ALERT!****************", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) cv2.putText(frame, "****************ALERT!****************", (10,325), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) else: flag = 0 cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF if key == ord("q"): cv2.destroyAllWindows() cap.release() break
notebooks/Drowsiness Detection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Car-price regression notebook (dw_matrix course). Two real defects fixed
# below: the misspelled XGBoost parameter `n_esimators` and the misspelled
# hyperopt space key `subsmaple` — both were silently ignored by XGBoost, so
# the affected runs did not use the intended settings. Stale generated colab
# output metadata has been trimmed from the cell markers.

# + id="CP1k95JjmAEi" colab_type="code"
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# !pip install hyperopt

# + id="UvYS9nq1lNKT" colab_type="code"
import pandas as pd
import numpy as np
import xgboost as xgb

from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold

from hyperopt import hp, fmin, tpe, STATUS_OK

import eli5
from eli5.sklearn import PermutationImportance

# + [markdown] id="cPNAH47WmZhy" colab_type="text"
# # Loading the data

# + id="mmndgYP6mfSZ" colab_type="code"
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car/data"

# + id="6UoQUOMFm2pT" colab_type="code"
# ls

# + id="VayXHEcimrAy" colab_type="code"
df = pd.read_hdf('car.h5')
df.shape

# + id="u0L2lm7UoiJ8" colab_type="code"
df.columns

# + id="nWmej5_9okch" colab_type="code"


# + [markdown] id="elv7NhIKo0nn" colab_type="text"
# # Dummy Model

# + id="stV2HTFpo3Ul" colab_type="code"
df.select_dtypes(np.number).columns

# + id="4Eo-vZejo-V5" colab_type="code"
# Baseline: predict the mean price from a meaningless feature (car_id).
feats = ['car_id']
x = df[ feats ].values
y = df[ 'price_value' ].values

model = DummyRegressor()
model.fit(x, y)
y_pred = model.predict(x)

mae(y, y_pred)

# + id="yYhWxuJvrcOb" colab_type="code"
[x for x in df.columns if 'price' in x]

# + id="FmsJMqwBrn0J" colab_type="code"
df['price_currency'].value_counts()

# + id="DU6b5AvJrtpp" colab_type="code"
# Keep PLN-priced cars only, so the target is in a single currency.
df = df[ df['price_currency'] !='EUR']
df.shape

# + [markdown] id="UoeEHrD3r6lI" colab_type="text"
# # Features

# + id="JQmmWBrlsQZF" colab_type="code"
df['param_color'].factorize()[0]

# + id="87nETZXxr9tm" colab_type="code"
# Integer-encode every column into a parallel '<name>__cat' column
# (list-valued cells are skipped; already-encoded columns are re-encoded
# in place).
SUFFIX_CAT = '__cat'
for feat in df.columns:
    if isinstance(df[feat][0], list):
        continue
    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        df[feat + SUFFIX_CAT ] = factorized_values

# + id="Xjgr5SUluB7R" colab_type="code"
# All encoded features except anything price-related (would leak the target).
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x ]
len(cat_feats)

# + id="F5BGtIYXtc9u" colab_type="code"
def run_model(model, feats):
    """3-fold cross-validate *model* on df[feats] -> price_value.

    Returns (mean, std) of the negative-MAE scores.
    """
    x = df[feats].values
    y = df['price_value'].values
    scores = cross_val_score(model, x, y, cv=3, scoring='neg_mean_absolute_error')
    return (np.mean(scores), np.std(scores))

# + [markdown] id="PgLj4LZE1_2v" colab_type="text"
# # DecisionTree

# + id="33q1OXJp1ui-" colab_type="code"
run_model( DecisionTreeRegressor(max_depth=5), cat_feats)

# + id="X9xG6oDyu8bB" colab_type="code"
# NOTE(review): x/y here are whatever the last assignment left them as
# (the dummy-model cell) — the original notebook shows this cell erroring.
m = DecisionTreeRegressor(max_depth=5)
m.fit(x, y)

imp = PermutationImportance(m, random_state=0).fit(x, y)
eli5.show_weights(imp, feature_names=cat_feats)

# + [markdown] id="EKzxqrhY2FnX" colab_type="text"
# # RandomForest

# + id="Hnv94cDj2Jko" colab_type="code"
model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0)
run_model(model, cat_feats)

# + [markdown] id="tzJvPly72YSW" colab_type="text"
# # XGBoost

# + id="j3ztTxYQ2aQl" colab_type="code"
xgb_params = {
    'max_depth': 5,
    'n_estimators': 50,
    'learning_rate': 0.1,
    'seed': 0
}

run_model( xgb.XGBRegressor(**xgb_params), cat_feats )

# + id="PRYomMoW3fLl" colab_type="code"
# FIX: the original passed `n_esimators=50` (typo) — XGBRegressor silently
# ignores unknown keyword arguments, so the model actually trained with the
# default number of estimators.
m = xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate = 0.1, seed = 0)
m.fit(x, y)

imp = PermutationImportance(m, random_state=0).fit(x, y)
eli5.show_weights(imp, feature_names=cat_feats)

# + id="5yGcQyzA7os7" colab_type="code"
len(cat_feats)

# + id="n54UMGq_6SR2" colab_type="code"
# Top-20 features selected from the permutation-importance ranking.
feats = ['param_rok-produkcji__cat', 'param_stan__cat', 'param_napęd__cat', 'param_skrzynia-biegów__cat', 'param_moc__cat', 'param_faktura-vat__cat', 'param_marka-pojazdu__cat', 'param_typ__cat', 'feature_kamera-cofania__cat', 'param_wersja__cat', 'param_model-pojazdu__cat', 'param_pojemność-skokowa__cat', 'param_kod-silnika__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'feature_czujniki-parkowania-przednie__cat', 'param_uszkodzony__cat', 'feature_system-start-stop__cat', 'feature_regulowane-zawieszenie__cat', 'feature_asystent-pasa-ruchu__cat' ]
run_model( xgb.XGBRegressor(**xgb_params), feats )

# + id="iK-aOuID-25n" colab_type="code"
df['param_rok-produkcji'].unique()

# + id="rmfk9aU97-O8" colab_type="code"
# Use the production year as a real number instead of a categorical code.
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
df['param_rok-produkcji'].unique()

# + id="3xvGgw6Y7so1" colab_type="code"
feats = ['param_rok-produkcji', 'param_stan__cat', 'param_napęd__cat', 'param_skrzynia-biegów__cat', 'param_moc__cat', 'param_faktura-vat__cat', 'param_marka-pojazdu__cat', 'param_typ__cat', 'feature_kamera-cofania__cat', 'param_wersja__cat', 'param_model-pojazdu__cat', 'param_pojemność-skokowa__cat', 'param_kod-silnika__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'feature_czujniki-parkowania-przednie__cat', 'param_uszkodzony__cat', 'feature_system-start-stop__cat', 'feature_regulowane-zawieszenie__cat', 'feature_asystent-pasa-ruchu__cat' ]

# re-applying the mapping is a no-op on already-converted int values
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x))
run_model( xgb.XGBRegressor(**xgb_params), feats )

# + id="_wvAHrC9_Omx" colab_type="code"
feats = ['param_rok-produkcji', 'param_stan__cat', 'param_napęd__cat', 'param_skrzynia-biegów__cat', 'param_moc', 'param_faktura-vat__cat', 'param_marka-pojazdu__cat', 'param_typ__cat', 'feature_kamera-cofania__cat', 'param_wersja__cat', 'param_model-pojazdu__cat', 'param_pojemność-skokowa', 'param_kod-silnika__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'feature_czujniki-parkowania-przednie__cat', 'param_uszkodzony__cat', 'feature_system-start-stop__cat', 'feature_regulowane-zawieszenie__cat', 'feature_asystent-pasa-ruchu__cat' ]
run_model( xgb.XGBRegressor(**xgb_params), feats )

# + id="2-paCu9fRc1Y" colab_type="code"
# Engine displacement: strip the unit suffix and use the numeric value.
df['param_pojemność-skokowa']= df['param_pojemność-skokowa'].map(lambda x: -1 if str(x)=='None' else int(x.split(' ')[0]))

# + id="PuXAt4tyPhxv" colab_type="code"
feats = ['param_rok-produkcji', 'param_stan__cat', 'param_napęd__cat', 'param_skrzynia-biegów__cat', 'param_moc__cat', 'param_faktura-vat__cat', 'param_marka-pojazdu__cat', 'param_typ__cat', 'feature_kamera-cofania__cat', 'param_wersja__cat', 'param_model-pojazdu__cat', 'param_pojemność-skokowa', 'param_kod-silnika__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'feature_czujniki-parkowania-przednie__cat', 'param_uszkodzony__cat', 'feature_system-start-stop__cat', 'feature_regulowane-zawieszenie__cat', 'feature_asystent-pasa-ruchu__cat' ]
run_model( xgb.XGBRegressor(**xgb_params), feats )

# + [markdown] id="hdCWx0JfOHX5" colab_type="text"
# # Hyperopt

# + id="teebWbHZCjGM" colab_type="code"
def obj_func(params):
    """Hyperopt objective: CV the model and return |mean MAE| as the loss."""
    print ("Training with params: ")
    print (params)
    mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)
    return {'loss': np.abs(mean_mae), 'status': STATUS_OK}

## space
xgb_reg_params = {
    'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
    'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),
    # FIX: the original key was 'subsmaple' (typo). The sampled value was
    # passed to XGBRegressor under the wrong name and silently ignored, so
    # subsample was never actually tuned.
    'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
    'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),
    'objective': 'reg:squarederror',
    'n_estimators': 100,
    'seed': 0,
}

## run
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=3)
best

# + id="xWJM2W5HOxSf" colab_type="code"
day5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 1: Data Exploration # # Use the code below to explore the raw timeseries data. Come up with 5 observations that might help inform an algorithm that you build using this data. # ## Imports # + import os from matplotlib import pyplot as plt import numpy as np import pandas as pd # - # ## Load Data data_dir = 'data' filenames = [os.path.splitext(f)[0] for f in sorted(os.listdir(data_dir))] fs = 256 data = [] for f in filenames: subject = f.split('_')[0] activity = f.split('_')[1] path = os.path.join(data_dir, f + '.csv') df = pd.read_csv(path) df = df.loc[: df.last_valid_index()] data.append((subject, activity, df)) # ## Offline # # Working in an offline notebook on your local machine is probably easier and faster for this exercise. See the instructions in the introductory lesson for this course to get started. # # Use the plotting code below to visualize the data. # ### Pick a backend # # You can only pick one matplotlib backend so delete two lines of code from the cell below before running it. # Use this backend if you are on MacOS # %matplotlib osx # Use this backend if you are not. # %matplotlib qt # A third backend to try if the ones above don't work. # %matplotlib tk # ### Sequentially Plot the Data # # You can interact with the plots with your mouse. Press any key on the keyboard to go to the next plot. 
for subject, activity, df in sorted(data, key=lambda x: x[1]): ts = np.arange(len(df)) / fs plt.clf() plt.plot(ts, df.accx, label='x') plt.plot(ts, df.accy, label='y') plt.plot(ts, df.accz, label='z') plt.title('{}_{}'.format(subject, activity)) plt.legend() plt.ylim((-25, 25)) plt.draw() while not plt.waitforbuttonpress(): pass # ## Inside the Udacity workspace # # Inside a VM you won't be able to open a new window so you have to plot the data inline. It's a lot of data to plot interactively so you may have to be patient. After examining the data from one activity class it's a good idea to clear that cell's output so you free up memory in the notebook. Click on the cell with the plots, then in the top menu `Cell` > `Current Outputs` > `Clear` # %matplotlib inline # `mpld3` will allow you to interact with the plots but if you run the following line of code the workspace could crash while generating the graphs. It is **highly** suggested you run the following data **without** `mpld3` and if you are interested in a particular graph to enable this and generate the graph individually. 
import mpld3
mpld3.enable_notebook()


def _plot_activity_traces(target_activity):
    """Plot the 3-axis accelerometer trace of every recording of one activity.

    Opens one 12x8-inch figure per (subject, activity) recording found in the
    module-level ``data`` list, with time in seconds on the x axis and a fixed
    y range so the plots are directly comparable across recordings.

    The original notebook repeated this loop verbatim for 'bike', 'run' and
    'walk'; filtering to a single activity makes the sort the run/walk copies
    applied irrelevant (stable sort by activity preserves per-activity order),
    so all three produce identical figures via this one helper.
    """
    for subject, activity, df in data:
        if activity != target_activity:
            continue
        ts = np.arange(len(df)) / fs  # sample index -> seconds
        plt.figure(figsize=(12, 8))
        plt.plot(ts, df.accx, label='x')
        plt.plot(ts, df.accy, label='y')
        plt.plot(ts, df.accz, label='z')
        plt.title('{}_{}'.format(subject, activity))
        plt.legend()
        plt.ylim((-25, 25))
        plt.draw()


# #### Plot biking data

_plot_activity_traces('bike')

# #### Plot running data

_plot_activity_traces('run')

# #### Plot walking data

_plot_activity_traces('walk')

# ## Observations

# What do you notice about the data that might be helpful when we start building a classifier?
AI-for-Healthcare/wearable-data/lesson 4/1_data_exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# %reload_ext autoreload
# %autoreload 2

import sys

sys.path.insert(0, "../")

from pathlib import Path

import pandas as pd
import geopandas as gpd
import numpy as np
import plotly.express as px
import basedosdados as bd
from shapely.wkt import loads
from shapely import ops
from shapely.ops import split
import shapely

# Widen pandas display limits so wide GTFS/shape tables can be inspected in full.
pd.options.display.max_columns = 999
pd.options.display.max_rows = 1999
pd.options.display.max_colwidth = 200
# -

# ### Notes
#
# - The data used in this treatment lives in this [Google Drive](https://drive.google.com/drive/folders/1TQjha0aPdBLfBGek8Bl48cmdGbX_j_qC) folder

# ### Shapes
#
# - Train (Trem)
# - VLT
# - Transbrasil – Line 62
# - Transolimpica
# - Transcarioca
# - Transoeste

# +
### SHAPES TP RJ

# Download every Rio de Janeiro public-transport shape from the SMTR SIGMOB
# BigQuery dataset, translating the numeric modal code into a readable modal
# name, and cache the result locally as CSV.
query = """
SELECT
CASE
WHEN id_modal_smtr = '10' THEN "Metrô"
WHEN id_modal_smtr = '11' THEN "Trem"
WHEN id_modal_smtr = '12' THEN "VLT"
WHEN id_modal_smtr = '13' THEN "Bonde"
WHEN id_modal_smtr = '14' THEN "Plano Inclinado"
WHEN id_modal_smtr = '20' THEN "BRT"
WHEN id_modal_smtr = '21' THEN "BHLS"
WHEN id_modal_smtr = '22' THEN "SPPO"
WHEN id_modal_smtr = '23' THEN "SPPO Executivo"
WHEN id_modal_smtr = '30' THEN "DETRO RMTJ"
WHEN id_modal_smtr = '31' THEN "Varia conforme região"
WHEN id_modal_smtr = '32' THEN "Varia conforme região"
WHEN id_modal_smtr = '33' THEN "Varia conforme região"
WHEN id_modal_smtr = '34' THEN "Varia conforme região"
WHEN id_modal_smtr = '35' THEN "Varia conforme região"
WHEN id_modal_smtr = '36' THEN "Varia conforme região"
WHEN id_modal_smtr = '37' THEN "Varia conforme região"
WHEN id_modal_smtr = '38' THEN "Varia conforme região"
WHEN id_modal_smtr = '39' THEN "Varia conforme região"
WHEN id_modal_smtr = '40' THEN "STPL"
WHEN id_modal_smtr = '41' THEN "STPC"
WHEN id_modal_smtr = '50' THEN "Barcas"
WHEN id_modal_smtr = '60' THEN "Teleferico"
END AS id_modal_smtr,
* EXCEPT( versao, id_modal_smtr )
FROM `rj-smtr.br_rj_riodejaneiro_sigmob.shapes_geom`
WHERE data_versao = "2022-03-10"
"""
shapes = bd.read_sql(query,billing_project_id='rj-escritorio-dev', use_bqstorage_api=True)
shapes.to_csv('data/raw/shapes_transporte_rj_sigmob.csv', index=False)

# +
# Re-read the cached shapes (geometry still stored as WKT text).
shapes = gpd.read_file("data/raw/shapes_transporte_rj_sigmob.csv")

# GTFS line number -> BRT corridor name.
corredor_dict = {
    "62": "TransBrasil",
    "55": "TransOlímpica",
    "46": "TransCarioca",
    "42A": "TransCarioca",
    "53": "TransOlímpica",
    "18": "TransOeste",
    "17": "TransOeste",
    "20": "TransOeste",
}
# GTFS line number -> mayoral administration credited with the line.
gestao_dict = {
    "62": "Futuro",
    "55": "Paes",
    "46": "Paes",
    "42A": "Paes",
    "53": "Paes",
    "18": "Paes",
    "17": "Crivela",
    "20": "Paes",
}
shapes["corredor"] = shapes["linha_gtfs"].apply(lambda x: corredor_dict.get(x))
shapes["gestao"] = shapes["linha_gtfs"].apply(lambda x: gestao_dict.get(x))
# Parse the WKT text into shapely geometries (both columns hold the same WKT).
shapes["geometry"] = shapes["shape"].apply(loads)
shapes["shape"] = shapes["shape"].apply(loads)
shapes = shapes.sort_values("corredor").drop_duplicates(["route_id", "linha_gtfs"])
# Keep line 55 as the TransOlímpica shape; drop line 53's duplicate of it.
remove_linha_55_transolimpica = (shapes["corredor"] == "TransOlímpica") & (
    shapes["linha_gtfs"] == "53"
)
shapes = shapes[~remove_linha_55_transolimpica]
# Merge all remaining shapes into one geometry per (corridor, administration).
corredores = (
    shapes[~shapes["corredor"].isna()]
    .drop("shape", 1)
    .dissolve(["corredor", "gestao"], as_index=False)
)
# -

corredores.reset_index().plot(column = 'corredor', figsize=(12,12))

corredores_plot = corredores.reset_index()[['corredor', 'geometry']]
corredores_plot.to_csv('data/treated/shapes/corredores_plot.csv', index=False)

corredores.to_csv('data/treated/shapes/corredores.csv', index=False)

# ### Metro and train shapes
#
# - Break out line 4 (from line 1's shape)

outros = shapes[shapes['id_modal_smtr'].isin(['Metrô', 'Trem'])]

# +
# The two stations where metro line 1's shape will be cut.
query = """
SELECT distinct stop_name, stop_lat, stop_lon
FROM `rj-smtr.br_rj_riodejaneiro_sigmob.stop_details_desaninhada`
where modal = 'Metrô'
and (stop_name = 'General Osório' or stop_name='Saens Peña')
"""
estacoes_quebra_original = bd.read_sql(query,billing_project_id='rj-escritorio-dev', use_bqstorage_api=True)

# +
# Buffer the cut stations into small polygons so they can split the line geometry.
estacoes_quebra = estacoes_quebra_original.drop_duplicates('stop_name')
estacoes_quebra = gpd.GeoDataFrame(
    estacoes_quebra,
    geometry=gpd.points_from_xy(estacoes_quebra.stop_lon, estacoes_quebra.stop_lat),
)
estacoes_quebra['geometry'] = estacoes_quebra['geometry'].apply(lambda x: x.buffer(0.0005))
# -

# +
# Cut metro line 1's shape at General Osório: segment 2 of the split is
# relabeled 'linha 4' and segment 0 stays 'linha 1' (presumably the Barra and
# Zona Sul branches, cf. the variable names — confirm on a map).
mask = (outros["id_modal_smtr"] == "Metrô") & (outros["linha_gtfs"] == "1")
linha_1 = outros[mask].head(1)
linha = linha_1['geometry'].values[0]
general = estacoes_quebra.head(1)['geometry'].values[0]
saens = estacoes_quebra.tail(1)['geometry'].values[0]  # NOTE(review): unused below
result = split(linha, general)
barra = linha_1.copy()
barra['linha_gtfs'][barra.index] = 'linha 4'
barra['geometry'][barra.index] = result[2]
zs = linha_1.copy()
zs['linha_gtfs'][zs.index] = 'linha 1'
zs['geometry'][zs.index] = result[0]
# -

# Rebuild the metro/train table with the two line-1 pieces replacing the
# original undivided line 1, and normalize the line-2 label.
outros_final = pd.concat([barra,outros])
outros_final = pd.concat([zs,outros_final])
outros_final = outros_final.drop_duplicates('linha_gtfs').reset_index(drop=True)
outros_final = outros_final[outros_final['linha_gtfs']!='1']
outros_final['linha_gtfs'] = outros_final['linha_gtfs'].str.replace('2','linha 2')
outros_final.to_csv('data/treated/shapes/trem_metro.csv', index=False)

# ### Stations with opening date
#
# - Transolimpica
# - Transcarioca
# - Transoeste
# - Transbrasil

# +
estacoes = pd.read_csv('data/raw/estacoes_brt_georeferenciadas.csv')
estacoes = gpd.GeoDataFrame(estacoes)
estacoes['geometry'] = estacoes['geometry'].apply(loads)
# Buffer of 0.002 degrees around each station point (roughly 200 m — TODO
# confirm), used for the spatial join against the corridor shapes below.
estacoes['buffer'] = estacoes['geometry'].apply(lambda x: x.buffer(0.002))
estacoes['buffer'].plot()

# +
# Attach each station to the corridor whose shape its buffer intersects.
estacoes_corr = gpd.sjoin(
    corredores, gpd.GeoDataFrame(estacoes[["stop_id", "buffer"]], geometry="buffer")
).reset_index()
estacoes_final = estacoes_corr.rename(columns={'geometry':'geometry_linha'}).drop(
    columns=[
        "shape_id",
        "trip_id",
        "route_id",
        "start_pt",
        "end_pt",
        "data_versao",
        "index_right",
    ]
).merge(estacoes.drop(columns=["buffer", "var"]), on="stop_id").drop(columns=['index'])
# Year-month of (re)opening.
estacoes_final["data_reabertura"] = estacoes_final["date"].apply(lambda x: x[:7])
estacoes_final = estacoes_final.sort_values('corredor').drop_duplicates('stop_id')
# Reopened stations outside Transbrasil are credited to the Crivela administration.
mask = (estacoes_final['reaberto']=='sim') & (estacoes_final['corredor']!='Transbrasil')
estacoes_final['gestao'] = np.where(mask, 'Crivela',estacoes_final['gestao'])
estacoes_final = estacoes_final.reset_index(drop=True)
# Manual corrections: these two stations are recorded as 2021-01 but are
# assigned 2021-09 here.
mask = (estacoes_final['data_reabertura']=='2021-01') & (estacoes_final['gestao']=='Crivela') & (estacoes_final['stop_name']=='Campo Grande')
index_campo_grande = estacoes_final[mask]['data_reabertura'].index[0]
estacoes_final['data_reabertura'][index_campo_grande]='2021-09'
mask = (estacoes_final['data_reabertura']=='2021-01') & (estacoes_final['gestao']=='Crivela') & (estacoes_final['stop_name']=='Santa Eugênia')
index_santa_eugenia = estacoes_final[mask]['data_reabertura'].index[0]
estacoes_final['data_reabertura'][index_santa_eugenia]='2021-09'
# -

estacoes_final.to_csv('data/treated/estacoes.csv', index=False)

estacoes_final.drop('geometry_linha',1).to_csv('data/treated/estacoes_plot.csv', index=False)

# ### Crivela
#
# - Only stations that already existed on 2021-01-01
# - Remove line 17 and any leftover stations

# +
estacoes = pd.read_csv('data/treated/estacoes.csv')
estacoes["geometry"] = estacoes["geometry_linha"].apply(loads)
# GTFS line number -> administration (same mapping as in the shapes section).
gestao_dict = {
    '62': 'Futuro',
    '55': 'Paes',
    '46': 'Paes',
    '42A': 'Paes',
    '53': 'Paes',
    '18': 'Paes',
    '17': 'Crivela',
    '20': 'Paes',
}
estacoes['gestao'] = estacoes['linha_gtfs'].apply(lambda x: gestao_dict.get(x))
estacoes_layer = gpd.GeoDataFrame(estacoes)
group_cols = ["corredor", "data_reabertura", "gestao"]
# One merged line geometry per (corridor, reopening month, administration).
final_layer_corredores = (
    gpd.GeoDataFrame(estacoes_layer[["geometry"] + group_cols], geometry="geometry")
    .dissolve(by=group_cols, as_index=False)
    .sort_values(["gestao", "corredor", "data_reabertura"])
)
# Merge multi-part lines into single LineStrings, then thicken slightly for plotting.
final_layer_corredores["geometry"] = final_layer_corredores["geometry"].apply(
    lambda x: ops.linemerge(x) if type(x)==shapely.geometry.multilinestring.MultiLineString else x
)
final_layer_corredores["geometry"] = final_layer_corredores["geometry"].apply(
    lambda x: x.buffer(0.0005)
)

# +
transporte = pd.read_csv('data/treated/shapes/trem_metro.csv')
cols = ['id_modal_smtr','linha_gtfs','geometry']
final_layer_corredores_transporte = pd.concat([transporte[cols],final_layer_corredores])
# -

final_layer_corredores.to_csv(
    "data/treated/shapes/corredores_por_gestao.csv", index=False
)

final_layer_corredores_transporte.to_csv(
    "data/treated/shapes/corredores_por_gestao_transporte.csv", index=False
)

print('Numero de estações reabertas ', len(estacoes[estacoes['date'].apply(lambda x: x.split(' ')[0]) != '2021-01-01']))

# #### Crivela stations

# + jupyter={"outputs_hidden": true} tags=[]
estacoes_final[estacoes_final['date'].apply(lambda x: x.split(' ')[0]) == '2021-01-01']
# -

# ### Data
#
# - Number of passengers
#     - 2016
#     - 2020
#     - 2022
#     - Future
# - Chart


# Reshape a wide (month x year) ridership table into long format and tag it
# with its corridor name.
def to_melt(df, corredor):
    df = df.melt(id_vars='PERÍODO', value_vars=[str(x) for x in list(range(2012, 2023))])
    df['corredor'] = corredor
    return df

# NOTE(review): these cells read the ridership tables from the clipboard, so
# they are only reproducible interactively.
oeste = to_melt(pd.read_clipboard(), 'TransOeste')

carioca = to_melt(pd.read_clipboard(), 'TransCarioca')

olimpica = to_melt(pd.read_clipboard(), 'TransOlimpica')

gratuidade = pd.read_clipboard()

passageiros = pd.concat([oeste, carioca, olimpica, gratuidade])

list(passageiros['PERÍODO'].unique())

# Portuguese month abbreviation -> month number.
month_dict = {
    'JAN': 1,
    'FEV': 2,
    'MAR': 3,
    'ABR': 4,
    'MAI': 5,
    'JUN': 6,
    'JUL': 7,
    'AGO': 8,
    'SET': 9,
    'OUT': 10,
    'NOV': 11,
    'DEZ': 12
}

passageiros['dt'] = passageiros.apply(lambda x: pd.Timestamp(year=int(x['variable']), month=month_dict[x['PERÍODO']], day=1), 1)

# + tags=[]
# Values arrive as strings with thousands separators; non-strings become 0.
passageiros['value'] = passageiros['value'].apply(lambda x: int(x.replace(',', '')) if isinstance(x, str) else 0)
# -

passageiros = passageiros.replace({'transoeste': 'TransOeste', 'transcarioca': 'TransCarioca', 'transolimpica': 'TransOlimpica'})

passageiros.groupby('dt').sum().plot(figsize=(12,7))

px.line(passageiros.groupby(['dt', 'corredor']).sum().reset_index(), x='dt', y='value', color='corredor', width=720)

passageiros['variable'] = passageiros['variable'].apply(int)

passageiros.sort_values(by='variable').groupby('variable').sum()

px.line(passageiros.groupby('dt').sum().divide(25).reset_index(), x='dt', y='value', width=720)

# Monthly totals divided by 25 (scaling factor — TODO confirm its meaning),
# then split into one column per period for plotting.
passageiros_mes = passageiros.groupby('dt').sum().divide(25).reset_index()[['dt', 'value']]
passageiros_mes['2012-2016'] = passageiros_mes[passageiros_mes['dt'] <= '2017-01-01']['value']
passageiros_mes['2017-2020'] = passageiros_mes[(passageiros_mes['dt'] >= '2017-01-01') & (passageiros_mes['dt'] <= '2021-01-01')]['value']
passageiros_mes['2021'] = passageiros_mes[passageiros_mes['dt'] >= '2021-01-01']['value']

# + jupyter={"outputs_hidden": true} tags=[]
print(passageiros_mes[['dt','2012-2016', '2017-2020', '2021']].to_csv(index=False))
# -

passageiros_ano = passageiros.sort_values(by='variable').groupby('variable').sum().reset_index().rename(
    columns={'variable':'ano', 'value':'passageiros'}
)

print(passageiros_ano.to_csv(index=False))

passageiros_ano.to_csv('data/treated/passageiros_ano.csv', index=False)

# ### Data
#
# - Number of stations
#     - 2016
#     - 2020
#     - 2022
#     - Future
# - Chart

len(estacoes_final[estacoes_final['corredor'] == 'TransOeste'])

stops_oeste = estacoes_final[estacoes_final['corredor'] == 'TransOeste']['stop_id']

len(estacoes_final[~(estacoes_final['stop_id'].isin(stops_oeste)) & (estacoes_final['corredor'] == 'TransCarioca')])

# ### Data
#
# - Number of vehicles
#     - 2016
#     - 2020
#     - 2022
#     - Future
# - Chart

# NOTE(review): the lines below are raw notes, not runnable expressions —
# the names are undefined, so executing this cell raises NameError.
DEZ/2016, 436
DEZ/2020, 120
DEZ/2021, 245
MAR/2022, 254
FUTURO , 572

# ### Data
#
# - Corridor lengths and number of stations per corridor
# - Cost/km metro vs BRT

# +
# import requests

# url = "https://maps.googleapis.com/maps/api/directions/json?origin=Toronto&destination=Montreal&avoid=highways&mode=bicycling&key=-Nfeg2M"

# payload={}
# headers = {}

# response = requests.request("GET", url, headers=headers, data=payload)
# -

# ### Route comparison using the Google Directions API

# +
import googlemaps
from datetime import datetime
import json

key = ''
# credential = json.load(open('/Users/m/gabinite_sv/credentials/basedosdados/mac/emd/escritorio-rj-dev/google_directions_api.json'))
gmaps = googlemaps.Client(key=key)

# Two fixed coordinates used as the origin/destination of every request.
downtown = (-23.004216,-43.317398)
tanque = (-22.917722,-43.360131)

directions_result = gmaps.directions(
    origin = downtown,
    destination=tanque,
    mode="transit",
    departure_time=datetime.fromisoformat('2022-03-14T18:00:00'),
    alternatives=True
)

# + tags=[]
json.dump(directions_result, open('dirs.json', 'w'))
# -

len(directions_result)

# +
import polyline
from shapely.geometry import LineString


# Decode a Directions API overview polyline into a LineString, swapping the
# decoded (lat, lon) pairs into (x=lon, y=lat) order.
def read_polygon(res):
    p = res['overview_polyline']['points']
    return LineString([(x[1], x[0]) for x in polyline.decode(p)])
# -

# Transit alternatives 0 and 3 are taken as the BRT and regular-bus routes
# (chosen by inspecting dirs.json — confirm if the API responses change).
polys = { 'brt': read_polygon(directions_result[0]), 'onibus': read_polygon(directions_result[3])}

directions_result = gmaps.directions(
    origin = downtown,
    destination=tanque,
    mode="driving",
    departure_time=datetime.fromisoformat('2022-03-14T18:00:00'),
    alternatives=True
)

polys['carro'] = read_polygon(directions_result[0])

comparacao = pd.Series(polys).to_frame().reset_index()

comparacao.columns = ['modo', 'geometry']

comparacao

# Travel time per mode, transcribed manually (presumably minutes — confirm).
comparacao['tempo'] = [65,92,75]

comparacao.to_csv('data/treated/comparacao.csv')
stories/brt/code/treat_brt_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # Support Vector Machines
#
# Authors: <NAME> (<EMAIL>)
#          <NAME> (<EMAIL>)
#
# This notebook is a compilation of material taken from several sources:
#
# - The [sklearn documentation](http://scikit-learn.org/stable/modules/svm.html)
# - A [notebook](https://github.com/jakevdp/sklearn_pycon2015/blob/master/notebooks/03.1-Classification-SVMs.ipynb) by [<NAME>](https://github.com/jakevdp)
# - [Wikipedia](https://en.wikipedia.org/wiki/Support_vector_machine)
#

# + [markdown] slideshow={"slide_type": "notes"}
# Notebook version: 1.0 (Oct 28, 2015)
#                   1.1 (Oct 27, 2016)
#                   2.0 (Nov 2, 2017)
#                   2.1 (Oct 20, 2018)
#
# Changes:
#     v.1.0 - First version
#     v.1.1 - Typo correction and illustrative figures for linear SVM
#     v.2.0 - Compatibility with Python 3 (backcompatible with Python 2.7)
#     v.2.1 - Minor corrections on the notation
#     v.2.2 - Minor equation errors. Reformatted hyperlinks. Restoring broken visualization of images in some Jupyter versions.

# + slideshow={"slide_type": "skip"}
from __future__ import print_function

# To visualize plots in the notebook
# %matplotlib inline

# Imported libraries
#import csv
#import random
#import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
#import pylab
import numpy as np

#from sklearn.preprocessing import PolynomialFeatures
from sklearn import svm
# `sklearn.datasets.samples_generator` was deprecated in scikit-learn 0.22 and
# removed in 0.24; the same generators are importable from `sklearn.datasets`.
from sklearn.datasets import make_blobs
from sklearn.datasets import make_circles

from ipywidgets import interact

# + [markdown] slideshow={"slide_type": "slide"}
# ## 1.
Introduction # # <small> <font color="blue"> [Source: [sklearn documentation](http://scikit-learn.org/stable/modules/svm.html) </a>]</font> </small> # # Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection. # # The advantages of support vector machines are: # # - Effective in high dimensional spaces. # - Still effective in cases where number of dimensions is greater than the number of samples. # - Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient. # - Versatile: different Kernel functions can be specified for the decision function. # # The disadvantages of support vector machines include: # # - SVMs do not directly provide probability estimates. # + [markdown] slideshow={"slide_type": "slide"} # ## 2. Motivating Support Vector Machines # # <small> <font color="blue"> [Source: A [notebook](https://github.com/jakevdp/sklearn_pycon2015/blob/master/notebooks/03.1-Classification-SVMs.ipynb) by [<NAME>](https://github.com/jakevdp>)] </font> </small> # # Support Vector Machines (SVMs) are a kind of ***discriminative*** classifiers: that is, they draw a boundary between clusters of data without making any explicit assumption about the probability model underlying the data generation process. # # Let's show a quick example of support vector classification. First we need to create a dataset: # + slideshow={"slide_type": "subslide"} X, y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='copper') plt.xlabel("$x_0$", fontsize=14) plt.ylabel("$x_1$", fontsize=14) plt.axis('equal') plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # A discriminative classifier attempts to draw a line between the two sets of data. Immediately we see an inconvenience: such problem is ill-posed! 
For example, we could come up with several possibilities which perfectly discriminate between the classes in this example: # + slideshow={"slide_type": "fragment"} xfit = np.linspace(-1, 3.5) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='copper') for m, b in [(1, 0.65), (0.5, 1.6), (-0.2, 2.9)]: plt.plot(xfit, m * xfit + b, '-k') plt.xlim(-1, 3.5); plt.xlabel("$x_0$", fontsize=14) plt.ylabel("$x_1$", fontsize=14) plt.axis('equal') plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # These are three very different separators which perfectly discriminate between these samples. Depending on which you choose, a new data point will be classified almost entirely differently! How can we improve on this? # # Support Vector Machines (SVM) select the boundary decision maximizing the ***margin***. The margin of a classifier is defined as twice the maximum signed distance between the decision boundary and the training data. By *signed* we mean that the distance to misclassified samples is counted negatively. Thus, if the classification problem is "separable" (i.e. if there exist a decision boundary with zero errors in the training set), the SVM will choose the zero-error decision boundary that is "as far as possible" from the training data. # # In summary, what an SVM does is to not only draw a line, but consider the "sample free" region about the line. 
Here's an example of what it might look like: # + slideshow={"slide_type": "subslide"} xfit = np.linspace(-1, 3.5) plt.scatter(X[:,0], X[:,1], c=y, s=50, cmap='copper') for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]: yfit = m * xfit + b plt.plot(xfit, yfit, '-k') plt.fill_between(xfit, yfit-d, yfit+d, edgecolor='none', color='#AAAAAA', alpha=0.4) plt.xlim(-1, 3.5) plt.xlabel("$x_0$", fontsize=14) plt.ylabel("$x_1$", fontsize=14) plt.axis('equal') plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # Notice here that if we want to maximize this width, the middle fit is clearly the best. This is the intuition of the SVM, which optimizes a linear discriminant model in conjunction with a margin representing the perpendicular distance between the datasets. # + [markdown] slideshow={"slide_type": "slide"} # ## 3. Linear SVM # # <small> <font color="blue"> [Source: adapted from [Wikipedia](https://en.wikipedia.org/wiki/Support_vector_machine)]</font> </small> # # In order to present the SVM in a formal way, consider a training dataset $\mathcal{D} = \left\{ (\mathbf{x}^{(k)}, y^{(k)}) \mid \mathbf{x}^{(k)}\in \Re^N,\, y^{(k)} \in \{-1,1\}, k=0,\ldots, {K-1}\right\}$, where the binary symmetric label $y^{(k)}\in \{-1,1\}$ indicates the class to which the point $\mathbf{x}^{(k)}$ belongs. Each $\mathbf{x}^{(k)}$ is a $p$-dimensional real vector. We want to find the maximum-margin hyperplane that divides the points having $y^{(k)}=1$ from those having $y^{(k)}=-1$. # # Any hyperplane can be written as the set of points $\mathbf{x}$ satisfying # # $$ # \mathbf{w}^\intercal \mathbf{x} - b=0, # $$ # # where ${\mathbf{w}}$ denotes the (not necessarily normalized) normal vector to the hyperplane. The parameter $\tfrac{b}{\|\mathbf{w}\|}$ determines the offset of the hyperplane from the origin along the normal vector ${\mathbf{w}}$. 
# # If the training data are linearly separable, we can select two parallel hyperplanes in a way that they separate the data and there are no points between them, and then try to maximize their distance. The region bounded by them is called "the margin". These hyperplanes can be described by the equations # # $$\mathbf{w}^\intercal \mathbf{x} - b=1$$ # and # $$\mathbf{w}^\intercal \mathbf{x} - b=-1.$$ # # Note that the two equations above can represent any two parallel hyperplanes in $\Re^N$. Essentially, the direction of vector $\mathbf{w}$ determines the orientation of the hyperplanes, whereas parameter $b$ and the norm of $\mathbf{w}$ can be used to select their exact location. # + [markdown] slideshow={"slide_type": "subslide"} # To compute the distance between the hyperplanes, we can obtain the projection of vector ${\mathbf x}_1 - {\mathbf x}_2$, where ${\mathbf x}_1$ and ${\mathbf x}_2$ are points from each of the hyperplanes, onto a unitary vector orthonormal to the hyperplanes: # # <img src="./figs/margin_calculation.png" width="500"> # # $$\text{Distance between hyperplanes} = \left[\frac{\mathbf{w}}{\|\mathbf{w}\|}\right]^\top ({\mathbf x}_1 - {\mathbf x}_2) = \frac{\mathbf{w}^\top {\mathbf x}_1 - \mathbf{w}^\top {\mathbf x}_2}{\|\mathbf{w}\|} = \frac{2}{\|\mathbf{w}\|}.$$ # # # Therefore, to maximize the distance between the planes we want to minimize $\|\mathbf{w}\|$. # + [markdown] slideshow={"slide_type": "subslide"} # As we also have to prevent data points from falling into the margin, we add the following constraints: for each $k$ either # # \begin{align} # \mathbf{w}^\top \mathbf{x}^{(k)} - b &\ge +1, \qquad\text{ if } y^{(k)}=1, \qquad \text{or} \\ # \mathbf{w}^\top \mathbf{x}^{(k)} - b &\le -1, \qquad\text{ if } y^{(k)}=-1. # \end{align} # # This can be rewritten as: # # $$ # y^{(k)}(\mathbf{w}^\top \mathbf{x}^{(k)} - b) \ge 1, \quad \text{ for all } 1 \le k \le K. 
# $$ # # We can put this together to get the optimization problem: # # $$ # (\mathbf{w}^*,b^*) = \arg\min_{(\mathbf{w},b)} \|\mathbf{w}\| \\ # \text{subject to: } # y^{(k)}(\mathbf{w}^\top \mathbf{x}^{(k)} - b) \ge 1, \, \text{ for any } k = 0, \dots, {K-1} # $$ # # + [markdown] slideshow={"slide_type": "subslide"} # This optimization problem is difficult to solve because it depends on $\|\mathbf{w}\|$, the norm of $\mathbf{w}$, which involves a square root. Fortunately it is possible to alter the minimization objective $\|\mathbf{w}\|$ by substituting it with $\tfrac{1}{2}\|\mathbf{w}\|^2$ (the factor of $\frac{1}{2}$ being used for mathematical convenience) without changing the solution (the minimum of the original and the modified equation have the same $\mathbf{w}$ and $b$): # # $$ # (\mathbf{w}^*,b^*) = \arg\min_{(\mathbf{w},b)} \frac{1}{2}\|\mathbf{w}\|^2 \\ # \text{subject to: } # y^{(k)}(\mathbf{w}^\intercal \mathbf{x}^{(k)} - b) \ge 1, \, \text{ for any } k = 0, \dots, {K-1} # $$ # # This is a particular case of a *quadratic programming* problem. # # + [markdown] slideshow={"slide_type": "slide"} # ### 3.1. Primal form # # The optimization problem stated in the preceding section can be solved by means of a generalization of the Lagrange method of multipliers for inequality constraints, using the so called Karush–Kuhn–Tucker (KKT) multipliers $\boldsymbol{\alpha}$. According to it, the constrained problem can be expressed as # # $$(\mathbf{w}^*,b^*, \boldsymbol{\alpha}^*) = \arg\min_{\mathbf{w},b } \max_{\boldsymbol{\alpha}\geq 0 } \left\{ \frac{1}{2}\|\mathbf{w}\|^2 - \sum_{k=0}^{K-1}{\alpha^{(k)}\left[y^{(k)}(\mathbf{w}^\intercal \mathbf{x}^{(k)} - b)-1\right]} \right\} # $$ # # that is, we look for a *saddle point*. 
# # A key result in convex optimization theory is that, for the kind of optimization problems discussed here (see [here](http://www.onmyphd.com/?p=kkt.karush.kuhn.tucker&ckattempt=1), for instance), the *max* and *min* operators are interchangeable, so that # $$ # (\mathbf{w}^*,b^*, \boldsymbol{\alpha}^*) = \arg\max_{\boldsymbol{\alpha}\geq 0 } \min_{\mathbf{w},b } \left\{ \frac{1}{2}\|\mathbf{w}\|^2 - \sum_{k=0}^{K-1}{\alpha^{(k)}\left[y^{(k)}(\mathbf{w}^\intercal \mathbf{x}^{(k)} - b)-1\right]} \right\} # $$ # # Note that the inner minimization problem is now quadratic in $\mathbf{w}$ and, thus, the minimum can be found by differentiation: # $$ # \mathbf{w}^* = \sum_{k=0}^{K-1}{\alpha^{(k)} y^{(k)}\mathbf{x}^{(k)}}. # $$ # # + [markdown] slideshow={"slide_type": "subslide"} # #### 3.1.1. Support Vectors # # In view of the optimization problem, we can check that all the points which can be separated as $y^{(k)}(\mathbf{w}^\intercal \mathbf{x}^{(k)} - b) - 1 > 0 $ do not matter since we must set the corresponding $\alpha^{(k)}$ to zero. Therefore, only a few $\alpha^{(k)}$ will be greater than zero. The corresponding $\mathbf{x}^{(k)}$ are known as `support vectors`. # # It can be seen that the optimum parameter vector $\mathbf{w}^\ast$ can be expressed in terms of the support vectors only: # # $$ # \mathbf{w}^* = \sum_{k\in {\cal{S}}_{SV}}{\alpha^{(k)} y^{(k)}\mathbf{x}^{(k)}}. # $$ # # where ${\cal{S}}_{SV}$ is the set of indexes associated to support vectors. # # + [markdown] slideshow={"slide_type": "subslide"} # #### 3.1.2. The computation of $b$ # # Support vectors lie on the margin and satisfy $y^{(k)}(\mathbf{w}^\intercal \mathbf{x}^{(k)} - b) = 1$. 
From this condition, we can obtain the value of $b$, since for any support vector: # # $$\mathbf{w}^\intercal\mathbf{x}^{(k)} - b = \frac{1}{y^{(k)}} = y^{(k)} \iff b = \mathbf{w}^\intercal\mathbf{x}^{(k)} - y^{(k)} # $$ # # This estimate of $b$, the centerpoint of the division, depends only on a single pair $y^{(k)}$ and $x^{(k)}$. We may get a more robust estimate of the center by averaging over all of the $N_{SV}$ support vectors, if we believe the population mean is a good estimate of the midpoint, so in practice, $b$ is often computed as: # # $$ # b = \frac{1}{N_{SV}} \sum_{\mathbf{x}^{(k)}\in {\cal{S}}_{SV}}{(\mathbf{w}^\intercal\mathbf{x}^{(k)} - y^{(k)})} # $$ # + [markdown] slideshow={"slide_type": "slide"} # ### 3.2. Dual form # # Writing the classification rule in its unconstrained dual form reveals that the *maximum-margin hyperplane* and therefore the classification task is only a function of the *support vectors*, the subset of the training data that lie on the margin. # # Using the fact that $\|\mathbf{w}\|^2 = \mathbf{w}^\intercal \mathbf{w}$ and substituting $\mathbf{w} = \sum_{k=0}^{K-1}{\alpha^{(k)} y^{(k)}\mathbf{x}^{(k)}}$, we obtain # # \begin{align} # (b^*, \boldsymbol{\alpha}^*) # &= \arg\max_{\boldsymbol{\alpha}\geq 0 } # \min_b \left\{ # \sum_{k=0}^{K-1}\alpha^{(k)} - # \frac{1}{2} # \sum_{k=0}^{K-1} \sum_{j=0}^{K-1} {\alpha^{(k)} \alpha^{(j)} y^{(k)} y^{(j)} (\mathbf{x}^{(k)})^\intercal\mathbf{x}^{(j)}} # + b \sum_{k=0}^{K-1}\alpha^{(k)}y^{(k)} # \right\} # \end{align} # # Note that, if $\sum_{k=0}^{K-1}\alpha^{(k)}y^{(k)} \neq 0$ the optimal value of $b$ is $+\infty$ of $-\infty$, and # # \begin{align} # \min_b \left\{ # \sum_{k=0}^{K-1}\alpha^{(k)} - # \frac{1}{2} # \sum_{k=0}^{K-1} \sum_{j=0}^{K-1} {\alpha^{(k)} \alpha^{(j)} y^{(k)} y^{(j)} (\mathbf{x}^{(k)})^\intercal\mathbf{x}^{(j)}} # # + b \sum_{k=0}^{K-1}\alpha^{(k)}y^{(k)} # \right\} = -\infty. 
# \end{align} # # Therefore, any $\boldsymbol{\alpha}$ satifying $\sum_{k=0}^{K-1}\alpha^{(k)}y^{(k)} \neq 0$ is suboptimal, so that the optimal multipliers must satisfy the condition $\sum_{k=0}^{K-1}\alpha^{(k)}y^{(k)} = 0$. # # + [markdown] slideshow={"slide_type": "subslide"} # Summarizing, the dual formulation of the optimization problem is # # $$ # \boldsymbol{\alpha}^* = \arg\max_{\boldsymbol{\alpha}\geq 0} \sum_{k=0}^{K-1} \alpha^{(k)} - # \frac12 \sum_{k,j} \alpha^{(k)} \alpha^{(j)} y^{(k)} y^{(j)} k(\mathbf{x}^{(k)}, \mathbf{x}^{(j)}) \\ # \text{subject to: } \qquad \sum_{k=0}^{K-1} \alpha^{(k)} y^{(k)} = 0. # $$ # # where the *kernel* $k(\cdot)$ is defined by $k(\mathbf{x}^{(k)},\mathbf{x}^{(j)})=(\mathbf{x}^{(k)})^\intercal\mathbf{x}^{(j)}$. # # Many implementations of the SVM use this dual formulation. They proceed in three steps: # # 1. Solve the dual problem to obtain $\boldsymbol{\alpha}^*$. Usually, only a small number of $\alpha^{*(k)}$ are nonzero. The corresponding values of ${\bf x}^{(k)}$ are called the *support vectors*. # 2. Compute $\mathbf{w}^* = \sum_{k=0}^{K-1}{\alpha^{*(k)} y^{(k)}\mathbf{x}^{(k)}}$ # 3. Compute $b^* = \frac{1}{N_{SV}} \sum_{\alpha^{*(k)}\neq 0}{(\mathbf{w}^{*\intercal}\mathbf{x}^{(k)} - y^{(k)})} # $ # # + [markdown] slideshow={"slide_type": "slide"} # ## 4. Fitting a Support Vector Machine # # <small> <font color="blue"> [Source: A [notebook](https://github.com/jakevdp/sklearn_pycon2015/blob/master/notebooks/03.1-Classification-SVMs.ipynb) by [<NAME>](https://github.com/jakevdp>)] </font> </small> # # Now we'll fit a Support Vector Machine Classifier to these points. 
# # + slideshow={"slide_type": "fragment"} clf = svm.SVC(kernel='linear') clf.fit(X, y) # + [markdown] slideshow={"slide_type": "fragment"} # To better visualize what's happening here, let's create a quick convenience function that will plot SVM decision boundaries for us: # + slideshow={"slide_type": "subslide"} def plot_svc_decision_function(clf, ax=None): """Plot the decision function for a 2D SVC""" if ax is None: ax = plt.gca() x = np.linspace(plt.xlim()[0], plt.xlim()[1], 30) y = np.linspace(plt.ylim()[0], plt.ylim()[1], 30) Y, X = np.meshgrid(y, x) P = np.zeros_like(X) for i, xi in enumerate(x): for j, yj in enumerate(y): P[i, j] = clf.decision_function(np.array([xi, yj]).reshape(1,-1)) # plot the margins ax.contour(X, Y, P, colors='k', levels=[-1, 0, 1], alpha=0.5, linestyles=['--', '-', '--']) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='copper') plot_svc_decision_function(clf); # + [markdown] slideshow={"slide_type": "subslide"} # Notice that the dashed lines touch a couple of the points: these points are the *support vectors*. In scikit-learn, these are stored in the ``support_vectors_`` attribute of the classifier: # + slideshow={"slide_type": "fragment"} plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=200, marker='s'); plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='copper') plot_svc_decision_function(clf) # + [markdown] slideshow={"slide_type": "subslide"} # Let's use IPython's interact functionality to explore how the distribution of points affects the support vectors and the discriminative fit. 
# (This is only available in IPython 2.0+, and will not work in a static view)

# + slideshow={"slide_type": "skip"}
def plot_svm(N=10, kernel='linear'):
    """Fit an SVC to the first N blob points and plot its decision boundary.

    Parameters
    ----------
    N : int
        Number of points (out of the 200 generated) used for fitting.
    kernel : str
        Kernel passed to ``svm.SVC``. Previously hard-coded to 'linear',
        which broke the ``interact`` call below: ``interact`` forwards every
        extra keyword to this function, so it must accept ``kernel``.
    """
    X, y = make_blobs(n_samples=200, centers=2,
                      random_state=0, cluster_std=0.60)
    X = X[:N]
    y = y[:N]
    clf = svm.SVC(kernel=kernel)
    clf.fit(X, y)
    plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='copper')
    plt.xlim(-1, 4)
    plt.ylim(-1, 6)
    plot_svc_decision_function(clf, plt.gca())
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
                s=200, facecolors='none')

interact(plot_svm, N=[10, 200], kernel='linear')

# + [markdown] slideshow={"slide_type": "skip"}
# Notice the unique thing about SVM is that only the support vectors matter: that is, if you moved any of the other points without letting them cross the decision boundaries, they would have no effect on the classification results!

# + [markdown] slideshow={"slide_type": "slide"}
# ## 5. Non-separable problems.
#
# <small> <font color="blue"> [Source: adapted from [Wikipedia](https://en.wikipedia.org/wiki/Support_vector_machine)]</font> </small>
#
# In 1995, <NAME> and <NAME> suggested a modified maximum margin idea that allows for mislabeled examples. If there exists no hyperplane that can split the `positive` and `negative` samples, the `Soft Margin`
# method will choose a hyperplane that splits the examples as cleanly as possible, while still maximizing the distance to the nearest cleanly split examples. The method introduces non-negative slack variables, $\xi^{(k)}$, which measure the degree of misclassification of the data $\mathbf{x}^{(k)}$
#
# $$y^{(k)}(\mathbf{w}^\intercal\mathbf{x}^{(k)} - b) \ge 1 - \xi^{(k)} \quad k=0,\ldots, K-1.
# $$
#
# The objective function is then increased by a function which penalizes non-zero $\xi^{(k)}$, and the optimization becomes a trade off between a large margin and a small error penalty.
If the penalty function is linear, the optimization problem becomes: # # $$(\mathbf{w}^*,\mathbf{\xi}^*, b^*) = \arg\min_{\mathbf{w},\mathbf{\xi}, b } \left\{\frac{1}{2} \|\mathbf{w}\|^2 + C \sum_{k=0}^{K-1} \xi^{(k)} \right\} \\ # \text{subject to: } \quad y^{(k)}(\mathbf{w}^\intercal\mathbf{x}^{(k)} - b) \ge 1 - \xi^{(k)}, \quad \xi^{(k)} \ge 0, \quad k=0,\ldots, K-1. # $$ # # + [markdown] slideshow={"slide_type": "subslide"} # # This constraint along with the objective of minimizing $\|\mathbf{w}\|$ can be solved using KKT multipliers as done above. One then has to solve the following problem: # # $$ # \arg\min_{\mathbf{w}, \mathbf{\xi}, b } \max_{\boldsymbol{\alpha}, \boldsymbol{\beta} } # \left\{ \frac{1}{2}\|\mathbf{w}\|^2 # # + C \sum_{k=0}^{K-1} \xi^{(k)} # - \sum_{k=0}^{K-1} {\alpha^{(k)}\left[y^{(k)}(\mathbf{w}^\intercal \mathbf{x}^{(k)} - b) -1 + \xi^{(k)}\right]} # - \sum_{k=0}^{K-1} \beta^{(k)} \xi^{(k)} \right \}\\ # \text{subject to: } \quad # \alpha^{(k)}, \beta^{(k)} \ge 0. # $$ # # A similar analysis to that in the separable case can be applied to show that the dual formulation of the optimization problem is # # $$ # \boldsymbol{\alpha}^* = \arg\max_{0 \leq \alpha^{(k)} \leq C} \sum_{k=0}^{K-1} \alpha^{(k)} - # \frac12 \sum_{k,j} \alpha^{(k)} \alpha^{(j)} y^{(k)} y^{(j)} k(\mathbf{x}^{(k)}, \mathbf{x}^{(j)}) \\ # \text{subject to: } \qquad \sum_{k=0}^{K-1} \alpha^{(k)} y^{(k)} = 0. # $$ # Note that the only difference with the separable case is given by the constraints $\alpha^{(k)} \leq C$. # + [markdown] slideshow={"slide_type": "slide"} # ## 6. Nonlinear classification # # <small> <font color="blue"> [Source: adapted from [Wikipedia](https://en.wikipedia.org/wiki/Support_vector_machine)]</font> </small> # # The original optimal hyperplane algorithm proposed by Vapnik in 1963 was a linear classifier. 
However, in 1992, <NAME>, <NAME> and <NAME> suggested a way to create nonlinear classifiers by applying the *kernel trick* to maximum-margin hyperplanes. The resulting algorithm is formally similar, except that every dot product is replaced by a nonlinear kernel function. This allows the algorithm to fit the maximum-margin hyperplane in a transformed feature space. The transformation may be nonlinear and the transformed space high dimensional; thus though the classifier is a hyperplane in the high-dimensional feature space, it may be nonlinear in the original input space. # # <img src="./figs/kernel.png" width="400"> # # The kernel is related to the transform $\phi(\mathbf{x})$ by the equation $k(\mathbf{x}, \mathbf{x}') = \phi(\mathbf{x})^\intercal \phi(\mathbf{x}')$. However, note that we do not need to explicitly compute $\phi(\mathbf{x})$, as long as we can express all necessary calculations in terms of the kernel function only, as it is the case for the optimization problem in the dual case. # # + [markdown] slideshow={"slide_type": "subslide"} # The predictions of the SVM classifier can also be expressed in terms of kernels only, so that we never need to explicitely compute $\phi(\mathbf{x})$. # # $$\begin{align}\hat y({\mathbf{x}}) & = {\mathbf {w^\ast}}^\intercal \phi(\mathbf{x}) - b^\ast \\ \\ # & = \left[\sum_{k \in {\cal{S}}_{SV}} \alpha^{(k)^*} y^{(k)} \phi(\mathbf{x}^{(k)})\right]^\intercal {\phi(\mathbf{x})} - b^\ast \\ \\ # & = - b^\ast + \sum_{k \in {\cal{S}}_{SV}} \alpha^{(k)^*} y^{(k)} k(\mathbf{x}^{(k)}, {\mathbf{x}}) # \end{align}$$ # # + [markdown] slideshow={"slide_type": "subslide"} # Some common kernels include: # # * **Gaussian**: $k(\mathbf{x},\mathbf{x}')=\exp(-\gamma \|\mathbf{x} - \mathbf{x}'\|^2)$, for $\gamma > 0$. Sometimes parametrized using $\gamma=\dfrac{1}{2 \sigma^2}$. This is by far the most widely used kernel. 
# * Polynomial (homogeneous): $k(\mathbf{x},\mathbf{x}')=(\mathbf{x}^\intercal \mathbf{x}')^d$ # * Polynomial (inhomogeneous): $k(\mathbf{x},\mathbf{x}') = (\mathbf{x}^\intercal \mathbf{x}' + 1)^d$ # * Hyperbolic tangent: $k(\mathbf{x},\mathbf{x}') = \tanh(\kappa \mathbf{x}^\intercal \mathbf{x}'+c)$, for some (not every) $\kappa > 0$ and $c < 0$. # # # + [markdown] slideshow={"slide_type": "slide"} # ### 6.1. Example. # # <small> <font color="blue"> [Source: A [notebook](https://github.com/jakevdp/sklearn_pycon2015/blob/master/notebooks/03.1-Classification-SVMs.ipynb) by [<NAME>](https://github.com/jakevdp>)] </font> </small> # # Where SVM gets incredibly exciting is when it is used in conjunction with *kernels*. # To motivate the need for kernels, let's look at some data which is not linearly separable: # + slideshow={"slide_type": "fragment"} X, y = make_circles(100, factor=.1, noise=.1) clf = svm.SVC(kernel='linear').fit(X, y) plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='copper') plot_svc_decision_function(clf); # + [markdown] slideshow={"slide_type": "subslide"} # Clearly, no linear discrimination will ever separate these data. # One way we can adjust this is to apply a **kernel**, which is some functional transformation of the input data. 
#
# For example, one simple model we could use is a **radial basis function**

# + slideshow={"slide_type": "fragment"}
r = np.exp(-(X[:, 0] ** 2 + X[:, 1] ** 2))

# + [markdown] slideshow={"slide_type": "subslide"}
# If we plot this along with our data, we can see the effect of it:

# + slideshow={"slide_type": "fragment"}
def plot_3D(elev=30, azim=30):
    """Scatter the data in 3D, using the RBF feature ``r`` as the z axis.

    elev, azim : viewing angles in degrees, forwarded to ``view_init``.
    """
    ax = plt.subplot(projection='3d')
    ax.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='spring')
    ax.view_init(elev=elev, azim=azim)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('r')

# Fixed: the keyword was misspelled `azip`, so `interact` could not match it
# to plot_3D's `azim` parameter and the azimuth control was broken.
interact(plot_3D, elev=[-90, 90], azim=(-180, 180));

# + [markdown] slideshow={"slide_type": "subslide"}
# We can see that with this additional dimension, the data becomes trivially linearly separable!
# This is a relatively simple kernel; SVM has a more sophisticated version of this kernel built-in to the process. This is accomplished by using the Gaussian kernel (``kernel='rbf'``), short for *radial basis function*:

# + slideshow={"slide_type": "fragment"}
clf = svm.SVC(kernel='rbf', C=10)
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='copper')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=200, facecolors='none');

# + [markdown] slideshow={"slide_type": "subslide"}
# Here there are effectively $N$ basis functions: one centered at each point! Through a clever mathematical trick, this computation proceeds very efficiently using the "Kernel Trick", without actually constructing the matrix of kernel evaluations.

# + [markdown] slideshow={"slide_type": "subslide"}
# **Exercise**: Apply the linear SVM and the SVM with Gaussian kernel to the discrimination of classes `Versicolor` and `Virginica` in the Iris Dataset, using attributes $x_0$ and $x_1$ only. Plot the corresponding decision boundaries and the support vectors.

# + [markdown] slideshow={"slide_type": "slide"}
# ## 7.
Hyperparameters
#
# Note that the SVM formulation has several free parameters (hyperparameters) that must be selected out of the optimization problem:
#
# - The free parameter $C$ used to solve non-separable problems.
# - The kernel parameters (for instance, parameter $\gamma$ from the Gaussian kernel).
#
# These parameters are usually adjusted using a cross-validation procedure.
C4.Classification_SVM/svm_student.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import re as regex
import matplotlib.pyplot as plt
import seaborn as sns

# Raw UCI mushroom dataset: every column holds single-character codes.
df = pd.read_csv('../../data/raw/mushrooms.csv')

# Mapping from the single-character codes to human-readable labels,
# keyed by the prettified column names produced by the renames below.
# dictionary by Alessandro. Thank you.
# NOTE: fixed two label typos from the original ('crwoded' -> 'crowded',
# 'purlple' -> 'purple'); renamed from `dict` to avoid shadowing the builtin.
value_map = {
    'Class': {'e': 'edible', 'p': 'poisonous'},
    'Cap Shape': {'b': 'bell', 'c': 'conical', 'x': 'convex', 'f': 'flat', 'k': 'knobbed', 's': 'sunken'},
    'Cap Surface': {'f': 'fibrous', 'g': 'grooves', 'y': 'scaly', 's': 'smooth'},
    'Cap Color': {'n': 'brown', 'b': 'buff', 'c': 'cinnamon', 'g': 'gray', 'r': 'green', 'p': 'pink', 'u': 'purple', 'e': 'red', 'w': 'white', 'y': 'yellow'},
    'Bruises': {'t': True, 'f': False},
    'Odor': {'a': 'almond', 'l': 'anise', 'c': 'creosote', 'y': 'fishy', 'f': 'foul', 'm': 'musty', 'n': 'none', 'p': 'pungent', 's': 'spicy'},
    'Gill Attachment': {'a': 'attached', 'd': 'descending', 'f': 'free', 'n': 'notched'},
    'Gill Spacing': {'c': 'close', 'w': 'crowded', 'd': 'distant'},
    'Gill Size': {'b': 'broad', 'n': 'narrow'},
    'Gill Color': {'k': 'black', 'n': 'brown', 'b': 'buff', 'h': 'chocolate', 'g': 'gray', 'r': 'green', 'o': 'orange', 'p': 'pink', 'u': 'purple', 'e': 'red', 'w': 'white', 'y': 'yellow'},
    'Stalk Shape': {'e': 'enlarging', 't': 'tapering'},
    'Stalk Root': {'b': 'bulbous', 'c': 'club', 'u': 'cup', 'e': 'equal', 'z': 'rhizomorphs', 'r': 'rooted', '?': 'unknown'},
    'Stalk Surface Above Ring': {'f': 'fibrous', 'y': 'scaly', 'k': 'silky', 's': 'smooth'},
    'Stalk Surface Below Ring': {'f': 'fibrous', 'y': 'scaly', 'k': 'silky', 's': 'smooth'},
    'Stalk Color Above Ring': {'n': 'brown', 'b': 'buff', 'c': 'cinnamon', 'g': 'gray', 'o': 'orange', 'p': 'pink', 'e': 'red', 'w': 'white', 'y': 'yellow'},
    'Stalk Color Below Ring': {'n': 'brown', 'b': 'buff', 'c': 'cinnamon', 'g': 'gray', 'o': 'orange', 'p': 'pink', 'e': 'red', 'w': 'white', 'y': 'yellow'},
    'Veil Type': {'p': 'partial', 'u': 'universal'},
    'Veil Color': {'n': 'brown', 'o': 'orange', 'w': 'white', 'y': 'yellow'},
    'Ring Number': {'n': 0, 'o': 1, 't': 2},
    'Ring Type': {'c': 'cobwebby', 'e': 'evanescent', 'f': 'flaring', 'l': 'large', 'n': 'none', 'p': 'pendant', 's': 'sheathing', 'z': 'zone'},
    'Spore Print Color': {'k': 'black', 'n': 'brown', 'b': 'buff', 'h': 'chocolate', 'r': 'green', 'o': 'orange', 'u': 'purple', 'w': 'white', 'y': 'yellow'},
    'Population': {'a': 'abundant', 'c': 'clustered', 'n': 'numerous', 's': 'scattered', 'v': 'several', 'y': 'solitary'},
    'Habitat': {'g': 'grasses', 'l': 'leaves', 'm': 'meadows', 'p': 'paths', 'u': 'urban', 'w': 'waste', 'd': 'woods'},
}

df = (df
      # 'cap-shape' -> 'cap shape'
      .rename(columns=lambda c: regex.sub(r'-', r' ', c))
      # 'cap shape' -> 'Cap Shape' (capitalize each word)
      .rename(columns=lambda c: regex.sub(
          r'([a-z])([a-z]*)',
          lambda m: '{}{}'.format(m.group(1).upper(), m.group(2)),
          c))
      .replace(value_map)
      # `df` here is still the pre-pipeline frame, so the original
      # lowercase 'class' column (codes 'e'/'p') is what we copy.
      .assign(Edible=df['class'])
      .replace({'Edible': {'e': True, 'p': False}})
      )
print(df.head(3))
# -

# One countplot per feature column, split by edibility. Skip 'Class' (index 0)
# and the derived 'Edible' (last column). The original loop iterated
# `range(1, length)` over column *indices*, which silently dropped the last
# feature column and left axes[0] unused; enumerating the columns fixes both.
feature_cols = df.columns[1:-1]
n_rows = int(len(feature_cols) / 3 + 1)
h = plt.figure(figsize=(10 * 3, 10 * n_rows))
axes = []
for i, col in enumerate(feature_cols):
    ax = h.add_subplot(n_rows, 3, i + 1)
    sns.countplot(data=df, x=col, hue="Edible", hue_order=[True, False], ax=ax)
    axes.append(ax)
analysis/alec_nixon/scriptTesting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="5ERWR-jl2psw" colab_type="code" colab={} # https://github.com/paraschopra/generating-text-small-corpus/blob/master/philosophy-generator-on-pretrained.ipynb # + id="EyOLtxRdZRUK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="0cc0e9d8-5703-4a6a-dc46-01dc04204c33" executionInfo={"status": "ok", "timestamp": 1546933503135, "user_tz": -330, "elapsed": 43125, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} from google.colab import drive drive.mount('/content/gdrive') # + id="-Hftf_Ksaz-T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="a47dff29-102b-45f4-c4a4-1d44050ddddc" executionInfo={"status": "ok", "timestamp": 1546933589340, "user_tz": -330, "elapsed": 39710, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} # http://pytorch.org/ from os.path import exists from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()) # cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/' accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu' # !pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision import torch # + id="bgHcmBLYbLni" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1006} outputId="c4f80d67-e972-44e7-efea-e6aad0bc8ac6" executionInfo={"status": "ok", "timestamp": 1546933628973, "user_tz": -330, "elapsed": 18466, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} # !pip install fastai # + id="tapRR92Wbaei" colab_type="code" colab={} from fastai import * from 
fastai.text import * # + id="AievNY1-bjoB" colab_type="code" colab={} import pandas as pd import numpy as np # + id="DZuoWQ0YbmxK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2198075b-77ef-4408-d5c2-7a0c9f2df361" executionInfo={"status": "ok", "timestamp": 1546933723073, "user_tz": -330, "elapsed": 1451, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} # cd 'gdrive/My Drive/app/shelbot/' # + id="fwhh065Lbz6Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="af422ae9-d5b4-48b0-d378-d920006a4c0e" executionInfo={"status": "ok", "timestamp": 1546933740941, "user_tz": -330, "elapsed": 4654, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} # !ls # + id="Evo62qXEb2w6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="4b3b64c1-b5f3-4fdd-e464-af9d4ee28db0" executionInfo={"status": "ok", "timestamp": 1546933775276, "user_tz": -330, "elapsed": 2034, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} df = pd.read_csv('sq.csv', sep=r'\<\|\>', header=None) # + id="_bi8nCjlcCMi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="1f8e0111-cf50-4a6d-e6ee-d3be327373ee" executionInfo={"status": "ok", "timestamp": 1546933786218, "user_tz": -330, "elapsed": 1312, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} df.head() # + id="cTZojnD-cFDi" colab_type="code" colab={} df[0] = df[0].str.lower() # + id="c0uxH1XJcHpy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="899bce10-1386-40bc-acf6-73875f0e0428" executionInfo={"status": "ok", "timestamp": 1546933826481, "user_tz": -330, "elapsed": 1123, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} df.head() # + id="2FkpfqpJcO6a" colab_type="code" colab={} valid_pct = 0.05 
#validation percent df = df.iloc[np.random.permutation(len(df))] cut = int(valid_pct * len(df)) + 1 train_df, valid_df = df[cut:], df[:cut] # + id="lBVSDHi7cUo6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="51f7916f-1b87-4f4c-f1b3-44af90049e30" executionInfo={"status": "ok", "timestamp": 1546933857687, "user_tz": -330, "elapsed": 1604, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} len(train_df), len(valid_df) # + id="8jG4hLoScWbq" colab_type="code" colab={} nan_rows = df[df[0].isnull()] # + id="oxGgGZBdcZsy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 49} outputId="cfc92f7b-1fb3-433f-ed66-b68956e4879d" executionInfo={"status": "ok", "timestamp": 1546933898013, "user_tz": -330, "elapsed": 1702, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} nan_rows # + id="VNn6te-tcgQB" colab_type="code" colab={} data_lm = TextLMDataBunch.from_df('data', train_df, valid_df, text_cols=0) # + id="y_d-_8h9dgOl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bdf4b129-5228-4713-ee93-8213cbc8d55b" executionInfo={"status": "ok", "timestamp": 1546934360088, "user_tz": -330, "elapsed": 1652, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} torch.set_grad_enabled(False) # + id="PG48s4_6c_kL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 94} outputId="1c44599b-d6b7-41a1-ea9a-e1c733238594" executionInfo={"status": "ok", "timestamp": 1546934453687, "user_tz": -330, "elapsed": 13246, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} learn = language_model_learner(data_lm, pretrained_model=URLs.WT103, drop_mult=0.5) with torch.enable_grad(): learn.fit_one_cycle(1, 1e-2) # + id="aUyAKr1OdGJa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 94} 
outputId="7cd27f14-6399-4c4c-9449-cb9a3fd3cb00" executionInfo={"status": "ok", "timestamp": 1546934513329, "user_tz": -330, "elapsed": 16643, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} learn.unfreeze() with torch.enable_grad(): learn.fit_one_cycle(1, 1e-3) # + id="iw4H3IBhey0h" colab_type="code" colab={} wd=1e-7 lr=1e-3 lrs = lr # + id="FRcldSJGe3fa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="3bdccaa4-1d23-467c-ebd0-9778d0e97c31" executionInfo={"status": "ok", "timestamp": 1546934694474, "user_tz": -330, "elapsed": 153417, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} with torch.enable_grad(): learn.fit(10,lrs, wd) # + id="T83MXV82e6AL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="cdf96086-7f21-423e-85de-e851deef4c7b" executionInfo={"status": "ok", "timestamp": 1546940741680, "user_tz": -330, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} learn.predict("xxbos", n_words=50, temperature=0.75) # + id="HMEMiIFNshcE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="e7b84737-2c49-492c-ea85-9c344afb412b" executionInfo={"status": "ok", "timestamp": 1546939720325, "user_tz": -330, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} number_of_ideas = 10 ideas_counter = 0 all_ideas = [] for i in range(1000): idea = learn.predict("xxbos xxfld 1", n_words=20, temperature=0.8) print (idea) ideas = idea.split("xxbos xxfld 1") ideas = ideas[1:] print (ideas) if len(ideas) > 0: for idea in ideas: idea = idea.replace("xxbos xxfld 1 ","").strip() if idea: all_ideas.append(idea) ideas_counter = ideas_counter+1 else: ideas_counter += 1 print ('Counter: ' + str(ideas_counter)) if ideas_counter > number_of_ideas: break # + id="iyiH5ggnsx2s" colab_type="code" colab={"base_uri": 
"https://localhost:8080/", "height": 204} outputId="e0f8436b-e38b-4f26-b9f8-e568bbc3454e" executionInfo={"status": "ok", "timestamp": 1546939725670, "user_tz": -330, "elapsed": 1327, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06271417307074127697"}} all_ideas # + id="C3h6RBq4tHXy" colab_type="code" colab={}
UsingPreTrained.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Bite Size Bayes # # Copyright 2020 <NAME> # # License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) # ## Review # # [In the previous notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/03_cookie.ipynb) we started with Bayes's Theorem, written like this: # # $P(A|B) = P(A) ~ P(B|A) ~/~ P(B)$ # # And applied it to the case where we use data, $D$, to update the probability of a hypothesis, $H$. In this context, we write Bayes's Theorem like this: # # $P(H|D) = P(H) ~ P(D|H) ~/~ P(D)$ # # And give each term a name: # # * $P(H)$ is the "prior probability" of the hypothesis, which represents how confident you are that $H$ is true prior to seeing the data, # # * $P(D|H)$ is the "likelihood" of the data, which is the probability of seeing $D$ if the hypothesis is true, # # * $P(D)$ is the "total probability of the data", that is, the chance of seeing $D$ regardless of whether $H$ is true or not. # # * $P(H|D)$ is the "posterior probability" of the hypothesis, which indicates how confident you should be that $H$ is true after taking the data into account. # # We used Bayes's Theorem to solve a cookie-related problem, and I presented the Bayes table, a way to solve Bayesian problems more generally. I didn't really explain how it works, though. That's the goal of this notebook. # # I'll start by extending the table method to a problem with more than two hypotheses. # ## More hypotheses # # One nice thing about the table method is that it works with more than two hypotheses. As an example, let's do another version of the cookie problem. # # Suppose you have five bowls: # # * Bowl 0 contains no vanilla cookies. 
# # * Bowl 1 contains 25% vanilla cookies. # # * Bowl 2 contains 50% vanilla cookies. # # * Bowl 3 contains 75% vanilla cookies. # # * Bowl 4 contains 100% vanilla cookies. # # Now suppose we choose a bowl at random and then choose a cookie, and we get a vanilla cookie. What is the posterior probability that we chose each bowl? # # Here's a table that represents the five hypotheses and their prior probabilities: # + import pandas as pd table = pd.DataFrame() table['prior'] = 1/5, 1/5, 1/5, 1/5, 1/5 table # - # The likelihood of drawing a vanilla cookie from each bowl is the given proportion of vanilla cookies: table['likelihood'] = 0, 0.25, 0.5, 0.75, 1 table # Once we have priors and likelihoods, the remaining steps are always the same. We compute the unnormalized posteriors: table['unnorm'] = table['prior'] * table['likelihood'] table # And the total probability of the data. prob_data = table['unnorm'].sum() prob_data # Then divide through to get the normalized posteriors. table['posterior'] = table['unnorm'] / prob_data table # Two things you might notice about these results: # # 1. One of the hypotheses has a posterior probability of 0, which means it has been ruled out entirely. And that makes sense: Bowl 0 contains no vanilla cookies, so if we get a vanilla cookie, we know it's not from Bowl 0. # # 2. The posterior probabilities form a straight line. We can see this more clearly by plotting the results. import matplotlib.pyplot as plt table['posterior'].plot(kind='bar') plt.xlabel('Bowl #') plt.ylabel('Posterior probability'); # **Exercise:** Use the table method to solve the following problem and plot the results as a bar chart. # # >The blue M&M was introduced in 1995. Before then, the color mix in a bag of plain M&Ms was (30% Brown, 20% Yellow, 20% Red, 10% Green, 10% Orange, 10% Tan). # > # >Afterward it was (24% Blue , 20% Green, 16% Orange, 14% Yellow, 13% Red, 13% Brown). 
# > # >A friend of mine has two bags of M&Ms, and he tells me that one is from 1994 and one from 1996. He won't tell me which is which, but he gives me one M&M from each bag. One is yellow and one is green. What is the probability that the yellow M&M came from the 1994 bag? # # Hint: If the yellow came from 1994, the green must have come from 1996. By Theorem 2 (conjunction), the likelihood of this combination is (0.2)(0.2). # + # Solution goes here # + # Solution goes here # - # ## Why does this work? # # Now I will explain how the table method works, making two arguments: # # 1. First, I'll show that it makes sense to normalize the posteriors so they add up to 1. # # 2. Then I'll show that this step is consistent with Bayes's Theorem, because the total of the unnormalized posteriors is the total probability of the data, $P(D)$. # # Here's the first argument. Let's start with Bayes's Theorem: # # $P(H|D) = P(H) ~ P(D|H)~/~P(D)$ # # Notice that the denominator, $P(D)$, does not depend on $H$, so it is the same for all hypotheses. If we factor it out, we get: # # $P(H|D) \sim P(H) ~ P(D|H)$ # # which says that the posterior probabilities *are proportional to* the unnormalized posteriors. In other words, if we leave out $P(D)$, we get the proportions right, but not the total. # Then how do we figure out the total? Well, in this example we know that the cookie came from exactly one of the bowls. So the hypotheses are: # # * Mutually exclusive, that is, only one of them can be true, and # # * Collectively exhaustive, that is, at least one of them must be true. # # Exactly one of the hypotheses must be true, so the posterior probabilities have to add up to 1. Most of the time, the unnormalized posteriors don't add up to 1, but when we divide through by the total, we ensure that the *normalized* posteriors do. # # That's the first argument. I hope it makes some sense, but if you don't find it entirely satisfying, keep going. 
# ## Rolling the dice # # Before I can make the second argument, we need one more law of probability, which I will explain with a new example: # # > Suppose you have a 4-sided die and a 6-sided die. You choose one at random and roll it. What is the probability of getting a 1? # # To answer that, I'll define two hypotheses and a datum: # # * $H_4$: You chose the 4-sided die. # # * $H_6$: You chose the 6-sided die. # # * $D$: You rolled a 1. # On a 4-sided die, the probability of rolling 1 is $1/4$; on a 6-sided die it is $1/6$. So we can write the conditional probabilities: # # $P(D|H_4) = 1/4$ # # $P(D|H_6) = 1/6$ # # And if the probability of choosing either die is equal, we know the prior probabilities: # # $P(H_4) = 1/2$ # # $P(H_6) = 1/2$ # But what is the total probability of the data, $P(D)$? # # At this point your intuition might tell you that it is the weighted sum of the conditional probabilities: # # $P(D) = P(H_4)P(D|H_4) + P(H_6)P(D|H_6)$ # # Which is # # $P(D) = (1/2)(1/4) + (1/2)(1/6)$ # # Which is (1/2)*(1/4) + (1/2)*(1/6) # And that's correct. But if your intuition did not tell you that, or if you would like to see something closer to a proof, keep going. # ## Disjunction # # In this example, we can describe the outcome in terms of logical operators like this: # # > The outcome is 1 if you choose the 4-sided die **and** roll 1 **or** you roll the 6-sided die **and** roll 1. # # Using math notation, $D$ is true if: # # $(H_4 ~and~ D) ~or~ (H_6 ~and~ D)$ # # We've already seen the $and$ operator, also known as "conjunction", but we have not yet seen the $or$ operator, which is also known as "disjunction"? # # For that, we a new rule, which I'll call **Theorem 4**: # # $P(A ~or~ B) = P(A) + P(B) - P(A ~and~ B)$ # To see why that's true, let's take a look at the Venn diagram: # # <img width="200" src="https://github.com/AllenDowney/BiteSizeBayes/raw/master/theorem4_venn_diagram.png"> # # What we want is the total of the blue, red, and purple regions. 
If we add $P(A)$ and $P(B)$, we get the blue and red regions right, but we double-count the purple region. So we have to subtract off one purple region, which is $P(A ~and~ B)$. # **Exercise:** Let's do a quick example using disjunction. # # A standard deck of playing cards contains 52 cards; # # * 26 of them are red, # # * 12 of them are face cards, and # # * 6 of them are red face cards. # # The following diagram shows what I mean: the red rectangle contains the red cards; the blue rectangle contains the face cards, and the overlap includes the red face cards. # # <img width="500" # src="https://github.com/AllenDowney/BiteSizeBayes/raw/master/card_venn_diagram.png"> # # # If we choose a card at random, here are the probabilities of choosing a red card, a face card, and a red face card: # + p_red = 26/52 p_face = 12/52 p_red_face = 6/52 p_red, p_face, p_red_face # - # Use Theorem 4 to compute the probability of choosing a card that is either red, or a face card, or both: # + # Solution goes here # - # ## Total probability # # In the dice example, $H_4$ and $H_6$ are mutually exclusive, which means only one of them can be true, so the purple region is 0. Therefore: # # $P(D) = P(H_4 ~and~ D) + P(H_6 ~and~ D) - 0$ # # Now we can use **Theorem 2** to replace the conjunctions with conditonal probabilities: # # $P(D) = P(H_4)~P(D|H_4) + P(H_6)~P(D|H_6)$ # # By a similar argument, we can show that this is true for any number of hypotheses. For example, if we add an 8-sided die to the mix, we can write: # # $P(D) = P(H_4)~P(D|H_4) + P(H_6)~P(D|H_6) + P(H_8)~P(D|H_8)$ # And more generally, with any number of hypotheses $H_i$: # # $P(D) = \sum_i P(H_i)~P(D|H_i)$ # # Which shows that the total probability of the data is the sum of the unnormalized posteriors. # # And that's why the table method works. # Now let's get back to the original question: # # > Suppose you have a 4-sided die and a 6-sided die. You choose one at random and roll it. 
What is the probability of getting a 1? # # We can use a Bayes table to compute the answer. Here are the priors: table = pd.DataFrame(index=['H4', 'H6']) table['prior'] = 1/2, 1/2 table # And the likelihoods: table['likelihood'] = 1/4, 1/6 table # Now we compute the unnormalized posteriors in the usual way: table['unnorm'] = table['prior'] * table['likelihood'] table # And the total probability of the data: prob_data = table['unnorm'].sum() prob_data # That's what we got when we solved the problem by hand, so that's good. # **Exercise:** Suppose you have a 4-sided, 6-sided, and 8-sided die. You choose one at random and roll it, what is the probability of getting a 1? # # Do you expect it to be higher or lower than in the previous example? # + # Solution goes here # + # Solution goes here # - # ## Prediction and inference # # In the previous section, we use a Bayes table to solve this problem: # # > Suppose you have a 4-sided die and a 6-sided die. You choose one at random and roll it. What is the probability of getting a 1? # # I'll call this a "prediction problem" because we are given a scenario and asked for the probability of a predicted outcome. # # Now let's solve a closely-related problem: # # > Suppose you have a 4-sided die and a 6-sided die. You choose one at random, roll it, and get a 1. What is the probability that the die you rolled is 4-sided? # # I'll call this an "inference problem" because we are given the outcome and asked to figure out, or "infer", which die was rolled. # # Here's a solution: table = pd.DataFrame(index=['H4', 'H6']) table['prior'] = 1/2, 1/2 table['likelihood'] = 1/4, 1/6 table['unnorm'] = table['prior'] * table['likelihood'] prob_data = table['unnorm'].sum() table['posterior'] = table['unnorm'] / prob_data table # Given that the outcome is a 1, there is a 60% chance the die you rolled was 4-sided. # # As this example shows, prediction and inference closely-related problems, and we can use the same methods for both. 
# **Exercise:** Let's add some more dice:
#
# 1. Suppose you have a 4-sided, 6-sided, 8-sided, and 12-sided die. You choose one at random and roll it. What is the probability of getting a 1?
#
# 2. Now suppose the outcome is a 1. What is the probability that the die you rolled is 4-sided? And what are the posterior probabilities for the other dice?

# +
# Solution goes here

# +
# Solution goes here
# -

# ## Summary
#
# In this notebook, I introduced a new law of probability, so now we have four:
#
# **Theorem 1** gives us a way to compute a conditional probability using a conjunction:
#
# $P(A|B) = \frac{P(A~\mathrm{and}~B)}{P(B)}$
#
# **Theorem 2** gives us a way to compute a conjunction using a conditional probability:
#
# $P(A~\mathrm{and}~B) = P(B) P(A|B)$
#
# **Theorem 3** gives us a way to get from $P(A|B)$ to $P(B|A)$, or the other way around:
#
# $P(A|B) = \frac{P(A) P(B|A)}{P(B)}$
#
# **Theorem 4** gives us a way to compute a disjunction using a conjunction:
#
# $P(A ~or~ B) = P(A) + P(B) - P(A ~and~ B)$
#
# Then we used Theorems 2 and 4 to show that the sum of the unnormalized posteriors is the total probability of the data, which we wrote like this:
#
# $P(D) = \sum_i P(H_i)~P(D|H_i)$
#
# This conclusion is useful for two reasons:
#
# 1. It provides a way to compute the probability of future data using prior probabilities and likelihoods, and
#
# 2. It explains why the Bayes table method works.
#
# [In the next notebook](https://colab.research.google.com/github/AllenDowney/BiteSizeBayes/blob/master/05_test.ipynb) we will explore a famously useful application of Bayes's Theorem, medical testing.
04_dice.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import datetime as dt

import numpy as np
import pandas as pd

import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func

from flask import Flask, jsonify

#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///hawaii.sqlite")

# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)

# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station

#################################################
# Session link from Python to SQLLite DB
#################################################
session = Session(engine)

#################################################
# Flask Setup
#################################################
app = Flask(__name__)


#################################################
# Flask Routes
#################################################

@app.route("/")
def welcome():
    """List all available api routes for Hawaii Climate."""
    return (
        f"Hawaii Climate API<br/>"
        f"Available Routes:<br/>"
        f"/api/v1.0/precipitation<br/>"
        f"/api/v1.0/tobs<br/>"
        f"/api/v1.0/stations<br/>"
        f"/api/v1.0/temp/start/end"
    )


@app.route("/api/v1.0/precipitation")
def prec():
    """Return a JSON mapping of date -> precipitation for the last year of data."""
    # The last observation in the dataset is 2017-08-23; look back one year.
    prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    prec = session.query(Measurement.date, Measurement.prcp).\
        filter(Measurement.date >= prev_year).all()
    session.close()
    precipitat = {date: prcp for date, prcp in prec}
    return jsonify(precipitat)


@app.route("/api/v1.0/tobs")
def temp_monthly():
    """Return a JSON list of temperature observations for the most active
    station (USC00519281) over the last year of data."""
    prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    # BUGFIX: the original had a dangling `results = session.query(...).\`
    # continuation that chained `.session.close()` onto the query object and
    # closed the session *before* the real query ran.
    results = session.query(Measurement.tobs).\
        filter(Measurement.station == 'USC00519281').\
        filter(Measurement.date >= prev_year).all()
    session.close()
    # Flatten the one-element rows into a plain list (consistent with /stations).
    temps = list(np.ravel(results))
    return jsonify(temps)


@app.route("/api/v1.0/stations")
def stations():
    """Return a JSON list of all station identifiers."""
    # BUGFIX: `.all` was missing its parentheses, so the bound method (not the
    # query results) was handed to jsonify.
    results = session.query(Station.station).all()
    session.close()
    stations = list(np.ravel(results))
    return jsonify(stations)


# BUGFIX: the original registered only the literal path "/temp/start/end", so
# `start`/`end` were always None; variable URL rules make the route usable.
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def stats(start=None, end=None):
    """Return JSON [min, avg, max] temperature from `start` to `end` (both
    ISO date strings); `end` is optional and defaults to the end of the data."""
    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]

    if not end:
        results = session.query(*sel).\
            filter(Measurement.date >= start).all()
    else:
        results = session.query(*sel).\
            filter(Measurement.date >= start).\
            filter(Measurement.date <= end).all()
    session.close()  # BUGFIX: the session was never closed on this route
    temps = list(np.ravel(results))
    return jsonify(temps)


if __name__ == '__main__':
    app.run(debug=True)
app.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A Whirlwind Tour of Python # *<NAME>* # <img src="fig/cover-large.gif"> # These are the Jupyter Notebooks behind my O'Reilly report, # [*A Whirlwind Tour of Python*](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp). # The full notebook listing is available [on Github](https://github.com/jakevdp/WhirlwindTourOfPython). # # *A Whirlwind Tour of Python* is a fast-paced introduction to essential # components of the Python language for researchers and developers who are # already familiar with programming in another language. # # The material is particularly aimed at those who wish to use Python for data # science and/or scientific programming, and in this capacity serves as an # introduction to my upcoming book, *The Python Data Science Handbook*. # These notebooks are adapted from lectures and workshops I've given on these # topics at University of Washington and at various conferences, meetings, and # workshops around the world. # ## Index # # 1. [Introduction](00-Introduction.ipynb) # 2. [How to Run Python Code](01-How-to-Run-Python-Code.ipynb) # 3. [Basic Python Syntax](02-Basic-Python-Syntax.ipynb) # 4. [Python Semantics: Variables](03-Semantics-Variables.ipynb) # 5. [Python Semantics: Operators](04-Semantics-Operators.ipynb) # 6. [Built-In Scalar Types](05-Built-in-Scalar-Types.ipynb) # 7. [Built-In Data Structures](06-Built-in-Data-Structures.ipynb) # 8. [Control Flow Statements](07-Control-Flow-Statements.ipynb) # 9. [Defining Functions](08-Defining-Functions.ipynb) # 10. [Errors and Exceptions](09-Errors-and-Exceptions.ipynb) # 11. [Iterators](10-Iterators.ipynb) # 12. [List Comprehensions](11-List-Comprehensions.ipynb) # 13. [Generators and Generator Expressions](12-Generators.ipynb) # 14. 
[Modules and Packages](13-Modules-and-Packages.ipynb) # 15. [Strings and Regular Expressions](14-Strings-and-Regular-Expressions.ipynb) # 16. [Preview of Data Science Tools](15-Preview-of-Data-Science-Tools.ipynb) # 17. [Resources for Further Learning](16-Further-Resources.ipynb) # 18. [Appendix: Code To Reproduce Figures](17-Figures.ipynb) # ## License # # This material is released under the "No Rights Reserved" [CC0](LICENSE) # license, and thus you are free to re-use, modify, build-on, and enhance # this material for any purpose. # # That said, I request (but do not require) that if you use or adapt this material, # you include a proper attribution and/or citation; for example # # > *A Whirlwind Tour of Python* by <NAME> (O’Reilly). Copyright 2016 O’Reilly Media, Inc., 978-1-491-96465-1 # # Read more about CC0 [here](https://creativecommons.org/share-your-work/public-domain/cc0/).
Index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Targeted Bisulfite Sequencing Alignment Quality
# - Parse the BSBolt log files to get general alignment statistics
# - Run *samtools flagstat* on the bam files with marked duplicates
#     - Estimate duplication rate for each sample
#     - Samtools flagstat is called externally using python built-in library *subprocess*
#     - Command is run in parallel using *joblib*, a third party multiprocessing library
#       that forms the backend for many projects like *sklearn*
# - Plot the resulting QC checks using *matplotlib* and *seaborn* libraries

# +
# import libraries
import os
import subprocess

import joblib
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from matplotlib import rc
from tqdm.notebook import tqdm

from BSBolt.Matrix.MatrixAggregator import AggregateMatrix
from BSBolt.Utils.CGmapIterator import OpenCGmap

# +
# use latex formatting for figures, latex must be on system path for this to work
rc('text', usetex=False)

# set environment plotting params
params = {'legend.fontsize': 'x-large',
          'axes.labelsize': 'x-large',
          'axes.titlesize': 'x-large',
          'xtick.labelsize': 'x-large',
          'ytick.labelsize': 'x-large'}
plt.rcParams.update(params)
# -

# set working directory
wd = '~'
alignment_dir = f'{wd}Align/'
dup_dir = f'{wd}'

# +
# collect sample names from the sorted bam files in the alignment directory
samples = []
for _, _, files in os.walk(alignment_dir):
    for sample in files:
        if sample.endswith('bam'):
            samples.append(sample.replace('.bam', '').replace('sorted_', ''))
# -

# ### Get Average Read Coverage Over Target Regions

# Field names for the first 13 lines of `samtools flagstat` output, in order.
FLAGSTAT_FIELDS = ('TotalReads', 'SecondaryAlignment', 'Supplementary', 'Duplicate',
                   'Mapped', 'Paired', 'Read1', 'Read2', 'ProperPair', 'MateMapped',
                   'Singletons', 'MateMappedToDiffChrom', 'MateMappedToDiffChromMapq>=5')


# run samtools flagstat on each sample and parse output
# command will fail if samtools is not on path
def get_flagstats(sample_path, sample_name):
    """Run `samtools flagstat` on *sample_path* and parse the per-category
    read counts into a dict keyed by FLAGSTAT_FIELDS.

    Returns (sample_name, stats_dict) so results can be matched up after a
    parallel joblib run.
    """
    sam_stats = {}
    sam_args = ['samtools', 'flagstat', sample_path]
    with subprocess.Popen(args=sam_args, stdout=subprocess.PIPE,
                          universal_newlines=True) as sam_process:
        # Each flagstat line starts with the read count; map line order to
        # field names instead of a 13-branch elif chain. Extra lines (newer
        # samtools emits a few more) are drained and ignored.
        for count, line in enumerate(sam_process.stdout):
            if count < len(FLAGSTAT_FIELDS):
                sam_stats[FLAGSTAT_FIELDS[count]] = int(line.split(' ')[0])
    return sample_name, sam_stats


flag_stats = joblib.Parallel(n_jobs=14, verbose=10)(
    joblib.delayed(get_flagstats)(*[f'{alignment_dir}sorted_{sample}.bam', sample])
    for sample in samples)

dup_flag_stats = joblib.Parallel(n_jobs=14, verbose=10)(
    joblib.delayed(get_flagstats)(*[f'{wd}{sample}_dedup.bam', sample])
    for sample in samples)

# +
# format flagstat results
processed_flag_stats = {}
for sample, f_stats in flag_stats:
    processed_flag_stats[sample] = f_stats
# -

# duplicates = reads mapped before deduplication but absent afterwards
for sample, f_stats in dup_flag_stats:
    stats = processed_flag_stats[sample]
    processed_flag_stats[sample]['Duplicate'] = stats['Mapped'] - f_stats['Mapped']

# make pandas data frame
flag_df = pd.DataFrame(processed_flag_stats).T

# add duplication rate statistic
flag_df['DuplicationRate'] = flag_df['Duplicate'] / flag_df['Mapped']

flag_df

# +
fig, ax = plt.subplots(figsize=(12, 12))
plot_cats = ['TotalReads', 'Duplicate', 'Mapped', 'Paired', 'ProperPair']
sns.boxplot(data=flag_df[plot_cats], ax=ax, palette='Set2', linewidth=2.5)
ax.set_title('Samtools Flagstat', fontsize=20)
ax.set_xticklabels(ax.get_xticklabels(), rotation=30)
#plt.savefig('flagstats.png', dpi=200, bbox_inches='tight')
plt.show()
# -

flag_df.index = [x.split('_')[-1] for x in flag_df.index]

flag_df.to_csv('alignment_stats.csv')

# +
fig, ax = plt.subplots(figsize=(12, 12))
plot_cats = ['DuplicationRate']
sns.swarmplot(data=flag_df[plot_cats], ax=ax, palette='Set2', linewidth=2.5)
ax.set_title('Observed Duplication')
#plt.savefig('duplication.png', dpi=200, bbox_inches='tight')
plt.show()
# -

# ### Pull CGmap Files

cgmap_files = [f'{wd}CGmaps/CGmap_1x/{sample}_01.CGmap_hq.CGmap.gz' for sample in samples]

# report any missing CGmap files before aggregation
for file in cgmap_files:
    if not os.path.exists(file):
        print(file)

if not os.path.exists(f'{wd}covid_count.txt'):
    matrix = AggregateMatrix(file_list=cgmap_files,
                             sample_list=samples,
                             min_site_coverage=10,
                             site_proportion_threshold=0.9,
                             cg_only=True,
                             verbose=True,
                             threads=8,
                             output_path=f'{wd}covid_count.txt',
                             count_matrix=True)
    matrix.aggregate_matrix()

# ### Matrix QC

count_df = pd.read_csv(f'{wd}covid_count.txt', sep='\t', index_col=0)

# mean of all per-sample total-coverage columns
count_df['AverageCoverage'] = count_df[[col for col in list(count_df) if 'total' in col]].mean(axis=1)

# +
fig, ax = plt.subplots(figsize=(12, 12))
sns.histplot(np.log2(count_df['AverageCoverage'].values), ax=ax)
ax.set_xlabel('log2(AverageCoverage)')
plt.show()
# -

count_df = count_df.loc[count_df['AverageCoverage'] >= 30]


# +
# construct value df from count df: per-site methylation ratio,
# masking sites below the coverage threshold
def process_columns(meth_counts, total_counts, threshold=10):
    """Return per-site methylation ratios (meth / total) with sites whose
    total coverage is below *threshold* masked as NaN.

    BUGFIX: the original computed a low-coverage mask into a temporary array
    but never applied it and returned the unmasked ratios.
    NOTE(review): the mask is applied to *total* coverage, matching the cell
    comment ("if total greater than threshold keep site") and the 15x minimum
    described below — confirm intent.
    """
    totals = np.asarray(total_counts, dtype=float)
    values = np.asarray(meth_counts, dtype=float) / totals
    values[totals < threshold] = np.nan
    return values


# +
value_matrix = []
for sample in samples:
    value_matrix.append(process_columns(count_df[f'{sample}_meth_cytosine'].values,
                                        count_df[f'{sample}_total_cytosine'].values,
                                        15))
value_matrix = np.array(value_matrix).T
# -

value_df = pd.DataFrame(value_matrix, columns=samples, index=count_df.index)
meth_matrix = value_df.dropna(axis=0)

meth_matrix.to_csv(f'{wd}covid_values.txt', sep='\t')

# ### Matrix QC
# - assembled matrix is a complete matrix of sites with an average coverage of 30x
# - minimum coverage of 15x for all samples

# +
# per-sample distribution of methylation values
fig, ax = plt.subplots(figsize=(12, 12))
for sample in tqdm(list(meth_matrix)):
    sns.kdeplot(meth_matrix[sample].values, ax=ax, label=sample)
plt.show()
# -

# PCA over samples: transpose so rows = samples, columns = sites
pca = PCA()
pcs = pca.fit_transform(meth_matrix.values.T)
pca_score = pca.explained_variance_ratio_

# +
fig, ax = plt.subplots(figsize=(12, 12))
# BUGFIX: `pcs` has shape (n_samples, n_components); the original plotted
# pcs[0] vs pcs[1] — the coordinates of the first two *samples* — instead of
# the first two principal components for all samples.
plt.scatter(x=pcs[:, 0], y=pcs[:, 1])
for x, y, label in zip(pcs[:, 0], pcs[:, 1], list(meth_matrix)):
    ax.text(x, y, label, fontsize=12)
ax.set_xlabel(f'PC1 {pca_score[0]:.3f} Variation Explained')
ax.set_ylabel(f'PC2 {pca_score[1]:.3f} Variation Explained')
plt.savefig('pca_plot.png', dpi=100)
plt.show()
TargetedBSAlignmentQC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Mining Challenge Sample Notebook
#
# This notebook gives a small example of working with our data.
#
# ## Imports and Database Connection

# +
import re

from mongoengine import connect
from pycoshark.mongomodels import Project, VCSSystem, Commit, FileAction, Hunk, Refactoring, IssueSystem, Issue, IssueComment, MailingList, Message
from pycoshark.utils import create_mongodb_uri_string

# You may have to update this dict to match your DB credentials
credentials = {'db_user': '',
               'db_password': '',
               'db_hostname': 'localhost',
               'db_port': 27017,
               'db_authentication_database': '',
               'db_ssl_enabled': False}

uri = create_mongodb_uri_string(**credentials)
connect('smartshark_sample', host=uri, alias='default')
# -

# ## Working with commits

# +
# Fetch the project document, then the version control system attached to it.
project = Project.objects(name='giraph').get()
vcs_system = VCSSystem.objects(project_id=project.id).get()
print('VCS System:', vcs_system.url)

# Count all commits of this VCS before iterating them.
num_commits = Commit.objects(vcs_system_id=vcs_system.id).count()
print('Number of commits:', num_commits)

count_bugfix = 0
count_linked_issue = 0
count_hunks = 0
count_refactorings_refdiff = 0
count_refactorings_refactoringminer = 0

# Restricting the query to the required fields via only() matters for performance.
for commit in Commit.objects(vcs_system_id=vcs_system.id).only('labels', 'linked_issue_ids'):
    labels = commit.labels
    if labels is not None and 'validated_bugfix' in labels and labels['validated_bugfix'] == True:
        count_bugfix += 1
    linked = commit.linked_issue_ids
    if linked is not None and len(linked) > 0:
        count_linked_issue += 1
    # A FileAction groups all changed hunks of one file within a commit.
    for fa in FileAction.objects(commit_id=commit.id):
        count_hunks += Hunk.objects(file_action_id=fa.id).count()
    # NOTE(review): the refDiff tool name appears to be stored with literal
    # quotes in the database, hence the nested quoting — verify against the dataset.
    count_refactorings_refdiff += Refactoring.objects(commit_id=commit.id, detection_tool='"refDiff"').count()
    count_refactorings_refactoringminer += Refactoring.objects(commit_id=commit.id, detection_tool='rMiner').count()

print('Number of bug fixing commits:', count_bugfix)
print('Number of commits that link to a Jira issue:', count_linked_issue)
print('Number of hunks for all commits:', count_hunks)
print('Number of refactorings detected by refDiff:', count_refactorings_refdiff)
print('Number of refactorings detected by RefactoringMiner:', count_refactorings_refactoringminer)
# -

# ## Working with issues

# +
project = Project.objects(name='giraph').get()

# Some projects have multiple issue trackers; giraph has exactly one, so
# get() works here (it would raise for projects with several — loop instead).
issue_tracker = IssueSystem.objects(project_id=project.id).get()
print('Issue Tracker:', issue_tracker.url)

num_issues = Issue.objects(issue_system_id=issue_tracker.id).count()
print('Number of issues:', num_issues)

count_comments = 0
count_referenced_by_commits = 0
count_bugs_dev_label = 0
count_bugs_validated = 0

for issue in Issue.objects(issue_system_id=issue_tracker.id):
    count_comments += IssueComment.objects(issue_id=issue.id).count()
    dev_type = issue.issue_type
    if dev_type is not None and dev_type.lower() == 'bug':
        count_bugs_dev_label += 1
    validated_type = issue.issue_type_verified
    if validated_type is not None and validated_type.lower() == 'bug':
        count_bugs_validated += 1
    if Commit.objects(linked_issue_ids=issue.id).count() > 0:
        count_referenced_by_commits += 1

print('Number of comments in discussions:', count_comments)
print('Number of issues referenced by commits:', count_referenced_by_commits)
print('Number of issues labeled as bugs by developers:', count_bugs_dev_label)
print('Number of issues labeled validated as bug by researchers:', count_bugs_validated)
# -

# ## Working with the Mailing List

# +
project = Project.objects(name='giraph').get()

# The project has two mailing lists, so iterate over all of them.
for mailing_list in MailingList.objects(project_id=project.id):
    print('Mailing List:', mailing_list.name)

    count_emails = Message.objects(mailing_list_id=mailing_list.id).count()
    print('Number of Emails:', count_emails)

    count_references_jira = 0
    jira_id = re.compile('GIRAPH-[0-9]+', re.I | re.M)
    for message in Message.objects(mailing_list_id=mailing_list.id):
        body = message.body
        if body is not None and jira_id.search(body):
            count_references_jira += 1
    print('Number of emails that reference a Jira issue:', count_references_jira)
MSR-MC2022-Sample.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Libraries
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
import random as rng
import numpy as np
# %matplotlib inline

# Load the OCR rankings export and peek at the first rows.
data = pd.read_csv("ocr.csv")
data.head()

# +
# Slice out a two-column frame per year; the value column of each pair is
# unnamed in the export (presumably a header artifact of the CSV — verify
# against ocr.csv).
df_2013 = data[['2013', 'Unnamed: 4']]
df_2014 = data[['2014', 'Unnamed: 7']]
df_2015 = data[['2015', 'Unnamed: 4']]
df_2015.head()
# -

# Distribution of the fourth unnamed column, skipping the first row
# (presumably a stray header row — verify against ocr.csv).
plt.boxplot(data["Unnamed: 3"].iloc[1:])
plt.show()
notebooks/ocr_rankings_uno.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# ![embedding_mapping.png](https://github.com/yandexdataschool/nlp_course/raw/master/resources/embedding_mapping.png)

# ## Homework: Un(semi)-supervised word translation learning
#
# Homework based on the [Conneau et al. 2018](https://arxiv.org/abs/1710.04087) article.
#
# In this homework we train a mapping between Ukrainian word vectors and Russian
# word vectors, just like in the first homework of the NLP course. But unlike the
# first homework, this mapping will be built (almost) unsupervised: without
# parallel data (pairs of corresponding words in Ukrainian and Russian).

# +
# %env KERAS_BACKEND=tensorflow
# %env CUDA_VISIBLE_DEVICES=1
# %load_ext autoreload
# %autoreload 2

import tensorflow as tf
import keras
from keras.models import Sequential
from keras import layers as L
import numpy as np
import gensim
from IPython import display
from tqdm import tnrange
import matplotlib.pyplot as plt
# %matplotlib inline

# +
# Pre-trained monolingual word2vec embeddings plus train/test translation pairs.
# !wget https://www.dropbox.com/s/cnwyfbfa44mqxph/ukr_rus.train.txt?dl=1 -O ./ukr_rus.train.txt
# !wget https://www.dropbox.com/s/78otz1d4d9b0284/ukr_rus.test.txt?dl=1 -O ./ukr_rus.test.txt
# !wget https://www.dropbox.com/s/210m7gwqkikpsxd/uk.w2v.bin?dl=1 -O ./uk.w2v.bin
# !wget https://www.dropbox.com/s/3luwyjdmofsdfjz/ru.w2v.bin?dl=1 -O ./ru.w2v.bin

# +
ru_embs = gensim.models.KeyedVectors.load_word2vec_format("ru.w2v.bin", binary=True)
uk_embs = gensim.models.KeyedVectors.load_word2vec_format("uk.w2v.bin", binary=True)

# +
# Unsupervised training pools: the first 50k vectors of each language
# (word2vec stores vectors in descending frequency order).
x = uk_embs.vectors[:50000]
y = ru_embs.vectors[:50000]


# +
def precision(pairs, uk_vectors, topn=1):
    """Compute precision@topn of a (possibly mapped) set of Ukrainian vectors.

    :param pairs: list of (uk_word, ru_word) gold translation pairs
    :param uk_vectors: array of Ukrainian word vectors aligned with `pairs`
        (these may already be mapped into the Russian embedding space)
    :param topn: number of nearest Russian neighbours to consider a hit
    :returns: fraction of pairs whose Russian word is among the `topn`
        nearest Russian neighbours of the corresponding vector
    """
    assert len(pairs) == len(uk_vectors)
    num_matches = 0
    for i, (uk, ru) in enumerate(pairs):
        num_matches += ru in set(w[0] for w in ru_embs.most_similar([uk_vectors[i]], topn=topn))
    return num_matches / len(pairs)


def load_word_pairs(filename):
    """Read tab-separated (uk, ru) word pairs from *filename*, skipping pairs
    where either word is out of vocabulary.

    :returns: (list of (uk, ru) pairs, array of uk vectors, array of ru vectors)
    """
    uk_ru_pairs = []
    uk_vectors = []
    ru_vectors = []
    with open(filename, "r") as inpf:
        for line in inpf:
            uk, ru = line.rstrip().split("\t")
            if uk not in uk_embs or ru not in ru_embs:
                continue
            uk_ru_pairs.append((uk, ru))
            uk_vectors.append(uk_embs[uk])
            ru_vectors.append(ru_embs[ru])
    return uk_ru_pairs, np.array(uk_vectors), np.array(ru_vectors)


# +
uk_ru_test, x_test, y_test = load_word_pairs("ukr_rus.test.txt")
uk_ru_train, x_train, y_train = load_word_pairs("ukr_rus.train.txt")

# +
# Baseline: precision@5 of the *unmapped* Ukrainian vectors.
precision(uk_ru_test, x_test, 5)

# + [markdown]
# ## Reminder
#
# ### Embedding space mapping
#
# Let $x_i \in \mathrm{R}^d$ be the distributed representation of word $i$ in the
# source language, and $y_i \in \mathrm{R}^d$ the vector representation of its
# translation. Our purpose is to learn a linear transform $W$ that minimizes the
# euclidian distance between $Wx_i$ and $y_i$ for some subset of word embeddings —
# the so-called Procrustes problem:
#
# $$W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2
# \quad\text{or}\quad
# W^*= \arg\min_W ||WX - Y||_F$$
#
# where $||*||_F$ is the Frobenius norm.
#
# ### Orthogonal Procrustean Problem
#
# It can be shown (see the original paper) that a self-consistent linear mapping
# between semantic spaces should be orthogonal, so we restrict $W$:
#
# $$W^*= \arg\min_W ||WX - Y||_F \text{, where: } W^TW = I,\ I \text{ — identity matrix}$$
#
# The optimal orthogonal transform has a closed form via the singular value
# decomposition:
#
# $$X^TY=U\Sigma V^T \quad\Rightarrow\quad W^*=UV^T$$

# + [markdown]
# ## Word translation learning using GAN (8 points)
#
# ### Generator
#
# If $\mathcal{X}=\{x_1,...,x_n\} \subset \mathrm{R}^d$ is the source embedding
# set and $\mathcal{Y}=\{y_1,...,y_m\} \subset \mathrm{R}^d$ the target embedding
# set, the generator is simply the orthogonal mapping $W\in O_d(\mathrm{R})$: a
# single linear layer without bias or nonlinearity. Its input is a source
# embedding $x_i$; its output is the mapped embedding $Wx_i$.

# +
EMB_SIZE = 300  # dimensionality of both embedding spaces


# +
def build_generator(emb_size):
    """Single linear layer (no bias, no activation) initialized to the
    identity — this *is* the trainable mapping W."""
    model = Sequential()
    model.add(L.InputLayer([emb_size]))
    model.add(L.Dense(
        emb_size,
        activation=None,
        use_bias=False,
        kernel_initializer=keras.initializers.Identity()))
    return model


# +
generator = build_generator(EMB_SIZE)

# + [markdown]
# ### Discriminator
#
# The discriminator is a multilayer perceptron (two hidden layers of size 2048,
# Leaky-ReLU activations) whose scalar output is the logit of the probability
# that its input comes from the mapped source distribution.
# NOTE(review): the original assignment text also corrupts the discriminator
# input with dropout (rate 0.1); this implementation omits it — confirm whether
# that is intentional.


# +
def build_discriminator(emb_size):
    """MLP producing a single raw logit for p_D(source=1 | input); the sigmoid
    is applied later via tf.log_sigmoid."""
    model = Sequential()
    model.add(L.InputLayer([emb_size]))
    model.add(L.Dense(2048, activation=None))
    model.add(L.LeakyReLU(0.2))
    model.add(L.Dense(2048, activation=None))
    model.add(L.LeakyReLU(0.2))
    model.add(L.Dense(1, activation=None))
    return model


# +
discriminator = build_discriminator(EMB_SIZE)

# + [markdown]
# ### Discriminator loss
#
# The discriminator maximizes the output probability for mapped source
# embeddings $p_D(source=1|Wx_i)$ and minimizes it for target embeddings —
# a standard cross-entropy objective:
#
# $$\mathcal{L}_D(\theta_D|W)=-\frac{1}{n}\sum_{i=1}^n\log p_D(source=1|Wx_i)-\frac{1}{m}\sum_{i=1}^m\log (1-p_D(source=1|y_i))$$
#
# **NB:** We minimize $\mathcal{L}_D(\theta_D|W)$ with respect to the
# discriminator parameters $\theta_D$. The matrix $W$ is fixed.
# +
X = tf.placeholder(tf.float32, [None, EMB_SIZE])  # batch of source embeddings
Y = tf.placeholder(tf.float32, [None, EMB_SIZE])  # batch of target embeddings
W = generator.weights[0]  # the mapping matrix itself (needed for orthogonalization)
WX = generator(X)         # mapped source embeddings

# Discriminator log-probabilities, computed from the raw logits with the
# numerically stable log-sigmoid.
# NOTE(review): this implementation labels *target* vectors as "real" (1) and
# mapped source vectors as "fake" (0) — the opposite convention of the
# formulas above, but an equivalent GAN objective.
desc_wx = discriminator(WX)
logp_wx_is_real = tf.log_sigmoid(desc_wx)
logp_wx_is_fake = tf.log_sigmoid(-desc_wx)
logp_y_is_real = tf.log_sigmoid(discriminator(Y))

# Discriminator loss: call mapped source vectors fake, target vectors real.
L_d_source = -tf.reduce_mean(logp_wx_is_fake)
L_d_target = -tf.reduce_mean(logp_y_is_real)
L_d = L_d_source + L_d_target

# + [markdown]
# As suggested by Goodfellow (2016) it is useful to use soft targets instead of
# hard ones. With label smoothing:
# $$\mathcal{L}_D(\theta_D|W)=\mathcal{L}_{D_1}+\mathcal{L}_{D_2}$$
#
# Where:
# $$\mathcal{L}_{D_1}=\frac{1}{n}\sum_{i=1}^n[(1-\alpha)\log p_D(source=1|Wx_i) + \alpha\log p_D(source=0|Wx_i)]$$
#
# $$\mathcal{L}_{D_2}=\frac{1}{m}\sum_{i=1}^m[(1-\alpha)\log p_D(source=0|y_i) + \alpha\log p_D(source=1|y_i)]$$

# +
# YOUR CODE HERE IF YOU REALLY WANT TO USE LABEL SMOOTHING

# + [markdown]
# ### Generator loss
#
# The purpose of the generator is to fool the discriminator, i.e. to produce a
# mapping $W\mathcal{X}$ indistinguishable from $\mathcal{Y}$. Therefore we turn
# the discriminator loss around: minimize the output probability for mapped
# source embeddings and maximize it for target embeddings.
#
# $$\mathcal{L}_G(W|\theta_D)=-\frac{1}{n}\sum_{i=1}^n\log (1-p_D(source=1|Wx_i))-\frac{1}{m}\sum_{i=1}^m\log p_D(source=1|y_i)$$
#
# **NB:** We minimize $\mathcal{L}_G(W|\theta_D)$ with respect to the matrix $W$
# coefficients. Discriminator parameters $\theta_D$ are fixed.
# + [markdown]
# Because gradients do not flow through the generator for target samples:
#
# $$\mathcal{L}_G(W|\theta_D)=-\frac{1}{n}\sum_{i=1}^n\log (1-p_D(source=1|Wx_i))$$

# + [markdown]
# In contrast with the original article, to be more stable we allow you to add a
# supervised component to the loss — MSE for a small number of fixed pairs of
# vectors from $\mathcal{X}$ and $\mathcal{Y}$:
#
# $$\mathcal{L}_G(W|\theta_D)=-\frac{1}{n}\sum_{i=1}^n\log (1-p_D(source=1|Wx_i))+\gamma \frac{1}{N}\sum_{k}^N(Wx_k-y_k)^2$$

# +
# Supervised anchor pairs (a small dictionary of known translations).
X_pair = tf.placeholder('float32', [None, EMB_SIZE])
Y_pair = tf.placeholder('float32', [None, EMB_SIZE])

# Generator loss: adversarial term (push the discriminator to call mapped
# source vectors "real" — see the label-convention note above) plus a strongly
# weighted supervised MSE term on the anchor pairs (gamma = 100).
L_g_source = -tf.reduce_mean(logp_wx_is_real)
L_mse = tf.losses.mean_squared_error(Y_pair, generator(X_pair))
L_g = L_mse * 100 + L_g_source

# + [markdown]
# ### Orthogonality constraint
# Conneau et al. propose to use a simple update step to ensure that the matrix
# $W$ stays close to an orthogonal matrix during training:
#
# $$W \gets (1+\beta)W-\beta(WW^T)W$$

# +
BETA = tf.constant(0.1)

# In-place update pulling W toward the orthogonal manifold; applied via
# tf.assign after every generator step.
W_new = (1 + BETA) * W - BETA * tf.matmul(tf.matmul(W, W, transpose_b=True), W)
orthogonolize = tf.assign(W, W_new)

# + [markdown]
# ### Training
LEARNING_RATE = 0.1 #gen_optim = #dis_optim = gen_optim = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(L_g, var_list=generator.trainable_weights) dis_optim = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(L_d, var_list=discriminator.trainable_weights) # + colab={} colab_type="code" id="n9aX1umMOlAB" BATCH_SIZE = 32 def sample_batch(bsize): x_batch = x[np.random.choice(np.arange(x.shape[0]), size=bsize)] y_batch = y[np.random.choice(np.arange(y.shape[0]), size=bsize)] return x_batch, y_batch # + colab={} colab_type="code" id="kmpkKH6POlAD" def discriminator_step(): # YOUR_CODE feed_dict = { X: x_batch, Y: y_batch } sess.run(dis_optim, feed_dict) def generator_step(): # YOUR_CODE feed_dict = { X: x_batch, X_pair: x_train[:50], Y_pair: y_train[:50] } sess.run(gen_optim, feed_dict) def orthogonolize_step(): sess.run(orthogonolize) # + colab={} colab_type="code" id="kf48AIJgOlAH" def get_metrics(): feed_dict = { X: x_test, Y: y_test, X_pair: x_train[:50], Y_pair: y_train[:50] } loss_g, loss_d, logp_x, logp_y, wx = sess.run([L_g, L_d, logp_wx_is_real, logp_y_is_real, WX], feed_dict) return loss_g, loss_d, np.exp(logp_x), np.exp(logp_y), wx # + colab={} colab_type="code" id="Q9XqnUzIOlAK" sess = keras.backend.get_session() sess.run(tf.global_variables_initializer()) # + colab={"base_uri": "https://localhost:8080/", "height": 895} colab_type="code" executionInfo={"elapsed": 3632, "status": "ok", "timestamp": 1544727482822, "user": {"displayName": "\u0410\u043d\u0442\u043e\u043d \u0427\u0438\u043a\u0443\u043d\u043e\u0432", "photoUrl": "https://lh6.googleusercontent.com/-UUCr162DexY/AAAAAAAAAAI/AAAAAAAACHw/TB_NX-xqcWs/s64/photo.jpg", "userId": "10327575406852457431"}, "user_tz": -300} id="0NrvBtB7OlAM" outputId="74e2f30c-19ff-43e6-eaaa-ce19192ebce0" N_EPOCHS = 10 EPOCH_SIZE = 1000 DIS_STEPS = 5 GEN_STEPS = 1 gen_loss_history = [] dis_loss_history = [] prec_history = [] for epoch_num in range(N_EPOCHS): print("Epoch: {}".format(epoch_num + 1)) for 
batch_num in tnrange(EPOCH_SIZE): for _ in range(DIS_STEPS): # YOUR_CODE x_batch, y_batch = sample_batch(BATCH_SIZE) discriminator_step() for _ in range(GEN_STEPS): # YOUR_CODE x_batch, y_batch = sample_batch(BATCH_SIZE) generator_step() orthogonolize_step() if batch_num % 10 == 0: display.clear_output(wait=True) loss_g, loss_d, p_x, p_y, wx = get_metrics() gen_loss_history.append(loss_g) dis_loss_history.append(loss_d) if batch_num % 100 == 0: prec_history.append(precision(uk_ru_test, wx, 5)) plt.figure(figsize=(15,15)) plt.subplot(212) plt.plot(gen_loss_history, label="Generator loss") plt.plot(dis_loss_history, label="Discriminator loss") plt.legend(loc='best') plt.subplot(221) plt.title('Mapped vs target data') plt.hist(p_x, label='D(Y)', alpha=0.5,range=[0,1], bins=20) plt.hist(p_y, label='D(WX)',alpha=0.5,range=[0,1], bins=20) plt.legend(loc='best') plt.subplot(222) plt.title('Precision top5') plt.plot(prec_history) plt.show() # + [markdown] colab_type="text" id="3gyxDfStOlAP" # ## Fully unsupervised word translation learning (2 points) # + [markdown] colab_type="text" id="ZgfwyHQnOlAP" # Try to exclude MSE term from generator loss and train GAN with sufficient quality (~40% precision top5). You should tune parameters of optimizers and training schedule to make it stable. # # Good luck! # + colab={} colab_type="code" id="1kdP3KKhUozS"
week11_gan/homework (1).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # A Simple Autoencoder # # We'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data. # # ![Autoencoder](assets/autoencoder_1.png) # # In this notebook, we'll be build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset. # + # %matplotlib inline import numpy as np import tensorflow as tf import matplotlib.pyplot as plt # - from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', validation_size=0) # Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits. img = mnist.train.images[2] plt.imshow(img.reshape((28, 28)), cmap='Greys_r') # We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a **single ReLU hidden layer**. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a **sigmoid activation on the output layer** to get values matching the input. 
# # ![Autoencoder architecture](assets/simple_autoencoder.png) # # # > **Exercise:** Build the graph for the autoencoder in the cell below. The input images will be flattened into 784 length vectors. The targets are the same as the inputs. And there should be one hidden layer with a ReLU activation and an output layer with a sigmoid activation. Feel free to use TensorFlow's higher level API, `tf.layers`. For instance, you would use [`tf.layers.dense(inputs, units, activation=tf.nn.relu)`](https://www.tensorflow.org/api_docs/python/tf/layers/dense) to create a fully connected layer with a ReLU activation. The loss should be calculated with the cross-entropy loss, there is a convenient TensorFlow function for this `tf.nn.sigmoid_cross_entropy_with_logits` ([documentation](https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits)). You should note that `tf.nn.sigmoid_cross_entropy_with_logits` takes the logits, but to get the reconstructed images you'll need to pass the logits through the sigmoid function. # + # Size of the encoding layer (the hidden layer) encoding_dim = 32 image_size = mnist.train.images.shape[1] inputs_ = tf.placeholder(tf.float32, (None, image_size), name='inputs') targets_ = tf.placeholder(tf.float32, (None, image_size), name='targets') # Output of hidden layer encoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu) # Output layer logits logits = tf.layers.dense(encoded, image_size, activation=None) # Sigmoid output from decoded = tf.nn.sigmoid(logits, name='output') loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001).minimize(cost) # - # ## Training # Create the session sess = tf.Session() # Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss and the test loss afterwards. 
# # Calling `mnist.train.next_batch(batch_size)` will return a tuple of `(images, labels)`. We're not concerned with the labels here, we just need the images. Otherwise this is pretty straightfoward training with TensorFlow. We initialize the variables with `sess.run(tf.global_variables_initializer())`. Then, run the optimizer and get the loss with `batch_cost, _ = sess.run([cost, opt], feed_dict=feed)`. epochs = 20 batch_size = 200 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) feed = {inputs_: batch[0], targets_: batch[0]} batch_cost, _ = sess.run([cost, opt], feed_dict=feed) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) # ## Checking out the results # # Below I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts. # + fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs}) for images, row in zip([in_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) # - sess.close() # ## Up Next # # We're dealing with images here, so we can (usually) get better performance using convolution layers. So, next we'll build a better autoencoder with convolutional layers. # # In practice, autoencoders aren't actually better at compression compared to typical methods like JPEGs and MP3s. But, they are being used for noise reduction, which you'll also build.
autoencoder/Simple_Autoencoder_Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: jsl250 # language: python # name: jsl250 # --- # + [markdown] colab_type="text" id="PIIR_uOgzR14" # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/enterprise/healthcare/Disambiguation.ipynb) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="MdE588BiY3z1" outputId="c1afd395-4f08-4d0c-b062-192b264937de" import json with open('keys.json') as f: license_keys = json.load(f) license_keys.keys() # + colab={"base_uri": "https://localhost:8080/", "height": 408} colab_type="code" id="FVFdvGChZDDP" outputId="4d39e25b-d977-43aa-db5b-b825e9d85f6f" import os # Install java # ! apt-get update -qq # ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null secret = license_keys.get("secret",license_keys.get('SPARK_NLP_SECRET', "")) spark_version = os.environ.get("SPARK_VERSION", license_keys.get("SPARK_VERSION","2.4")) version = license_keys.get("version",license_keys.get('SPARK_NLP_PUBLIC_VERSION', "")) jsl_version = license_keys.get("jsl_version",license_keys.get('SPARK_NLP_VERSION', "")) os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] # ! java -version os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE'] os.environ['JSL_OCR_LICENSE'] = license_keys['JSL_OCR_LICENSE'] os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID'] os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY'] print(spark_version, version, jsl_version) # ! python -m pip install "pyspark==$spark_version".* # ! 
python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret import sparknlp import sparknlp_jsl from sparknlp.base import * from sparknlp.annotator import * from sparknlp_jsl.annotator import * from pyspark.ml import Pipeline from pyspark.sql import SparkSession print (sparknlp.version()) print (sparknlp_jsl.version()) spark = sparknlp_jsl.start(secret, gpu=False, spark23=(spark_version[:3]=="2.3")) # + colab={} colab_type="code" id="1zgsiTxjaiMd" # Sample data text = "The show also had a contestant named <NAME> " \ + "who later defeated <NAME> on the way to become Female Vocalist Champion in the 1989 edition of Star Search in the United States. " data = spark.createDataFrame([ [text]]) \ .toDF("text").cache() # + colab={} colab_type="code" id="rrPjWyxAzR2t" # + colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" id="weY5V9h7ZDf0" outputId="c1d7be57-f0eb-4dda-f96e-9a6570511971" # Preprocessing pipeline da = DocumentAssembler().setInputCol("text").setOutputCol("document") sd = SentenceDetector().setInputCols("document").setOutputCol("sentence") tk = Tokenizer().setInputCols("sentence").setOutputCol("token") emb = WordEmbeddingsModel.pretrained().setOutputCol("embs") semb = SentenceEmbeddings().setInputCols("sentence","embs").setOutputCol("sentence_embeddings") ner = NerDLModel.pretrained().setInputCols("sentence","token","embs").setOutputCol("ner") nc = NerConverter().setInputCols("sentence","token","ner").setOutputCol("ner_chunk").setWhiteList(["PER"]) disambiguator = NerDisambiguator() \ .setS3KnowledgeBaseName("i-per") \ .setInputCols("ner_chunk", "sentence_embeddings") \ .setOutputCol("disambiguation") \ .setNumFirstChars(5) pl = Pipeline().setStages([da,sd,tk,emb,semb,ner,nc,disambiguator]) data = pl.fit(data).transform(data) data.selectExpr("explode(disambiguation)").show(10, False) # + colab={} colab_type="code" id="5nzfRLHe3SJB"
jupyter/enterprise/healthcare/Disambiguation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1>Genetic Algorithm in 15 lines of Python code</h1> # <h4>A simple yet powerful genetic algorithm implementation used to train a neural network in 15 lines of code.</h4> # <p style="font-size:smaller;"><b>Disclaimer: </b> I am not a machine learning expert by any means, I mostly do web development, so this is not my forte at all, but I have enjoyed messing around writing basic neural nets and genetic algorithms and am just trying to share what little I've learned to other neophytes out there.</p> # <p><b>Summary:</b> # This is a spinoff of a really great tutorial called "A Neural Network in 11 lines of Python" found here: < http://iamtrask.github.io/2015/07/12/basic-python-network/ > So please go through that article first otherwise this may not make any sense. <br /> # Here I will show you how I wrote a basic genetic algorithm (GA) that finds an optimal set of weights to train the neural network. I'm not going to go into detail about what a genetic algorithm is, so if you're already not familiar with them, please do some googling. # <p style="font-size:small;">By the way, GAs are generally much slower than good ol' gradient descent, but I think applying GAs to a simple neural net is a more fun way to learn it. Also, GAs may be good for finding an optimal set of <em>hyperparameters</em> for a neural net (e.g. 
the net architecture).</p> # <h3>Just Give Me The Code:</h3> import random, numpy as np, NeuralNet as NN params = [100, 0.05, 250, 3, 20] curPop = np.random.choice(np.arange(-15,15,step=0.01),size=(params[0],params[3]),replace=False) nextPop = np.zeros((curPop.shape[0], curPop.shape[1])) fitVec = np.zeros((params[0], 2)) for i in range(params[2]): fitVec = np.array([np.array([x, np.sum(NN.costFunction(NN.X, NN.y, curPop[x].reshape(3,1)))]) for x in range(params[0])]) winners = np.zeros((params[4], params[3])) #20x2 for n in range(len(winners)): selected = np.random.choice(range(len(fitVec)), params[4]/2, replace=False) wnr = np.argmin(fitVec[selected,1]) winners[n] = curPop[int(fitVec[selected[wnr]][0])] nextPop[:len(winners)] = winners nextPop[len(winners):] = np.array([np.array(np.random.permutation(np.repeat(winners[:, x], ((params[0] - len(winners))/len(winners)), axis=0))) for x in range(winners.shape[1])]).T curPop = np.multiply(nextPop, np.matrix([np.float(np.random.normal(0,2,1)) if random.random() < params[1] else 1 for x in range(nextPop.size)]).reshape(nextPop.shape)) # <br />Ok, so I'm assuming that code is completely not helpful at this point, and in fact, it won't even run if you tried to copy and paste and run it because you also need the code for the Neural Network, which I stored in a separate file and imported. Let's first talk about the general steps in implementing a genetic algorithm and then we'll break down the code line by line, add in some print() statements to see what's going on, and maybe even make some fancy graphs.<p>Essentially, a genetic algorithm is a search algorithm that will hopefully find an optimal solution through a process that simulates natural selection and evolution. Here's the overall flow for how they work:</p> # + We generate a population of random potential solutions # + Then we iterate through this population and assess the fitness of (i.e. 
how good of a solution) each solution # + We prefentially select solutions with higher fitness to survive and make it to the next generation. # Solutions with higher fitness have a higher probability of being selected # + These "winner" solutions then "mate" and produce offspring solutions. For example, if our solutions are simply # vectors of integers, then mating vector1 with vector2 involves taking a few elements from vector1 and combining it with a few elements of vector2 to make a new offspring vector of the same dimensions. Vector1: [1 2 3], Vector2: [4 5 6]. Vector1 mates with Vector2 to produce [4 5 3] and [1 2 6] # + So now we have a new population with the top solutions from the last generation along with new offspring solutions, at this point, we will iterate over our solutions and randomly mutate some of them to make sure to introduce new "genetic diversity" into every generation to prevent premature convergence on a local optimum. # + Repeat this process for X number of generations or until we have a sufficiently good solution # As a quick review, the iamtrask article shows you how to implement a really simple 2-layer (1 input layer, 1 output layer) neural network that is trained to solve this problem: # <table class="tg" style="width: 234px; margin-right: 65vw;"> # <tbody><tr> # <th class="tg-5rcs" colspan="3">Inputs</th> # <th class="tg-5rcs">Output</th> # </tr> # <tr> # <td class="tg-4kyz">0</td> # <td class="tg-4kyz">0</td> # <td class="tg-4kyz">1</td> # <td class="tg-4kyz">0</td> # </tr> # <tr> # <td class="tg-4kyz">1</td> # <td class="tg-4kyz">1</td> # <td class="tg-4kyz">1</td> # <td class="tg-4kyz">1</td> # </tr> # <tr> # <td class="tg-4kyz">1</td> # <td class="tg-4kyz">0</td> # <td class="tg-4kyz">1</td> # <td class="tg-4kyz">1</td> # </tr> # <tr> # <td class="tg-4kyz">0</td> # <td class="tg-4kyz">1</td> # <td class="tg-4kyz">1</td> # <td class="tg-4kyz">0</td> # </tr> # </tbody></table> # </p> # As you can see, the output simply depends on 
whether the first input is a 1 or not. The 2nd input is irrelevant and the 3rd input is our bias (explained elsewhere). # <p>If you train the 2-layer neural net (thus one set of weights) using gradient descent using the implementation in the iamtrask article, you will get a set of weights close to this:</p> # > [[ 9.67299303],[-0.2078435],[-4.62963669]] # And if you calculate the cost using these weights (the cost function is a simple difference between expected and actual output values), you get... # > Cost: 0.0557587344696 # Pretty low right? Now just to jump ahead a bit, when I tuned the genetic algorithm and ran it a couple of times, it found a completely different set of weights: # > [[ 3.09686945e+05 -7.88485054e-03 -1.67477116e+03]] # > <br />Cost: 0.0 # Obviously these weights resulted in a significantly lower cost (better fitness). In all honesty however, for this simplistic problem, the difference in cost is pretty inconsequential. In more complex problems, a cost that low is probably resulting in overfitting. Not to mention, genetic algorithms almost certainly will take longer to converge than gradient descent. But let's ignore all those details, we just want to build a genetic algorithm because they're cool. # Before I jump into the details of the genetic algorithm, I want to revisit the neural net. 
Here's the code for the neural net I implemented, which is an adaptation from the one by iamtrask: # + import numpy as np import math X = np.array([ [0,0,1],[0,1,1],[1,0,1],[1,1,1]]) #training data X y = np.array([[0,0,1,1, 1, 0, 0]]).T #training data Y syn0 = 2*np.random.random((3,1)) - 1 #randomize intial weights (Theta) def runForward(X, theta): #this runs our net and returns the output return sigmoid(np.dot(X, theta)) def costFunction(X, y, theta): #our cost function, simply determines the arithmetic difference between the expected y and our actual y m = float(len(X)) hThetaX = np.array(runForward(X, theta)) return np.sum(np.abs(y - hThetaX)) def sigmoid(x): return 1 / (1 + np.exp(- x)) #Just our run-of-the-mill sigmoid function # - # You should be able to figure this out if you've run through the iamtrask article or already have an understanding of neural nets. Just a note: I call the weights Theta. Let's go ahead and run this network just to make sure it's working right. runForward(np.array([0,1,1]), syn0) # We expect to get about [ 0 ] for an input of [0,1,1], but obviously when we use random weights, that's not likely to happen. Let's try again with those weights I got from doing gradient descent (not shown here). optimal_theta = np.array([[ 9.67299303],[-0.2078435],[-4.62963669]]) runForward(np.array([0,1,1]), optimal_theta) # As you can see, we get a value pretty close to 0, as expected. Nice. Okay, so now let's try the weights I got from running the genetic algorithm. optimal_theta_ga = np.array([3.09686945e+05,-7.88485054e-03,-1.67477116e+03]) runForward(np.array([0,1,1]), optimal_theta_ga) # Wow! The result we get here is so close to zero we get an overflow warning. Just ignore that, the point is, the error/cost is really, really low. (Again, this is not necessarily a good thing...becauase of potential overfitting, but for this particular problem, overfitting is fine). 
Just to make things really clear, let's take a look at what these weights are doing diagramatically. # <div> # <img src="images/NNDiagram1.png" width="200px" style="display:inline-block;" /><img style="display:inline-block;" src="images/NNDiagram2.png" width="200px" /> # </div> # So as you can see on the right, whenever the bottom (left-most) input is 1, a really, really big number gets sent over to our sigmoid function, which will of course return something very close to 1. # <p>Alright, so enough about the neural network. Let's go line by line with the genetic algorithm (skipping imports).</p> params = [100, 0.05, 250, 3, 20] #These are just some parameters for the GA, defined below in order: # [Init pop (pop=100), mut rate (=5%), num generations (250), chromosome/solution length (3), # winners/per gen] # Nothing too interesting there, but just noting that params[3] (solution length) refers to the number of elements in each individual solution. Since our solutions are weights to the 2-layer neural net, each solution is a 3 element vector (numpy array). Also need to note the last parameter, params[4] refers to how many solutions we will pick as winners from each generation to populate the new generation. So out of total population of 100, every generation we will preferentially pick the top 20 solutions, populate the new generation with them and their offspring. curPop = np.random.choice(np.arange(-15,15,step=0.01),size=(params[0],params[3]),replace=False) nextPop = np.zeros((curPop.shape[0], curPop.shape[1])) fitVec = np.zeros((params[0], 2)) # The top line is the most important here. Basically we're creating a matrix 100x3 with an initial population of random solutions. We're using the np.arange() function to create a bunch of values -15, -14.99, -14.89....15 in order in a long array, then we use np.random.choice() to randomly choose 100x3 = 300 of them to build the 100x3 matrix of initial solutions. 
This isn't the most computationally efficient way to do things, but I've found it works really well. This is certainly not the only way to do it, and I encourage you to mess around with different ways to intialize your population. It turns out this step is really important to how well it does. If your initial population is not well randomized and not very diverse, you won't get good results. for i in range(params[2]): fitVec = np.array([np.array([x, np.sum(NN.costFunction(NN.X, NN.y, curPop[x].reshape(3,1)))]) for x in range(params[0])]) # params[2] is our number of generations, so this is our main, outer loop to go through the whole flow each generation. # Our first step is to calculate the cost/error of each solution (there's 100) and add it to a matrix called <b>fitVec</b>. Each element of fitVec is an array consisting of the index of the solution in curPop and its cost, e.g. [0, 2.54] means that the 0th element in curPop (first solution) has an error of 2.54 winners = np.zeros((params[4], params[3])) # We initialize a new matrix called <b>winners</b>; this will hold our winning solutions temporarily until we move them to the next generation. for n in range(len(winners)): selected = np.random.choice(range(len(fitVec)), params[4]/2, replace=False) # Now we're in a loop to populate the winners matrix. We use np.random.choice() to randomly pick params[4]/2 (20/2=10) solutions. We're gonna use a <b>tournament style selection</b> process where we randomly choose a subset of our population, and then pick the best solution from that subset and add it to our winners array. Obviously higher fitness (lower error) solutions have a higher chance of making it to the winners array, but we don't just pick the top 20 solutions because we want to maintain some genetic diversity in each generation, so have a few higher error solutions is generally a good thing. 
wnr = np.argmin(fitVec[selected,1]) # So the array 'selected' contains 10 random solutions (actually the indices to 10 solutions) from our population. Now we reference fitVec to find the actual elements, use np.argmin() to pick the one with the smallest error/cost and assign the index of that winning element to a variable, 'wnr' winners[n] = curPop[int(fitVec[selected[wnr]][0])] # Then we reference the winner in curPop, the array of all solutions of the current generation, and copy it to our 'winners' array. nextPop[:len(winners)] = winners # nextPop is the array containing all the solutions for the next generation. We populate the first 20 elements of nextPop with our winning solutions from 'winners' array. nextPop[len(winners):] = np.array([np.array(np.random.permutation(np.repeat(winners[:, x], ((params[0] - len(winners))/len(winners)), axis=0))) for x in range(winners.shape[1])]).T # Okay, yeah this is a really long line and it's not very readable. I kind of cheated to make this all in 15 lines. # This line is our <b>mating</b> process, and it's probably the most complicated part of a genetic algorithm. Let's start with the core of this nasty line. <br /> # # > `np.repeat(winners[:, x], ((params[0] - len(winners))/len(winners)), axis=0)` <br /> # # Basically np.repeat() will duplicate our 20x3 matrix to create a 80x3 matrix. We already populated the first 20 elements of nextPop with the winners from last generation. Now we want to populate the last 80 elements with their offspring. # # > `np.random.permutation(np.repeat(winners[:, x], ((params[0] - len(winners))/len(winners)), axis=0))` # # Now we just use np.random.permutation() to shuffle the columns of this next 80x3 matrix. This is how we accomplish the crossover functional. 
Imagine we have a 3x3 matrix (2 solutions) like this:<br /> `np.array([[1,2,3],[4,5,6],[7,8,9]])` , when we run the permutation function, it will change it something like:<br /> `np.array([[7,5,3],[1,8,9],[4,2,3]])` # <br />Go look at the numpy documentation to learn more about permutation if you still don't understand how it's working here. curPop = np.multiply(nextPop, np.matrix([np.float(np.random.normal(0,2,1)) if random.random() < params[1] else 1 for x in range(nextPop.size)]).reshape(nextPop.shape)) # Ahh. Our last line of code! This is our <b>mutation</b> process. I'm using a list comprehension to build a matrix of the same dimensions as nextPop, but filled with 1s. However, with a probability of params[1] (our mutation rate), we randomly "mutate" some of the elements. Our mutation is basically using a random value from numpy.random.normal() instead of 1. So we end up with a matrix like this (I've shrunk it to 10x3 to make it fit here and changed the mutation rate to 20% so you can see more mutated elements): np.matrix([np.float(np.random.normal(0,2,1)) if random.random() < 0.20 else 1 for x in range(30)]).reshape(10,3) # Then we multiply this matrix (element-wise multiplication) to our nextPop matrix. Most of the time we're multiplying each element in nextPop by 1, so leaving them unchanged, but sometimes we multiply by one of the mutated values and thus will randomly change some elements in nextPop. This adds genetic diversity to our next generation. So now we've filled up nextPop with a new generation of higher fitness solutions. We just repeat this process for how ever many generations we defined in params. # <p><h4>Alright, so we're done! That's it! We made a genetic algorithm that trains a neural network, cool!</h4></p> # <p>...Okay, yeah technically we did, but let's actually watch it do something. 
# Here we go...</p>

# +
import random, numpy as np
import NeuralNet as NN

# GA hyper-parameters:
# [population size, mutation rate, number of generations,
#  chromosome/solution length, winners kept per generation]
params = [100, 0.05, 10, 3, 20]
# Initialize current population to random weight values within [-15, 15)
curPop = np.random.choice(np.arange(-15, 15, step=0.01),
                          size=(params[0], params[3]), replace=False)
nextPop = np.zeros((curPop.shape[0], curPop.shape[1]))
fitVec = np.zeros((params[0], 2))  # 1st col is indices, 2nd col is cost
for i in range(params[2]):  # iterate through num generations
    # Vector of [index, cost] for every member of the population
    fitVec = np.array([np.array([x, np.sum(NN.costFunction(NN.X, NN.y, curPop[x].reshape(3, 1)))])
                       for x in range(params[0])])
    print("(Gen: #%s) Total error: %s\n" % (i, np.sum(fitVec[:, 1])))
    winners = np.zeros((params[4], params[3]))  # 20x3
    for n in range(len(winners)):
        # Tournament selection: sample half the winner count, keep the fittest.
        # FIX: params[4] / 2 is a float under Python 3 and crashes
        # np.random.choice — use integer division.
        selected = np.random.choice(range(len(fitVec)), params[4] // 2, replace=False)
        wnr = np.argmin(fitVec[selected, 1])
        winners[n] = curPop[int(fitVec[selected[wnr]][0])]
    nextPop[:len(winners)] = winners  # populate new gen with winners
    # Fill the rest of the generation with column-wise shuffled copies of the
    # winners' genes (crossover by permutation).  FIX: the repeat count must
    # be an int under Python 3, hence //.
    nextPop[len(winners):] = np.array(
        [np.array(np.random.permutation(np.repeat(winners[:, x],
                  (params[0] - len(winners)) // len(winners), axis=0)))
         for x in range(winners.shape[1])]).T
    # Randomly mutate part of the population.  FIX: np.float was removed from
    # NumPy and np.matrix is deprecated — draw a scalar sample and use a
    # plain ndarray instead.
    nextPop = np.multiply(nextPop, np.array(
        [np.random.normal(0, 2) if random.random() < params[1] else 1.0
         for x in range(nextPop.size)]).reshape(nextPop.shape))
    curPop = nextPop
best_soln = curPop[np.argmin(fitVec[:, 1])]
X = np.array([[0, 1, 1], [1, 1, 1], [0, 0, 1], [1, 0, 1]])
result = np.round(NN.runForward(X, best_soln.reshape(3, 1)))
print("Best Sol'n:\n%s\nCost:%s" % (best_soln, np.sum(NN.costFunction(NN.X, NN.y, best_soln.reshape(3, 1)))))
print("When X = \n%s \nhThetaX = \n%s" % (X[:, :2], result,))
# -

# <h3>Sweet!</h3>
# Looks like it converged, finding a solution with a cost of only 0.007, pretty close to the error of the solution found with gradient descent. Notice I only ran it for 10 generations 1) because clearly that's all it takes and 2) because I didn't want a 20 page long document here.
#
# <p><h2>Where to go from here?</h2>
# First off, thanks. If you made it this far, I must've done something right. <br />
# But if you want to do more, then I really encourage you to play around with the parameters, maybe change up the neural network, or change the neural network cost function, etc and see what happens. The best way to learn is to get your hands dirty. Keep in mind this GA was kind of hard-wired for our little neural net by iamtrask, but if you understand the concepts and methods, you should be able to adapt it to more complex problems.
# </p>
# <h3>Bonus! Let's graph the population errors vs the generation #</h3>

# FIX: import pyplot directly instead of `import matplotlib as plt`, which
# forced the awkward plt.pyplot.* spelling below.
import matplotlib.pyplot as plt
# %matplotlib inline

# +
import random, numpy as np
import NeuralNet as NN

# Same GA as above, now run for 25 generations while scatter-plotting the
# total population error at each generation instead of printing it.
params = [100, 0.05, 25, 3, 20]
curPop = np.random.choice(np.arange(-15, 15, step=0.01),
                          size=(params[0], params[3]), replace=False)
nextPop = np.zeros((curPop.shape[0], curPop.shape[1]))
fitVec = np.zeros((params[0], 2))  # 1st col is indices, 2nd col is cost
for i in range(params[2]):  # iterate through num generations
    fitVec = np.array([np.array([x, np.sum(NN.costFunction(NN.X, NN.y, curPop[x].reshape(3, 1)))])
                       for x in range(params[0])])
    plt.scatter(i, np.sum(fitVec[:, 1]))
    winners = np.zeros((params[4], params[3]))  # 20x3
    for n in range(len(winners)):
        selected = np.random.choice(range(len(fitVec)), params[4] // 2, replace=False)  # int count for choice
        wnr = np.argmin(fitVec[selected, 1])
        winners[n] = curPop[int(fitVec[selected[wnr]][0])]
    nextPop[:len(winners)] = winners  # populate new gen with winners
    nextPop[len(winners):] = np.array(
        [np.array(np.random.permutation(np.repeat(winners[:, x],
                  (params[0] - len(winners)) // len(winners), axis=0)))
         for x in range(winners.shape[1])]).T  # offspring of mating pairs
    nextPop = np.multiply(nextPop, np.array(
        [np.random.normal(0, 2) if random.random() < params[1] else 1.0
         for x in range(nextPop.size)]).reshape(nextPop.shape))  # random mutation
    curPop = nextPop
best_soln = curPop[np.argmin(fitVec[:, 1])]
X = np.array([[0, 1, 1], [1, 1, 1], [0, 0, 1], [1, 0, 1]])
result = np.round(NN.runForward(X, best_soln.reshape(3, 1)))
print("Best Sol'n:\n%s\nCost:%s" % (best_soln, np.sum(NN.costFunction(NN.X, NN.y, best_soln.reshape(3, 1)))))
print("When X = \n%s \nhThetaX = \n%s" % (X[:, :2], result,))
# -

# Looks like we converge after just <b>4</b> generations! Also notice we get these little bumps in error every 10 generations or so, likely due to a particularly dramatic mutation round.
content/Mini GA and NN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_pytorch_latest_p36
#     language: python
#     name: conda_pytorch_latest_p36
# ---

# +
import numpy as np
import pandas as pd

df = pd.read_csv('tsv.csv')
df['date_of_infraction'] = pd.to_datetime(df['date_of_infraction'])

# Strip the '*' footnote markers and thousands-separator commas from the raw
# text columns.  regex=False: both are literal characters — with the modern
# pandas default (regex=True) a lone '*' is an invalid pattern and raises.
# (This also drops the duplicated Longitude-cleaning line of the original.)
for col in ['Count Date', 'Latitude', 'Longitude',
            '8 Peak Hr Vehicle Volume', '8 Peak Hr Pedestrian Volume',
            'Activation Date', 'TCS ']:
    df[col] = df[col].str.replace('*', '', regex=False).str.replace(',', '', regex=False)

df['Count Date'] = pd.to_datetime(df['Count Date'])
df['Activation Date'] = pd.to_datetime(df['Activation Date'])
df

# Work on an explicit copy so the feature engineering below does not raise
# SettingWithCopyWarning on a view of df.
df1 = df[['date_of_infraction', 'set_fine_amount', 'time_of_infraction',
          'location2', 'Latitude', 'Longitude', 'Count Date',
          '8 Peak Hr Vehicle Volume', '8 Peak Hr Pedestrian Volume']].copy()
df1

# Expand the two datetime columns into calendar features.
# Series.dt.week was removed in pandas 2.0; isocalendar().week is the
# supported equivalent.
df1['date_of_infraction_year'] = df1['date_of_infraction'].dt.year
df1['date_of_infraction_month'] = df1['date_of_infraction'].dt.month
df1['date_of_infraction_week'] = df1['date_of_infraction'].dt.isocalendar().week
df1['date_of_infraction_day'] = df1['date_of_infraction'].dt.day
df1['date_of_infraction_dayofweek'] = df1['date_of_infraction'].dt.dayofweek
df1['Count Date_year'] = df1['Count Date'].dt.year
df1['Count Date_month'] = df1['Count Date'].dt.month
df1['Count Date_week'] = df1['Count Date'].dt.isocalendar().week
df1['Count Date_day'] = df1['Count Date'].dt.day
df1['Count Date_dayofweek'] = df1['Count Date'].dt.dayofweek
df1

df1 = df1.drop('date_of_infraction', axis=1)
df1 = df1.drop('Count Date', axis=1)
df1 = df1.drop('location2', axis=1)
df1 = df1.dropna(how='all')
df1["8 Peak Hr Vehicle Volume"] = df1["8 Peak Hr Vehicle Volume"].astype(str).astype(int)
df1["8 Peak Hr Pedestrian Volume"] = df1["8 Peak Hr Pedestrian Volume"].astype(str).astype(int)
# Encode the hour of day cyclically so 23:00 and 00:00 end up close together.
df1['hour_sin'] = np.sin(2 * np.pi * df1['time_of_infraction'] / 23.0)
df1['hour_cos'] = np.cos(2 * np.pi * df1['time_of_infraction'] / 23.0)
df1["Latitude"] = df1.Latitude.astype(float)
# BUG FIX: the original assigned df1.Latitude.astype(float) into the
# Longitude column, silently destroying the longitudes.
df1["Longitude"] = df1.Longitude.astype(float)
np.where(df1.values >= np.finfo(np.float64).max)
df1 = df1.dropna()
pd.isnull(df1).sum() > 0
X = df1.drop(columns=['8 Peak Hr Vehicle Volume', '8 Peak Hr Pedestrian Volume'])
y = df1[['8 Peak Hr Vehicle Volume', '8 Peak Hr Pedestrian Volume']]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.60)
# !pip install tensorflow
# !pip install keras
# -

df1.columns

# Creating bins for different sets
bin_names1 = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O']
df1['Level'] = pd.qcut(df1['time_of_infraction'], 15, labels=bin_names1)

# Plotting number of datapoints in each bin.
df1['Level'].value_counts().plot(kind='barh')

E = df1.loc[df1['Level'] == 'G']
E = E.drop(columns='Level')
X = E.drop(columns=['8 Peak Hr Vehicle Volume', '8 Peak Hr Pedestrian Volume'])
y = E[['8 Peak Hr Vehicle Volume', '8 Peak Hr Pedestrian Volume']]
E

# Scaling the bin and obtaining the scores for the pca analysis.
from sklearn.linear_model import LogisticRegression from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler #Importing All major The Necessary Packages import numpy as np import pandas as pd from pandas import DataFrame,Series from sklearn.decomposition import PCA import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MaxAbsScaler E_index= E.index EE= StandardScaler().fit_transform(E) pca = PCA(n_components=3, svd_solver='full') PC_scores = pca.fit_transform(EE) scores_pd = pd.DataFrame(data = PC_scores ,columns = ['PC1', 'PC2', 'PC3'] ,index =E_index) df2 = pd.DataFrame(data = EE,columns = E.columns) X=df2.drop(columns=['8 Peak Hr Vehicle Volume','8 Peak Hr Pedestrian Volume']) y= df2[['8 Peak Hr Vehicle Volume','8 Peak Hr Pedestrian Volume']] from keras.models import Sequential from keras.layers import Dense, LSTM from numpy import array from numpy.random import uniform from numpy import hstack import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split seed = 1 test_size=0.2 X_train, X_test, y_train, y_test = train_test_split(X_test, y_test, test_size=test_size, random_state=seed) import numpy as np import pandas as pd from numpy import array from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import MinMaxScaler #cleaning test data df = df2.replace([np.inf, -np.inf], np.nan) df = df.dropna() df scaler = MinMaxScaler(feature_range=(0, 1)) df_scaled = scaler.fit_transform(df.values) df_scaled from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, 
test_size=0.60) # having the dataset as x and y . Making x into a 3-d data and y as 2-d data ie.) reshaping X_train = [] y_train = [] n_output_steps = 1 # Number of outputs we want to predict into the future n_input_steps = 1 # Number of past inputs that we want to use to predict the future for i in range(n_input_steps, len(df_scaled) - n_output_steps +1): X_train.append(df_scaled[i - n_input_steps:i, 0:df.shape[1] - 1]) y_train.append(df_scaled[i + n_output_steps - 1:i + n_output_steps, 0]) X_train, y_train = np.array(X_train), np.array(y_train) print('X_train shape == {}.'.format(X_train.shape)) # no.of samples, no. of time stamps, no. of features print('y_train shape == {}.'.format(y_train.shape)) # no. of features, no. of output time steps # + from sklearn.model_selection import train_test_split train, test = train_test_split(df2, test_size=0.2) # - X import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler from tensorflow.python.keras.models import Sequential from tensorflow.python.keras.layers import Dense from tensorflow.python.keras.wrappers.scikit_learn import KerasRegressor #Variables scaler_x = MinMaxScaler() scaler_y = MinMaxScaler() print(scaler_x.fit(X)) xscale=scaler_x.transform(X) print(scaler_y.fit(y)) yscale=scaler_y.transform(y) X_train, X_test, y_train, y_test = train_test_split(xscale, yscale) from keras.models import load_model from keras.models import Model from keras import models, layers model = Sequential() model.add(Dense(100, input_dim=16, kernel_initializer='uniform', activation='tanh')) model.add(Dense(50, activation='tanh')) model.add(Dense(25, activation='tanh')) model.add(Dense(15, activation='tanh')) model.add(Dense(10, activation='tanh')) model.add(Dense(1, kernel_initializer='uniform', 
activation='tanh')) # Compile model model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) model.compile(loss='mse', optimizer='adam', metrics=['mse','mae']) history = model.fit(X_train, y_train, epochs=100, batch_size=160, verbose=1, validation_split=0.2) model.evaluate(X_test, y_test) print(history.history.keys()) # "Loss" plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.show() # MLP for Pima Indians Dataset Serialize to JSON and HDF5 from keras.models import Sequential from keras.layers import Dense from keras.models import model_from_json # serialize model to JSON model_json = model.to_json() with open("model.json", "w") as json_file: json_file.write(model_json) # serialize weights to HDF5 model.save_weights("tanh_LSTM_model.h5") print("Saved model to disk") from keras import models, layers model1 = models.Sequential() model1.add(layers.Dense(300, activation='relu', input_shape=[X_train.shape[1]])) model1.add(layers.Dense(150, activation='relu')) model1.add(layers.Dense(75, activation='relu')) model1.add(layers.Dense(25, activation='relu')) model1.add(layers.Dense(1, activation='relu')) # output layer model1.add(layers.Dense(1)) model1.compile(optimizer='rmsprop', loss='mse', metrics=['mae']) history = model1.fit(X_train, y_train, validation_split=0.2, epochs=100) print(history.history.keys()) # "Loss" plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validation'], loc='upper left') plt.show() model1.evaluate(X_test, y_test) # MLP for Pima Indians Dataset Serialize to JSON and HDF5 from keras.models import Sequential from keras.layers import Dense from keras.models import model_from_json # serialize model to JSON model_json = model1.to_json() with open("model.json", "w") as json_file: 
json_file.write(model_json) # serialize weights to HDF5 model.save_weights("RELU_LSTM_model.h5") print("Saved model to disk") # !pip install eli5 from keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor import eli5 from eli5.sklearn import PermutationImportance def base_model(): model2 = models.Sequential() model2.add(layers.Dense(300, activation='relu', input_shape=[X_train.shape[1]])) model2.add(layers.Dense(150, activation='relu')) model2.add(layers.Dense(75, activation='relu')) model2.add(layers.Dense(25, activation='relu')) model2.add(layers.Dense(1, activation='relu')) return model param_model = KerasRegressor(build_fn=base_model, epochs=100, batch_size=16, verbose=0) param_model.fit(X_test,y_test) perm = PermutationImportance(model, random_state=1).fit(X_test,y_test) eli5.show_weights(perm, feature_names = X.columns.tolist())
Models/LSTM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import jams
import glob
import os
import librosa

jampaths = glob.glob("./jams/*.jams")
jampaths.sort()
jampaths

for jampath in jampaths:
    # File names look like "<piece>-<tempo>-<key>_…": piece ends in the
    # progression number, tempo is in BPM, key is a note name.
    piece, tempo, key = os.path.basename(jampath).split('_')[0].split('-')
    tempo = float(tempo)
    beat_step = 60 / tempo                      # seconds per beat
    tran_semi = librosa.note_to_midi(key) % 12  # semitones to transpose up from C
    prog = piece[-1]                            # progression number: '1', '2' or '3'
    print(piece, tempo, beat_step, key, tran_semi)
    jam = jams.load(jampath)

    if len(jam.search(namespace='beat_position')) != 0:
        # NOTE(review): the original only built ann_beat when the file had no
        # beat_position annotation yet, but then used ann_beat unconditionally
        # below — a NameError for already-annotated files.  Skip such files
        # entirely instead of re-annotating them.
        continue

    ann_beat = jams.Annotation(
        namespace='beat_position',
        time=0,
        duration=jam.file_metadata.duration
    )
    if prog == '1':
        num_measure = 12
    else:
        num_measure = 16
    # 4/4 time: four beats per measure; measure numbers start at one.
    for i in range(num_measure * 4):
        measure = int(i / 4) + 1
        time = i * beat_step
        value = {
            "position": (i % 4) + 1,
            "measure": measure,
            "num_beats": 4,
            "beat_units": 4
        }
        ann_beat.append(time=time, duration=0, value=value)

    ann_tempo = jams.Annotation(namespace='tempo', time=0, duration=jam.file_metadata.duration)
    ann_tempo.append(time=0, duration=jam.file_metadata.duration, value=tempo, confidence=1.0)

    ann_chord = jams.Annotation(
        namespace='chord',
        time=0,
        duration=jam.file_metadata.duration
    )
    # Chord progressions written in C; transposed into the piece's key below.
    prog1_list = ['C:maj', 'F:maj', 'C:maj', 'G:maj', 'F:maj', 'C:maj']
    prog2_list = ['F:min', 'Bb:7', 'Eb:maj', 'Ab:maj', 'D:hdim7', 'G:7', 'C:min',
                  'F:min', 'Bb:7', 'Eb:maj', 'Ab:maj', 'D:hdim7', 'G:7', 'C:min']
    prog3_list = ['C:maj', 'G:maj', 'A:min', 'E:min', 'F:maj', 'C:maj', 'F:maj', 'G:maj',
                  'C:maj', 'G:maj', 'A:min', 'E:min', 'F:maj', 'C:maj', 'F:maj', 'G:maj']

    # Collect the beat times at which the chord changes for this progression.
    # BUG FIX: the original compared strings with `is` ("prog is '1'"), which
    # tests object identity, not equality — use == instead.
    chord_change = []
    if prog == '1':
        prog_list = prog1_list
        for beat in ann_beat:
            if beat.value['measure'] in [1, 5, 7, 9, 10, 11] and beat.value['position'] == 1:
                chord_change.append(beat.time)
    if prog == '2':
        prog_list = prog2_list
        for beat in ann_beat:
            if beat.value['measure'] not in [8, 16] and beat.value['position'] == 1:
                chord_change.append(beat.time)
    if prog == '3':
        prog_list = prog3_list
        for beat in ann_beat:
            if beat.value['position'] == 1:
                chord_change.append(beat.time)
    chord_change.sort()
    for i, t in enumerate(chord_change):
        # Each chord lasts until the next change (or the end of the file).
        if i == len(chord_change) - 1:
            duration = jam.file_metadata.duration - t
        else:
            duration = chord_change[i + 1] - t
        chord = prog_list[i]
        root_in_c, quality = chord.split(':')
        # Transpose the root from C into the piece's key; [:-1] strips the
        # octave digit from librosa's note name.
        # NOTE(review): newer librosa returns unicode accidentals ('C♯');
        # pass unicode=False if the jams chord namespace rejects them.
        root_in_key = librosa.midi_to_note(librosa.note_to_midi(root_in_c) + tran_semi)[:-1]
        transposed_chord = ':'.join([root_in_key, quality])
        ann_chord.append(time=t, duration=duration, value=transposed_chord)

    jam.annotations.append(ann_beat)
    jam.annotations.append(ann_tempo)
    jam.annotations.append(ann_chord)
    jam.save(jampath)
annotation_process/step_3_2_update_jams.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# Load the raw Nord Pool spot-price export and show it untouched.
df = pd.read_csv('/home/marco/Electricity Data/Spot_prices/elspot-prices_2019_hourly_dkk.csv')
df

# Promote the second file row to column headers, keep only the data rows,
# and drop all-empty columns as well as the rows left blank by the
# daylight-saving clock change.
data = (
    df.rename(columns=df.iloc[1])
      .iloc[2:]
      .dropna(axis=1, how='all')
      .dropna(axis=0)
)
# First index pass: parse the date column; second pass: replace it with a
# clean hourly range spanning the same period (out to 23:00 on the last day).
data = data.set_index(pd.to_datetime(data.iloc[:, 0]))
data = data.set_index(pd.date_range(data.index[0], data.index[-1] + pd.Timedelta(hours=23), freq='1H'))
# The first two columns (date and hour strings) are now redundant.
data = data.drop(data.columns[[0, 1]], axis=1)
# European decimal commas -> dots, then cast everything to float.
data = data.replace({',': '.'}, regex=True).astype(float)
data
Additional Notebooks/clean_prices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Introduction # # In the previous lesson, we learned how to create a grid of subplots using the `subplot` and `subplots` functions. One of the things we didn't see though, was how to adjust some of the parameters of the grid, like the spacing between subplots for example. In this lesson, we'll take a quick look at some of the options that you have for adjusting these parameters to make your subplots look as nice and readable as possible. # # Let's go ahead and jump right into the code. # + # %matplotlib inline import matplotlib.pyplot as plt from IPython.display import set_matplotlib_formats set_matplotlib_formats('retina') # - # ## Adjusting Subplots # # In the code below we use the `subplots` function to create a 3x2 grid. Let's run it and see how it looks. plt.subplots(3, 2); # ### Tight Layout # # It doesn't look too bad, but the vertical spacing between each subplot is a little tight---notice that there's a little overlap with some of the x-axis labels. If you want to fix that, and make the plots fit together much better, you can simply call the `pyplot.tight_layout` function. The [`tight_layout`][1] function automatically adjusts the parameters of the subplots so that they fit nicely within the figure. # # Let's go ahead and give it a try now to see how well it works. # # [1]: http://matplotlib.org/users/tight_layout_guide.html plt.subplots(3, 2) plt.tight_layout(); # ### `pyplot.subplots_adjust` # # Well, that looks much better! The `tight_layout` function is really nice and should solve most of your problems, but as of now, it's still an experimental feature and not guaranteed to work. If you do run into problems with it, you can fall back on the `pyplot.subplots_adjust` function to change some, or all, of the attributes of the grid. 
# # The example below uses this function to adjust the vertical spacing, or hspace for height, between each plot. plt.subplots(3, 2) plt.subplots_adjust(hspace=0.5); # Not quite as easy as calling the `tight_layout` function, but not exactly rocket science either. # # ## Conclusion # # And, that will bring us to the end of this lesson. We just saw how to use the `tight_layout` and `subplots_adjust` functions to make minor tweaks to our grids that can help make our plots easier to read and comprehend. In the next lesson, we'll see how to use the subplotting features that we've learned in the last few lessons to create a very powerful visualization technique called a scatterplot matrix. Following that, we'll see how to create grids of subplots of varying shapes and sizes.
03 - pyplot/0306 - Adjusting Subplot Parameters.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.6.12 64-bit (''common'': conda)'
#     name: python3
# ---

# Demonstrates common list operations: len, count, index, remove, sort.
list1 = [1, 1, 5, 8, 12, 13, 20, 22]
print(len(list1))
print("统计次数count(1):", list1.count(1))
print("匹配索引下标位置index(5)", list1.index(5))
# FIX: list.remove() returns None, so the original printed a confusing
# "None" — remove first, then show the updated list.
list1.remove(5)
print("移除匹配项list.remove(obj):", list1)
list1.sort(reverse=False)  # reverse=True: descending; reverse=False: ascending (default).
print(list1)
python/02list.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Example of using recurrent networks for time-series analysis
#
# # Table of contents <a name='toc'></a>
#
# <ul>
# <li><a href='#libs'>Loading libraries</a></li>
# <li><a href='#dataset'>Loading the dataset</a></li>
# <li><a href='#build'>Building the neural network</a></li>
# <li><a href='#prediction'>Using the network to predict over the whole dataset</a></li>
# <li><a href='#links'>Useful links</a></li>
# </ul>

# # Loading libraries <a name='libs'></a>

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, RNN, LSTM, GRU
from tensorflow.keras.layers import Dense, Flatten, Input
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import time

# # Loading the dataset <a name='dataset'></a>

# Uses file 1.csv of the open dataset from the repository https://github.com/sevaiq/Datacenter_dataset
path = '/home/datapk/datasets/DataCenter/sevaig/datacenter_dataset/1.csv'
df = pd.read_csv(path, header=0, index_col=0, sep=';')
# Strip stray tab characters from the column names.
df.columns = [s.replace('\t', '') for s in df.columns]
# Convert the epoch-seconds index into zero-padded "YYYY:MM:DD-HH:MM:SS"
# strings, which sort and compare lexicographically in time order.
df.index = [datetime.strftime(datetime.fromtimestamp(s), "%Y:%m:%d-%H:%M:%S") for s in df.index]
df.head(3)

df.info()

# Analysis of the "CPU usage [%]" parameter over several days.

# +
plt.figure(figsize=(20,10))
df1 = df[(df.index>'2013:08:15-00:00:00') & (df.index<'2013:08:20-00:00:00')]['CPU usage [%]']
df1.plot()
curr_day = 15
i, curr_date = 0, "2013:08:{}-00:00:00".format(curr_day)
# Draw a red dashed vertical line at each midnight (day boundary).
while i < df1.shape[0]:
    if df1.index[i] >= curr_date:
        plt.axvline(i, c= 'r', marker='s', linestyle='--')
        curr_day += 1
        curr_date = "2013:08:{}-00:00:00".format(curr_day)
    i += 1
plt.title('CPU usage [%]')
plt.grid(True)
plt.show()
# -

# <a href='#toc'>Back to Table of contents</a>

# # Building the neural network <a name='build'></a>

wl = 10  # window length: number of consecutive samples per input window

model = Sequential()
model.add(LSTM(wl, input_shape=(None, 10)))
model.add(Dense(10, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.summary()

model.compile(optimizer='SGD', loss='categorical_crossentropy', metrics=['accuracy'])

# +
# Build sliding windows of length wl as inputs, reshaped to
# (samples, 1 time step, 10 features).
X = []
for i in range(len(df1)-wl):
    X.append(df1.values[i:i+wl])
X = np.array(X)
X = X.reshape(len(X), 1, 10)

# Label each window 1/0 depending on whether its mean CPU usage exceeds the
# overall mean.
# NOTE(review): df1[i:j] is a *label* slice and therefore includes both
# endpoints, so each label averages wl+1 samples while the matching X window
# holds wl samples — confirm whether this off-by-one is intentional.
y = []
for i, j in zip(df1.index[:], df1.index[wl:]):
    if df1[i:j].mean() > df1.mean():
        y.append(1)
    else:
        y.append(0)
y = np.array(y)
y = tf.keras.utils.to_categorical(y)
# -

X.shape, y.shape

model.fit(X, y, epochs=10, verbose=1)

predict = model.predict(X)
predict.shape

# +
plt.figure(figsize=(20,10))
df1 = df[(df.index>'2013:08:15-00:00:00') & (df.index<'2013:08:20-00:00:00')]['CPU usage [%]']
plt.subplot(211)
df1.plot()
curr_day = 15
i, curr_date = 0, "2013:08:{}-00:00:00".format(curr_day)
# Same midnight markers as above, plotted above the class-0 probabilities.
while i < df1.shape[0]:
    if df1.index[i] >= curr_date:
        plt.axvline(i, c= 'r', marker='s', linestyle='--')
        curr_day += 1
        curr_date = "2013:08:{}-00:00:00".format(curr_day)
    i += 1
plt.title('CPU usage [%]')
plt.grid(True)
plt.subplot(212)
plt.scatter(range(predict.shape[0]), predict[:,0])
plt.title('Вероятность работы в режиме 0')
plt.grid(True)
plt.show()
# -

# <a href='#toc'>Back to Table of contents</a>

# # Applying the model to analyze the whole dataset <a name='prediction'></a>

model.predict(np.array(df1.values[0:10]).reshape(1,1,10))

# +
# Same window/label construction as above, now over the full dataframe.
X_all = []
for i in range(len(df)-wl):
    X_all.append(df['CPU usage [%]'].values[i:i+wl])
X_all = np.array(X_all)
X_all = X_all.reshape(len(X_all), 1, 10)

y_all = []
for i, j in zip(df.index[:], df.index[wl:]):
    if df['CPU usage [%]'][i:j].mean() > df['CPU usage [%]'].mean():
        y_all.append(1)
    else:
        y_all.append(0)
y_all = np.array(y_all)
y_all = tf.keras.utils.to_categorical(y_all)
# -

X_all.shape

predict_all = model.predict(X_all)

# +
plt.figure(figsize=(20,10))
plt.subplot(211)
df['CPU usage [%]'].plot()
plt.title('CPU usage [%]')
plt.grid(True)
plt.subplot(212)
plt.scatter(range(predict_all.shape[0]), predict_all[:,0])
plt.title('Вероятность работы в режиме 0')
plt.grid(True)
plt.show()
# -

X_all.shape

# <a href='#toc'>Back to Table of contents</a>

# # Useful links <a name='links'></a>

# - https://toolbox.google.com/datasetsearch
#
# - https://github.com/sevaiq/Datacenter_dataset
#
# - https://www.tensorflow.org/guide/keras/rnn
#
# - https://habr.com/ru/post/487808/

# <a href='#toc'>Back to Table of contents</a>
RNN for TimeSeries.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- pip install psycopg2-binary from sqlalchemy import create_engine engine = create_engine('postgresql://postgres:Jazmine14@localhost:5433/spotify_tracks') tracks.to_sql("tracks_", engine) # + import pandas as pd tracks = pd.read_csv("archive/tracks.csv") # - tracks.to_json("tracks.json")
Python Working Files/data_toJSON.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .cs
//       format_name: light
//       format_version: '1.5'
//       jupytext_version: 1.14.4
//   kernelspec:
//     display_name: .NET (C#)
//     language: C#
//     name: .net-csharp
// ---

// # Session 7: The Errata Review No. 1
//
// This session is a review of the prior six sessions and covering those pieces that were left off. Not necessarily errors, but missing pieces to complete the picture from the series. These topics answer some questions and will help complete the picture of the C# language features discussed to this point.
//
// ## Increment and Assignment operators
//
// In session 1, we reviewed operators and interacting with numbers. We skipped the [increment](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/arithmetic-operators?WT.mc_id=visualstudio-twitch-jefritz#increment-operator-) `++` and [decrement](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/arithmetic-operators?WT.mc_id=visualstudio-twitch-jefritz#decrement-operator---) `--` operators. These operators allow you to increment and decrement values quickly. You can place these operators before and after the variable you would like to act on, and they will be incremented or decremented before or after being returned.
//
// Let's take a look:

var counter = 1;
display(counter--); // Running -- AFTER counter returns the original value, so this displays 1
display(counter);   // the decrement has now taken effect: this row displays 0

var counter = 1;
display(--counter); // Running -- BEFORE counter decrements the variable first,
                    // so this displays 0

// ## Logical negation operator
//
// Sometimes you want to invert the value of a boolean, converting from `true` to `false` and from `false` to `true`. 
Quite simply, just prefix your test or boolean value with the [negation operator](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/boolean-logical-operators?WT.mc_id=visualstudio-twitch-jefritz#logical-negation-operator-) `!` to invert values // + var isTrue = true; display(!isTrue); display(!(1 > 2)) // - // ## TypeOf, GetType and NameOf methods // // Sometimes you need to work with the type of a variable or the name of a value. The methods `typeof`, `GetType()` and `nameof` allow you to interact with the types and pass them along for further interaction. // // [typeof](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/type-testing-and-cast?WT.mc_id=visualstudio-twitch-jefritz#typeof-operator) allows you to get a reference to a type for use in methods where you need to inspect the underlying type system display(typeof(int)); // Conversely, the `GetType()` method allows you to get the type information for a variable already in use. Every object in C# has the `GetType()` method available. var myInt = 5; display(myInt.GetType()); // The [`nameof` expression](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/nameof?WT.mc_id=visualstudio-twitch-jefritz) gives the name of a type or member as a string. This is particularly useful when you are generating error messages. // + class People { public string Name { get; set; } public TimeSpan CalculateAge() => DateTime.Now.Subtract(new DateTime(2000,1,1)); } var fritz = new People { Name="Fritz" }; display(nameof(People)); display(typeof(People)); display(nameof(fritz.Name)); // - // ## String Formatting // // Formatting and working with strings or text is a fundamental building block of working with user-input. We failed to cover the various ways to interact with those strings. Let's take a look at a handful of the ways to work with text data. 
//
// ## Concatenation
//
// You may have seen notes and output that concatenates strings by using the [`+` operator](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/addition-operator?WT.mc_id=visualstudio-twitch-jefritz#string-concatenation). This is the simplest form of concatenation and only works when both sides of the `+` operator are strings.

// +
var greeting = "Hello";
display(greeting + " World!");

// // += also works
greeting += " C# developers";
display(greeting);
// -

// If you have multiple strings to combine, the `+` operator gets a little unwieldy and is not as performance aware as several other techniques. We can [combine multiple strings](https://docs.microsoft.com/en-us/dotnet/csharp/how-to/concatenate-multiple-strings?WT.mc_id=visualstudio-twitch-jefritz) using the `Concat`, `Join`, `Format` and interpolation features of C#.

// +
var greeting = "Good";
var time = DateTime.Now.Hour < 12 && DateTime.Now.Hour > 3 ? "Morning" :
	DateTime.Now.Hour < 17 ? "Afternoon" : "Evening";
var name = "Visual Studio Channel";

// Use string.Concat with a comma separated list of arguments
display(string.Concat(greeting, " ", time, " ", name + "!"));

// +
var terms = new [] {greeting, time, name};

// Use string.Join to assemble values in an array with a separator
display(string.Join(" ", terms));
// -

// Use string.Format to configure a template string and load values into it based on position.
// BUG FIX: the original passed (time, name), which rendered
// "Good Visual Studio Channel Morning" — with the "{1} {0}" template the
// arguments must be (name, time) so {1}=time prints first.
var format = "Good {1} {0}";
display(string.Format(format, name, time));

// +
// With C# 7 and later you can now use string interpolation to format a string.
// Simply prefix a string with a $ to allow you to insert C# expressions in { } inside
// a string
var names = new string[] {"Fritz", "Scott", "Maria", "Jayme"};

display($"Good {time} {name} {string.Join(",",names)}");

// +
// Another technique that can be used when you don't know the exact number of strings
// to concatenate is to use the StringBuilder class. 
var sb = new StringBuilder(); sb.AppendFormat("Good {0}", time); sb.Append(" "); sb.Append(name); display(sb.ToString()); // - // ### Parsing strings with Split // // You can turn a string into an array of strings using the `Split` method on a string variable. Pass the character that identifies the boundary between elements of your array to turn it into an array: var phrase = "Good Morning Cleveland"; display(phrase.Split(' ')); display(phrase.Split(' ')[2]); var fibonacci = "1,1,2,3,5,8,13,21"; display(fibonacci.Split(',')); // ## A Deeper Dive on Enums // // We briefly discussed enumeration types in session 3 and touched on using the `enum` keyword to represent related values. Let's go a little further into conversions and working with the enum types. // // ### Conversions // // [Enum types](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/builtin-types/enum?WT.mc_id=visualstudio-twitch-jefritz) are extensions on top of numeric types. By default, they wrap the `int` integer data type. While this base numeric type can be overridden, we can also convert data into and out of the enum using standard explicit conversion operators // + enum DotNetLanguages : byte { csharp = 100, visual_basic = 2, fsharp = 3 } var myLanguage = DotNetLanguages.csharp; display(myLanguage); display((byte)myLanguage); display((int)myLanguage); // Push a numeric type INTO DotNetLanguages myLanguage = (DotNetLanguages)2; display(myLanguage); // - // ### Working with strings using Parse and TryParse // // What about the string value of the enumeration itself? 
We can work with that using the [`Parse`](https://docs.microsoft.com/en-us/dotnet/api/system.enum.parse?view=netcore-3.1&WT.mc_id=visualstudio-twitch-jefritz) and [`TryParse`](https://docs.microsoft.com/en-us/dotnet/api/system.enum.tryparse?view=netcore-3.1&WT.mc_id=visualstudio-twitch-jefritz) methods of the Enum object to convert a string into the Enum type // + var thisLanguage = "csharp"; myLanguage = Enum.Parse<DotNetLanguages>(thisLanguage); display(myLanguage); // Use the optional boolean flag parameter to indicate if the Parse operation is case-insensitive thisLanguage = "CSharp"; myLanguage = Enum.Parse<DotNetLanguages>(thisLanguage, true); display(myLanguage); // - // TryParse has a similar signature, but returns a boolean to indicate success var success = Enum.TryParse<DotNetLanguages>("Visual_Basic", true, out var foo); display(success); display(foo); // ### GetValues and the Enumeration's available values // // The constant values of the enum type can be exposed using the [Enum.GetValues](https://docs.microsoft.com/en-us/dotnet/api/system.enum.getvalues?view=netcore-3.1&WT.mc_id=visualstudio-twitch-jefritz) method. This returns an array of the numeric values of the enum. Let's inspect our `DotNetLanguages` type: var languages = Enum.GetValues(typeof(DotNetLanguages)); display(languages); // We can convert back to the named values of the enum with a little conversion foreach (var l in languages) { display((DotNetLanguages)l); } // ## Classes vs. Structs // // In the second session we introduced the `class` keyword to create reference types. There is another keyword, `struct`, that allows you to create [Structure](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/builtin-types/struct?WT.mc_id=visualstudio-twitch-jefritz) **value types** which will be allocated in memory and reclaimed more quickly than a class. 
While a `struct` looks like a class in syntax, there are some constraints: // // - A constructor must be defined that configures all properties / fields // - The parameterless constructor is not allowed // - Instance Fields / Properties cannot be assigned in their declaration // - Finalizers are not allowed // - A struct cannot inherit from another type, but can implement interfaces // // Structs are typically used to store related numeric types. Let's tinker with an example: // + struct Rectangle { public Rectangle(int length, int width) { this.Length = length; this.Width = width; } public static readonly int Depth = DateTime.Now.Minute; public int Length {get;set;} public int Width {get;set;} public int Area { get { return Length * Width;}} public int Perimeter { get { return Length*2 + Width*2;}} } var myRectangle = new Rectangle(2, 5); display(myRectangle); display(Rectangle.Depth); // + enum CountryCode { USA = 1 } struct PhoneNumber { public PhoneNumber(CountryCode countryCode, string exchange, string number) { this.CountryCode = countryCode; this.Exchange = exchange; this.Number = number; } public CountryCode CountryCode { get; set;} public string Exchange { get; set;} public string Number {get; set;} } var jennysNumber = new PhoneNumber(CountryCode.USA, "867", "5309"); display(jennysNumber); // - // ### When should I use a struct instead of a class? // // This is a common question among C# developers. How do you decide? 
Since a `struct` is a simple value type, there are [several guidelines to help you decide](https://docs.microsoft.com/en-us/dotnet/standard/design-guidelines/choosing-between-class-and-struct?WT.mc_id=visualstudio-twitch-jefritz): // // **Choose a struct INSTEAD of a class if all of these are true about the type:** // - It will be small and short-lived in memory // - It represents a single value // - It can be represented in 16 bytes or less // - It will not be changed, and is immutable // - You will not be converting it to a class (called `boxing` and `unboxing`) // ## Stopping and Skipping Loops // // In session four we learned about loops using `for`, `while`, and `do`. We can speed up our loop by moving to the next iteration in the loop and we can stop a loop process completely using the `continue` and `break` keywords. Let's take a look at some examples: for (var i=1; i<10_000_000; i++) { display(i); if (i%10 == 0) break; // Stop if the value is a multiple of 10 } // We can skip an iteration in the loop using the continue keyword for (var i = 1; i<10_000_000; i++) { if (i%3 == 0) continue; // Skip this iteration display(i); if (i%10 == 0) break; } // ## Initializing Collections // // In the fifth session we explored Arrays, Lists, and Dictionary types. 
We saw that you could initialize an array with syntax like the following: // + var fibonacci = new int[] {1,1,2,3,5,8,13}; display(fibonacci); //var coordinates = new int[,] {{1,2}, {2,3}}; //display(coordinates); // - // We can also initialize List and Dictionary types using the curly braces notation: // + var myList = new List<string> { "C#", "Visual Basic", "F#" }; display(myList); var myShapes = new List<Rectangle> { new Rectangle(2, 5), new Rectangle(3, 4), new Rectangle(4, 3) }; display(myShapes); // - var myDictionary = new Dictionary<int, string> { {100, "C#"}, {200, "Visual Basic"}, {300, "F#"} }; display(myDictionary); // ## Const and Static keywords // // // // + const int Five = 5; // Five = 6; display(Five); // + class Student { public const decimal MaxGPA = 5.0m; } display(Student.MaxGPA); // + class Student { public static bool InClass = false; public string Name { get; set; } public override string ToString() { return Name + ": " + Student.InClass; } public static void GoToClass() { Student.InClass = true; } public static void DitchClass() { Student.InClass = false; } } var students = new Student[] { new Student { Name="Hugo" }, new Student {Name="Fritz"}, new Student {Name="Lily"}}; foreach (var s in students) { display(s.ToString()); } // - Student.GoToClass(); foreach (var s in students) { display(s.ToString()); } // + static class DateMethods { public static int CalculateAge(DateTime date1, DateTime date2) { return 10; } } display(DateMethods.CalculateAge(DateTime.Now, DateTime.Now))
notebooks/0107-Errata_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%%\n"} from sys import modules IN_COLAB = 'google.colab' in modules if IN_COLAB: # !pip install -q ir_axioms[examples] python-terrier # + pycharm={"name": "#%%\n"} # Start/initialize PyTerrier. from pyterrier import started, init if not started(): init(tqdm="auto") # + pycharm={"name": "#%%\n"} from pyterrier.datasets import get_dataset, Dataset # Load dataset. dataset_name = "msmarco-passage" dataset: Dataset = get_dataset(f"irds:{dataset_name}") dataset_train: Dataset = get_dataset(f"irds:{dataset_name}/trec-dl-2019/judged") dataset_test: Dataset = get_dataset(f"irds:{dataset_name}/trec-dl-2020/judged") # + pycharm={"name": "#%%\n"} from pathlib import Path cache_dir = Path("cache/") index_dir = cache_dir / "indices" / dataset_name.split("/")[0] # + pycharm={"name": "#%%\n"} from pyterrier.index import IterDictIndexer if not index_dir.exists(): indexer = IterDictIndexer(str(index_dir.absolute())) indexer.index( dataset.get_corpus_iter(), fields=["text"] ) # + pycharm={"name": "#%%\n"} from pyterrier.batchretrieve import BatchRetrieve # BM25 baseline retrieval. 
bm25 = BatchRetrieve(str(index_dir.absolute()), wmodel="BM25") # + pycharm={"name": "#%%\n"} from ir_axioms.axiom import ( ArgUC, QTArg, QTPArg, aSL, PROX1, PROX2, PROX3, PROX4, PROX5, TFC1, TFC3, RS_TF, RS_TF_IDF, RS_BM25, RS_PL2, RS_QL, AND, LEN_AND, M_AND, LEN_M_AND, DIV, LEN_DIV, M_TDC, LEN_M_TDC, STMC1, STMC1_f, STMC2, STMC2_f, LNC1, TF_LNC, LB1, REG, ANTI_REG, REG_f, ANTI_REG_f, ASPECT_REG, ASPECT_REG_f, ORIG ) axioms = [ ~ArgUC(), ~QTArg(), ~QTPArg(), ~aSL(), ~LNC1(), ~TF_LNC(), ~LB1(), ~PROX1(), ~PROX2(), ~PROX3(), ~PROX4(), ~PROX5(), ~REG(), ~REG_f(), ~ANTI_REG(), ~ANTI_REG_f(), ~ASPECT_REG(), ~ASPECT_REG_f(), ~AND(), ~LEN_AND(), ~M_AND(), ~LEN_M_AND(), ~DIV(), ~LEN_DIV(), ~RS_TF(), ~RS_TF_IDF(), ~RS_BM25(), ~RS_PL2(), ~RS_QL(), ~TFC1(), ~TFC3(), ~M_TDC(), ~LEN_M_TDC(), ~STMC1(), ~STMC1_f(), ~STMC2(), ~STMC2_f(), ORIG() ] # + pycharm={"name": "#%%\n"} from ir_axioms.backend.pyterrier.transformers import AggregatedAxiomaticPreference aggregations = [ lambda prefs: sum(p >= 0 for p in prefs) / len(prefs), lambda prefs: sum(p == 0 for p in prefs) / len(prefs), lambda prefs: sum(p <= 0 for p in prefs) / len(prefs), ] features = bm25 % 20 >> AggregatedAxiomaticPreference( axioms=axioms, index=index_dir, aggregations=aggregations, dataset=dataset_name, cache_dir=cache_dir, verbose=True, ) # + pycharm={"name": "#%%\n"} features.transform(dataset_train.get_topics()[:1])["features"] # + pycharm={"name": "#%%\n"} from lightgbm import LGBMRanker from pyterrier.ltr import apply_learned_model lambda_mart = LGBMRanker( num_iterations=1000, metric="ndcg", eval_at=[10], importance_type="gain", ) ltr = features >> apply_learned_model(lambda_mart, form="ltr") # + pycharm={"name": "#%%\n"} ltr.fit( dataset_train.get_topics()[:-5], dataset_train.get_qrels(), dataset_train.get_topics()[-5:], dataset_train.get_qrels() ) # + pycharm={"name": "#%%\n"} from pyterrier.pipelines import Experiment from ir_measures import nDCG, MAP, RR experiment = Experiment( [bm25, ltr ^ bm25], 
dataset_test.get_topics(), dataset_test.get_qrels(), [nDCG @ 10, RR, MAP], ["BM25", "Axiomatic LTR"], verbose=True, ) experiment.sort_values(by="nDCG@10", ascending=False, inplace=True) # + pycharm={"name": "#%%\n"} experiment # + pycharm={"name": "#%%\n"} from numpy import ndarray feature_importance: ndarray = lambda_mart.feature_importances_.reshape(-1, len(aggregations)) feature_importance # + pycharm={"name": "#%%\n"} feature_importance.sum(0) # + pycharm={"name": "#%%\n"} feature_importance.sum(1)
examples/pyterrier_ltr_features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.7 64-bit (''flownet'': conda)' # language: python # name: python3 # --- # + import torch import torch.nn as nn from torch.utils.data import DataLoader from torch.autograd import Variable import argparse, os, sys, subprocess import numpy as np from tqdm import tqdm from glob import glob from os.path import * import models, losses from utils import flow_utils, tools from networks import FlowNetS from utils import tools from path import Path from glob import glob as glob from natsort import natsorted from imageio import imread, imwrite # - import torch model = torch.load('/user_data/vayzenbe/GitHub_Repos/LiMA/Weights/flownet2S.pt') dir(model.__module__) img_pairs = [] ext = 'jpg' data_dir = Path('/user_data/vayzenbe/GitHub_Repos/LiMA/Frames/Figure_23_Bulge') test_files = data_dir.files('*1.{}'.format(ext)) for file in test_files: img_pair = file.parent / (file.stem[:-1] + '2.{}'.format(ext)) if img_pair.isfile(): img_pairs.append([file, img_pair]) img_pairs imfiles = natsorted(glob(f'{data_dir}/*.jpg')) img_pairs = [] for fn in range(0,len(imfiles)-1): img_pairs.append([imfiles[fn], imfiles[fn+1]]) img_pairs model = torch.load('weights/flownet2S.pt') checkpoint = torch.load('weights/FlowNet2-S_checkpoint.pth.tar') model.load_state_dict(checkpoint['state_dict']) encoder = nn.Sequential(*list(model.children())[:-1]) model = torch.load('weights/flownet2S.pt') checkpoint['state_dict'] model.module.model.load_state_dict(checkpoint['state_dict']).load_state_dict(model, checkpoint['state_dict']) model.modules.load_state_dict(checkpoint['state_dict']).load_state_dict(model, checkpoint['state_dict']) print(model.children())
flow_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide import sys sys.path.append("../") from upit.data.unpaired import * from upit.models.cyclegan import * from upit.models.ganilla import * from upit.models.dualgan import * from upit.train.cyclegan import * from upit.train.dualgan import * from upit.inference.cyclegan import * from fastai.vision.all import * #hide set_seed(999, reproducible=True) # # Unpaired image-to-image translation # # > A fastai/PyTorch package for unpaired image-to-image translation currently with CycleGAN implementation. # # This is a package for training and testing unpaired image-to-image translation models. It currently only includes the [CycleGAN](https://junyanz.github.io/CycleGAN/), [DualGAN](https://arxiv.org/abs/1704.02510), and [GANILLA](https://arxiv.org/abs/2002.05638) models, but other models will be implemented in the future. # # This package uses [fastai](https://github.com/fastai/fastai) to accelerate deep learning experimentation. Additionally, [nbdev](https://github.com/fastai/nbdev) was used to develop the package and produce documentation based on a series of notebooks. # ## Install # To install, use `pip`: # # `pip install git+https://github.com/tmabraham/UPIT.git` # # The package uses torch 1.7.1, torchvision 0.8.2, and fastai 2.3.0 (and its dependencies). It also requires nbdev 1.1.13 if you would like to add features to the package. Finally, for creating a web app model interface, gradio 1.1.6 is used. # ## How to use # Training a CycleGAN model is easy with UPIT! 
Given the paths of the images from the two domains `trainA_path` and `trainB_path`, you can do the following: #hide horse2zebra = untar_data('https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/horse2zebra.zip') folders = horse2zebra.ls().sorted() trainA_path = folders[2] trainB_path = folders[3] testA_path = folders[0] testB_path = folders[1] #cuda dls = get_dls(trainA_path, trainB_path) cycle_gan = CycleGAN(3,3,64) learn = cycle_learner(dls, cycle_gan,opt_func=partial(Adam,mom=0.5,sqr_mom=0.999)) learn.fit_flat_lin(100,100,2e-4) # The GANILLA model is only a different generator model architecture (that's meant to strike a better balance between style and content), so the same `cycle_learner` class can be used. #cuda ganilla = GANILLA(3,3,64) learn = cycle_learner(dls, ganilla,opt_func=partial(Adam,mom=0.5,sqr_mom=0.999)) learn.fit_flat_lin(100,100,2e-4) # Finally, we provide separate functions/classes for `DualGAN` model and training: #cuda dual_gan = DualGAN(3,64,3) learn = cycle_learner(dls, dual_gan, opt_func=RMSProp) learn.fit_flat_lin(100,100,2e-4) # Additionally, we provide metrics for quantitative evaluation of the models, as well as experiment tracking with Weights and Biases. Check the [documentation](https://tmabraham.github.io/UPIT) for more information! # ## Citing UPIT # If you use UPIT in your research please use the following BibTeX entry: # # ``` # @Misc{UPIT, # author = {<NAME>}, # title = {UPIT - A fastai/PyTorch package for unpaired image-to-image translation.}, # howpublished = {Github}, # year = {2021}, # url = {https://github.com/tmabraham/UPIT} # } # ```
nbs/index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import pandas as pd import matplotlib.pyplot as plt import nose.tools # Write your imports here import numpy as np from collections import OrderedDict # # Data Visualization and Exploratory Data Analysis Lab # ## Visualizing and exploring data. Data mining process as a whole # ### Problem 1. Read the dataset (1 point) # You'll be exploring data about people's income. Your task is to understand whether there are significant differences in the lifestyle of lower- vs. higher-income groups. # # Read the dataset located [here](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data). The information file is [here](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names). Save it into the variable `income_data`. Change the column names to what you like. The last column is related to the income class. # # Get acquainted with the information file well before starting work. # # You don't need to clean the dataset. 
def get_num_people_with_high_income(dataframe):
    """Return how many rows in `dataframe` have an income class over 50K USD/year.

    Matches any `income_class` value containing the substring ">50K"
    (the raw dataset labels carry leading whitespace, e.g. " >50K").
    """
    is_high_income = dataframe["income_class"].str.contains(">50K")
    return int(is_high_income.sum())
def get_num_people_with_higher_gain(dataframe, threshold_gain):
    """Return the number of people whose capital gain is >= `threshold_gain` (USD)."""
    at_or_above = dataframe["capital_gain"] >= threshold_gain
    return int(at_or_above.sum())
# + deletable=false nbgrader={"checksum": "3f7c991513f38ee3e685493f9196a269", "grade": false, "grade_id": "cell-1e91969a128f0bd6", "locked": false, "schema_version": 1, "solution": true} data = income_data.marital_status.unique() print(data) never_married_peoples = income_data[income_data["marital_status"].str.contains("Never-married")].shape[0] married_civ_peoples = income_data[income_data["marital_status"].str.contains("Married-civ-spouse")].shape[0] divorced_peoples = income_data[income_data["marital_status"].str.contains("Divorced")].shape[0] married_spouse_peoples = income_data[income_data["marital_status"].str.contains("Married-spouse-absent")].shape[0] separated_peoples = income_data[income_data["marital_status"].str.contains("Separated")].shape[0] married_af_peoples = income_data[income_data["marital_status"].str.contains("Married-AF-spouse")].shape[0] widowed_peoples = income_data[income_data["marital_status"].str.contains("Widowed")].shape[0] plt.pie(labels=data,x = [never_married_peoples,married_civ_peoples,divorced_peoples,married_spouse_peoples,separated_peoples, married_af_peoples,widowed_peoples],autopct='%1.1f%%') plt.title("Number of people by marital status") plt.gca().set_aspect("equal") plt.show() # - # ### Problem 4. Marital status (2 points) # Which type of marital status is the most prominent (i.e. has the most people)? How many are there? Write a function that **calculates and returns the two answers**. 
def most_prominent_marital_status(dataframe):
    """Return (status, count) for the marital status with the most people.

    Bug fix: the original spelled one label "Divorsed", so the
    `str.contains` match for divorced people always counted zero rows.

    Substring matching via `str.contains` is kept deliberately, because the
    raw dataset values carry leading whitespace (e.g. " Divorced").
    """
    labels = [
        "Never-married",
        "Married-civ-spouse",
        "Divorced",
        "Married-spouse-absent",
        "Separated",
        "Married-AF-spouse",
        "Widowed",
    ]
    status = ""
    num_people = 0
    for label in labels:
        count = dataframe[dataframe["marital_status"].str.contains(label)].shape[0]
        if count > num_people:
            status = label
            num_people = count
    return (status, num_people)
def get_num_people_by_age_category(dataframe):
    """Split people into three age groups and return their counts.

    Groups: young (age <= 30), middle-aged (30 < age <= 60), old (age > 60).
    Returns a tuple (young, middle_aged, old).
    """
    ages = dataframe["age"]
    young = int((ages <= 30).sum())
    middle_aged = int(((ages > 30) & (ages <= 60)).sum())
    old = int((ages > 60).sum())
    return young, middle_aged, old
def get_second_highest_num_people(dataframe):
    """Return (count, country) for the second-most-common native country.

    Fix: the original read `countries[1]` on a string-indexed Series,
    which relies on the deprecated integer-position fallback (removed in
    pandas 3.0). Positional access is done explicitly with `.iloc`.
    `value_counts()` already sorts in descending order, so the extra
    `sort_values` call was dropped as well.
    """
    counts = dataframe["native_country"].value_counts()
    num_people = int(counts.iloc[1])
    country = counts.index[1]
    return num_people, country
If yours do not match, feel free to edit the chart generation code below.
03-Data-Visualization-Exploratory-Data-Analysis/Data Visualization and EDA Lab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MNIST Digits - Classification Using SVM # # In this notebook, we'll explore the popular MNIST dataset and build an SVM model to classify handwritten digits. <a href='http://yann.lecun.com/exdb/mnist/'>Here is a detailed description of the dataset.</a> # # We'll divide the analysis into the following parts: # - Data understanding and cleaning # - Data preparation for model building # - Building an SVM model - hyperparameter tuning, model evaluation etc. # # ## Data Understanding and Cleaning # # Let's understand the dataset and see if it needs some cleaning etc. # !conda install -c conda-forge opencv --yes import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn import linear_model from sklearn.model_selection import train_test_split import gc import cv2 # read the dataset digits = pd.read_csv("train.csv") digits.info() # head digits.head() four = digits.iloc[3, 1:] four.shape four = four.values.reshape(28, 28) plt.imshow(four, cmap='gray') # #### Side note: Indexing Recall #### # `list = [0, 4, 2, 10, 22, 101, 10]` <br> # `indices = [0, 1, 2, 3, ..., ]` <br> # `reverse = [-n -3 -2 -1]` <br> # visualise the array print(four[5:-5, 5:-5]) # Summarise the counts of 'label' to see how many labels of each digit are present digits.label.astype('category').value_counts() # Summarise count in terms of percentage 100*(round(digits.label.astype('category').value_counts()/len(digits.index), 4)) # Thus, each digit/label has an approximately 9%-11% fraction in the dataset and the **dataset is balanced**. This is an important factor in considering the choices of models to be used, especially SVM, since **SVMs rarely perform well on imbalanced data** (think about why that might be the case). 
# # Let's quickly look at missing values, if any. # missing values - there are none digits.isnull().sum() # Also, let's look at the average values of each column, since we'll need to do some rescaling in case the ranges vary too much. # average values/distributions of features description = digits.describe() description # You can see that the max value of the mean and maximum values of some features (pixels) is 139, 255 etc., whereas most features lie in much lower ranges (look at description of pixel 0, pixel 1 etc. above). # # Thus, it seems like a good idea to rescale the features. # ## Data Preparation for Model Building # # Let's now prepare the dataset for building the model. We'll only use a fraction of the data else training will take a long time. # # + # Creating training and test sets # Splitting the data into train and test X = digits.iloc[:, 1:] Y = digits.iloc[:, 0] # Rescaling the features from sklearn.preprocessing import scale X = scale(X) # train test split with train_size=10% and test size=90% x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.10, random_state=101) print(x_train.shape) print(x_test.shape) print(y_train.shape) print(y_test.shape) # + # delete test set from memory, to avoid a memory error # we'll anyway use CV to evaluate the model, and can use the separate test.csv file as well # to evaluate the model finally # del x_test # del y_test # - # ## Model Building # # Let's now build the model and tune the hyperparameters. Let's start with a **linear model** first. # # ### Linear SVM # # Let's first try building a linear SVM model (i.e. a linear kernel). 
# + from sklearn import svm from sklearn import metrics # an initial SVM model with linear kernel svm_linear = svm.SVC(kernel='linear') # fit svm_linear.fit(x_train, y_train) # - # predict predictions = svm_linear.predict(x_test) predictions[:10] # evaluation: accuracy # C(i, j) represents the number of points known to be in class i # but predicted to be in class j confusion = metrics.confusion_matrix(y_true = y_test, y_pred = predictions) confusion # measure accuracy metrics.accuracy_score(y_true=y_test, y_pred=predictions) # class-wise accuracy class_wise = metrics.classification_report(y_true=y_test, y_pred=predictions) print(class_wise) # run gc.collect() (garbage collect) to free up memory # else, since the dataset is large and SVM is computationally heavy, # it'll throw a memory error while training gc.collect() # ### Non-Linear SVM # # Let's now try a non-linear model with the RBF kernel. # rbf kernel with other hyperparameters kept to default svm_rbf = svm.SVC(kernel='rbf') svm_rbf.fit(x_train, y_train) # + # predict predictions = svm_rbf.predict(x_test) # accuracy print(metrics.accuracy_score(y_true=y_test, y_pred=predictions)) # - # The accuracy achieved with a non-linear kernel is slightly higher than a linear one. Let's now do a grid search CV to tune the hyperparameters C and gamma. 
#
# ### Grid Search Cross-Validation

# +
# conduct (grid search) cross-validation to find the optimal values
# of cost C and the choice of kernel

from sklearn.model_selection import GridSearchCV

# hyperparameter grid: cost C and RBF kernel width gamma
parameters = {'C':[1, 10, 100], 'gamma': [1e-2, 1e-3, 1e-4]}

# instantiate a model
svc_grid_search = svm.SVC(kernel="rbf")

# create a classifier to perform grid search
# return_train_score=True is required: scikit-learn (>= 0.21) no longer
# computes train scores by default, and the plots below read
# 'mean_train_score' from cv_results_ (KeyError without it)
clf = GridSearchCV(svc_grid_search, param_grid=parameters,
                   scoring='accuracy', return_train_score=True)

# fit
clf.fit(x_train, y_train)
# -

# results
cv_results = pd.DataFrame(clf.cv_results_)
cv_results

# +
# converting C to numeric type for plotting on x-axis
cv_results['param_C'] = cv_results['param_C'].astype('int')

# plotting: one subplot per gamma, train vs test accuracy across C
# (the three original subplots were identical except for gamma, so loop)
plt.figure(figsize=(16,6))

for i, gamma in enumerate([0.01, 0.001, 0.0001]):
    plt.subplot(1, 3, i + 1)
    subset = cv_results[cv_results['param_gamma'] == gamma]
    plt.plot(subset["param_C"], subset["mean_test_score"])
    plt.plot(subset["param_C"], subset["mean_train_score"])
    plt.xlabel('C')
    plt.ylabel('Accuracy')
    plt.title("Gamma={}".format(gamma))
    plt.ylim([0.60, 1])
    plt.legend(['test accuracy', 'train accuracy'], loc='lower right')
    plt.xscale('log')

plt.show()
# -

# From the plot above, we can observe that (from higher to lower gamma / left to right):
# - At very high gamma (0.01), the model is achieving 
100% accuracy on the training data, though the test score is quite low (<75%). Thus, the model is overfitting. # # - At gamma=0.001, the training and test scores are comparable at around C=1, though the model starts to overfit at higher values of C # # - At gamma=0.0001, the model does not overfit till C=10 but starts showing signs at C=100. Also, the training and test scores are slightly lower than at gamma=0.001. # # Thus, it seems that the best combination is gamma=0.001 and C=1 (the plot in the middle), which gives the highest test accuracy (~92%) while avoiding overfitting. # # Let's now build the final model and see the performance on test data. # # ### Final Model # # Let's now build the final model with chosen hyperparameters. # + # optimal hyperparameters best_C = 1 best_gamma = 0.001 # model svm_final = svm.SVC(kernel='rbf', C=best_C, gamma=best_gamma) # fit svm_final.fit(x_train, y_train) # - # predict predictions = svm_final.predict(x_test) # + # evaluation: CM confusion = metrics.confusion_matrix(y_true = y_test, y_pred = predictions) # measure accuracy test_accuracy = metrics.accuracy_score(y_true=y_test, y_pred=predictions) print(test_accuracy, "\n") print(confusion) # - # ### Conclusion # # The final accuracy on test data is approx. 92%. Note that this can be significantly increased by using the entire training data of 42,000 images (we have used just 10% of that!). # #
8. Machine Learning-2/3. Support Vector Machines/5. Handwritten Digits recognition Assignment/MNIST+-+SVM+Assignment+Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
pd.options.display.float_format = '{:,.3f}'.format
pd.options.mode.chained_assignment = None
import seaborn as sns
# %matplotlib inline
sns.set(style="whitegrid", font_scale = 1.5)
sns.set_context(rc={"lines.markersize": 10})
import matplotlib
import pickle as pkl
from matplotlib import pyplot as plt
new_plot_col=list(range(1800,2010,20))
# import pearsonr once, from the public scipy.stats namespace
# (scipy.stats.stats is a deprecated private alias; the original also
# imported it twice)
from scipy.stats import pearsonr
from functools import reduce
# -

# NOTE(review): despite the .pkl extension this is read as a tab-separated
# text file — confirm the file really is a TSV
features=pd.read_csv("../../Compounding/coha_compounds/features_CompoundAware_10_20_300.pkl", sep="\t")

# bucket the mean compositionality judgement into three coarse ratings
features['compound_rating']=''
features.loc[features.compound_mean>=4,'compound_rating']='high'
features.loc[(features.compound_mean>=2) & (features.compound_mean<4),'compound_rating']='med'
features.loc[features.compound_mean<2,'compound_rating']='low'
features.compound_rating.value_counts()

features

# column groups, one per association/similarity measure
to_add_cols=['compound_mean','compound_rating']
lmi_cols = [col for col in features.columns if 'local_mi' in col]
llr_cols = [col for col in features.columns if 'log_ratio' in col]
ppmi_cols = [col for col in features.columns if 'ppmi' in col]
sim_bw_constituents_cols = [col for col in features.columns if 'sim_bw_constituents' in col]
sim_with_head_cols = [col for col in features.columns if 'sim_with_head' in col]
sim_with_modifier_cols = [col for col in features.columns if 'sim_with_modifier' in col]
#selected_compounds=['health insurance','silver bullet','melting pot','gold mine','swimming pool','bank account']
#assert (len(lmi_cols)+len(llr_cols)+len(ppmi_cols)+len(sim_bw_constituents_cols)+len(sim_with_head_cols)+len(sim_with_modifier_cols))==(len(features.columns))
lmi_df=features.set_index(["modifier", "head"])[lmi_cols+to_add_cols] #lmi_df=lmi_df.pct_change(axis='columns')*100 #lmi_df.columns=new_plot_col lmi_df.reset_index(inplace=True) lmi_df['compound']=lmi_df['modifier'].str[:-5]+' '+lmi_df['head'].str[:-5] lmi_df.drop(['modifier','head'],axis=1,inplace=True) #plot_lmi_df=plot_lmi_df.loc[plot_lmi_df['compound'].isin(selected_compounds)] lmi_df.set_index('compound',inplace=True) #plot_lmi_df=plot_lmi_df[plot_lmi_df.columns[10:]] old_vars=[x.split('_')[0] for x in lmi_df.columns if 'compound' not in x] lmi_df.columns=old_vars+to_add_cols #plot_lmi_df.columns #lmi_df.info() lmi_df.reset_index(inplace=True) plot_lmi_df=pd.melt(lmi_df,id_vars=['compound','compound_rating','compound_mean'], value_vars=old_vars) lmi_df plt.figure(figsize=(10,10)) g=sns.lineplot(x="variable", y="value", hue="compound_rating",style="compound_rating",data=plot_lmi_df,palette="Dark2", marker='o',linewidth=1,dashes=False,markers=True)#,err_style="bars", ci=68) g.set_xlabel("Score", fontsize=20) g.set_ylabel("LMI", fontsize=20) g.legend(title='Compositionality Rating', loc='upper left', labels=["high", "low", "med"], fontsize=20, title_fontsize=20) #g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1) #g.set_xlim(1799, 2000) #plt.savefig('LMI_merged_new.png') llr_df=features.set_index(["modifier", "head"])[llr_cols+to_add_cols] #lmi_df=lmi_df.pct_change(axis='columns')*100 #lmi_df.columns=new_plot_col llr_df.reset_index(inplace=True) llr_df['compound']=llr_df['modifier'].str[:-5]+' '+llr_df['head'].str[:-5] llr_df.drop(['modifier','head'],axis=1,inplace=True) #plot_lmi_df=plot_lmi_df.loc[plot_lmi_df['compound'].isin(selected_compounds)] llr_df.set_index('compound',inplace=True) #plot_lmi_df=plot_lmi_df[plot_lmi_df.columns[10:]] old_vars=[x.split('_')[0] for x in llr_df.columns if 'compound' not in x] llr_df.columns=old_vars+to_add_cols #plot_lmi_df.columns #lmi_df.info() llr_df.reset_index(inplace=True) 
plot_llr_df=pd.melt(llr_df,id_vars=['compound','compound_rating','compound_mean'], value_vars=old_vars) llr_df.head() plt.figure(figsize=(10,10)) g=sns.lineplot(x="variable", y="value", hue="compound_rating",style="compound_rating",data=plot_llr_df,palette="Dark2", marker='o',linewidth=1,dashes=False)#,err_style="bars", ci=68) g.set_xlabel("Score", fontsize=20) g.set_ylabel("LLR", fontsize=20) g.legend(title='Compositionality Rating', loc='upper left', labels=["high", "low", "med"], fontsize=20, title_fontsize=20) #g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1) #g.set_xlim(1799, 2000) #plt.savefig('LLR_merged_new.png') ppmi_df=features.set_index(["modifier", "head"])[ppmi_cols+to_add_cols] #lmi_df=lmi_df.pct_change(axis='columns')*100 #lmi_df.columns=new_plot_col ppmi_df.reset_index(inplace=True) ppmi_df['compound']=ppmi_df['modifier'].str[:-5]+' '+ppmi_df['head'].str[:-5] ppmi_df.drop(['modifier','head'],axis=1,inplace=True) #plot_lmi_df=plot_lmi_df.loc[plot_lmi_df['compound'].isin(selected_compounds)] ppmi_df.set_index('compound',inplace=True) #plot_lmi_df=plot_lmi_df[plot_lmi_df.columns[10:]] old_vars=[x.split('_')[0] for x in ppmi_df.columns if 'compound' not in x] ppmi_df.columns=old_vars+to_add_cols #plot_lmi_df.columns #lmi_df.info() ppmi_df.reset_index(inplace=True) plot_ppmi_df=pd.melt(ppmi_df,id_vars=['compound','compound_rating','compound_mean'], value_vars=old_vars) ppmi_df.head() plt.figure(figsize=(10,10)) g=sns.lineplot(x="variable", y="value", hue="compound_rating",style="compound_rating",data=plot_ppmi_df,palette="Dark2", marker='o',linewidth=1,dashes=False,markers=True)#,err_style="bars", ci=68) g.set_xlabel("Score", fontsize=20) g.set_ylabel("PPMI", fontsize=20) g.legend(title='Compositionality Rating', loc='upper left', labels=["high", "low", "med"], fontsize=20, title_fontsize=20) #g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1) #g.set_xlim(1799, 2000) #plt.savefig('PPMI_merged_new.png') 
sim_bw_constituents_df=features.set_index(["modifier", "head"])[sim_bw_constituents_cols+to_add_cols] #lmi_df=lmi_df.pct_change(axis='columns')*100 #lmi_df.columns=new_plot_col sim_bw_constituents_df.reset_index(inplace=True) sim_bw_constituents_df['compound']=sim_bw_constituents_df['modifier'].str[:-5]+' '+sim_bw_constituents_df['head'].str[:-5] sim_bw_constituents_df.drop(['modifier','head'],axis=1,inplace=True) #plot_lmi_df=plot_lmi_df.loc[plot_lmi_df['compound'].isin(selected_compounds)] sim_bw_constituents_df.set_index('compound',inplace=True) #plot_lmi_df=plot_lmi_df[plot_lmi_df.columns[10:]] old_vars=[x.split('_')[0] for x in sim_bw_constituents_df.columns if 'compound' not in x] sim_bw_constituents_df.columns=old_vars+to_add_cols #plot_lmi_df.columns #lmi_df.info() sim_bw_constituents_df.reset_index(inplace=True) plot_sim_bw_constituents_df=pd.melt(sim_bw_constituents_df,id_vars=['compound','compound_rating','compound_mean'], value_vars=old_vars) sim_bw_constituents_df.head() plt.figure(figsize=(10,10)) g=sns.lineplot(x="variable", y="value", hue="compound_rating",style="compound_rating",data=plot_sim_bw_constituents_df,palette="Dark2", marker='o',linewidth=1,dashes=False,markers=True)#,err_style="bars", ci=68) g.set_xlabel("Score", fontsize=20) g.set_ylabel("sim-bw-constituents", fontsize=20) g.legend(title='Compositionality Rating', loc='upper left', labels=["high", "low", "med"], fontsize=20, title_fontsize=20) #g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1) #g.set_xlim(1799, 2000) #plt.savefig('sim-bw-const_merged_new.png') sim_with_head_df=features.set_index(["modifier", "head"])[sim_with_head_cols+to_add_cols] #lmi_df=lmi_df.pct_change(axis='columns')*100 #lmi_df.columns=new_plot_col sim_with_head_df.reset_index(inplace=True) sim_with_head_df['compound']=sim_with_head_df['modifier'].str[:-5]+' '+sim_with_head_df['head'].str[:-5] sim_with_head_df.drop(['modifier','head'],axis=1,inplace=True) 
#plot_lmi_df=plot_lmi_df.loc[plot_lmi_df['compound'].isin(selected_compounds)] sim_with_head_df.set_index('compound',inplace=True) #plot_lmi_df=plot_lmi_df[plot_lmi_df.columns[10:]] old_vars=[x.split('_')[0] for x in sim_with_head_df.columns if 'compound' not in x] sim_with_head_df.columns=old_vars+to_add_cols #plot_lmi_df.columns #lmi_df.info() sim_with_head_df.reset_index(inplace=True) plot_sim_with_head_df=pd.melt(sim_with_head_df,id_vars=['compound','compound_rating','compound_mean'], value_vars=old_vars) sim_with_head_df.head() plt.figure(figsize=(10,10)) g=sns.lineplot(x="variable", y="value", hue="compound_rating",style="compound_rating",data=plot_sim_with_head_df,palette="Dark2", marker='o',linewidth=1,dashes=False,markers=True)#,err_style="bars", ci=68) g.set_xlabel("Score", fontsize=20) g.set_ylabel("sim-with-head", fontsize=20) g.legend(title='Compositionality Rating', loc='upper left', labels=["high", "low", "med"], fontsize=20, title_fontsize=20) #g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1) #g.set_xlim(1799, 2000) #plt.savefig('sim-with-head_merged_new.png') sim_with_modifier_df=features.set_index(["modifier", "head"])[sim_with_modifier_cols+to_add_cols] #lmi_df=lmi_df.pct_change(axis='columns')*100 #lmi_df.columns=new_plot_col sim_with_modifier_df.reset_index(inplace=True) sim_with_modifier_df['compound']=sim_with_modifier_df['modifier'].str[:-5]+' '+sim_with_modifier_df['head'].str[:-5] sim_with_modifier_df.drop(['modifier','head'],axis=1,inplace=True) #plot_lmi_df=plot_lmi_df.loc[plot_lmi_df['compound'].isin(selected_compounds)] sim_with_modifier_df.set_index('compound',inplace=True) #plot_lmi_df=plot_lmi_df[plot_lmi_df.columns[10:]] old_vars=[x.split('_')[0] for x in sim_with_modifier_df.columns if 'compound' not in x] sim_with_modifier_df.columns=old_vars+to_add_cols #plot_lmi_df.columns #lmi_df.info() sim_with_modifier_df.reset_index(inplace=True) 
# long-format table of the sim-with-modifier measure for plotting
plot_sim_with_modifier_df=pd.melt(sim_with_modifier_df,id_vars=['compound','compound_rating','compound_mean'], value_vars=old_vars)
sim_with_modifier_df.head()

plt.figure(figsize=(10,10))
g=sns.lineplot(x="variable", y="value", hue="compound_rating",style="compound_rating",data=plot_sim_with_modifier_df,palette="Dark2", marker='o',linewidth=1,dashes=False,markers=True)#,err_style="bars", ci=68)
g.set_xlabel("Score", fontsize=20)
g.set_ylabel("sim-with-mod", fontsize=20)
g.legend(title='Compositionality Rating', loc='upper left', labels=["high", "low", "med"], fontsize=20, title_fontsize=20)
#g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)
#g.set_xlim(1799, 2000)
#plt.savefig('sim-with-mod_merged_new.png')

# per-time-slice compound vectors (compound-aware embeddings)
compounds=pd.read_pickle("../../Compounding/coha_compounds/compounds_CompoundAware_20_20_300.pkl")
compounds.head()

# keep only compounds that have a rating; after set_index the index levels
# are (modifier, head, time, compound_rating)
merge_df_aware=features[['modifier','head','compound_rating']].merge(compounds.drop(['common'],axis=1).reset_index(),on=['modifier','head'],how='inner')
merge_df_aware.set_index(["modifier", "head",'time','compound_rating'], inplace = True)
merge_df_aware.head()

def cosine(row1,row2):
    """Cosine similarity between two embedding rows of the same compound.

    ``row.name`` is the index tuple (modifier, head, time, compound_rating);
    ``name[:-2]`` drops the time and rating levels, so rows belonging to
    different compounds return NaN instead of a cross-compound similarity.
    """
    if row1.name[:-2]!=row2.name[:-2]:
        return np.nan
    else:
        denom1=np.sqrt(np.sum(np.square(row1)))
        denom2=np.sqrt(np.sum(np.square(row2)))
        num=np.sum(row1*row2)
        return num/(denom1*denom2)

# sanity check on one adjacent pair of rows
cosine(merge_df_aware.iloc[2-1],merge_df_aware.iloc[2])

# cosine between each row and the previous one; the first row has no
# predecessor, hence the leading NaN
# NOTE(review): assumes rows of the same compound are adjacent and
# time-ordered after the merge — confirm the ordering upstream
cosine_compound_agnostic=[np.nan]
for i in range(1,merge_df_aware.shape[0]):
    cosine_compound_agnostic.append(cosine(merge_df_aware.iloc[i-1],merge_df_aware.iloc[i]))

merge_df_aware['compound_cosine']=cosine_compound_agnostic

# wide table: one row per compound, one column per time slice
compound_df=pd.pivot_table(merge_df_aware.reset_index(), values = 'compound_cosine', index=['modifier','head','compound_rating'], columns = 'time')
compound_df

compound_df

# build a readable 'compound' label; str[:-5] strips a 5-character suffix
# from modifier and head (presumably a POS tag such as '_noun' — verify)
compound_df.reset_index(inplace=True)
compound_df['compound']=compound_df['modifier'].str[:-5]+' '+compound_df['head'].str[:-5]
compound_df.drop(['modifier','head'],axis=1,inplace=True)
#plot_compound_df=plot_compound_df.loc[plot_compound_df['compound'].isin(selected_compounds)] #compound_df.set_index('compound',inplace=True) #plot_compound_df=plot_compound_df[plot_compound_df.columns[10:]] plot_compound_df=pd.melt(compound_df,id_vars=['compound','compound_rating'], value_vars=list(range(1820,2009,20))) plot_compound_df plt.figure(figsize=(15,15)) g=sns.lineplot(x="time", y="value", hue="compound_rating",style="compound_rating",data=plot_compound_df,palette="Dark2",linewidth=1,dashes=False)#,err_style="bars", ci=68) g.set_xlabel("Time", fontsize=20) g.set_ylabel("Compound", fontsize=20) #g.legend(title='Compound Rating', loc='upper left', labels=["high", "low", "med"], fontsize=20, title_fontsize=20) #g.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1) g.set_xlim(1900, 2009) heads=pd.read_pickle("../../datasets/constituents_CompoundAgnostic_20_20_300.pkl") heads.index.set_names('head',level=0,inplace=True) heads.head() merge_df_aware_heads=features[['modifier','head']].merge(heads.reset_index(),on=['head'],how='inner') merge_df_aware_heads.set_index(["modifier", "head",'time'], inplace = True) merge_df_aware_heads.head() cosine_head_agnostic=[np.nan] for i in range(1,merge_df_aware_heads.shape[0]): cosine_head_agnostic.append(cosine(merge_df_aware_heads.iloc[i-1],merge_df_aware_heads.iloc[i])) merge_df_aware_heads['head_cosine']=cosine_head_agnostic head_df=pd.pivot_table(merge_df_aware_heads.reset_index(), values = 'head_cosine', index=['modifier','head'], columns = 'time') head_df.drop([1800],axis=1,inplace=True) head_df plot_head_df=head_df.reset_index() plot_head_df['compound']=plot_head_df['modifier'].str[:-5]+' '+plot_head_df['head'].str[:-5] plot_head_df.drop(['modifier','head'],axis=1,inplace=True) plot_head_df.set_index('compound',inplace=True) plot_head_df compound_df # + plt.figure(figsize=(10,10)) for i in range(0,compound_df.drop(['time','compound_rating'],axis=1).shape[0]): plt.figure() 
g=sns.lineplot(data=compound_df.iloc[i].to_frame(), palette="Paired", marker='o',linewidth=1,dashes=False) g.legend(loc='center left', bbox_to_anchor=(1.25, 0.5), ncol=1) g.legend(title=compound_df.iloc[i].name, title_fontsize=20) #g.set(ylim=(-150, 150)) # - plt.figure(figsize=(10,10)) g=sns.lineplot(data=plot_lmi_df.sample(n=10,random_state=1001).transpose(), palette="Paired", marker='o',linewidth=1,dashes=False) g.legend(loc='center left', bbox_to_anchor=(1.25, 0.5), ncol=1) #g.set(ylim=(-150, 150))
compositionality_over_time/Notebooks/coha_vis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Brief Honor Code**. Do the homework on your own. You may discuss ideas with your classmates, but DO NOT copy the solutions from someone else or the Internet. If stuck, discuss with TA. # **Note**: The expected figures are provided so you can check your solutions. # **1**. (20 points) # # Find the gradient and Hessian for the following equation # # $$ # f(x, y) = 1 + 2x + 3y + 4x^2 + 2xy + y^2 # $$ # # - Plot the contours of this function using `matplotlib` in the box $-5 \le x \le 5$ and $-5 \le y \le 5$ using a $100 \times 100$ grid. # - Then plot the gradient vectors using the `quiver` function on top of the contour plot using a $10 \times 10$ grid. Are the gradients orthogonal to the contours? # # Hint: Use `numpy.meshgrid`, `matplotlib.contour` and `matplotllib.quiver`. # # ![img](figs/7_1.png) # + # - # **2**. (30 points) # # This exercise is about using Newton's method to find the cube roots of unity - find $z$ such that $z^3 = 1$. From the fundamental theorem of algebra, we know there must be exactly 3 complex roots since this is a degree 3 polynomial. # # We start with Euler's equation # $$ # e^{ix} = \cos x + i \sin x # $$ # # Raising $e^{ix}$ to the $n$th power where $n$ is an integer, we get from Euler's formula with $nx$ substituting for $x$ # $$ # (e^{ix})^n = e^{i(nx)} = \cos nx + i \sin nx # $$ # # Whenever $nx$ is an integer multiple of $2\pi$, we have # $$ # \cos nx + i \sin nx = 1 # $$ # # So # $$ # e^{2\pi i \frac{k}{n}} # $$ # is a root of 1 whenever $k/n = 0, 1, 2, \ldots$. # # So the cube roots of unity are $1, e^{2\pi i/3}, e^{4\pi i/3}$. 
#
# ![img](figs/7_2.png)
#
# While we can do this analytically, the idea is to use Newton's method to find these roots, and in the process, discover some rather perplexing behavior of Newton's method.
#
# Newton's method for functions of complex variables - stability and basins of attraction. (30 points)
#
# 1. Write a function with the following function signature `newton(z, f, fprime, max_iter=100, tol=1e-6)` where
# - `z` is a starting value (a complex number e.g. ` 3 + 4j`)
# - `f` is a function of `z`
# - `fprime` is the derivative of `f`
# The function will run until either max_iter is reached or the absolute value of the Newton step is less than tol. In either case, the function should return the number of iterations taken and the final value of `z` as a tuple (`i`, `z`).
#
# 2. Define the function `f` and `fprime` that will result in Newton's method finding the cube roots of 1. Find 3 starting points that will give different roots, and print both the start and end points.
#
# Write the following two plotting functions to see some (pretty) aspects of Newton's algorithm in the complex plane.
#
# 3. The first function `plot_newton_iters(f, fprime, n=200, extent=[-1,1,-1,1], cmap='hsv')` calculates and stores the number of iterations taken for convergence (or max_iter) for each point in a 2D array. The 2D array limits are given by `extent` - for example, when `extent = [-1,1,-1,1]` the corners of the plot are `(-1, -i), (1, -i), (1, i), (-1, i)`. There are `n` grid points in both the real and imaginary axes. The argument `cmap` specifies the color map to use - the suggested defaults are fine. Finally plot the image using `plt.imshow` - make sure the axis ticks are correctly scaled. Make a plot for the cube roots of 1.
#
# ![img](figs/7_2A.png)
#
# 4. The second function `plot_newton_basins(f, fprime, n=200, extent=[-1,1,-1,1], cmap='jet')` has the same arguments, but this time the grid stores the identity of the root that the starting point converged to. 
Make a plot for the cube roots of 1 - since there are 3 roots, there should be only 3 colors in the plot. # # ![img](figs/7_2B.png) # + # - # **3**. (20 points) # # Consider the following function on $\mathbb{R}^2$: # # $$ # f(x_1,x_2) = -x_1x_2e^{-\frac{(x_1^2+x_2^2)}{2}} # $$ # # - Find the minimum under the constraint # $$g(x) = x_1^2+x_2^2 \leq 10$$ # and # $$h(x) = 2x_1 + 3x_2 = 5$$ using `scipy.optimize.minimize`. # - Plot the function contours using `matplotlib`, showing the constraints $g$ and $h$ and indicate the constrained minimum with an `X`. # # ![img](figs/7_3.png) # + # - # **4** (30 points) # # Find solutions to $x^3 + 4x^2 -3 = x$. # # - Write a function to find brackets, assuming roots are always at least 1 unit apart and that the roots lie between -10 and 10 # - For each bracket, find the enclosed root using # - a bisection method # - Newton-Raphson (no guarantee to stay within brackets) # - Use the end points of the bracket as starting points for the bisection methods and the midpoint for Newton-Raphson. # - Use the companion matrix and characteristic polynomial to find the solutions # - Plot the function and its roots (marked with a circle) in a window just large enough to contain all roots. # # Use a tolerance of 1e-6. # # ![img](figs/7_4.png) # +
labs/Lab07.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Preprocessing #Standard Scaler # xi - mean(x)/stddev(x) #used if data is normalized import numpy as np import pandas as pd import matplotlib.pyplot as plt df = pd.DataFrame({ 'x1' :np.random.normal(0,2,10000), 'x2':np.random.normal(5,3,10000), 'x3':np.random.normal(-5,5,10000) }) # - df.plot.kde() # + #Standard Scaler from sklearn.preprocessing import StandardScaler SS = StandardScaler() df_ss = SS.fit_transform(df) df_ss # - df = pd.DataFrame(df_ss,columns=['x1','x2','x3']) df df.plot.kde() # + #Min Max scaler # xi-mean(x) / max(x)-min(x) # output will always will be 0 & 1 df = pd.DataFrame({ 'x1':np.random.chisquare(8,1000), 'x2':np.random.beta(8,2,1000)*40, 'x3':np.random.normal(50,3,1000) }) df # - df.plot.kde() from sklearn.preprocessing import MinMaxScaler mm = MinMaxScaler() df_mm = mm.fit_transform(df) df_mm df = pd.DataFrame(df_mm,columns=['x1','x2','x3']) df df.plot.kde() # + # Robust Scaler # xi - Q1(x) / Q3(x) - Q1(x) //Quartile Q1 =25%, Q3=75% #Used when data has Outliers df = pd.DataFrame({ 'x1':np.concatenate([np.random.normal(20,1,1000),np.random.normal(1,1,25)]), 'x2':np.concatenate([np.random.normal(30,1,1000),np.random.normal(50,1,25)]) }) df # - df.plot.kde() from sklearn.preprocessing import RobustScaler rs = RobustScaler() df_rs = rs.fit_transform(df) df_rs df = pd.DataFrame(df_rs,columns=['x1','x2']) df.plot.kde() # + #Normalizer # xi/sqrt(xi**2+yi**2+zi**2) df = pd.DataFrame({ 'x1': np.random.randint(-100,100,1000).astype(float), 'y1': np.random.randint(-80,80,1000).astype(float), 'z1': np.random.randint(-150,150,1000).astype(float), }) df # + from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = plt.axes(projection='3d') ax.scatter3D(df.x1,df.y1,df.z1) # - from sklearn.preprocessing import Normalizer n = 
Normalizer()
df_n =n.fit_transform(df)
df_n

# after normalization every point lies on the unit sphere
df = pd.DataFrame(df_n,columns=['x1','y1','z1'])
ax = plt.axes(projection='3d')
ax.scatter3D(df.x1,df.y1,df.z1)

# +
# Binarizer
# thresholds each value to 0 or 1 (default threshold 0), as used for
# Bernoulli-distributed inputs, e.g. Bernoulli Naive Bayes
X = np.array([
    [1,-1,2],
    [5,6,8],
    [10,25.6,30.5]
])
X
# -

from sklearn.preprocessing import Binarizer
b = Binarizer()
df = b.fit_transform(X)
df

# +
# Encoding Categorical values
# using LabelEncoder() and OneHotEncoder()
df = pd.DataFrame({
    'age': [35,25,40,55,60],
    'gender':['male','female','female','male','male']
})
df
# -

from sklearn.preprocessing import LabelEncoder,OneHotEncoder
le = LabelEncoder()
df['gender_tf'] = le.fit_transform(df.gender)
df

# one-hot expand the integer-encoded gender column
X = OneHotEncoder().fit_transform(df[['gender_tf']]).toarray()
#df['gender_tf'] = df1
X

# +
# Imputation
# fill in missing values
df = pd.DataFrame({
    'A':[1,2,3,4,np.nan,8],
    'B':[2,4,5,6,7,np.nan]
})
df
# -

from sklearn.impute import SimpleImputer
imp = SimpleImputer(strategy='mean',missing_values=np.nan)
imp.fit_transform(df)

# manual check: mean of column B's non-missing values
x = (2+4+5+6+7)/5
x

# +
# polynomial feature expansion
df = pd.DataFrame({
    'x':[1,2,3,4,5],
    'y':[3,4,5,6,7]
})
df
# -

from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=2)
poly.fit_transform(df)
# output columns for degree 2: 1, x, y, x**2, x*y, y**2

# +
# Custom Transformer
# a custom transformer can be created using FunctionTransformer
from sklearn.preprocessing import FunctionTransformer
# element-wise mapping applied by the transformer (mutates its input frame)
def mapping(x):
    x['age']=x['age']+2
    x['counter']=x['counter']*2
    return x
ct = FunctionTransformer(mapping,validate=False) # validate will take care of string data
df = pd.DataFrame({
    'age' :[33,45,67,78,90],
    'counter':[1,2,3,4,5]
})
df
# -

# apply the custom transformer
ct.transform(df)

# +
# Convert text to numbers for ML
# CountVectorizer
scentance = ['hi what are you doing','my name is chandrashekar','iam working on ML']
df = pd.DataFrame({
    'scentance':scentance
})
df

# +
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
cv.fit_transform(df.scentance).toarray() # - cv.vocabulary_ #count the number of times the word oocured # + cv = CountVectorizer(vocabulary=['you','iam','ml']) # only 3 columns will be ouput based on the words cv.fit_transform(df.scentance).toarray() # - cv = CountVectorizer(stop_words=['is','are']) cv.fit_transform(df.scentance).toarray() cv = CountVectorizer(ngram_range=[1,2]) cv.fit_transform(df.scentance).toarray() cv.vocabulary_ #TfIdVectorizer # words converted 0 and 1 df from sklearn.feature_extraction.text import TfidfVectorizer td = TfidfVectorizer(stop_words = 'english') td.fit_transform(df.scentance).toarray() td.get_feature_names() # + # Hashing Vectorizer #reduces no of columns from sklearn.feature_extraction.text import HashingVectorizer hv =HashingVectorizer(n_features=5) # creats only 5 columns hv.fit_transform(df.scentance).toarray() # + # Image processing using skimage from skimage.io import imread,imshow image = imread('girl.jpg') image.shape # - image[0] imshow(image) #from rgb to greyscale from skimage.color import rgb2gray rgb2gray(image).shape imshow(rgb2gray(image)) # + # resize image from skimage.transform import resize imshow(resize(image,(70,70))) # -
Preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.4 64-bit (''.venv'': venv)' # name: python3 # --- # + [markdown] colab_type="text" id="aBJaE--wKYOJ" # # Female headed households in South Africa # # In this notebook we will work on the data provided by Zindi during the competition **Women in Big Data South Africa - Women-Headed Households in South Africa**. We will create our own model to find out which features have an impact on the income of a female-headed household. # + colab={} colab_type="code" id="gV8Fpz7vKYOP" # import packages import pandas as pd import numpy as np from matplotlib import pyplot as plt import seaborn as sns from scipy.stats import boxcox import statsmodels.api as sm from sklearn.model_selection import train_test_split, KFold from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score # %matplotlib inline plt.rcParams['figure.figsize'] = [10, 6] # + [markdown] colab_type="text" id="g6T5r-RjKYON" # # Loading the data # We will load the provided csv file from Zindi. In this notebook we will only work with the train data (Train.csv), as the test data (Test.csv) has no target variable and we can't evaluate our predictions. Additionally we will also load the file with the descriptions of the features (descriptions.csv). 
# + colab={"base_uri": "https://localhost:8080/", "height": 309} colab_type="code" id="gfGrrqGlKYOV" outputId="c918b49a-67a7-4b1b-81bf-918684a2ca9b" # import data df = pd.read_csv('data/Train.csv') df.head() # - # get some general information about the dataset df.info() # get a statistical overview of the dataframe df.describe() # check for null values df.isnull().sum().sort_values(ascending=False) # check for columns with single values df.nunique().sort_values().head(10) # print the columns with single values for a visual check df[['dw_13', 'lan_13', 'dw_12', 'pw_08', 'pw_07']] # The columns ['dw_13', 'lan_13', 'dw_12', 'pw_08', 'pw_07'] have only 0 in them --> we could drop these features, Also we can drop some other non-relevant features (ward, coordinates, features not given in percentage, Nightlight). # + # drop the non-numerical features df = df.drop(['ward', 'ADM4_PCODE'], axis=1) # drop non-percentage features: (total_households, total_individuals, lat, lon, NL and all-Zero values) df = df.drop(['total_households', 'total_individuals', 'lat', 'lon', 'NL', 'dw_13', 'lan_13', 'dw_12', 'pw_08', 'pw_07'], axis=1) # - # check new dataframe after dropping the features with only 0 in them df.shape # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="ytHU7Sh2KYOc" outputId="4233dd3a-0f6b-41e9-d494-2b3d98f883bf" # load the description file and have a look at them var_desc = pd.read_csv('data/variable_descriptions.csv') pd.set_option('display.max_colwidth', 200) # So that we can see the full descriptions var_desc # + [markdown] colab_type="text" id="5chJlQBxKYOi" # # Quick EDA # Before we dive into modelling, let's take a quick look at the data and see what we have to work with. # + [markdown] colab_type="text" id="kO6UB7uQKYOj" # ## Looking at the target variable # The purpose of this project is to predict the percentage of households in a ward that are woman-headed and have an annual household income of <R19,600. 
To put that in context, that's less than 250USD per month. # + colab={"base_uri": "https://localhost:8080/", "height": 623} colab_type="code" id="fhCt8mEIKYOl" outputId="9fb84b2e-74d7-42fd-c87b-b8099e2798d8" # Plot the distribution ax = df['target'].plot(kind='hist') plt.xlabel('Percentage of households that are w-headed and have income under R19.6k/month'); # + [markdown] colab_type="text" id="aBJaE--wKYOJ" # # Baseline Model # # As a starter we will create a baseline model to get an idea how a more complex model could look like. # - # Get a very reduced dataframe for the baseline model # we choose the highly correlated variable school attendance = yes (psa_00) for our baseline model df_base = df[['psa_00', 'target']] df_base.head() # plot the baseline model df_base.plot(x='psa_00', y='target', kind='scatter', alpha=0.3); # + [markdown] colab_type="text" id="88Ek8kY9KYPg" # ## Modelling the baseline model # Before we go deeper, we will build a quick model FIRST, to see a baseline to compare to. So, in this section we'll try a simple linear model based on only one highly correlated feature. # + # Modelling by hand: regression line formula # as we can see from the scatter plot, a line would run through (0 | -5), we only need a slope # points would be: (0.2 | 10), (0.3 | 20) --> the slope is 100 # y = 100 * x -5 # put this line in the plot: x = df_base['psa_00'] y = 100 * x - 5 df_base.plot(x='psa_00', y='target', kind='scatter', alpha=0.3) plt.plot(x, y, '-r', label='y = 100 * x') plt.show(); # + [markdown] colab_type="text" id="wL09aomvQ-eC" # ### Evaluating Predictions # # We can evaluate our predictions with the "R-Squared Score", which is a measure of how well our model explains the variation in the target variable. The closer to 1, the better. 
# +
# try to calculate RMSE for this line:
from sklearn.metrics import mean_squared_error
import math

# RMSE of the hand-fitted line (y = 100 * x - 5) against the true target values
mse = mean_squared_error(df_base['target'], y)
rmse = math.sqrt(mse)
print("RMSE of baseline model:", round(rmse, 2))
# FIX: the relative error and the descriptive statistics must be computed on the
# actual target column, not on the predicted line values `y` as before — the old
# code printed the mean/std of the predictions while labelling them "target"
print("Error in % of baseline model:", round((rmse / np.average(df_base['target'])) * 100, 2))
print("Mean of the target:", round(np.average(df_base['target']), 2))
print("Standard deviation of the target:", round(np.std(df_base['target']), 2))
# -

# # Complex model
# In order to model the complex model we consider all the remaining 51 features,

# +
# Train test split with same random seed
from sklearn.model_selection import train_test_split  # FIX: used below but never imported in this notebook
from sklearn.linear_model import LinearRegression     # FIX: used below but never imported in this notebook

# Defining X and y
features = df.columns.tolist()
features.remove('target')
X = df[features]
y = df.target

# Splitting the dataset (fixed seed so the split is reproducible across runs)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=150, shuffle=True)

# Check the shape of the data sets
print("X_train:", X_train.shape)
print("y_train:", y_train.shape)
print("X_test:", X_test.shape)
print("y_test:", y_test.shape)
# -

# train model
lr = LinearRegression()
lr.fit(X_train, y_train)

# predict target values
y_pred = lr.predict(X_test)

# check error for predictions
from sklearn.metrics import r2_score
score = r2_score(y_test, y_pred)
print("r2 score is ", round(score, 2))
print("mean_sqrd_error is:", round(mean_squared_error(y_test, y_pred), 2))
print("root_mean_squared error is:", round(np.sqrt(mean_squared_error(y_test, y_pred)), 2))

# # Final Model
# After trying a first very basic Baseline model and then the complex model, we now make our model simpler by reducing the features. The features were chosen after checking the correlations.
# + [markdown] colab_type="text" id="Y5898TVfKYOs"
# # Looking at a few interesting correlations

# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="fVMaahPiKYOv" outputId="5bdd7b41-98f9-423e-ee7d-9c849032035d"
# Strong +ve correlations
df.corr()['target'].sort_values(ascending=False).head(10)

# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="d61CC8IlKYO4" outputId="86e36bad-b955-4045-b6a7-e160b674fdab"
# And negative correlations
df.corr()['target'].sort_values().head(10)
# -

# create a list of positive and negative correlated features above a treshold of |0.5|
# negatively correlated features (- 0.5 threshold)
list1 = list((df.corr()['target'].sort_values().head(8)).index)
# positively correlated features (+ 0.5 threshold)
list2 = list((df.corr()['target'].sort_values(ascending=False).head(6)).index)

# combine both lists
corr_feature_list = list1 + list2
corr_feature_list

# plot the list of highly correlated features
corr = df[corr_feature_list].corr()
matrix = np.triu(corr)  # mask the redundant upper triangle of the symmetric matrix
sns.heatmap(corr, annot=True, mask=matrix);

# Car00 and Car01 / Stv00 and Stv01 / Lln00 and Lln01 perfectly correlated (- 1) --> we can remove each one of them

# select features with high correlations
# FIX: take an explicit copy so the in-place drop below acts on an independent
# frame instead of a view of `df` (avoids SettingWithCopyWarning and silently
# lost modifications in newer pandas)
df_final = df[corr_feature_list].copy()
df_final.drop(['car_01', 'stv_01', 'lln_01'], axis=1, inplace=True)
df_final.head()

# ## Transformation

from scipy.stats import boxcox  # FIX: boxcox is used below but was never imported in this notebook


# write function to transform features with boxcox
def convert_zeros(x):
    '''Convert exact zeros to a tiny positive number so that the (strictly
    positive) Box-Cox transformation can be applied.'''
    if x == 0.0:
        return 0.0000001
    else:
        return x


# check for 0 before and convert them
print(df['pw_00'].isin([0]).sum())
print(df['pw_00'].apply(convert_zeros).isin([0]).sum())

# plot original vs. Box-Cox-transformed distribution for each feature
for col in df_final.columns:
    if col != 'target':
        fig, axes = plt.subplots(1, 2)
        df_final[col] = df_final[col].apply(convert_zeros)
        sns.histplot(df_final[col], ax=axes[0])
        plt.xlabel(col)
        # FIX: boxcox returns (transformed_values, lambda); plot only the values,
        # not the whole tuple
        sns.histplot(boxcox(df_final[col])[0], ax=axes[1])
        plt.xlabel('boxcox_transformed ' + col)

# +
# apply the boxcox transformation on the remaining skewed columns
for col in df_final.columns:
    if col != 'target' and col != 'psa_00' and col != 'psa_01' and col != 'car_00' and col != 'pg_00':
        # FIX: convert zeros here as well, so this cell no longer depends on the
        # plotting loop above having mutated df_final first (convert_zeros is idempotent)
        df_final[col] = df_final[col].apply(convert_zeros)
        # FIX: assign the 1-D transformed array directly; the old `.reshape(-1,1)`
        # pushed a 2-D array into a Series column
        df_final[col] = boxcox(df_final[col])[0]
# -

# check data after the Box-Cox transformation
df_final.head()

# +
# do the cross validation manually
from sklearn.model_selection import KFold
from sklearn.linear_model import LinearRegression   # FIX: used below but never imported in this notebook
from sklearn.metrics import mean_squared_error

# Using this to test a model on 5 different splits
kf = KFold(n_splits=5, shuffle=False)
ycol = 'target'
in_cols = df_final.drop('target', axis=1).columns

scores = []
for train, test in kf.split(df_final):
    lr = LinearRegression()
    lr.fit(df_final[in_cols].iloc[train], df_final[ycol].iloc[train])
    rmse = np.sqrt(mean_squared_error(df_final[ycol].iloc[test], lr.predict(df_final[in_cols].iloc[test])))
    scores.append(rmse)
    print(rmse)
print("Average score in 5-fold CV:", np.mean(scores))

# +
# Train test split with same random seed
from sklearn.model_selection import train_test_split  # FIX: used below but never imported in this notebook

# Defining X and y
features = df_final.columns.tolist()
features.remove('target')
X = df_final[features]
y = df_final.target

# Splitting the dataset (same seed as the complex model, so scores are comparable)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=150, shuffle=True)

# Check the shape of the data sets
print("X_train:", X_train.shape)
print("y_train:", y_train.shape)
print("X_test:", X_test.shape)
print("y_test:", y_test.shape)

# +
lr = LinearRegression()
lr.fit(X_train, y_train)

# predict target values
y_pred = lr.predict(X_test)
# -

# check error for predictions
from sklearn.metrics import r2_score
score = r2_score(y_test, y_pred)
print("r2 score is ", round(score, 2))
print("mean_sqrd_error is:", round(mean_squared_error(y_test, y_pred), 2))
print("root_mean_squared error is:", round(np.sqrt(mean_squared_error(y_test, y_pred)), 2))

# +
# Plot the errors
from yellowbrick.regressor import ResidualsPlot

# FIX: `gs` was never defined anywhere in this notebook — visualize the residuals
# of the fitted linear regression `lr` instead
visualizer = ResidualsPlot(lr)
visualizer.fit(X_train, y_train)   # Fit the training data to the visualizer
visualizer.score(X_test, y_test)   # Evaluate the model on the test data
visualizer.show()
# -
#
# Final_Notebook_update_Ravi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Sentiments Analysis of tweets tweeted in context of Demonetization step taken by the union Govt

# +
# Importing the libraries
import pandas as pd  # Library for dealing with the dataset
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import string
import nltk  # for text manipulation

pd.set_option("display.max_colwidth", 200)
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# %matplotlib inline
import re  # Library to clean the data
from nltk.corpus import stopwords  # To Remove Stopwords
from nltk.stem.porter import PorterStemmer  # For Stemming Purpose
from nltk.tokenize import word_tokenize
from string import punctuation
import wordcloud
# -

# NOTE(review): absolute Windows path — the notebook only runs on the author's machine; confirm/parametrize
data = pd.read_csv("D:\\Sentiment_analysis\\Demonetization_data29th.csv", encoding='latin')
data.head()

data.shape

data.info()

# percentage of missing values per column
round(data.isnull().sum()/len(data.index)*100,2)

pct_null = data.isnull().sum() / len(data)
missing_features = pct_null[pct_null > 0.30].index
missing_features

# ## Dropping columns having more than 30% null values.

# keep only columns where fewer than 30% of the entries are blank/NULL/0
data = data.loc[:, data.isin([' ','NULL',0]).mean() < .3]

data.info()

data_1 = data[['LANGUAGE','CONTENT','from_user_followers_count','retweet_count']]

data_1.info()

data_1.head()

data.CONTENT.head()

# ## Data Cleaning
# Given below is a user-defined function to remove unwanted text patterns from the tweets.

# +
def remove_pattern(input_txt, pattern):
    """Remove every substring of *input_txt* that matches the regex *pattern*.

    FIX: the original implementation passed each matched substring back into
    ``re.sub`` as a pattern, which breaks (or over-deletes) whenever a match
    contains regex metacharacters; a plain literal string replacement is both
    safe and what was intended.
    """
    for match in re.findall(pattern, input_txt):
        input_txt = input_txt.replace(match, '')
    return input_txt
# -

# ## Converting into tidy tweets.
# strip @user handles from every tweet
# FIX: use a raw string for the regex so "\w" is not an invalid string escape
data_1['proper_tweets'] = np.vectorize(remove_pattern)(data_1['CONTENT'], r"@[\w]*")
data_1.head()

data_1.proper_tweets.head()

# keep only letters and hashtag marks
# FIX: pass regex=True explicitly — pandas >= 2.0 treats str.replace patterns
# as literal strings by default, which would leave the punctuation in place
data_1['proper_tweets'] = data_1['proper_tweets'].str.replace("[^a-zA-Z#]", " ", regex=True)
data_1.head(10)

# drop the remaining '#' characters (plain literal replacement)
data_1['proper_tweets'] = data_1['proper_tweets'].str.replace("#", " ", regex=False)
data_1.head()

# Tokeninzing the tweets ,i.e. breakuing them into components
tokenized_tweets = data_1['proper_tweets'].apply(lambda x: x.split())  # tokenizing
tokenized_tweets.head()

# ## now lemmetizing the tweets.

# from nltk.stem.porter import *
# stemmer = PorterStemmer()
#
# tokenized_tweets = tokenized_tweets.apply(lambda x: [stemmer.stem(i) for i in x])

# joining the tokenized tweets

# +
# FIX: join via .apply instead of a positional-index loop — the old
# `tokenized_tweets[i]` assignment silently breaks if the Series index is
# not the default 0..n-1 RangeIndex
data_1['proper_tweets'] = tokenized_tweets.apply(' '.join)
# -

# ## Visualizing which words were used the most , i.e which were the words that describe the tweets

# +
from wordcloud import WordCloud
words = ' '.join([text for text in data_1['proper_tweets']])

wordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(words)
plt.figure(figsize=(10, 7))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis('off')
plt.show()
# -

# ## As we can see that most words that were used were either nuetral or positive.
# Next, we will try to extract features from the tokenized tweets.

from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer

# # Bag of words features

bow_vectorizer = CountVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english')
# bag-of-words feature matrix (sparse, capped at 1000 terms)
bow = bow_vectorizer.fit_transform(data_1['proper_tweets'])
bow.shape

# # TF-IDF Vectorized features.
tfidf_vectorizer = TfidfVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english')
# TF-IDF feature matrix (sparse, capped at 1000 terms)
tfidf = tfidf_vectorizer.fit_transform(data_1['proper_tweets'])
tfidf.shape

# keep only the cleaned tweets plus the two numeric engagement columns
data_1.drop(['CONTENT','LANGUAGE'],axis=1, inplace=True)

data_1.rename(columns={'from_user_followers_count': '#Followers','proper_tweets':'Tweets',\
                       'retweet_count':'#ReTweets'}, inplace=True)

data_1.head()

# ## Analysing the sentiments of tweets.

from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()

# spot-check VADER on the first few tweets
sid.polarity_scores(data_1.Tweets[0])

sid.polarity_scores(data_1.Tweets[1])

sid.polarity_scores(data_1.Tweets[2])

data_1['Score'] = data_1['Tweets'].apply(lambda Tweets:sid.polarity_scores(Tweets))
data_1.head()

# ## Calculating compound scores

data_1['Compound'] = data_1['Score'].apply(lambda score_dict: score_dict['compound'])
data_1.head()

# ### Analysing polarity of tweets from compound score, if its greater than 0 then the polarity is positive otherwise negative.

data_1['Polarity'] = data_1['Compound'].apply(lambda x:'positive' if x>0 else 'neutral' if x==0 else 'negative')
data_1.head()

data_1.Polarity.value_counts()

round(data_1.Polarity.value_counts()/len(data_1.index)*100,1)

# ## 40 % tweets had positive connotations & 33 % tweets had negative connotations and 27% were neutral about it.

# +
percent = round(data_1.Polarity.value_counts()/len(data_1.index)*100,1)
# FIX: derive the slice labels from the value_counts index instead of a
# hard-coded ['Positive','Negative','Neutral'] list — the hard-coded order
# silently mislabels the pie slices if the class ranking ever changes
sub = [p.capitalize() for p in percent.index]
plt.axis("equal")
plt.pie(percent , labels=sub,radius=1.6,autopct='%1.2f%%',explode=[0.05,0.05,0.05],startangle=90,shadow=True,counterclock=False,pctdistance=0.6)
plt.show()

# +
plt.figure(figsize=(5,6),dpi=80,facecolor='y',edgecolor='k')
# median follower count per polarity class
figx=sns.barplot(x='Polarity', y='#Followers',estimator=np.median, data= data_1)
plt.xlabel("Tweets",fontsize=18,color='black')
plt.ylabel("No. of Followers",fontsize=18,color='black')
plt.title("No.of Followers against Polarity",fontsize=20,color='black')
for i in figx.patches:
    # get_x pulls left or right; get_height pushes up or down
    figx.text(i.get_x()+.01, i.get_height()+0.3, \
            str(round((i.get_height()), 2)), fontsize=15, color='black', rotation=0)
plt.show()

# +
plt.figure(figsize=(7,7) ,dpi=80,facecolor='y',edgecolor='c')
# median retweet count per polarity class
figx=sns.barplot(x='Polarity', y='#ReTweets',estimator=np.median, data= data_1)
plt.xlabel("Tweets",fontsize=18,color='black')
plt.ylabel("No. of ReTweets",fontsize=18,color='black')
plt.title("ReTweets Count against Polarity",fontsize=20,color='black')
for i in figx.patches:
    # get_x pulls left or right; get_height pushes up or down
    figx.text(i.get_x()+.01, i.get_height()+0.3, \
            str(round((i.get_height()), 2)), fontsize=15, color='black', rotation=0)
plt.show()
# -
# sentiment_analysis.ipynb