text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` import matplotlib matplotlib.use('Agg') %matplotlib qt import matplotlib.pyplot as plt import numpy as np import os import SimpleITK as sitk from os.path import expanduser, join from scipy.spatial.distance import euclidean os.chdir(join(expanduser('~'), 'Medical Imaging')) import liversegmentation ``` --- # Read in DICOM images ``` sliceNum = 42 dicomPath = join(expanduser('~'), 'Documents', 'SlicerDICOMDatabase', 'TCIALocal', '0', 'images', '') reader = sitk.ImageSeriesReader() seriesIDread = reader.GetGDCMSeriesIDs(dicomPath)[1] dicomFilenames = reader.GetGDCMSeriesFileNames(dicomPath, seriesIDread) reader.SetFileNames(dicomFilenames) imgSeries = reader.Execute() imgSlice = imgSeries[:,:,sliceNum] ``` Note that the TCGA-BC-4073 patient has 2 series of images (series 9 & 10). The series IDs are: ``` reader.GetGDCMSeriesIDs(dicomPath) ``` By comparing images between OsiriX and plots of the SimpleITK images, the 2<sup>nd</sup> tuple element corresponds to series 9. ``` liversegmentation.sitk_show(imgSlice) ``` Cast original slice to unsigned 8-bit integer so that segmentations can be overlaid on top ``` imgSliceUInt8 = sitk.Cast(sitk.RescaleIntensity(imgSlice), sitk.sitkUInt8) ``` # Filtering ## Curvature anisotropic diffusion ``` anisoParams = (0.06, 9.0, 5) imgFilter = liversegmentation.anisotropic_diffusion(imgSlice, *anisoParams) liversegmentation.sitk_show(imgFilter) ``` ## Median filter ``` med = sitk.MedianImageFilter() med.SetRadius(3) imgFilter = med.Execute(imgSlice) liversegmentation.sitk_show(imgFilter) ``` # Edge potential ## Gradient magnitude recursive Gaussian ``` #sigma = 3.0 sigma = 1.0 imgGauss = liversegmentation.gradient_magnitude(imgFilter, sigma) liversegmentation.sitk_show(imgGauss) ``` # Feature Image ## Sigmoid mapping ``` #K1, K2 = 20.0, 6.0 #K1, K2 = 14.0, 4.0 K1, K2 = 8.0, 2.0 imgSigmoid = liversegmentation.sigmoid_filter(imgGauss, K1, K2) liversegmentation.sitk_show(imgSigmoid) ``` # Input level set Create 2 lists, one to hold the 
seed coordinates and the other for the radii. The radius in the 1<sup>st</sup> index corresponds to the 1<sup>st</sup> index, and so on. ``` coords = [(118, 286), (135, 254), (202, 75), (169, 89), (145, 209), (142, 147), (252, 58), (205, 119)] radii = [10, 10, 10, 10, 10, 10, 5, 5] seed2radius = {tuple(reversed(p[0])): p[1] for p in zip(coords, radii)} initImg = liversegmentation.input_level_set(imgSigmoid, seed2radius) liversegmentation.sitk_show(initImg) ``` Creating new level set from segmentation of downsampled image. First convert the segmentation result into a workable format: ``` binaryThresh = sitk.BinaryThresholdImageFilter() binaryThresh.SetLowerThreshold(-2.3438) binaryThresh.SetUpperThreshold(0.0) binaryThresh.SetInsideValue(1) binaryThresh.SetOutsideValue(0) binaryImg = binaryThresh.Execute(imgGac2) liversegmentation.sitk_show(binaryImg) ``` Add in new seeds: ``` coords2 = [(235, 108), (199, 188), (120, 113), (96, 140)] radii2 = [5, 5, 5, 5] seed2radius2 = {tuple(reversed(p[0])): p[1] for p in zip(coords2, radii2)} ``` Now create new level set image: ``` X_1 = sitk.GetArrayFromImage(binaryImg) # create a 2nd seed matrix from the 2nd set of coordinates setupImg = sitk.Image(imgSigmoid.GetSize()[0], imgSigmoid.GetSize()[1], sitk.sitkUInt8) X_2 = sitk.GetArrayFromImage(setupImg) for i in range(X_2.shape[0]): for j in range(X_2.shape[1]): for s in seed2radius2.keys(): if euclidean((i,j), s) <= seed2radius2[s]: X_2[i,j] = 1 X = X_1.astype(bool) + X_2.astype(bool) initImg2 = sitk.Cast(sitk.GetImageFromArray(X.astype(int)), imgSigmoid.GetPixelIDValue()) * -1 + 0.5 initImg2.SetSpacing(imgSigmoid.GetSpacing()) initImg2.SetOrigin(imgSigmoid.GetOrigin()) initImg2.SetDirection(imgSigmoid.GetDirection()) liversegmentation.sitk_show(initImg2) ``` Add in a 3<sup>rd</sup> set of seeds: ``` coords3 = [(225, 177), (246, 114), (83, 229), (78, 208), (82, 183), (238, 126)] radii3 = [5, 10, 5, 5, 5, 15] seed2radius3 = {tuple(reversed(p[0])): p[1] for p in zip(coords3, 
radii3)} X_1 = sitk.GetArrayFromImage(binaryImg) # create a 3rd seed matrix from the 3rd set of coordinates setupImg = sitk.Image(imgSigmoid.GetSize()[0], imgSigmoid.GetSize()[1], sitk.sitkUInt8) X_2 = sitk.GetArrayFromImage(setupImg) for i in range(X_2.shape[0]): for j in range(X_2.shape[1]): for s in seed2radius3.keys(): if euclidean((i,j), s) <= seed2radius3[s]: X_2[i,j] = 1 X = X_1.astype(bool) + X_2.astype(bool) initImg3 = sitk.Cast(sitk.GetImageFromArray(X.astype(int)), imgSigmoid.GetPixelIDValue()) * -1 + 0.5 initImg3.SetSpacing(imgSigmoid.GetSpacing()) initImg3.SetOrigin(imgSigmoid.GetOrigin()) initImg3.SetDirection(imgSigmoid.GetDirection()) liversegmentation.sitk_show(initImg3) ``` # Segmentation ## Geodesic Active Contour ``` #gacParams = (1.0, 0.2, 4.5, 0.01, 250) #gacParams = (1.0, 0.2, 4.5, 0.01, 200) gacParams = (1.0, 0.2, 5.0, 0.01, 350) imgGac3 = liversegmentation.geodesic_active_contour(initImg3, imgSigmoid, *gacParams) liversegmentation.sitk_show(imgGac) ``` Display overlay of segmentation over original slice: ``` labelLowThresh = -2.3438 labelUpThresh = 0.0 binarySegImg3 = liversegmentation.binary_threshold(imgGac3, labelLowThresh, labelUpThresh) liversegmentation.sitk_show(sitk.LabelOverlay(imgSliceUInt8, binarySegImg3, backgroundValue=255)) ```
github_jupyter
``` import keras from keras.models import Sequential, Model, load_model from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, CuDNNLSTM, CuDNNGRU, BatchNormalization, LocallyConnected2D, Permute, TimeDistributed, Bidirectional from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback from keras import regularizers from keras import backend as K from keras.utils.generic_utils import Progbar from keras.layers.merge import _Merge import keras.losses from functools import partial from collections import defaultdict import tensorflow as tf from tensorflow.python.framework import ops import isolearn.keras as iso import numpy as np import tensorflow as tf import logging logging.getLogger('tensorflow').setLevel(logging.ERROR) import pandas as pd import os import pickle import numpy as np import scipy.sparse as sp import scipy.io as spio import matplotlib.pyplot as plt import isolearn.io as isoio import isolearn.keras as isol from sequence_logo_helper import plot_dna_logo import pandas as pd from keras.backend.tensorflow_backend import set_session def contain_tf_gpu_mem_usage() : config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) set_session(sess) contain_tf_gpu_mem_usage() class EpochVariableCallback(Callback) : def __init__(self, my_variable, my_func) : self.my_variable = my_variable self.my_func = my_func def on_epoch_begin(self, epoch, logs={}) : K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch)) #Define dataset/experiment name dataset_name = "apa_doubledope" #Load cached dataframe cached_dict = pickle.load(open('apa_doubledope_cached_set.pickle', 'rb')) data_df = cached_dict['data_df'] print("len(data_df) = " + str(len(data_df)) + " (loaded)") #Make generators 
valid_set_size = 0.05 test_set_size = 0.05 batch_size = 32 #Generate training and test set indexes data_index = np.arange(len(data_df), dtype=np.int) train_index = data_index[:-int(len(data_df) * (valid_set_size + test_set_size))] valid_index = data_index[train_index.shape[0]:-int(len(data_df) * test_set_size)] test_index = data_index[train_index.shape[0] + valid_index.shape[0]:] print('Training set size = ' + str(train_index.shape[0])) print('Validation set size = ' + str(valid_index.shape[0])) print('Test set size = ' + str(test_index.shape[0])) data_gens = { gen_id : iso.DataGenerator( idx, {'df' : data_df}, batch_size=batch_size, inputs = [ { 'id' : 'seq', 'source_type' : 'dataframe', 'source' : 'df', 'extractor' : iso.SequenceExtractor('padded_seq', start_pos=180, end_pos=180 + 205), 'encoder' : iso.OneHotEncoder(seq_length=205), 'dim' : (1, 205, 4), 'sparsify' : False } ], outputs = [ { 'id' : 'hairpin', 'source_type' : 'dataframe', 'source' : 'df', 'extractor' : lambda row, index: row['proximal_usage'], 'transformer' : lambda t: t, 'dim' : (1,), 'sparsify' : False } ], randomizers = [], shuffle = True if gen_id == 'train' else False ) for gen_id, idx in [('all', data_index), ('train', train_index), ('valid', valid_index), ('test', test_index)] } #Load data matrices x_train = np.concatenate([data_gens['train'][i][0][0] for i in range(len(data_gens['train']))], axis=0) x_test = np.concatenate([data_gens['test'][i][0][0] for i in range(len(data_gens['test']))], axis=0) y_train = np.concatenate([data_gens['train'][i][1][0] for i in range(len(data_gens['train']))], axis=0) y_test = np.concatenate([data_gens['test'][i][1][0] for i in range(len(data_gens['test']))], axis=0) print("x_train.shape = " + str(x_train.shape)) print("x_test.shape = " + str(x_test.shape)) print("y_train.shape = " + str(y_train.shape)) print("y_test.shape = " + str(y_test.shape)) #Define sequence template (APA Doubledope sublibrary) sequence_template = 
'CTTCCGATCTNNNNNNNNNNNNNNNNNNNNCATTACTCGCATCCANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCCAATTAAGCCNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCTAC' sequence_mask = np.array([1 if sequence_template[j] == 'N' else 0 for j in range(len(sequence_template))]) #Visualize background sequence distribution pseudo_count = 1.0 x_mean = (np.sum(x_train, axis=(0, 1)) + pseudo_count) / (x_train.shape[0] + 4. * pseudo_count) x_mean_logits = np.log(x_mean / (1. - x_mean)) plot_dna_logo(np.copy(x_mean), sequence_template=sequence_template, figsize=(14, 0.65), logo_height=1.0, plot_start=0, plot_end=205) #Calculate mean training set conservation entropy = np.sum(x_mean * -np.log(x_mean), axis=-1) / np.log(2.0) conservation = 2.0 - entropy x_mean_conservation = np.sum(conservation) / np.sum(sequence_mask) print("Mean conservation (bits) = " + str(x_mean_conservation)) #Calculate mean training set kl-divergence against background x_train_clipped = np.clip(np.copy(x_train[:, 0, :, :]), 1e-8, 1. 
- 1e-8) kl_divs = np.sum(x_train_clipped * np.log(x_train_clipped / np.tile(np.expand_dims(x_mean, axis=0), (x_train_clipped.shape[0], 1, 1))), axis=-1) / np.log(2.0) x_mean_kl_divs = np.sum(kl_divs * sequence_mask, axis=-1) / np.sum(sequence_mask) x_mean_kl_div = np.mean(x_mean_kl_divs) print("Mean KL Div against background (bits) = " + str(x_mean_kl_div)) from tensorflow.python.framework import ops #Stochastic Binarized Neuron helper functions (Tensorflow) #ST Estimator code adopted from https://r2rt.com/beyond-binary-ternary-and-one-hot-neurons.html #See Github https://github.com/spitis/ def st_sampled_softmax(logits): with ops.name_scope("STSampledSoftmax") as namescope : nt_probs = tf.nn.softmax(logits) onehot_dim = logits.get_shape().as_list()[1] sampled_onehot = tf.one_hot(tf.squeeze(tf.multinomial(logits, 1), 1), onehot_dim, 1.0, 0.0) with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}): return tf.ceil(sampled_onehot * nt_probs) def st_hardmax_softmax(logits): with ops.name_scope("STHardmaxSoftmax") as namescope : nt_probs = tf.nn.softmax(logits) onehot_dim = logits.get_shape().as_list()[1] sampled_onehot = tf.one_hot(tf.argmax(nt_probs, 1), onehot_dim, 1.0, 0.0) with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}): return tf.ceil(sampled_onehot * nt_probs) @ops.RegisterGradient("STMul") def st_mul(op, grad): return [grad, grad] #Gumbel Distribution Sampler def gumbel_softmax(logits, temperature=0.5) : gumbel_dist = tf.contrib.distributions.RelaxedOneHotCategorical(temperature, logits=logits) batch_dim = logits.get_shape().as_list()[0] onehot_dim = logits.get_shape().as_list()[1] return gumbel_dist.sample() #PWM Masking and Sampling helper functions def mask_pwm(inputs) : pwm, onehot_template, onehot_mask = inputs return pwm * onehot_mask + onehot_template def sample_pwm_st(pwm_logits) : n_sequences = K.shape(pwm_logits)[0] seq_length = K.shape(pwm_logits)[2] flat_pwm = 
K.reshape(pwm_logits, (n_sequences * seq_length, 4)) sampled_pwm = st_sampled_softmax(flat_pwm) return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 4)) def sample_pwm_gumbel(pwm_logits) : n_sequences = K.shape(pwm_logits)[0] seq_length = K.shape(pwm_logits)[2] flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 4)) sampled_pwm = gumbel_softmax(flat_pwm, temperature=0.5) return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 4)) #Generator helper functions def initialize_sequence_templates(generator, sequence_templates, background_matrices) : embedding_templates = [] embedding_masks = [] embedding_backgrounds = [] for k in range(len(sequence_templates)) : sequence_template = sequence_templates[k] onehot_template = iso.OneHotEncoder(seq_length=len(sequence_template))(sequence_template).reshape((1, len(sequence_template), 4)) for j in range(len(sequence_template)) : if sequence_template[j] not in ['N', 'X'] : nt_ix = np.argmax(onehot_template[0, j, :]) onehot_template[:, j, :] = -4.0 onehot_template[:, j, nt_ix] = 10.0 elif sequence_template[j] == 'X' : onehot_template[:, j, :] = -1.0 onehot_mask = np.zeros((1, len(sequence_template), 4)) for j in range(len(sequence_template)) : if sequence_template[j] == 'N' : onehot_mask[:, j, :] = 1.0 embedding_templates.append(onehot_template.reshape(1, -1)) embedding_masks.append(onehot_mask.reshape(1, -1)) embedding_backgrounds.append(background_matrices[k].reshape(1, -1)) embedding_templates = np.concatenate(embedding_templates, axis=0) embedding_masks = np.concatenate(embedding_masks, axis=0) embedding_backgrounds = np.concatenate(embedding_backgrounds, axis=0) generator.get_layer('template_dense').set_weights([embedding_templates]) generator.get_layer('template_dense').trainable = False generator.get_layer('mask_dense').set_weights([embedding_masks]) generator.get_layer('mask_dense').trainable = False generator.get_layer('background_dense').set_weights([embedding_backgrounds]) 
generator.get_layer('background_dense').trainable = False #Generator construction function def build_sampler(batch_size, seq_length, n_classes=1, n_samples=1, sample_mode='st') : #Initialize Reshape layer reshape_layer = Reshape((1, seq_length, 4)) #Initialize background matrix onehot_background_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='zeros', name='background_dense') #Initialize template and mask matrices onehot_template_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='zeros', name='template_dense') onehot_mask_dense = Embedding(n_classes, seq_length * 4, embeddings_initializer='ones', name='mask_dense') #Initialize Templating and Masking Lambda layer masking_layer = Lambda(mask_pwm, output_shape = (1, seq_length, 4), name='masking_layer') background_layer = Lambda(lambda x: x[0] + x[1], name='background_layer') #Initialize PWM normalization layer pwm_layer = Softmax(axis=-1, name='pwm') #Initialize sampling layers sample_func = None if sample_mode == 'st' : sample_func = sample_pwm_st elif sample_mode == 'gumbel' : sample_func = sample_pwm_gumbel upsampling_layer = Lambda(lambda x: K.tile(x, [n_samples, 1, 1, 1]), name='upsampling_layer') sampling_layer = Lambda(sample_func, name='pwm_sampler') permute_layer = Lambda(lambda x: K.permute_dimensions(K.reshape(x, (n_samples, batch_size, 1, seq_length, 4)), (1, 0, 2, 3, 4)), name='permute_layer') def _sampler_func(class_input, raw_logits) : #Get Template and Mask onehot_background = reshape_layer(onehot_background_dense(class_input)) onehot_template = reshape_layer(onehot_template_dense(class_input)) onehot_mask = reshape_layer(onehot_mask_dense(class_input)) #Add Template and Multiply Mask pwm_logits = masking_layer([background_layer([raw_logits, onehot_background]), onehot_template, onehot_mask]) #Compute PWM (Nucleotide-wise Softmax) pwm = pwm_layer(pwm_logits) #Tile each PWM to sample from and create sample axis pwm_logits_upsampled = upsampling_layer(pwm_logits) 
sampled_pwm = sampling_layer(pwm_logits_upsampled) sampled_pwm = permute_layer(sampled_pwm) sampled_mask = permute_layer(upsampling_layer(onehot_mask)) return pwm_logits, pwm, sampled_pwm, onehot_mask, sampled_mask return _sampler_func #Scrambler network definition def make_resblock(n_channels=64, window_size=8, dilation_rate=1, group_ix=0, layer_ix=0, drop_rate=0.0) : #Initialize res block layers batch_norm_0 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_batch_norm_0') relu_0 = Lambda(lambda x: K.relu(x, alpha=0.0)) conv_0 = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_0') batch_norm_1 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_batch_norm_1') relu_1 = Lambda(lambda x: K.relu(x, alpha=0.0)) conv_1 = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_1') skip_1 = Lambda(lambda x: x[0] + x[1], name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_skip_1') drop_1 = None if drop_rate > 0.0 : drop_1 = Dropout(drop_rate) #Execute res block def _resblock_func(input_tensor) : batch_norm_0_out = batch_norm_0(input_tensor) relu_0_out = relu_0(batch_norm_0_out) conv_0_out = conv_0(relu_0_out) batch_norm_1_out = batch_norm_1(conv_0_out) relu_1_out = relu_1(batch_norm_1_out) if drop_rate > 0.0 : conv_1_out = drop_1(conv_1(relu_1_out)) else : conv_1_out = conv_1(relu_1_out) skip_1_out = skip_1([conv_1_out, input_tensor]) return skip_1_out return _resblock_func def load_scrambler_network(n_groups=1, n_resblocks_per_group=4, n_channels=32, window_size=8, dilation_rates=[1], drop_rate=0.0) : #Discriminator network 
definition conv_0 = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_conv_0') skip_convs = [] resblock_groups = [] for group_ix in range(n_groups) : skip_convs.append(Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_skip_conv_' + str(group_ix))) resblocks = [] for layer_ix in range(n_resblocks_per_group) : resblocks.append(make_resblock(n_channels=n_channels, window_size=window_size, dilation_rate=dilation_rates[group_ix], group_ix=group_ix, layer_ix=layer_ix, drop_rate=drop_rate)) resblock_groups.append(resblocks) last_block_conv = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_last_block_conv') skip_add = Lambda(lambda x: x[0] + x[1], name='scrambler_skip_add') final_conv = Conv2D(1, (1, 1), strides=(1, 1), padding='same', activation='softplus', kernel_initializer='glorot_normal', name='scrambler_final_conv') onehot_to_logits = Lambda(lambda x: 2. 
* x - 1., name='scrambler_onehot_to_logits') scale_logits = Lambda(lambda x: K.tile(x[0], (1, 1, 1, 4)) * x[1], name='scrambler_logit_scale') def _scrambler_func(sequence_input) : conv_0_out = conv_0(sequence_input) #Connect group of res blocks output_tensor = conv_0_out #Res block group execution skip_conv_outs = [] for group_ix in range(n_groups) : skip_conv_out = skip_convs[group_ix](output_tensor) skip_conv_outs.append(skip_conv_out) for layer_ix in range(n_resblocks_per_group) : output_tensor = resblock_groups[group_ix][layer_ix](output_tensor) #Last res block extr conv last_block_conv_out = last_block_conv(output_tensor) skip_add_out = last_block_conv_out for group_ix in range(n_groups) : skip_add_out = skip_add([skip_add_out, skip_conv_outs[group_ix]]) #Final conv out final_conv_out = final_conv(skip_add_out) #Scale logits by importance scores scaled_logits = scale_logits([final_conv_out, onehot_to_logits(sequence_input)]) return scaled_logits, final_conv_out return _scrambler_func #Keras loss functions def get_sigmoid_nll() : def _sigmoid_nll(y_true, y_pred) : y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon()) return K.mean(-y_true * K.log(y_pred) - (1.0 - y_true) * K.log(1.0 - y_pred), axis=-1) return _sigmoid_nll def get_kl_divergence() : def _kl_divergence(y_true, y_pred) : y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon()) y_true = K.clip(y_true, K.epsilon(), 1.0 - K.epsilon()) left_mean_kl = K.mean(y_true * K.log(y_true / y_pred) + (1.0 - y_true) * K.log((1.0 - y_true) / (1.0 - y_pred)), axis=-1) right_mean_kl = K.mean(y_pred * K.log(y_pred / y_true) + (1.0 - y_pred) * K.log((1.0 - y_pred) / (1.0 - y_true)), axis=-1) return left_mean_kl + right_mean_kl return _kl_divergence def get_margin_entropy_ame_masked(pwm_start, pwm_end, pwm_background, max_bits=1.0) : def _margin_entropy_ame_masked(pwm, pwm_mask) : conservation = pwm[:, 0, pwm_start:pwm_end, :] * K.log(K.clip(pwm[:, 0, pwm_start:pwm_end, :], K.epsilon(), 1. 
- K.epsilon()) / K.constant(pwm_background[pwm_start:pwm_end, :])) / K.log(2.0) conservation = K.sum(conservation, axis=-1) mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1) n_unmasked = K.sum(mask, axis=-1) mean_conservation = K.sum(conservation * mask, axis=-1) / n_unmasked margin_conservation = K.switch(mean_conservation > K.constant(max_bits, shape=(1,)), mean_conservation - K.constant(max_bits, shape=(1,)), K.zeros_like(mean_conservation)) return margin_conservation return _margin_entropy_ame_masked def get_target_entropy_sme_masked(pwm_start, pwm_end, pwm_background, target_bits=1.0) : def _target_entropy_sme_masked(pwm, pwm_mask) : conservation = pwm[:, 0, pwm_start:pwm_end, :] * K.log(K.clip(pwm[:, 0, pwm_start:pwm_end, :], K.epsilon(), 1. - K.epsilon()) / K.constant(pwm_background[pwm_start:pwm_end, :])) / K.log(2.0) conservation = K.sum(conservation, axis=-1) mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1) n_unmasked = K.sum(mask, axis=-1) mean_conservation = K.sum(conservation * mask, axis=-1) / n_unmasked return (mean_conservation - target_bits)**2 return _target_entropy_sme_masked def get_weighted_loss(loss_coeff=1.) 
: def _min_pred(y_true, y_pred) : return loss_coeff * y_pred return _min_pred #Initialize Encoder and Decoder networks batch_size = 32 seq_length = 205 n_samples = 32 sample_mode = 'st' #sample_mode = 'gumbel' #Resnet parameters resnet_n_groups = 1 resnet_n_resblocks_per_group = 4 resnet_n_channels = 32 resnet_window_size = 8 resnet_dilation_rates = [1] resnet_drop_rate = 0.25 #Load scrambler scrambler = load_scrambler_network( n_groups=resnet_n_groups, n_resblocks_per_group=resnet_n_resblocks_per_group, n_channels=resnet_n_channels, window_size=resnet_window_size, dilation_rates=resnet_dilation_rates, drop_rate=resnet_drop_rate ) #Load sampler sampler = build_sampler(batch_size, seq_length, n_classes=1, n_samples=n_samples, sample_mode=sample_mode) #Load Predictor predictor_path = '../../../aparent/saved_models/aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5' predictor = load_model(predictor_path) predictor.trainable = False predictor.compile(optimizer=keras.optimizers.SGD(lr=0.1), loss='mean_squared_error') #Build scrambler model scrambler_class = Input(shape=(1,), name='scrambler_class') scrambler_input = Input(shape=(1, seq_length, 4), name='scrambler_input') scrambled_logits, importance_scores = scrambler(scrambler_input) pwm_logits, pwm, sampled_pwm, _, _ = sampler(scrambler_class, scrambled_logits) scrambler_model = Model([scrambler_input, scrambler_class], [pwm_logits, pwm, sampled_pwm, importance_scores]) #Initialize Sequence Templates and Masks initialize_sequence_templates(scrambler_model, [sequence_template], [x_mean_logits]) scrambler_model.compile( optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999), loss='mean_squared_error' ) #Build Auto-scrambler pipeline #Define model inputs ae_scrambler_class = Input(shape=(1,), name='ae_scrambler_class') ae_scrambler_input = Input(shape=(1, seq_length, 4), name='ae_scrambler_input') #APARENT-specific tensors aparent_lib = Input(shape=(13,), name='aparent_lib_input') 
aparent_distal_pas = Input(shape=(1,), name='aparent_distal_pas_input') #Run encoder and decoder _, scrambled_pwm, scrambled_sample, pwm_mask, _ = sampler(ae_scrambler_class, scrambler(ae_scrambler_input)[0]) #Define layer to deflate sample axis deflate_scrambled_sample = Lambda(lambda x: K.reshape(x, (batch_size * n_samples, 1, seq_length, 4)), name='deflate_scrambled_sample') #Deflate sample axis scrambled_sample_deflated = deflate_scrambled_sample(scrambled_sample) def _make_prediction(inputs, predictor=predictor) : pred_seq_in, pred_lib_in, pred_distal_pas_in = inputs pred_seq_in_perm = K.expand_dims(pred_seq_in[:, 0, ...], axis=-1) return predictor([pred_seq_in_perm, pred_lib_in, pred_distal_pas_in])[0] def _make_prediction_scrambled(inputs, predictor=predictor, n_samples=n_samples) : pred_seq_in, pred_lib_in, pred_distal_pas_in = inputs pred_seq_in_perm = K.expand_dims(pred_seq_in[:, 0, ...], axis=-1) return predictor([pred_seq_in_perm, K.tile(pred_lib_in, (n_samples, 1)), K.tile(pred_distal_pas_in, (n_samples, 1))])[0] #Make reference prediction on non-scrambled input sequence y_pred_non_scrambled = Lambda(_make_prediction, name='make_prediction_non_scrambled')([ae_scrambler_input, aparent_lib, aparent_distal_pas]) #Make prediction on scrambled sequence samples y_pred_scrambled_deflated = Lambda(_make_prediction_scrambled, name='make_prediction_scrambled')([scrambled_sample_deflated, aparent_lib, aparent_distal_pas]) #Define layer to inflate sample axis inflate_scrambled_prediction = Lambda(lambda x: K.reshape(x, (batch_size, n_samples)), name='inflate_scrambled_prediction') #Inflate sample axis y_pred_scrambled = inflate_scrambled_prediction(y_pred_scrambled_deflated) #Cost function parameters pwm_start = 10 pwm_end = 201 target_bits = 0.5 #NLL cost nll_loss_func = get_kl_divergence() #Conservation cost conservation_loss_func = get_target_entropy_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, target_bits=1.8) #Entropy cost 
entropy_loss_func = get_target_entropy_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, target_bits=target_bits) #entropy_loss_func = get_margin_entropy_ame_masked(pwm_start=pwm_start, pwm_end=pwm_end, pwm_background=x_mean, max_bits=target_bits) #Define annealing coefficient anneal_coeff = K.variable(1.0) #Execute NLL cost nll_loss = Lambda(lambda x: nll_loss_func(K.tile(x[0], (1, K.shape(x[1])[1])), x[1]), name='nll')([y_pred_non_scrambled, y_pred_scrambled]) #Execute conservation cost conservation_loss = Lambda(lambda x: anneal_coeff * conservation_loss_func(x[0], x[1]), name='conservation')([scrambled_pwm, pwm_mask]) #Execute entropy cost entropy_loss = Lambda(lambda x: (1. - anneal_coeff) * entropy_loss_func(x[0], x[1]), name='entropy')([scrambled_pwm, pwm_mask]) loss_model = Model( [ae_scrambler_class, ae_scrambler_input, aparent_lib, aparent_distal_pas], [nll_loss, conservation_loss, entropy_loss] ) #Initialize Sequence Templates and Masks initialize_sequence_templates(loss_model, [sequence_template], [x_mean_logits]) loss_model.compile( optimizer=keras.optimizers.Adam(lr=0.0001, beta_1=0.5, beta_2=0.9), loss={ 'nll' : get_weighted_loss(loss_coeff=1.0), 'conservation' : get_weighted_loss(loss_coeff=1.0), 'entropy' : get_weighted_loss(loss_coeff=1.0) } ) scrambler_model.summary() loss_model.summary() #Training configuration #Define number of training epochs n_epochs = 50 #Define experiment suffix (optional) experiment_suffix = "" #Define anneal function def _anneal_func(val, epoch, n_epochs=n_epochs) : if epoch in [0] : return 1.0 return 0.0 architecture_str = "resnet_" + str(resnet_n_groups) + "_" + str(resnet_n_resblocks_per_group) + "_" + str(resnet_n_channels) + "_" + str(resnet_window_size) + "_" + str(resnet_drop_rate).replace(".", "") model_name = "autoscrambler_dataset_" + dataset_name + "_sample_mode_" + sample_mode + "_n_samples_" + str(n_samples) + "_" + architecture_str + "_n_epochs_" + str(n_epochs) + "_target_bits_" + 
str(target_bits).replace(".", "") + experiment_suffix print("Model save name = " + model_name) #Execute training procedure callbacks =[ #ModelCheckpoint("model_checkpoints/" + model_name + "_epoch_{epoch:02d}.hdf5", monitor='val_loss', mode='min', period=10, save_weights_only=True), EpochVariableCallback(anneal_coeff, _anneal_func) ] s_train = np.zeros((x_train.shape[0], 1)) s_test = np.zeros((x_test.shape[0], 1)) aparent_l_train = np.zeros((x_train.shape[0], 13)) aparent_l_train[:, 4] = 1. aparent_l_test = np.zeros((x_test.shape[0], 13)) aparent_l_test[:, 4] = 1. aparent_d_train = np.ones((x_train.shape[0], 1)) aparent_d_test = np.ones((x_test.shape[0], 1)) # train the autoencoder train_history = loss_model.fit( [s_train, x_train, aparent_l_train, aparent_d_train], [s_train, s_train, s_train], shuffle=True, epochs=n_epochs, batch_size=batch_size, validation_data=( [s_test, x_test, aparent_l_test, aparent_d_test], [s_test, s_test, s_test] ), callbacks=callbacks ) f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(3 * 4, 3)) n_epochs_actual = len(train_history.history['nll_loss']) ax1.plot(np.arange(1, n_epochs_actual + 1), train_history.history['nll_loss'], linewidth=3, color='green') ax1.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_nll_loss'], linewidth=3, color='orange') plt.sca(ax1) plt.xlabel("Epochs", fontsize=14) plt.ylabel("NLL", fontsize=14) plt.xlim(1, n_epochs_actual) plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12) plt.yticks(fontsize=12) ax2.plot(np.arange(1, n_epochs_actual + 1), train_history.history['entropy_loss'], linewidth=3, color='green') ax2.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_entropy_loss'], linewidth=3, color='orange') plt.sca(ax2) plt.xlabel("Epochs", fontsize=14) plt.ylabel("Entropy Loss", fontsize=14) plt.xlim(1, n_epochs_actual) plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12) plt.yticks(fontsize=12) ax3.plot(np.arange(1, n_epochs_actual + 1), 
train_history.history['conservation_loss'], linewidth=3, color='green') ax3.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_conservation_loss'], linewidth=3, color='orange') plt.sca(ax3) plt.xlabel("Epochs", fontsize=14) plt.ylabel("Conservation Loss", fontsize=14) plt.xlim(1, n_epochs_actual) plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12) plt.yticks(fontsize=12) plt.tight_layout() plt.show() # Save model and weights save_dir = 'saved_models' if not os.path.isdir(save_dir): os.makedirs(save_dir) model_path = os.path.join(save_dir, model_name + '.h5') scrambler_model.save(model_path) print('Saved scrambler model at %s ' % (model_path)) #Load models save_dir = 'saved_models' if not os.path.isdir(save_dir): os.makedirs(save_dir) model_path = os.path.join(save_dir, model_name + '.h5') scrambler_model = load_model(model_path, custom_objects={ 'st_sampled_softmax' : st_sampled_softmax }) print('Loaded scrambler model %s ' % (model_path)) #Load models save_dir = 'saved_models' if not os.path.isdir(save_dir): os.makedirs(save_dir) model_path = os.path.join(save_dir, model_name + '.h5') scrambler_model.load_weights(model_path, by_name=True) print('Loaded scrambler model %s ' % (model_path)) #Visualize a few reconstructed sequence patterns sequence_template = 'CTTCCGATCTNNNNNNNNNNNNNNNNNNNNCATTACTCGCATCCANNNNNNNNNNNNNNNNNNNNNNNNNANTAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCCAATTAAGCCNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCTAC' save_examples = [2, 3, 4, 5, 6, 7] s_test = np.zeros((x_test.shape[0], 1)) aparent_l_test = np.zeros((x_test.shape[0], 13)) aparent_l_test[:, 4] = 1. 
aparent_d_test = np.ones((x_test.shape[0], 1)) _, pwm_test, sample_test, _ = scrambler_model.predict_on_batch(x=[x_test[:32], s_test[:32]]) for plot_i in range(0, 10) : print("Test sequence " + str(plot_i) + ":") y_test_hat_ref = predictor.predict(x=[np.expand_dims(np.expand_dims(x_test[plot_i, 0, :, :], axis=0), axis=-1), aparent_l_test[:1], aparent_d_test[:1]], batch_size=1)[0][0, 0] y_test_hat = predictor.predict(x=[np.expand_dims(sample_test[plot_i, :, 0, :, :], axis=-1), aparent_l_test[:32], aparent_d_test[:32]], batch_size=32)[0][:10, 0].tolist() print(" - Prediction (original) = " + str(round(y_test_hat_ref, 2))[:4]) print(" - Predictions (scrambled) = " + str([float(str(round(y_test_hat[i], 2))[:4]) for i in range(len(y_test_hat))])) save_figs = False if save_examples is not None and plot_i in save_examples : save_figs = True plot_dna_logo(x_test[plot_i, 0, :, :], sequence_template=sequence_template, figsize=(14, 0.65), plot_start=0, plot_end=205, plot_sequence_template=True, save_figs=save_figs, fig_name=model_name + "_test_ix_" + str(plot_i)) plot_dna_logo(pwm_test[plot_i, 0, :, :], sequence_template=sequence_template, figsize=(14, 0.65), plot_start=0, plot_end=205, plot_sequence_template=True, save_figs=save_figs, fig_name=model_name + "_test_ix_" + str(plot_i)) #Visualize a few reconstructed images s_test = np.zeros((x_test.shape[0], 1)) _, pwm_test, sample_test, importance_scores_test = scrambler_model.predict(x=[x_test, s_test], batch_size=32, verbose=True) #Save predicted importance scores np.save(model_name + "_importance_scores_test", importance_scores_test) #Calculate original and scrambled predictions aparent_l_test = np.zeros((x_test.shape[0], 13)) aparent_l_test[:, 4] = 1. 
aparent_d_test = np.ones((x_test.shape[0], 1)) y_test_hats = [] y_test_hats_scrambled = [] for i in range(x_test.shape[0]) : y_test_hat_ref = predictor.predict(x=[np.expand_dims(np.expand_dims(x_test[i, 0, :, :], axis=0), axis=-1), aparent_l_test[:1], aparent_d_test[:1]], batch_size=1)[0][0, 0] y_test_hat = np.mean(predictor.predict(x=[np.expand_dims(sample_test[i, :, 0, :, :], axis=-1), aparent_l_test[:32], aparent_d_test[:32]], batch_size=32)[0][:, 0]) y_test_hats.append(y_test_hat_ref) y_test_hats_scrambled.append(y_test_hat) y_test_hat = np.array(y_test_hats) y_test_hat_scrambled = np.array(y_test_hats_scrambled) from scipy.stats import pearsonr save_figs = True r_val, _ = pearsonr(y_test_hat, y_test_hat_scrambled) left_kl_divs = y_test_hat * np.log(y_test_hat / y_test_hat_scrambled) + (1. - y_test_hat) * np.log((1. - y_test_hat) / (1. - y_test_hat_scrambled)) right_kl_divs = y_test_hat_scrambled * np.log(y_test_hat_scrambled / y_test_hat) + (1. - y_test_hat_scrambled) * np.log((1. - y_test_hat_scrambled) / (1. - y_test_hat)) mean_kl_div = np.mean(left_kl_divs + right_kl_divs) f = plt.figure(figsize=(4, 4)) plt.scatter(y_test_hat, y_test_hat_scrambled, color='black', s=5, alpha=0.25) plt.xlim(0, 1) plt.ylim(0, 1) plt.xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], [0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=14) plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], [0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=14) plt.xlabel("Original Prediction", fontsize=14) plt.ylabel("Scrambled Prediction", fontsize=14) plt.title("R^2 = " + str(round(r_val**2, 2)) + ", KL = " + str(round(mean_kl_div, 2)), fontsize=14) plt.tight_layout() if save_figs : plt.savefig(model_name + "_test_scatter.png", transparent=True, dpi=300) plt.savefig(model_name + "_test_scatter.eps") plt.show() ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png) # Automated Machine Learning _**Classification of credit card fraudulent transactions with local run **_ ## Contents 1. [Introduction](#Introduction) 1. [Setup](#Setup) 1. [Train](#Train) 1. [Results](#Results) 1. [Test](#Tests) 1. [Explanation](#Explanation) 1. [Acknowledgements](#Acknowledgements) ## Introduction In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge. This notebook is using the local machine compute to train the model. If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. In this notebook you will learn how to: 1. Create an experiment using an existing workspace. 2. Configure AutoML using `AutoMLConfig`. 3. Train the model. 4. Explore the results. 5. Test the fitted model. 6. Explore any model's explanation and explore feature importance in azure portal. 7. Create an AKS cluster, deploy the webservice of AutoML scoring model and the explainer model to the AKS and consume the web service. ## Setup As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments. 
``` import logging from matplotlib import pyplot as plt import pandas as pd import azureml.core from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace from azureml.core.dataset import Dataset from azureml.train.automl import AutoMLConfig from azureml.interpret import ExplanationClient ``` This sample notebook may use features that are not available in previous versions of the Azure ML SDK. ``` print("This notebook was created using version 1.20.0 of the Azure ML SDK") print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK") ws = Workspace.from_config() # choose a name for experiment experiment_name = 'automl-classification-ccard-local' experiment=Experiment(ws, experiment_name) output = {} output['Subscription ID'] = ws.subscription_id output['Workspace'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Experiment Name'] = experiment.name pd.set_option('display.max_colwidth', -1) outputDf = pd.DataFrame(data = output, index = ['']) outputDf.T ``` ### Load Data Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model. ``` data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv" dataset = Dataset.Tabular.from_delimited_files(data) training_data, validation_data = dataset.random_split(percentage=0.8, seed=223) label_column_name = 'Class' ``` ## Train Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment. |Property|Description| |-|-| |**task**|classification or regression| |**primary_metric**|This is the metric that you want to optimize. 
Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>| |**enable_early_stopping**|Stop the run if the metric score is not showing improvement.| |**n_cross_validations**|Number of cross validation splits.| |**training_data**|Input dataset, containing both features and label column.| |**label_column_name**|The name of the label column.| **_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric) ``` automl_settings = { "n_cross_validations": 3, "primary_metric": 'average_precision_score_weighted', "experiment_timeout_hours": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ability to find the best model possible "verbosity": logging.INFO, "enable_stack_ensemble": False } automl_config = AutoMLConfig(task = 'classification', debug_log = 'automl_errors.log', training_data = training_data, label_column_name = label_column_name, **automl_settings ) ``` Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. In this example, we specify `show_output = True` to print currently running iterations to the console. ``` local_run = experiment.submit(automl_config, show_output = True) # If you need to retrieve a run that already started, use the following code #from azureml.train.automl.run import AutoMLRun #local_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>') local_run ``` ## Results #### Widget for Monitoring Runs The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. 
The widget will refresh once per minute, so you should see the graph update as child runs complete. **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details ``` from azureml.widgets import RunDetails RunDetails(local_run).show() ``` ### Analyze results #### Retrieve the Best Model Below we select the best pipeline from our iterations. The `get_output` method on `automl_classifier` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. ``` best_run, fitted_model = local_run.get_output() fitted_model ``` #### Print the properties of the model The fitted_model is a python object and you can read the different properties of the object. ## Tests Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values. ``` # convert the test data to dataframe X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe() y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe() # call the predict functions on the model y_pred = fitted_model.predict(X_test_df) y_pred ``` ### Calculate metrics for the prediction Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values from the trained model that was returned. 
``` from sklearn.metrics import confusion_matrix import numpy as np import itertools cf =confusion_matrix(y_test_df.values,y_pred) plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest') plt.colorbar() plt.title('Confusion Matrix') plt.xlabel('Predicted') plt.ylabel('Actual') class_labels = ['False','True'] tick_marks = np.arange(len(class_labels)) plt.xticks(tick_marks,class_labels) plt.yticks([-0.5,0,1,1.5],['','False','True','']) # plotting text value inside cells thresh = cf.max() / 2. for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])): plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black') plt.show() ``` ## Explanation In this section, we will show how to compute model explanations and visualize the explanations using azureml-interpret package. We will also show how to run the automl model and the explainer model through deploying an AKS web service. Besides retrieving an existing model explanation for an AutoML model, you can also explain your AutoML model with different test data. The following steps will allow you to compute and visualize engineered feature importance based on your test data. ### Run the explanation #### Download the engineered feature importance from artifact store You can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features. 
``` client = ExplanationClient.from_run(best_run) engineered_explanations = client.download_model_explanation(raw=False) print(engineered_explanations.get_feature_importance_dict()) print("You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + best_run.get_portal_url()) ``` #### Download the raw feature importance from artifact store You can use ExplanationClient to download the raw feature explanations from the artifact store of the best_run. You can also use azure portal url to view the dash board visualization of the feature importance values of the raw features. ``` raw_explanations = client.download_model_explanation(raw=True) print(raw_explanations.get_feature_importance_dict()) print("You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + best_run.get_portal_url()) ``` #### Retrieve any other AutoML model from training ``` automl_run, fitted_model = local_run.get_output(metric='accuracy') ``` #### Setup the model explanations for AutoML models The fitted_model can generate the following which will be used for getting the engineered explanations using automl_setup_model_explanations:- 1. Featurized data from train samples/test samples 2. Gather engineered name lists 3. Find the classes in your labeled column in classification scenarios The automl_explainer_setup_obj contains all the structures from above list. 
``` X_train = training_data.drop_columns(columns=[label_column_name]) y_train = training_data.keep_columns(columns=[label_column_name], validate=True) X_test = validation_data.drop_columns(columns=[label_column_name]) from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, X=X_train, X_test=X_test, y=y_train, task='classification') ``` #### Initialize the Mimic Explainer for feature importance For explaining the AutoML models, use the MimicWrapper from azureml-interpret package. The MimicWrapper can be initialized with fields in automl_explainer_setup_obj, your workspace and a surrogate model to explain the AutoML model (fitted_model here). The MimicWrapper also takes the automl_run object where engineered explanations will be uploaded. ``` from interpret.ext.glassbox import LGBMExplainableModel from azureml.interpret.mimic_wrapper import MimicWrapper explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator, explainable_model=automl_explainer_setup_obj.surrogate_model, init_dataset=automl_explainer_setup_obj.X_transform, run=automl_run, features=automl_explainer_setup_obj.engineered_feature_names, feature_maps=[automl_explainer_setup_obj.feature_map], classes=automl_explainer_setup_obj.classes, explainer_kwargs=automl_explainer_setup_obj.surrogate_model_params) ``` #### Use Mimic Explainer for computing and visualizing engineered feature importance The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the generated engineered features. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features. 
``` # Compute the engineered explanations engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform) print(engineered_explanations.get_feature_importance_dict()) print("You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + automl_run.get_portal_url()) ``` #### Use Mimic Explainer for computing and visualizing raw feature importance The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the original features in your data. You can also use azure portal url to view the dash board visualization of the feature importance values of the original/raw features. ``` # Compute the raw explanations raw_explanations = explainer.explain(['local', 'global'], get_raw=True, raw_feature_names=automl_explainer_setup_obj.raw_feature_names, eval_dataset=automl_explainer_setup_obj.X_test_transform, raw_eval_dataset=automl_explainer_setup_obj.X_test_raw) print(raw_explanations.get_feature_importance_dict()) print("You can visualize the raw explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + automl_run.get_portal_url()) ``` #### Initialize the scoring Explainer, save and upload it for later use in scoring explanation ``` from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer import joblib # Initialize the ScoringExplainer scoring_explainer = TreeScoringExplainer(explainer.explainer, feature_maps=[automl_explainer_setup_obj.feature_map]) # Pickle scoring explainer locally to './scoring_explainer.pkl' scoring_explainer_file_name = 'scoring_explainer.pkl' with open(scoring_explainer_file_name, 'wb') as stream: joblib.dump(scoring_explainer, stream) # Upload the scoring explainer to the automl run automl_run.upload_file('outputs/scoring_explainer.pkl', scoring_explainer_file_name) ``` ### Deploying the scoring and explainer models to a web service to Azure 
Kubernetes Service (AKS) We use the TreeScoringExplainer from azureml.interpret package to create the scoring explainer which will be used to compute the raw and engineered feature importances at the inference time. In the cell below, we register the AutoML model and the scoring explainer with the Model Management Service. ``` # Register trained automl model present in the 'outputs' folder in the artifacts original_model = automl_run.register_model(model_name='automl_model', model_path='outputs/model.pkl') scoring_explainer_model = automl_run.register_model(model_name='scoring_explainer', model_path='outputs/scoring_explainer.pkl') ``` #### Create the conda dependencies for setting up the service We need to download the conda dependencies using the automl_run object. ``` from azureml.automl.core.shared import constants from azureml.core.environment import Environment automl_run.download_file(constants.CONDA_ENV_FILE_PATH, 'myenv.yml') myenv = Environment.from_conda_specification(name="myenv", file_path="myenv.yml") myenv ``` #### Write the Entry Script Write the script that will be used to predict on your model ``` %%writefile score.py import joblib import pandas as pd from azureml.core.model import Model from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations def init(): global automl_model global scoring_explainer # Retrieve the path to the model file using the model name # Assume original model is named original_prediction_model automl_model_path = Model.get_model_path('automl_model') scoring_explainer_path = Model.get_model_path('scoring_explainer') automl_model = joblib.load(automl_model_path) scoring_explainer = joblib.load(scoring_explainer_path) def run(raw_data): data = pd.read_json(raw_data, orient='records') # Make prediction predictions = automl_model.predict(data) # Setup for inferencing explanations automl_explainer_setup_obj = automl_setup_model_explanations(automl_model, X_test=data, task='classification') # 
Retrieve model explanations for engineered explanations engineered_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform) # Retrieve model explanations for raw explanations raw_local_importance_values = scoring_explainer.explain(automl_explainer_setup_obj.X_test_transform, get_raw=True) # You can return any data type as long as it is JSON-serializable return {'predictions': predictions.tolist(), 'engineered_local_importance_values': engineered_local_importance_values, 'raw_local_importance_values': raw_local_importance_values} ``` #### Create the InferenceConfig Create the inference config that will be used when deploying the model ``` from azureml.core.model import InferenceConfig inf_config = InferenceConfig(entry_script='score.py', environment=myenv) ``` #### Provision the AKS Cluster This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it. ``` from azureml.core.compute import ComputeTarget, AksCompute from azureml.core.compute_target import ComputeTargetException # Choose a name for your cluster. 
aks_name = 'scoring-explain' # Verify that cluster does not exist already try: aks_target = ComputeTarget(workspace=ws, name=aks_name) print('Found existing cluster, use it.') except ComputeTargetException: prov_config = AksCompute.provisioning_configuration(vm_size='STANDARD_D3_V2') aks_target = ComputeTarget.create(workspace=ws, name=aks_name, provisioning_configuration=prov_config) aks_target.wait_for_completion(show_output=True) ``` #### Deploy web service to AKS ``` # Set the web service configuration (using default here) from azureml.core.webservice import AksWebservice from azureml.core.model import Model aks_config = AksWebservice.deploy_configuration() aks_service_name ='model-scoring-local-aks' aks_service = Model.deploy(workspace=ws, name=aks_service_name, models=[scoring_explainer_model, original_model], inference_config=inf_config, deployment_config=aks_config, deployment_target=aks_target) aks_service.wait_for_deployment(show_output = True) print(aks_service.state) ``` #### View the service logs ``` aks_service.get_logs() ``` #### Consume the web service using run method to do the scoring and explanation of scoring. We test the web sevice by passing data. Run() method retrieves API keys behind the scenes to make sure that call is authenticated. ``` # Serialize the first row of the test data into json X_test_json = X_test_df[:1].to_json(orient='records') print(X_test_json) # Call the service to get the predictions and the engineered and raw explanations output = aks_service.run(X_test_json) # Print the predicted value print('predictions:\n{}\n'.format(output['predictions'])) # Print the engineered feature importances for the predicted value print('engineered_local_importance_values:\n{}\n'.format(output['engineered_local_importance_values'])) # Print the raw feature importances for the predicted value print('raw_local_importance_values:\n{}\n'.format(output['raw_local_importance_values'])) ``` #### Clean up Delete the service. 
``` aks_service.delete() ``` ## Acknowledgements This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project Please cite the following works: • Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015 • Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon • Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE • Dal Pozzolo, Andrea. Adaptive Machine learning for credit card fraud detection ULB MLG PhD thesis (supervised by G. Bontempi) • Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca.
Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information Fusion, 41, 182-194, 2018, Elsevier • Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5, 4, 285-300, 2018, Springer International Publishing
github_jupyter
## Common plotting pitfalls that get worse with large data When working with large datasets, visualizations are often the only way available to understand the properties of that dataset -- there are simply too many data points to examine each one! Thus it is very important to be aware of some common plotting problems that are minor inconveniences with small datasets but very serious problems with larger ones. We'll cover: 1. [Overplotting](#1.-Overplotting) 2. [Oversaturation](#2.-Oversaturation) 3. [Undersampling](#3.-Undersampling) 4. [Undersaturation](#4.-Undersaturation) 5. [Underutilized range](#5.-Underutilized-range) 6. [Nonuniform colormapping](#6.-Nonuniform-colormapping) You can [skip to the end](#Summary) if you just want to see an illustration of these problems. This notebook requires [HoloViews](http://holoviews.org), [colorcet](https://github.com/bokeh/colorcet), and matplotlib, and optionally scikit-image, which can be installed with: ``` conda install -c bokeh -c ioam holoviews colorcet matplotlib scikit-image ``` We'll first load the plotting libraries and set up some defaults: ``` import numpy as np np.random.seed(42) import holoviews as hv from holoviews.operation.datashader import datashade from holoviews import opts, dim hv.extension('matplotlib') from colorcet import fire datashade.cmap=fire[50:] opts.defaults( opts.Image(cmap="gray_r", axiswise=True), opts.Points(cmap="bwr", edgecolors='k', s=50, alpha=1.0), # Remove color_index=2 opts.RGB(bgcolor="black", show_grid=False), opts.Scatter3D(color=dim('c'), fig_size=250, cmap='bwr', edgecolor='k', s=50, alpha=1.0)) #color_index=3 ``` ### 1. Overplotting Let's consider plotting some 2D data points that come from two separate categories, here plotted as blue and red in **A** and **B** below. 
When the two categories are overlaid, the appearance of the result can be very different depending on which one is plotted first: ``` def blue_points(offset=0.5,pts=300): blues = (np.random.normal( offset,size=pts), np.random.normal( offset,size=pts), -1 * np.ones((pts))) return hv.Points(blues, vdims=['c']).opts(color=dim('c')) def red_points(offset=0.5,pts=300): reds = (np.random.normal(-offset,size=pts), np.random.normal(-offset,size=pts), 1*np.ones((pts))) return hv.Points(reds, vdims=['c']).opts(color=dim('c')) blues, reds = blue_points(), red_points() blues + reds + (reds * blues) + (blues * reds) ``` Plots **C** and **D** shown the same distribution of points, yet they give a very different impression of which category is more common, which can lead to incorrect decisions based on this data. Of course, both are equally common in this case, so neither **C** nor **D** accurately reflects the data. The cause for this problem is simply occlusion: ``` hmap = hv.HoloMap({0:blues,0.000001:reds,1:blues,2:reds}, kdims=['level']) hv.Scatter3D(hmap.table(), kdims=['x','y','level'], vdims=['c']) ``` Occlusion of data by other data is called **overplotting** or **overdrawing**, and it occurs whenever a datapoint or curve is plotted on top of another datapoint or curve, obscuring it. It's thus a problem not just for scatterplots, as here, but for curve plots, 3D surface plots, 3D bar graphs, and any other plot type where data can be obscured. ### 2. Oversaturation You can reduce problems with overplotting by using transparency/opacity, via the alpha parameter provided to control opacity in most plotting programs. E.g. 
if alpha is 0.1, full color saturation will be achieved only when 10 points overlap, reducing the effects of plot ordering but making it harder to see individual points: ``` layout = blues + reds + (reds * blues) + (blues * reds) layout.opts(opts.Points(s=50, alpha=0.1)) ``` Here **C&nbsp;**and **D&nbsp;**look very similar (as they should, since the distributions are identical), but there are still a few locations with **oversaturation**, a problem that will occur when more than 10 points overlap. In this example the oversaturated points are located near the middle of the plot, but the only way to know whether they are there would be to plot both versions and compare, or to examine the pixel values to see if any have reached full saturation (a necessary but not sufficient condition for oversaturation). Locations where saturation has been reached have problems similar to overplotting, because only the last 10 points plotted will affect the final color (for alpha of 0.1). Worse, even if one has set the alpha value to approximately or usually avoid oversaturation, as in the plot above, the correct value depends on the dataset. If there are more points overlapping in that particular region, a manually adjusted alpha setting that worked well for a previous dataset will systematically misrepresent the new dataset: ``` blues, reds = blue_points(pts=600), red_points(pts=600) layout = blues + reds + (reds * blues) + (blues * reds) layout.opts(opts.Points(alpha=0.1)) ``` Here **C&nbsp;**and **D&nbsp;**again look qualitatively different, yet still represent the same distributions. Since we're assuming that the point of the visualization is to reveal the underlying dataset, having to tune visualization parameters manually based on the properties of the dataset itself is a serious problem. To make it even more complicated, the correct alpha also depends on the dot size, because smaller dots have less overlap for the same dataset. 
With smaller dots, **C&nbsp;**and **D&nbsp;**look more similar, but the color of the dots is now difficult to see in all cases because the dots are too transparent for this size: ``` layout = blues + reds + (reds * blues) + (blues * reds) layout.opts(opts.Points(s=10, alpha=0.1, edgecolor=None)) ``` As you can see, it is very difficult to find settings for the dotsize and alpha parameters that correctly reveal the data, even for relatively small and obvious datasets like these. With larger datasets with unknown contents, it is difficult to detect that such problems are occuring, leading to false conclusions based on inappropriately visualized data. ### 3. Undersampling With a single category instead of the multiple categories shown above, oversaturation simply obscures spatial differences in density. For instance, 10, 20, and 2000 single-category points overlapping will all look the same visually, for alpha=0.1. Let's again consider an example that has a sum of two normal distributions slightly offset from one another, but no longer using color to separate them into categories: ``` def gaussians(specs=[(1.5,0,1.0),(-1.5,0,1.0)],num=100): """ A concatenated list of points taken from 2D Gaussian distributions. Each distribution is specified as a tuple (x,y,s), where x,y is the mean and s is the standard deviation. Defaults to two horizontally offset unit-mean Gaussians. 
""" np.random.seed(1) dists = [(np.random.normal(x,s,num), np.random.normal(y,s,num)) for x,y,s in specs] return np.hstack([d[0] for d in dists]), np.hstack([d[1] for d in dists]) points = (hv.Points(gaussians(num=600), label="600 points", group="Small dots") + hv.Points(gaussians(num=60000), label="60000 points", group="Small dots") + hv.Points(gaussians(num=600), label="600 points", group="Tiny dots") + hv.Points(gaussians(num=60000), label="60000 points", group="Tiny dots")) points.opts( opts.Points('Small_dots', s=1, alpha=1), opts.Points('Tiny_dots', s=0.1, alpha=0.1)) ``` Just as shown for the multiple-category case above, finding settings to avoid overplotting and oversaturation is difficult. The "Small dots" setting (size 0.1, full alpha) works fairly well for a sample of 600 points **A,** but it has serious overplotting issues for larger datasets, obscuring the shape and density of the distribution **B.** Using the "Tiny dots" setting (10 times smaller dots, alpha 0.1) works well for the larger dataset **D,** but not at all for the 600-point dataset **C.** Clearly, not all of these settings are accurately conveying the underlying distribution, as they all appear quite different from one another. Similar problems occur for the same size of dataset, but with greater or lesser levels of overlap between points, which of course varies with every new dataset. In any case, as dataset size increases, at some point plotting a full scatterplot like any of these will become impractical with current plotting software. At this point, people often simply subsample their dataset, plotting 10,000 or perhaps 100,000 randomly selected datapoints. But as panel **A&nbsp;**shows, the shape of an **undersampled** distribution can be very difficult or impossible to make out, leading to incorrect conclusions about the distribution. 
Such problems can occur even when taking very large numbers of samples, if examining sparsely populated regions of the space, which will approximate panel **A&nbsp;**for some plot settings and panel **C&nbsp;**for others. The actual shape of the distribution is only visible if sufficient datapoints are available in that region *and* appropriate plot settings are used, as in **D,** but ensuring that both conditions are true is a quite difficult process of trial and error, making it very likely that important features of the dataset will be missed. To avoid undersampling large datasets, researchers often use 2D histograms visualized as heatmaps, rather than scatterplots showing individual points. A heatmap has a fixed-size grid regardless of the dataset size, so that they can make use of all the data. Heatmaps effectively approximate a probability density function over the specified space, with coarser heatmaps averaging out noise or irrelevant variations to reveal an underlying distribution, and finer heatmaps able to represent more details in the distribution. Let's look at some heatmaps with different numbers of bins for the same two-Gaussians distribution: ``` def heatmap(coords,bins=10,offset=0.0,transform=lambda d,m:d, label=None): """ Given a set of coordinates, bins them into a 2d histogram grid of the specified size, and optionally transforms the counts and/or compresses them into a visible range starting at a specified offset between 0 and 1.0. 
""" hist,xs,ys = np.histogram2d(coords[0], coords[1], bins=bins) counts = hist[:,::-1].T transformed = transform(counts,counts!=0) span = transformed.max()-transformed.min() compressed = np.where(counts!=0,offset+(1.0-offset)*transformed/span,0) args = dict(label=label) if label else {} return hv.Image(compressed,bounds=(xs[-1],ys[-1],xs[1],ys[1]),**args) hv.Layout([heatmap(gaussians(num=60000),bins) for bins in [8,20,200]]) ``` As you can see, a too-coarse binning grid **A&nbsp;**cannot represent this distribution faithfully, but with enough bins **C,** the heatmap will approximate a tiny-dot scatterplot like plot **D&nbsp;**in the previous figure. For intermediate grid sizes **B&nbsp;**the heatmap can average out the effects of undersampling; **B&nbsp;**is actually a more faithful representation of the *distribution* than **C&nbsp;**is (which we know is two offset 2D Gaussians), while **C&nbsp;**more faithfully represents the *sampling* (i.e., the individual points drawn from this distribution). Thus choosing a good binning grid size for a heatmap does take some expertise and knowledge of the goals of the visualization, and it's always useful to look at multiple binning-grid spacings for comparison. Still, at least the binning parameter is something meaningful at the data level (how coarse a view of the data is desired?) rather than just a plotting detail (what size and transparency should I use for the points?) that must be determined arbitrarily. In any case, at least in principle, the heatmap approach can entirely avoid the first three problems above: **overplotting** (since multiple data points sum arithmetically into the grid cell, without obscuring one another), **oversaturation** (because the minimum and maximum counts observed can automatically be mapped to the two ends of a visible color range), and **undersampling** (since the resulting plot size is independent of the number of data points, allowing it to use an unbounded amount of incoming data). 
### 4. Undersaturation Of course, heatmaps come with their own plotting pitfalls. One rarely appreciated issue common to both heatmaps and alpha-based scatterplots is **undersaturation**, where large numbers of data points can be missed entirely because they are spread over many different heatmap bins or many nearly transparent scatter points. To look at this problem, let's again consider a set of multiple 2D Gaussians, but this time with different amounts of spread (standard deviation): ``` dist = gaussians(specs=[(2,2,0.02), (2,-2,0.1), (-2,-2,0.5), (-2,2,1.0), (0,0,3)],num=10000) hv.Points(dist) + hv.Points(dist).opts(s=0.1) + hv.Points(dist).opts(s=0.01, alpha=0.05) ``` Plots **A,** **B,** and **C&nbsp;**are all scatterplots for the same data, which is a sum of 5 Gaussian distributions at different locations and with different standard deviations: 1. Location (2,2): very narrow spread 2. Location (2,-2): narrow spread 3. Location (-2,-2): medium spread 4. Location (-2,2): large spread 5. Location (0,0): very large spread In plot **A,** of course, the very large spread covers up everything else, completely obscuring the structure of this dataset by overplotting. Plots **B&nbsp;**and **C&nbsp;**reveal the structure better, but they required hand tuning and neither one is particularly satisfactory. In **B&nbsp;**there are four clearly visible Gaussians, but all but the largest appear to have the same density of points per pixel, which we know is not the case from how the dataset was constructed, and the smallest is nearly invisible. Each of the five Gaussians has the same number of data points (10000), but the second-largest looks like it has more than the others, and the narrowest one is likely to be overlooked altogether, which is thus a clear example of oversaturation obscuring important features. 
Yet if we try to combat the oversaturation by using transparency in **C,** we now get a clear problem with **undersaturation** -- the "very large spread" Gaussian is now essentially invisible. Again, there are just as many datapoints in that category, but we'd never even know they were there if only looking at **C.** Similar problems occur for a heatmap view of the same data: ``` hv.Layout([heatmap(dist,bins) for bins in [8,20,200]]) ``` Here the narrow-spread distributions lead to pixels with a very high count, and if the other pixels are linearly ramped into the available color range, from zero to that high count value, then the wider-spread values are obscured (as in **B&nbsp;**) or entirely invisible (as in **C&nbsp;**). To avoid undersaturation, you can add an offset to ensure that low-count (but nonzero) bins are mapped into a visible color, with the remaining intensity scale used to indicate differences in counts: ``` hv.Layout([heatmap(dist,bins,offset=0.2) for bins in [8,20,200]]).cols(4) ``` Such mapping entirely avoids undersaturation, since all pixels are either clearly zero (in the background color, i.e. white in this case), or a non-background color taken from the colormap. The widest-spread Gaussian is now clearly visible in all cases. However, the actual structure (5 Gaussians of different spreads) is still not visible. In **A&nbsp;**the problem is clearly too-coarse binning, but in **B&nbsp;**the binning is also somewhat too coarse for this data, since the "very narrow spread" and "narrow spread" Gaussians show up identically, each mapping entirely into a single bin (the two black pixels). **C&nbsp;**shouldn't suffer from too-coarse binning, yet it still looks more like a plot of the "very large spread" distribution alone, than a plot of these five distributions of different spreads, and it is thus still highly misleading despite the correction for undersaturation. ### 5. Underutilized range So, what is the problem in plot **C&nbsp;**above? 
By construction, we've avoided the first four pitfalls: **overplotting**, **oversaturation**, **undersampling**, and **undersaturation**. But the problem is now more subtle: differences in datapoint density are not visible between the five Gaussians, because all or nearly all pixels end up being mapped into either the bottom end of the visible range (light gray), or the top end (black, used only for the single pixel holding the "very narrow spread" distribution). The entire rest of the visible colors in this gray colormap are unused, conveying no information to the viewer about the rich structure that we know this distribution contains. If the data were uniformly distributed over the range from minimum to maximum counts per pixel (0 to 10,000, in this case), then the above plot would work well, but that's not the case for this dataset or for most real-world datasets. So, let's try transforming the data from its default linear representation (integer count values) into something that preserves relative differences in count values but maps them into visually distinct colors. A logarithmic transformation is one common choice: ``` hv.Layout([heatmap(dist,bins,offset=0.2,transform=lambda d,m: np.where(m,np.log1p(d),0)) for bins in [8,20,200]]) ``` Aha! We can now see the full structure of the dataset, with all five Gaussians clearly visible in **B&nbsp;**and **C,** and the relative spreads also clearly visible in **C.** We still have a problem, though. The choice of a logarithmic transform was fairly arbitrary, and it mainly works well because we happened to have used an approximately geometric progression of spread sizes when constructing the example. For large datasets with truly unknown structure, can we have a more principled approach to mapping the dataset values into a visible range? Yes, if we think of the visualization problem in a different way. 
The underlying difficulty in plotting this dataset (as for very many real-world datasets) is that the values in each bin are numerically very different (ranging from 10,000, in the bin for the "very narrow spread" Gaussian, to 1 (for single datapoints from the "very large spread" Gaussian)). Given the 256 gray levels available in a normal monitor (and the similarly limited human ability to detect differences in gray values), numerically mapping the data values into the visible range is not going to work well. But given that we are already backing off from a direct numerical mapping in the above approaches for correcting undersaturation and for doing log transformations, what if we entirely abandon the numerical mapping approach, using the numbers only to form a partial ordering of the data values? Such an approach would be a rank-order plot, preserving order and not magnitudes. For 100 gray values, you can think of it as a percentile-based plot, with the lowest 1% of the data values mapping to the first visible gray value, the next 1% mapping to the next visible gray value, and so on to the top 1% of the data values mapping to the gray value 255 (black in this case). The actual data values would be ignored in such plots, but their relative magnitudes would still determine how they map onto colors on the screen, preserving the structure of the distribution rather than the numerical values. 
We can approximate such a rank-order or percentile encoding using the histogram equalization function from an image-processing package, which makes sure that each gray level is used for about the same number of pixels in the plot: ``` try: from skimage.exposure import equalize_hist eq_hist = lambda d,m: equalize_hist(1000*d,nbins=100000,mask=m) except ImportError: eq_hist = lambda d,m: d print("scikit-image not installed; skipping histogram equalization") hv.Layout([heatmap(dist,bins,transform=eq_hist) for bins in [8,20,200]]) ``` Plot **C** now reveals the full structure that we know was in this dataset, i.e. five Gaussians with different spreads, with no arbitrary parameter choices. (Well, there is a "number of bins" parameter for building the histogram for equalizing, but for integer data like this even that parameter can be eliminated entirely.) The differences in counts between pixels are now very clearly visible, across the full (and very wide) range of counts in the original data. Of course, we've lost the actual counts themselves, and so we can no longer tell just how many datapoints are in the "very narrow spread" pixel in this case. So plot **C** is accurately conveying the structure, but additional information would need to be provided to show the actual counts, by adding a color key mapping from the visible gray values into the actual counts and/or by providing hovering value information. At this point, one could also consider explicitly highlighting hotspots so that they cannot be overlooked. In plots B and C above, the two highest-density pixels are mapped to the two darkest pixel colors, which can reveal problems with your monitor settings if they were adjusted to make dark text appear blacker. Thus on those monitors, the highest values may not be clearly distinguishable from each other or from nearby grey values, which is a possible downside to fully utilizing the dynamic range available. 
But once the data is reliably and automatically mapped into a repeatable, reliable, fully utilized range for display, making explicit adjustments (e.g. based on wanting to make hotspots particularly clear) can be done in a principled way that doesn't depend on the actual data distribution (e.g. by just making the top few pixel values into a different color, or by stretching out those portions of the color map to show the extremes more safely across different monitors). Before getting into such specialized manipulations, there's a big pitfall to avoid first: ### 6. Nonuniform colormapping Let's say you've managed avoid pitfalls 1-5 somehow. However, there is one more problem waiting to catch you at the last stage, ruining all of your work eliminating the other issues: using a perceptually non-uniform colormap. A heatmap requires a colormap before it can be visualized, i.e., a lookup table from a data value (typically a normalized magnitude in the range 0 to 1) to a pixel color. The goal of a scientific visualization is to reveal the underlying properties of the data to your visual system, and to do so it is necessary to choose colors for each pixel that lead the viewer to perceive that data faithfully. Unfortunately, most of the colormaps in common use in plotting programs are highly *non*uniform. 
For instance, in "jet" (the default colormap for matlab and matplotlib until 2015), a large range of data values will all appear in shades of green that are perceptually indistinguishable, and similarly for the yellow regions of their "hot" colormaps: ![hot_jet](https://github.com/pyviz/colorcet/raw/master/doc/images/hot_jet.png) In this image, a good colormap would have "teeth" equally visible at all data values, as for the perceptually uniform equivalents from the [colorcet](https://github.com/bokeh/colorcet) package: ![fire_rainbow](https://github.com/pyviz/colorcet/raw/master/doc/images/fire_rainbow.png) We can easily see these effects if we look at our example dataset after histogram equalization, where all the different data levels are known to be distributed evenly in the array of normalized magnitudes: ``` hv.Layout([heatmap(dist,200,transform=eq_hist,label=cmap).opts(cmap=cmap) for cmap in ["hot","fire"]]).cols(2) ``` Comparing **A&nbsp;** to **B&nbsp;**it should be clear that the "fire" colormap is revealing much more of the data, accurately rendering the density differences between each of the different blobs. The unsuitable "hot" colormap is mapping all of the high density regions to perceptually indistinguishable shades of bright yellow/white, giving an "oversaturated" appearance even though we know the underlying heatmap array is *not* oversaturated (by construction). Luckily it is easy to avoid this problem; just use one of the 50 perceptually uniform colormaps available in the [colorcet](https://github.com/bokeh/colorcet) package, one of the four shipped with matplotlib [(viridis, plasma, inferno, or magma)](https://bids.github.io/colormap), or the Parula colormap shipped with Matlab. ## Summary Starting with plots of specific datapoints, we showed how typical visualization techniques will systematically misrepresent the distribution of those points. 
Here's an example of each of those six problems, all for the same distribution: ``` layout = (hv.Points(dist,label="1. Overplotting") + hv.Points(dist,label="2. Oversaturation").opts(s=0.1,alpha=0.5) + hv.Points((dist[0][::200],dist[1][::200]),label="3. Undersampling").opts(s=2,alpha=0.5) + hv.Points(dist,label="4. Undersaturation").opts(s=0.01,alpha=0.05) + heatmap(dist,200,offset=0.2,label="5. Underutilized dynamic range") + heatmap(dist,200,transform=eq_hist,label="6. Nonuniform colormapping").opts(cmap="hot")) layout.opts( opts.Points(axiswise=False), opts.Layout(sublabel_format="", tight=True)).cols(3) ``` Here we could avoid each of these problems by hand, using trial and error based on our knowledge about the underlying dataset, since we created it. But for big data in general, these issues are major problems, because you don't know what the data *should* look like. Thus: #### For big data, you don't know when the viz is lying I.e., visualization is supposed to help you explore and understand your data, but if your visualizations are systematically misrepresenting your data because of **overplotting**, **oversaturation**, **undersampling**, **undersaturation**, **underutilized range**, and **nonuniform colormapping**, then you won't be able to discover the real qualities of your data and will be unable to make the right decisions. Luckily, using the systematic approach outlined in this discussion, you can avoid *all* of these pitfalls, allowing you to render your data faithfully without requiring *any* "magic parameters" that depend on your dataset: ``` heatmap(dist,200,transform=eq_hist).opts(cmap="fire") ``` ### [Datashader](https://github.com/bokeh/datashader) The steps above show how to avoid the six main plotting pitfalls by hand, but it can be awkward and relatively slow to do so. Luckily there is a new Python library available to automate and optimize these steps, named [Datashader](https://github.com/bokeh/datashader). 
Datashader avoids users having to make dataset-dependent decisions and parameter settings when visualizing a new dataset. Datashader makes it practical to create accurate visualizations of datasets too large to understand directly, up to a billion points on a normal laptop and larger datasets on a compute cluster. As a simple teaser, the above steps can be expressed very concisely using the Datashader interface provided by [HoloViews](http://holoviews.org): ``` hv.output(size=200) datashade(hv.Points(dist)) ``` Without any change to the settings, the same command will work with dataset sizes too large for most plotting programs, like this 50-million-point version of the distribution: ``` dist = gaussians(specs=[(2,2,0.02), (2,-2,0.1), (-2,-2,0.5), (-2,2,1.0), (0,0,3)], num=10000000) datashade(hv.Points(dist)) ``` See the [Datashader web site](https://raw.githubusercontent.com/bokeh/datashader/master/examples/README.md) for details and examples to help you get started.
github_jupyter
``` # GPU: 32*40 in 9.87s = 130/s # CPU: 32*8 in 31.9s = 8/s import os import sys import numpy as np import mxnet as mx from collections import namedtuple print("OS: ", sys.platform) print("Python: ", sys.version) print("Numpy: ", np.__version__) print("MXNet: ", mx.__version__) !cat /proc/cpuinfo | grep processor | wc -l !nvidia-smi --query-gpu=gpu_name --format=csv Batch = namedtuple('Batch', ['data']) BATCH_SIZE = 32 RESNET_FEATURES = 2048 BATCHES_GPU = 40 BATCHES_CPU = 8 def give_fake_data(batches): """ Create an array of fake data to run inference on""" np.random.seed(0) dta = np.random.rand(BATCH_SIZE*batches, 224, 224, 3).astype(np.float32) return dta, np.swapaxes(dta, 1, 3) def yield_mb(X, batchsize): """ Function yield (complete) mini_batches of data""" for i in range(len(X)//batchsize): yield i, X[i*batchsize:(i+1)*batchsize] # Create batches of fake data fake_input_data_cl, fake_input_data_cf = give_fake_data(BATCHES_GPU) print(fake_input_data_cl.shape, fake_input_data_cf.shape) # Download Resnet weights path='http://data.mxnet.io/models/imagenet/' [mx.test_utils.download(path+'resnet/50-layers/resnet-50-symbol.json'), mx.test_utils.download(path+'resnet/50-layers/resnet-50-0000.params')] # Load model sym, arg_params, aux_params = mx.model.load_checkpoint('resnet-50', 0) # List the last 10 layers all_layers = sym.get_internals() print(all_layers.list_outputs()[-10:]) def predict_fn(classifier, data, batchsize): """ Return features from classifier """ out = np.zeros((len(data), RESNET_FEATURES), np.float32) for idx, dta in yield_mb(data, batchsize): classifier.forward(Batch(data=[mx.nd.array(dta)])) out[idx*batchsize:(idx+1)*batchsize] = classifier.get_outputs()[0].asnumpy().squeeze() return out ``` ## 1. 
GPU ``` # Get last layer fe_sym = all_layers['flatten0_output'] # Initialise GPU fe_mod = mx.mod.Module(symbol=fe_sym, context=[mx.gpu(0)], label_names=None) fe_mod.bind(for_training=False, inputs_need_grad=False, data_shapes=[('data', (BATCH_SIZE,3,224,224))]) fe_mod.set_params(arg_params, aux_params) cold_start = predict_fn(fe_mod, fake_input_data_cf, BATCH_SIZE) %%time # GPU: 9.87s features = predict_fn(fe_mod, fake_input_data_cf, BATCH_SIZE) ``` ## 2. CPU ``` # Kill all GPUs ... os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # Get last layer fe_sym = all_layers['flatten0_output'] # Initialise CPU fe_mod = mx.mod.Module(symbol=fe_sym, context=mx.cpu(), label_names=None) fe_mod.bind(for_training=False, inputs_need_grad=False, data_shapes=[('data', (BATCH_SIZE,3,224,224))]) fe_mod.set_params(arg_params, aux_params) # Create batches of fake data fake_input_data_cl, fake_input_data_cf = give_fake_data(BATCHES_CPU) print(fake_input_data_cl.shape, fake_input_data_cf.shape) cold_start = predict_fn(fe_mod, fake_input_data_cf, BATCH_SIZE) %%time # CPU: 31.9s features = predict_fn(fe_mod, fake_input_data_cf, BATCH_SIZE) ```
github_jupyter
# Breast Cancer Diagnosis In this notebook we will apply the LogitBoost algorithm to a toy dataset to classify cases of breast cancer as benign or malignant. ## Imports ``` import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set(style='darkgrid', palette='colorblind', color_codes=True) from sklearn.datasets import load_breast_cancer from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report from sklearn.manifold import TSNE from logitboost import LogitBoost ``` ## Loading the Data The breast cancer dataset imported from [scikit-learn](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_breast_cancer.html) contains 569 samples with 30 real, positive features (including cancer mass attributes like mean radius, mean texture, mean perimeter, et cetera). Of the samples, 212 are labeled "malignant" and 357 are labeled "benign". We load this data into a 569-by-30 feature matrix and a 569-dimensional target vector. Then we randomly shuffle the data and designate two thirds for training and one third for testing. ``` data = load_breast_cancer() X = data.data y = data.target_names[data.target] n_classes = data.target.size # Shuffle data and split it into training/testing samples test_size = 1 / 3 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, shuffle=True, stratify=y, random_state=0) ``` ## Visualizing the Training Set Although the features are 30-dimensional, we can visualize the training set by using [t-distributed stochastic neighbor embedding](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) (t-SNE) to project the features onto a 2-dimensional space. 
``` tsne = TSNE(n_components=2, random_state=0) X_train_tsne = tsne.fit_transform(X_train) plt.figure(figsize=(10, 8)) mask_benign = (y_train == 'benign') mask_malignant = (y_train == 'malignant') plt.scatter(X_train_tsne[mask_benign, 0], X_train_tsne[mask_benign, 1], marker='s', c='g', label='benign', edgecolor='k', alpha=0.7) plt.scatter(X_train_tsne[mask_malignant, 0], X_train_tsne[mask_malignant, 1], marker='o', c='r', label='malignant', edgecolor='k', alpha=0.7) plt.title('t-SNE plot of the training data') plt.xlabel('1st embedding axis') plt.ylabel('2nd embedding axis') plt.legend(loc='best', frameon=True, shadow=True) plt.tight_layout() plt.show() plt.close() ``` ## Fitting the LogitBoost Model Next, we initialize a LogitBoost classifier and fit it to the training data. By default, LogitBoost uses decision stumps (decision trees with depth 1, i.e., a single split) as its base estimator. ``` lboost = LogitBoost(n_estimators=200, random_state=0) lboost.fit(X_train, y_train) ``` ## Prediction Accuracy As a first indicator of how well the model predicts the correct labels, we can check its accuracy score (number of correct predictions over the number of total predictions) on the training and test data. If the classifier is good, then the accuracy score should be close to 1. ``` y_pred_train = lboost.predict(X_train) y_pred_test = lboost.predict(X_test) accuracy_train = accuracy_score(y_train, y_pred_train) accuracy_test = accuracy_score(y_test, y_pred_test) print('Training accuracy: %.4f' % accuracy_train) print('Test accuracy: %.4f' % accuracy_test) ``` ## Precision and Recall We can also report our LogitBoost model's [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall). 
``` report_train = classification_report(y_train, y_pred_train) report_test = classification_report(y_test, y_pred_test) print('Training\n%s' % report_train) print('Testing\n%s' % report_test) ``` ## Visualizing Accuracy During Boosting ``` iterations = np.arange(1, lboost.n_estimators + 1) staged_accuracy_train = list(lboost.staged_score(X_train, y_train)) staged_accuracy_test = list(lboost.staged_score(X_test, y_test)) plt.figure(figsize=(10, 8)) plt.plot(iterations, staged_accuracy_train, label='Training', marker='.') plt.plot(iterations, staged_accuracy_test, label='Test', marker='.') plt.xlabel('Iteration') plt.ylabel('Accuracy') plt.title('Ensemble accuracy during each boosting iteration') plt.legend(loc='best', shadow=True, frameon=True) plt.tight_layout() plt.show() plt.close() ``` ## Contribution of Each Estimator in the Ensemble Like other ensemble models, the LogitBoost model can suffer from *over-specialization*: estimators added to the ensemble in later boosting iterations make relatively small or even negligible contributions toward improving the overall predictions on the training set. This can be quantified by computing the mean of the absolute prediction of each estimator in the ensemble taken over the training set. ``` contrib_train = lboost.contributions(X_train) plt.figure(figsize=(10, 8)) plt.plot(iterations, contrib_train, lw=2) plt.xlabel('Estimator Number') plt.ylabel('Average Absolute Contribution') plt.title('Average absolute contribution of the estimators in the ensemble') plt.show() plt.close() ``` ## Appendix: System Information This is included for replicability. ``` # sys_info.py is a file in the same directory as these example notebooks: # doc/source/examples import sys_info ```
github_jupyter
Title: Are the Warriors better without Kevin Durant? Date: 2019-06-10 12:00 Tags: python Slug: ab_kd In the media, there have been debates about whether or not the Golden State Warriors (GSW) are better without Kevin Durant (KD). From the eye-test, it's laughable to even suggest this, as he's one of the top 3 players in the league (Lebron, KD, Kawhi). Nonetheless, people argue that ball movement is better without him, and therefore make the GSW more lethal. But, just because the Warriors won a title without KD, does not mean they don't need him more than ever. At the time of writing, the Toronto Raptors lead 3-1 in the Finals! #WeTheNorth 🦖🍁 Using Bayesian estimation, we can test this hypothesis, by comparing two treatment groups, games played with KD and without KD. Bayesian statistics are an excellent tool to reach for when sample sizes are small, as we can introduce explicit assumptions into the model, when there aren't thousands of observations. --- # Primer on Bayesian Statistics <img src="images/dist.png" class="img-responsive"> $$P\left(model\;|\;data\right) = \frac{P\left(data\;|\;model\right)}{P(data)}\; P\left(model\right)$$ --- $$ \text{prior} = P\left(model\right) $$ > The **prior** is our belief in the model given no additional information. In our example, this is the mean win % with KD playing. $$ \text{likelihood} = P\left(data\;|\;model\right) $$ > The **likelihood** is the probability of the data we observed occurring given the model. $$ \text{marginal probability of data} = P(data) $$ > The **marginal probability** of the data is the probability that our data are observed regardless of what model we choose or believe in. $$ \text{posterior} = P\left(model\;|\;data\right) $$ > The **posterior** is our _updated_ belief in the model given the new data we have observed. 
Bayesian statistics are all about updating a prior belief we have about the world with new data, so we're transforming our _prior_ belief into this new _posterior_ belief about the world. <br><br> In this example, this is the GSW mean winning % with KD playing, given the game logs from the past three seasons. Note, a Bayesian approach is different from a Frequentist's. Rather than only testing whether two groups are different, we instead pursue an estimate of _how_ different they are, from the posterior distribution. ## Objective To calculate the distribution of the posterior probability of GSW mean winning % with KD and without KD. Moreover, we can calculate the _delta_ between both probabilities to determine if the mean is statistically different from zero (i.e. no difference with or without him). --- # Observed Data ``` import pandas as pd import numpy as np import scipy.stats as stats import pymc3 as pm from IPython.display import HTML import matplotlib.pyplot as plt %matplotlib inline plt.style.use('fivethirtyeight') from IPython.core.pylabtools import figsize import matplotlib.pylab as pylab params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 10), 'axes.labelsize': 'x-large', 'axes.titlesize':'x-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} pylab.rcParams.update(params) import warnings warnings.simplefilter(action='ignore', category=FutureWarning) ``` As the competition is much higher in the playoffs, let's analyze Playoff vs. Regular Season data separately. We can run one test on the regular season, and one test for the playoffs. Data is from [Basketball Reference](https://www.basketball-reference.com/). 
--- # Regular Season <table class="table"> <thead class="table-responsive table-bordered"> <tr> <th scope="col">Regular Season</th> <th scope="col">With Kevin Durant</th> <th scope="col">No Kevin Durant</th> <th scope="col">Notes</th> </tr> </thead> <tbody> <tr> <td>2019</td> <td>0.69 <br> {'W': 54, 'L': 24} </td> <td>0.75 <br> {'W': 3, 'L': 1} </td> <td>Record is better when KD is out, but small sample size.</td> </tr> <tr> <td>2018</td> <td>0.72 <br> {'W': 49, 'L': 19} </td> <td>0.64 <br> {'W': 9, 'L': 5} </td> <td>Record is better when KD plays</td> </tr> <tr> <td>2017</td> <td>0.82 <br> {'W': 51, 'L': 11} </td> <td>0.80 <br> {'W': 16, 'L': 4} </td> <td>Record is better when KD plays</td> </tr> <tr> <td>Total (3 seasons)</td> <td>0.740 <br> {'W': 154, 'L': 54} </td> <td>0.737 <br> {'W': 28, 'L': 10} </td> <td>Record is better when KD plays</td> </tr> </tbody> </table> Over the last three seasons with the Warriors, KD has missed 38 games regular season games, and played in 208. ``` def occurrences(year, kd=True): '''occurences(2019, kd=True) By default, kd=True means with KD healthy''' # clean data # regular season data = pd.read_csv(f'./data/{year}.txt', sep=',') new_columns = ['Rk', 'G', 'Date', 'Age', 'Tm', 'Away', 'Opp', 'Result', 'GS', 'MP', 'FG', 'FGA', 'FG%', '3P', '3PA', '3P%', 'FT', 'FTA', 'FT%', 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS', 'GmSc', '+/-'] data.columns=new_columns # replace did not dress with inactive data.GS = np.where(data.GS == 'Did Not Dress','Inactive',data.GS) if kd == False: game_logs = list(data[data.GS=='Inactive'].Result) else: game_logs = list(data[data.GS!='Inactive'].Result) results = [game.split(' ')[0] for game in game_logs] occurrences = [1 if result == 'W' else 0 for result in results] return occurrences regular_season_with_kd = occurrences(2019, kd=True)+occurrences(2018, kd=True)+occurrences(2017, kd=True) regular_season_no_kd = occurrences(2019, kd=False)+occurrences(2018, kd=False)+occurrences(2017, 
kd=False) print(f'Observed win % when Kevin Durant plays: {round(np.mean(regular_season_with_kd),4)}') print(f'Observed win % when Kevin Durant does not play: {round(np.mean(regular_season_no_kd),4)}') ``` * Note, we do not know the true win %, only the observed win %. We infer the true quantity from the observed data. * Notice the unequal sample sizes (208 vs. 38), but this is not a problem in Bayesian analysis. We will see the uncertainty of the smaller sample size captured in the posterior distribution. --- ## Bayesian Tests with MCMC * Markov Chain Monte Carlo (MCMC) is a method to find the posterior distribution of our parameter of interest. > This type of algorithm generates Monte Carlo simulations in a way that relies on the Markov property, then accepts these simulations at a certain rate to get the posterior distribution. * We will use [PyMC3](https://docs.pymc.io/), a probabilistic library for Python, to generate MC simulations. * Before seeing any of the data, my prior is that GSW will win between 50% - 90% of their games, because they are an above average basketball team, and no team has ever won more than 72 games. ``` # Instantiate observations_A = regular_season_with_kd observations_B = regular_season_no_kd with pm.Model() as model: # Assume Uniform priors for p_A and p_B p_A = pm.Uniform("p_A", 0.5, .9) p_B = pm.Uniform("p_B", 0.5, .9) # Define the deterministic delta function. This is our unknown of interest.
# Delta is deterministic, no uncertainty beyond p_A and p_B delta = pm.Deterministic("delta", p_A - p_B) # We have two observation datasets: A, B # Posterior distribution is Bernoulli obs_A = pm.Bernoulli("obs_A", p_A, observed=observations_A) obs_B = pm.Bernoulli("obs_B", p_B, observed=observations_B) # Draw samples from the posterior distribution trace = pm.sample(20000) burned_trace=trace[1000:] ``` * Using PyMC3, we generated a trace, or chain of values from the posterior distribution * Generated 20,000 samples from the posterior distribution (20,000 samples / chain / core) Because this algorithm needs to converge, we set a number of tuning steps (1,000) to occur first and where the algorithm should "start exploring." It's good to see the Markov Chains overlap, which suggests convergence. ``` pm.traceplot(trace); # plt.savefig('trace.svg'); df = pm.summary(burned_trace).round(2)[['mean', 'sd', 'hpd_2.5', 'hpd_97.5']] HTML(df.to_html(classes="table table-responsive table-striped table-bordered")) ``` * Unlike with confidence intervals (frequentist), there is a measure of probability with the credible interval. * There is a 95% probability that the true win rate with KD is in the interval (0.68, 0.79). * There is a 95% probability that the true win rate with no KD is in the interval (0.59, 0.85). 
``` p_A_samples = burned_trace["p_A"] p_B_samples = burned_trace["p_B"] delta_samples = burned_trace["delta"] figsize(15, 10) ax = plt.subplot(311) plt.xlim(0, 1) plt.hist(p_A_samples, histtype='stepfilled', bins=25, alpha=0.85, label="posterior of $p_A$", color="#006BB6", density=True) plt.vlines(df.iloc[0]["mean"], 0, 12.5, color="white", alpha=0.5,linestyle="--", label=f'mean') plt.vlines(df.iloc[0]["hpd_2.5"], 0, 1.3, color="black", alpha=0.5,linestyle="--", label='2.5%') plt.vlines(df.iloc[0]["hpd_97.5"], 0, 1.3, color="black", alpha=0.5,linestyle="--", label='97.5%') plt.legend(loc="upper right") plt.title("Regular Season \n Posterior distributions of $p_A$, $p_B$, and delta unknowns \n\n $p_A$: Mean Win % with KD") ax = plt.subplot(312) plt.xlim(0, 1) plt.hist(p_B_samples, histtype='stepfilled', bins=25, alpha=0.85, label="posterior of $p_B$", color="#FDB927", density=True) plt.vlines(df.iloc[1]["mean"], 0, 5.5, color="white", alpha=0.5,linestyle="--", label=f'mean') plt.vlines(df.iloc[1]["hpd_2.5"], 0, .8, color="black", alpha=0.5,linestyle="--", label='2.5%') plt.vlines(df.iloc[1]["hpd_97.5"], 0, .8, color="black", alpha=0.5,linestyle="--", label='97.5%') plt.legend(loc="upper right") plt.title("$p_B$: Mean Win % No KD") ax = plt.subplot(313) plt.xlim(-0.5, 0.5) plt.hist(delta_samples, histtype='stepfilled', bins=30, alpha=0.85, label="posterior of delta", color="#BE3A34", density=True) plt.vlines(df.iloc[2]["mean"], 0, 5, color="white", alpha=0.5,linestyle="--", label=f'mean delta') plt.vlines(df.iloc[2]["hpd_2.5"], 0, 1, color="black", alpha=0.5,linestyle="--", label='2.5%') plt.vlines(df.iloc[2]["hpd_97.5"], 0, 1, color="black", alpha=0.5,linestyle="--", label='97.5%') plt.legend(loc="upper right"); plt.title("$delta$ = $p_A - p_B$") plt.savefig('reg_season.svg'); ``` Note, the 2.5% and 97.5% markers indicate the quantiles for the credible interval, similar to the confidence interval in frequentist statistics. 
--- ## Results * In the third graph, the posterior win rate is 1.2% higher when KD plays in the regular season. * Observe that because we have less data for when KD is out, our posterior distribution of 𝑝𝐵 is wider, implying we are less certain about the true value of 𝑝𝐵 than we are of 𝑝𝐴. The 95% credible interval is much wider for $p_B$, as there is a smaller sample size, for when KD did not play. We are less certain that the GSW wins 73% of the time without KD. * The difference in sample sizes ($N_B$ < $N_A$) naturally fits into Bayesian analysis, whereas you need the same populations for the frequentist approach! ``` # Count the number of samples less than 0, i.e. the area under the curve print("Probability that GSW is worse with Kevin Durant in the regular season: %.2f" % \ np.mean(delta_samples < 0)) print("Probability that GSW is better with Kevin Durant in the regular season: %.2f" % \ np.mean(delta_samples > 0)) ``` The probabilities are pretty close, so we can chalk this up to the Warriors having an experienced supporting cast. There is significant overlap between the distribution of the posterior of pA and the posterior of pB, so one is not better than the other with high probability. The majority of the distribution of delta is around 0, so there is no statistically significant difference between the groups in the regular season. Ideally, we should perform more trials when KD is injured (as each data point for scenario B contributes more inferential power than each additional point for scenario A). One could do a similar analysis for when he played on the Oklahoma City Thunder. --- # Playoffs ## Do superstars shine when the stakes are highest?
<table class="table"> <thead class="table-responsive table-bordered"> <tr> <th scope="col">Playoffs</th> <th scope="col">With Kevin Durant</th> <th scope="col">No Kevin Durant</th> <th scope="col">Notes</th> </tr> </thead> <tbody> <tr> <td>2019</td> <td>0.64 <br> {'W': 7, 'L': 4} </td> <td>0.66 <br> {'W': 6, 'L': 3} </td> <td>Record is marginally better when KD is out, but small sample size. Skewed by Portland series, which GSW won 4-0 with KD injured.</td> </tr> <tr> <td>2018</td> <td>0.76 <br> {'W': 16, 'L': 5} </td> <td>n/a <br> {'W': 0, 'L': 0} </td> <td>KD did not miss any games. Won Championship.</td> </tr> <tr> <td>2017</td> <td>0.82 <br> {'W': 14, 'L': 1} </td> <td>1 <br> {'W': 2, 'L': 0}. Small sample size. </td> <td>Won championship.</td> </tr> <td>Total (3 seasons)</td> <td>0.79 <br> {'W': 37, 'L': 10} </td> <td>0.73 <br> {'W': 8, 'L': 3} </td> <td>Record is better when KD plays</td> </tbody> </table> ``` playoffs_with_kd = occurrences('2019_playoffs', kd=True)+occurrences('2018_playoffs', kd=True)+occurrences('2017_playoffs', kd=True) playoffs_no_kd = occurrences('2019_playoffs', kd=False)+occurrences('2018_playoffs', kd=False)+occurrences('2017_playoffs', kd=False) print(f'Observed win % when Kevin Durant plays: {round(np.mean(playoffs_with_kd),2)}') print(f'Observed win % when Kevin Durant does not play: {round(np.mean(playoffs_no_kd),2)}') ``` Over the last three playoff runs with the Warriors, KD has missed 11, and played in 47. See how the difference is much more pronounced with more data across three seasons. Let's similar if the GSW has a higher win % with KD in the playoffs. 
``` playoff_obs_A = playoffs_with_kd playoff_obs_B = playoffs_no_kd with pm.Model() as playoff_model: playoff_p_A = pm.Uniform("playoff_p_A", 0, 1) playoff_p_B = pm.Uniform("playoff_p_B", 0, 1) playoff_delta = pm.Deterministic("playoff_delta", playoff_p_A - playoff_p_B) playoff_obs_A = pm.Bernoulli("playoff_obs_A", playoff_p_A, observed=playoff_obs_A) playoff_obs_B = pm.Bernoulli("playoff_obs_B", playoff_p_B, observed=playoff_obs_B) playoff_trace = pm.sample(20000) playoff_burned_trace=playoff_trace[1000:] df2 = pm.summary(playoff_burned_trace).round(2)[['mean', 'sd', 'hpd_2.5', 'hpd_97.5']] HTML(df2.to_html(classes="table table-responsive table-striped table-bordered")) playoff_p_A_samples = playoff_burned_trace['playoff_p_A'] playoff_p_B_samples = playoff_burned_trace["playoff_p_B"] playoff_delta_samples = playoff_burned_trace["playoff_delta"] figsize(15, 10) #histogram of posteriors ax = plt.subplot(311) plt.xlim(0, 1) plt.hist(playoff_p_A_samples, histtype='stepfilled', bins=25, alpha=0.85, label="posterior of $p_A$", color="#006BB6", density=True) plt.vlines(df2.iloc[0]["mean"], 0, 7.5, color="white", alpha=0.5,linestyle="--", label=f'mean delta') plt.vlines(df2.iloc[0]["hpd_2.5"], 0, 1, color="black", alpha=0.5,linestyle="--", label='2.5%') plt.vlines(df2.iloc[0]["hpd_97.5"], 0, 1, color="black", alpha=0.5,linestyle="--", label='97.5%') plt.legend(loc="upper right") plt.title("Playoffs \n Posterior distributions of $p_A$, $p_B$, and delta unknowns \n\n $p_A$: Mean Win % with KD") ax = plt.subplot(312) plt.xlim(0, 1) plt.hist(playoff_p_B_samples, histtype='stepfilled', bins=25, alpha=0.85, label="posterior of $p_B$", color="#FDB927", density=True) plt.vlines(df2.iloc[1]["mean"], 0, 3, color="white", alpha=0.5,linestyle="--", label=f'mean delta') plt.vlines(df2.iloc[1]["hpd_2.5"], 0, .8, color="black", alpha=0.5,linestyle="--", label='2.5%') plt.vlines(df2.iloc[1]["hpd_97.5"], 0, .8, color="black", alpha=0.5,linestyle="--", label='97.5%') plt.legend(loc="upper 
right") plt.title("$p_B$: Mean Win % No KD") ax = plt.subplot(313) plt.xlim(-0.5, 0.5) plt.hist(playoff_delta_samples, histtype='stepfilled', bins=30, alpha=0.85, label="posterior of delta", color="#BE3A34", density=True) plt.vlines(df2.iloc[2]["mean"], 0, 3, color="white", alpha=0.5,linestyle="--", label=f'mean delta') plt.vlines(df2.iloc[2]["hpd_2.5"], 0, 0.25, color="black", alpha=0.5,linestyle="--", label='2.5%') plt.vlines(df2.iloc[2]["hpd_97.5"], 0, 0.25, color="black", alpha=0.5,linestyle="--", label='97.5%') plt.legend(loc="upper right"); plt.title("$delta$: $p_A - p_B$") plt.savefig('playoffs.svg'); # Count the number of samples less than 0, i.e. the area under the curve print("Probability that GSW is worse with Kevin Durant in the playoffs: %.2f" % \ np.mean(playoff_delta_samples < 0)) print("Probability that GSW is better with Kevin Durant in the playoffs: %.2f" % \ np.mean(playoff_delta_samples > 0)) ``` --- ## Are the Warriors better without Kevin Durant? No. By combining results from the past three seasons, we obtain a larger test group, which allows us to observe a real change vs. looking at the pure stats for a single year. We can see that while delta=0 (i.e. no effect when KD plays) is in the credible interval at 95%, the majority of the distribution is above delta=0, implying the treatment group with KD is likely better than the group without KD. In fact, the probability that GSW is better with Kevin Durant in the playoffs is 71%, a significant improvement than 55% in the regular season! Superstars make a significant difference. The regular season is where you make your name, but the postseason is where you make your fame. The delta is 8% higher with KD. That's the advantage you gain with a player of his caliber, as he can hit clutch shots when it matters most. As a basketball fan, I hope to see Kevin Durant healthy and back in action soon. 
# References * https://multithreaded.stitchfix.com/blog/2015/05/26/significant-sample/ * https://multithreaded.stitchfix.com/blog/2015/02/12/may-bayes-theorem-be-with-you/
github_jupyter
``` import requests import arrow import pprint import json from urllib.parse import urlencode from functools import reduce token = open("./NOTION_TOKEN", "r").readlines()[0] notion_version = "2021-08-16" extra_data = {"filter": {"and": [{"property": "标签", "multi_select": {"is_not_empty": True}},],},} r_database = requests.post( url="https://api.notion.com/v1/databases/cecf4bb039dc46bca130a29a9db58906/query", headers={"Authorization": "Bearer " + token, "Notion-Version": notion_version, "Content-Type": "application/json", }, data=json.dumps(extra_data), ) respond = json.loads(r_database.text) def take_page_plain_text(respond: dict): for result in respond["results"]: page_id = result["url"].split("/")[-1].split("-")[-1] r_page = requests.get( url=f"https://api.notion.com/v1/blocks/{page_id}/children", headers={"Authorization": f"Bearer {token}", "Notion-Version": notion_version, "Content-Type": "application/json", }, ) for block in json.loads(r_page.text).get("results", []): for key in block: if not isinstance(block[key], dict): continue if "text" not in block[key]: continue for text in block[key]["text"]: yield text["plain_text"] text_list = list(take_page_plain_text(respond)) text_list[:3] import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer import jieba import sys from unicodedata import category codepoints = range(sys.maxunicode + 1) punctuation = {c for k in codepoints if category(c := chr(k)).startswith("P")} from functional import seq split_text_list = [jieba.lcut(text, HMM=True) for text in text_list] from glob import glob stopfiles = glob("./stopwords/*stopwords.txt") stopwords = reduce(lambda x,y: x.union(y), [set([x.strip() for x in open(file, "r").readlines()]) for file in stopfiles]) def check_stopwords(word): return word in stopwords \ or word in punctuation \ or word.isdigit() sequence = seq(split_text_list).map(lambda sent: [word for word in sent if not check_stopwords(word)]) uniqueWords = (sequence .map(lambda sent: 
def computeTF(wordDict, bagOfWords):
    """Term frequency: each word's count divided by the document length.

    Parameters
    ----------
    wordDict : dict[str, int]
        Word -> raw count for one document.
    bagOfWords : list[str]
        The tokenized document the counts were taken from.

    Returns
    -------
    dict[str, float]
        Word -> count / len(bagOfWords). An empty document yields 0.0 for
        every key instead of raising ZeroDivisionError.
    """
    total = len(bagOfWords)
    if total == 0:
        # Guard: the original divided by zero for an empty bag with counts.
        return {word: 0.0 for word in wordDict}
    return {word: count / total for word, count in wordDict.items()}


def computeIDF(documents):
    """Inverse document frequency over a corpus of word-count dicts.

    Parameters
    ----------
    documents : list[dict[str, int]]
        One word -> count mapping per document.

    Returns
    -------
    dict[str, float]
        Word -> log(N / document_frequency).

    Notes
    -----
    Fixes two defects of the original implementation: the vocabulary is the
    union of all documents' keys (the original used only ``documents[0]``,
    silently dropping words that appear solely in later documents), and
    words with zero document frequency are omitted instead of raising
    ZeroDivisionError.
    """
    import math
    N = len(documents)
    # Document frequency: number of documents in which each word occurs.
    df_counts = {}
    for document in documents:
        for word, val in document.items():
            if val > 0:
                df_counts[word] = df_counts.get(word, 0) + 1
    return {word: math.log(N / float(count)) for word, count in df_counts.items()}
github_jupyter
![](https://images.unsplash.com/photo-1602084551218-a28205125639?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=2070&q=80) <div class = 'alert alert-block alert-info' style = 'background-color:#4c1c84; color:#eeebf1; border-width:5px; border-color:#4c1c84; font-family:Comic Sans MS; border-radius: 50px 50px'> <p style = 'font-size:24px'>Exp 027</p> <a href = "#Config" style = "color:#eeebf1; font-size:14px">1.Config</a><br> <a href = "#Settings" style = "color:#eeebf1; font-size:14px">2.Settings</a><br> <a href = "#Data-Load" style = "color:#eeebf1; font-size:14px">3.Data Load</a><br> <a href = "#Pytorch-Settings" style = "color:#eeebf1; font-size:14px">4.Pytorch Settings</a><br> <a href = "#Training" style = "color:#eeebf1; font-size:14px">5.Training</a><br> </div> <p style = 'font-size:24px; color:#4c1c84'> 実施したこと </p> <li style = "color:#4c1c84; font-size:14px">使用データ:Jigsaw2nd</li> <li style = "color:#4c1c84; font-size:14px">使用モデル:DeBERTa-Base</li> <li style = "color:#4c1c84; font-size:14px">New!! 
Attentionの可視化</li> <br> <h1 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;"> Config </h1> <br> ``` import sys sys.path.append("../src/utils/iterative-stratification/") sys.path.append("../src/utils/detoxify") sys.path.append("../src/utils/coral-pytorch/") import warnings warnings.simplefilter('ignore') import os import gc gc.enable() import sys import glob import copy import math import time import random import string import psutil import pathlib from pathlib import Path from contextlib import contextmanager from collections import defaultdict from box import Box from typing import Optional from pprint import pprint import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import japanize_matplotlib from tqdm.auto import tqdm as tqdmp from tqdm.autonotebook import tqdm as tqdm tqdmp.pandas() ## Model from sklearn.metrics import mean_squared_error from sklearn.model_selection import StratifiedKFold, KFold import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import Dataset, DataLoader from transformers import AutoTokenizer, AutoModel, AdamW from transformers import RobertaModel, RobertaForSequenceClassification from transformers import RobertaTokenizer from transformers import LukeTokenizer, LukeModel, LukeConfig from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup from transformers import BertTokenizer, BertForSequenceClassification from transformers import RobertaTokenizer, RobertaForSequenceClassification from transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification from transformers import DebertaTokenizer, DebertaModel # Pytorch Lightning import pytorch_lightning as pl from pytorch_lightning.utilities.seed import seed_everything from pytorch_lightning import callbacks from pytorch_lightning.callbacks.progress import 
ProgressBarBase from pytorch_lightning import LightningDataModule, LightningDataModule from pytorch_lightning import Trainer from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor from pytorch_lightning.loggers import WandbLogger from pytorch_lightning.loggers.csv_logs import CSVLogger from pytorch_lightning.callbacks import RichProgressBar from sklearn.linear_model import Ridge from sklearn.svm import SVC, SVR from sklearn.feature_extraction.text import TfidfVectorizer from scipy.stats import rankdata from cuml.svm import SVR as cuml_SVR from cuml.linear_model import Ridge as cuml_Ridge import cudf from detoxify import Detoxify from iterstrat.ml_stratifiers import MultilabelStratifiedKFold import torch config = { "exp_comment":"Wiki AttackデータをLukeで学習", "seed": 42, "root": "/content/drive/MyDrive/kaggle/Jigsaw/raw", "n_fold": 5, "epoch": 5, "max_length": 128, "environment": "AWS", "project": "Jigsaw", "entity": "dataskywalker", "exp_name": "027_exp", "margin": 0.5, "train_fold": [0, 1, 2, 3, 4], "trainer": { "gpus": 1, "accumulate_grad_batches": 8, "progress_bar_refresh_rate": 1, "fast_dev_run": True, "num_sanity_val_steps": 0, }, "train_loader": { "batch_size": 4, "shuffle": True, "num_workers": 1, "pin_memory": True, "drop_last": True, }, "valid_loader": { "batch_size": 4, "shuffle": False, "num_workers": 1, "pin_memory": True, "drop_last": False, }, "test_loader": { "batch_size": 4, "shuffle": False, "num_workers": 1, "pin_memory": True, "drop_last": False, }, "backbone": { "name": "microsoft/deberta-base", "output_dim": 1, }, "optimizer": { "name": "torch.optim.AdamW", "params": { "lr": 1e-6, }, }, "scheduler": { "name": "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts", "params": { "T_0": 20, "eta_min": 0, }, }, "loss": "nn.BCEWithLogitsLoss", } config = Box(config) config.tokenizer = DebertaTokenizer.from_pretrained(config.backbone.name) config.model = DebertaModel.from_pretrained(config.backbone.name) # 
pprint(config) config.tokenizer.save_pretrained(f"../data/processed/{config.backbone.name}") pretrain_model = DebertaModel.from_pretrained(config.backbone.name) pretrain_model.save_pretrained(f"../data/processed/{config.backbone.name}") # 個人的にAWSやKaggle環境やGoogle Colabを行ったり来たりしているのでまとめています import os import sys from pathlib import Path if config.environment == 'AWS': INPUT_DIR = Path('/mnt/work/data/kaggle/Jigsaw/') MODEL_DIR = Path(f'../models/{config.exp_name}/') OUTPUT_DIR = Path(f'../data/interim/{config.exp_name}/') UTIL_DIR = Path('/mnt/work/shimizu/kaggle/PetFinder/src/utils') os.makedirs(MODEL_DIR, exist_ok=True) os.makedirs(OUTPUT_DIR, exist_ok=True) print(f"Your environment is 'AWS'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}\nUTIL_DIR is {UTIL_DIR}") elif config.environment == 'Kaggle': INPUT_DIR = Path('../input/*****') MODEL_DIR = Path('./') OUTPUT_DIR = Path('./') print(f"Your environment is 'Kaggle'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}") elif config.environment == 'Colab': INPUT_DIR = Path('/content/drive/MyDrive/kaggle/Jigsaw/raw') BASE_DIR = Path("/content/drive/MyDrive/kaggle/Jigsaw/interim") MODEL_DIR = BASE_DIR / f'{config.exp_name}' OUTPUT_DIR = BASE_DIR / f'{config.exp_name}/' os.makedirs(MODEL_DIR, exist_ok=True) os.makedirs(OUTPUT_DIR, exist_ok=True) if not os.path.exists(INPUT_DIR): print('Please Mount your Google Drive.') else: print(f"Your environment is 'Colab'.\nINPUT_DIR is {INPUT_DIR}\nMODEL_DIR is {MODEL_DIR}\nOUTPUT_DIR is {OUTPUT_DIR}") else: print("Please choose 'AWS' or 'Kaggle' or 'Colab'.\nINPUT_DIR is not found.") # Seed固定 seed_everything(config.seed) ## 処理時間計測 @contextmanager def timer(name:str, slack:bool=False): t0 = time.time() p = psutil.Process(os.getpid()) m0 = p.memory_info()[0] / 2. ** 30 print(f'<< {name} >> Start') yield m1 = p.memory_info()[0] / 2. 
def sample_df(df: pd.DataFrame, frac=0.2):
    """Keep every toxic row and a ``frac``-sized random sample of non-toxic rows.

    Adds a combined label column ``"y"`` (sum of the six toxicity label
    columns, scaled by its maximum), then returns a reshuffled frame made of
    all rows with y > 0 plus a sampled subset of the y == 0 rows.
    """
    print(f"Before: {df.shape}")
    label_cols = [
        "toxicity",
        "severe_toxicity",
        "identity_attack",
        "insult",
        "threat",
        "sexual_explicit",
    ]
    # Collapse the six labels into a single [0, 1] score.
    df["y"] = df[label_cols].sum(axis=1)
    df["y"] = df["y"] / df["y"].max()
    toxic_part = df[df["y"] > 0].reset_index(drop=True)
    clean_part = df[df["y"] == 0].reset_index(drop=True)
    # Downsample the (much larger) non-toxic class; seeded for reproducibility.
    clean_part = clean_part.sample(frac=frac, random_state=config.seed)
    df = pd.concat([toxic_part, clean_part], axis=0).sample(frac=1).reset_index(drop=True)
    print(f"After: {df.shape}")
    return df
= "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #4c1c84 ; color : #eeebf1; text-align: center; border-radius: 100px 100px;"> Pytorch Dataset </h1> <br> ``` class JigsawDataset: def __init__(self, df, tokenizer, max_length, mode, target_cols): self.df = df self.max_len = max_length self.tokenizer = tokenizer self.mode = mode self.target_cols = target_cols if self.mode == "train": self.text = df["comment_text"].values self.target = df[target_cols].values elif self.mode == "valid": self.more_toxic = df["more_toxic"].values self.less_toxic = df["less_toxic"].values else: self.text == df["text"].values def __len__(self): return len(self.df) def __getitem__(self, index): if self.mode == "train": text = self.text[index] target = self.target[index] inputs_text = self.tokenizer.encode_plus( text, truncation=True, return_attention_mask=True, return_token_type_ids=True, max_length = self.max_len, padding="max_length", ) text_ids = inputs_text["input_ids"] text_mask = inputs_text["attention_mask"] text_token_type_ids = inputs_text["token_type_ids"] return { 'text_ids': torch.tensor(text_ids, dtype=torch.long), 'text_mask': torch.tensor(text_mask, dtype=torch.long), 'text_token_type_ids': torch.tensor(text_token_type_ids, dtype=torch.long), 'target': torch.tensor(target, dtype=torch.float) } elif self.mode == "valid": more_toxic = self.more_toxic[index] less_toxic = self.less_toxic[index] inputs_more_toxic = self.tokenizer.encode_plus( more_toxic, truncation=True, return_attention_mask=True, return_token_type_ids=True, max_length = self.max_len, padding="max_length", ) inputs_less_toxic = self.tokenizer.encode_plus( less_toxic, truncation=True, return_attention_mask=True, return_token_type_ids=True, max_length = self.max_len, padding="max_length", ) target = 1 more_toxic_ids = inputs_more_toxic["input_ids"] more_toxic_mask = inputs_more_toxic["attention_mask"] more_token_type_ids = inputs_more_toxic["token_type_ids"] less_toxic_ids = 
inputs_less_toxic["input_ids"] less_toxic_mask = inputs_less_toxic["attention_mask"] less_token_type_ids = inputs_less_toxic["token_type_ids"] return { 'more_toxic_ids': torch.tensor(more_toxic_ids, dtype=torch.long), 'more_toxic_mask': torch.tensor(more_toxic_mask, dtype=torch.long), 'more_token_type_ids': torch.tensor(more_token_type_ids, dtype=torch.long), 'less_toxic_ids': torch.tensor(less_toxic_ids, dtype=torch.long), 'less_toxic_mask': torch.tensor(less_toxic_mask, dtype=torch.long), 'less_token_type_ids': torch.tensor(less_token_type_ids, dtype=torch.long), 'target': torch.tensor(target, dtype=torch.float) } else: text = self.text[index] input_text = self.tokenizer.encode_plus( text, truncation=True, return_attention_mask=True, return_token_type_ids=True, max_length = self.max_len, padding="max_length", ) text_ids = inputs_text["input_ids"] text_mask = inputs_text["attention_mask"] text_token_type_ids = inputs_text["token_type_ids"] return { 'text_ids': torch.tensor(text_ids, dtype=torch.long), 'text_mask': torch.tensor(text_mask, dtype=torch.long), 'text_token_type_ids': torch.tensor(text_token_type_ids, dtype=torch.long), } ``` <br> <h2 style = "font-size:45px; font-family:Comic Sans MS ; font-weight : normal; background-color: #eeebf1 ; color : #4c1c84; text-align: center; border-radius: 100px 100px;"> DataModule </h2> <br> ``` class JigsawDataModule(LightningDataModule): def __init__(self, train_df, valid_df, test_df, cfg): super().__init__() self._train_df = train_df self._valid_df = valid_df self._test_df = test_df self._cfg = cfg def train_dataloader(self): dataset = JigsawDataset( df=self._train_df, tokenizer=self._cfg.tokenizer, max_length=self._cfg.max_length, mode="train", target_cols=target_cols ) return DataLoader(dataset, **self._cfg.train_loader) def val_dataloader(self): dataset = JigsawDataset( df=self._valid_df, tokenizer=self._cfg.tokenizer, max_length=self._cfg.max_length, mode="valid", target_cols=target_cols ) return 
class JigsawModel(pl.LightningModule):
    # Lightning wrapper around a DeBERTa encoder with a linear head.
    # Trained on multi-label toxicity targets (loss class named in cfg.loss);
    # validated on (more_toxic, less_toxic) comment pairs by checking that the
    # summed toxicity score ranks the more-toxic comment higher.

    def __init__(self, cfg, fold_num):
        """cfg: Box-style config (backbone/optimizer/scheduler/loss settings);
        fold_num: zero-based CV fold index, used only to build log keys."""
        super().__init__()
        self.cfg = cfg
        self.__build_model()
        # cfg.loss holds the loss class name as a string, e.g. "nn.BCEWithLogitsLoss".
        # NOTE(review): eval() on config strings — acceptable only for trusted configs.
        self.criterion = eval(self.cfg.loss)()
        self.save_hyperparameters(cfg)
        self.fold_num = fold_num

    def __build_model(self):
        # Pretrained encoder plus LayerNorm/Dropout/Linear head.
        self.base_model = DebertaModel.from_pretrained(
            self.cfg.backbone.name
        )
        print(f"Use Model: {self.cfg.backbone.name}")
        # assumes a base-size hidden dim of 768 — TODO confirm for other backbones
        self.norm = nn.LayerNorm(768)
        self.drop = nn.Dropout(p=0.3)
        self.head = nn.Linear(768, self.cfg.backbone.output_dim)

    def forward(self, ids, mask, token_type_ids):
        """Return logits plus per-layer attentions and the input mask
        (attentions/mask are kept for attention visualization)."""
        output = self.base_model(
            input_ids=ids, attention_mask=mask, token_type_ids=token_type_ids, output_attentions=True
        )
        # First token embedding is used as the sequence representation.
        feature = self.norm(output["last_hidden_state"][:, 0, :])
        out = self.drop(feature)
        out = self.head(out)
        return {
            "logits": out,
            "attention": output["attentions"],
            "mask": mask,
        }

    def training_step(self, batch, batch_idx):
        text_ids = batch["text_ids"]
        text_mask = batch['text_mask']
        text_token_type_ids = batch['text_token_type_ids']
        targets = batch['target']
        outputs = self.forward(text_ids, text_mask, text_token_type_ids)
        loss = self.criterion(outputs["logits"], targets)
        return {
            "loss": loss,
            "targets": targets,
        }

    def training_epoch_end(self, training_step_outputs):
        # Average the per-step losses and log one scalar per epoch per fold.
        loss_list = []
        for out in training_step_outputs:
            loss_list.extend([out["loss"].cpu().detach().tolist()])
        meanloss = sum(loss_list) / len(loss_list)
        logs = {f"train_loss/fold{self.fold_num+1}": meanloss,}
        self.log_dict(
            logs, on_step=False, on_epoch=True, prog_bar=True, logger=True
        )

    def validation_step(self, batch, batch_idx):
        """Score both comments of a (more_toxic, less_toxic) pair and compare."""
        more_toxic_ids = batch['more_toxic_ids']
        more_toxic_mask = batch['more_toxic_mask']
        more_text_token_type_ids = batch['more_token_type_ids']
        less_toxic_ids = batch['less_toxic_ids']
        less_toxic_mask = batch['less_toxic_mask']
        less_text_token_type_ids = batch['less_token_type_ids']
        targets = batch['target']  # constant 1: "more toxic should rank higher"
        more_outputs = self.forward(
            more_toxic_ids, more_toxic_mask, more_text_token_type_ids
        )
        less_outputs = self.forward(
            less_toxic_ids, less_toxic_mask, less_text_token_type_ids
        )
        # Collapse the multi-label logits into one toxicity score per comment.
        more_outputs = torch.sum(more_outputs["logits"], 1)
        less_outputs = torch.sum(less_outputs["logits"], 1)
        outputs = more_outputs - less_outputs  # > 0 means the pair is ranked correctly
        logits = outputs.clone()
        # NOTE(review): positive gaps are clamped to 1 but negative gaps are
        # passed through unchanged before the loss — confirm this is intended.
        logits[logits > 0] = 1
        loss = self.criterion(logits, targets)
        return {
            "loss": loss,
            "pred": outputs,
            "targets": targets,
        }

    def validation_epoch_end(self, validation_step_outputs):
        # Epoch-level mean loss and pairwise ranking accuracy (share of pairs
        # where the more-toxic comment scored higher).
        loss_list = []
        pred_list = []
        target_list = []
        for out in validation_step_outputs:
            loss_list.extend([out["loss"].cpu().detach().tolist()])
            pred_list.append(out["pred"].detach().cpu().numpy())
            target_list.append(out["targets"].detach().cpu().numpy())
        meanloss = sum(loss_list) / len(loss_list)
        pred_list = np.concatenate(pred_list)
        pred_count = sum(x > 0 for x in pred_list) / len(pred_list)
        logs = {
            f"valid_loss/fold{self.fold_num+1}": meanloss,
            f"valid_acc/fold{self.fold_num+1}": pred_count,
        }
        self.log_dict(
            logs, on_step=False, on_epoch=True, prog_bar=True, logger=True
        )

    def configure_optimizers(self):
        # Optimizer and scheduler classes are named in the config and resolved
        # with eval(); the scheduler steps per batch ("interval": "step").
        optimizer = eval(self.cfg.optimizer.name)(
            self.parameters(), **self.cfg.optimizer.params
        )
        self.scheduler = eval(self.cfg.scheduler.name)(
            optimizer, **self.cfg.scheduler.params
        )
        scheduler = {"scheduler": self.scheduler, "interval": "step",}
        return [optimizer], [scheduler]
config.trainer.fast_dev_run = False  # real run (the debug cell above used True)
config.backbone.output_dim = len(target_cols)
# Cross-validation training: one fresh model + PL Trainer per fold.
for fold in config.train_fold:
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    # Train on every row whose assigned fold differs from the held-out fold.
    df_train = train_df[train_df.kfold != fold].reset_index(drop=True)
    datamodule = JigsawDataModule(df_train, val_df, test_df, config)
    # A throwaway dataloader is built only to size the scheduler period T_0.
    sample_dataloader = JigsawDataModule(df_train, val_df, test_df, config).train_dataloader()
    config.scheduler.params.T_0 = config.epoch * len(sample_dataloader)
    model = JigsawModel(config, fold)
    lr_monitor = callbacks.LearningRateMonitor()
    # Keep only the single best checkpoint by validation pairwise accuracy.
    loss_checkpoint = callbacks.ModelCheckpoint(
        filename=f"best_acc_fold{fold+1}",
        monitor=f"valid_acc/fold{fold+1}",
        save_top_k=1,
        mode="max",
        save_last=False,
        dirpath=MODEL_DIR,
    )
    wandb_logger = WandbLogger(
        project=config.project,
        entity=config.entity,
        name = f"{config.exp_name}",
        tags = ['DeBERTa-Base', "Jigsaw-Unbiased"]
    )
    # NOTE(review): this rebinding makes the callbacks.LearningRateMonitor()
    # created above dead code.
    lr_monitor = LearningRateMonitor(logging_interval='step')
    trainer = pl.Trainer(
        max_epochs=config.epoch,
        callbacks=[loss_checkpoint, lr_monitor, RichProgressBar()],
        # deterministic=True,
        logger=[wandb_logger],
        **config.trainer
    )
    trainer.fit(model, datamodule=datamodule)

# ---- Per-fold checkpoint inference on the validation pairs ----
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device == {device}")
MORE = np.zeros(len(val_df))   # fold-averaged scores for the "more toxic" texts
LESS = np.zeros(len(val_df))   # fold-averaged scores for the "less toxic" texts
PRED = np.zeros(len(test_df))  # NOTE(review): never filled — the accumulation below is commented out
attention_array = np.zeros((len(df), 256)) # stores attentions; NOTE(review): `df` is not defined in this chunk — confirm it exists upstream
mask_array = np.zeros((len(df), 256)) # stores mask info, multiplied with the attentions later
for fold in config.train_fold:
    pred_list = []
    print("★"*25, f" Fold{fold+1} ", "★"*25)
    valid_dataloader = JigsawDataModule(train_df, val_df, test_df, config).val_dataloader()
    model = JigsawModel(config, fold)
    # NOTE(review): this ModelCheckpoint is constructed but never attached to
    # a trainer — it has no effect during inference.
    loss_checkpoint = callbacks.ModelCheckpoint(
        filename=f"best_acc_fold{fold+1}",
        monitor=f"valid_acc/fold{fold+1}",
        save_top_k=1,
        mode="max",
        save_last=False,
        dirpath="../input/toxicroberta/",
    )
    # Restore the best checkpoint for this fold and switch to eval mode.
    model = model.load_from_checkpoint(MODEL_DIR/f"best_acc_fold{fold+1}.ckpt", cfg=config, fold_num=fold)
    model.to(device)
    model.eval()
    more_list = []
    less_list = []
    # NOTE(review): no torch.no_grad() here, so autograd graphs are built for
    # every forward pass — wrap the loop in torch.no_grad() to save memory.
    # Also, JigsawModel.forward returns a dict, so `more_outputs[:, 0]` below
    # would raise TypeError; presumably `more_outputs["logits"][:, 0]` was
    # intended — confirm against the model's forward().
    for step, data in tqdm(enumerate(valid_dataloader), total=len(valid_dataloader)):
        more_toxic_ids = data['more_toxic_ids'].to(device)
        more_toxic_mask = data['more_toxic_mask'].to(device)
        more_text_token_type_ids = data['more_token_type_ids'].to(device)
        less_toxic_ids = data['less_toxic_ids'].to(device)
        less_toxic_mask = data['less_toxic_mask'].to(device)
        less_text_token_type_ids = data['less_token_type_ids'].to(device)
        more_outputs = model(
            more_toxic_ids,
            more_toxic_mask,
            more_text_token_type_ids,
        )
        less_outputs = model(
            less_toxic_ids,
            less_toxic_mask,
            less_text_token_type_ids
        )
        more_list.append(more_outputs[:, 0].detach().cpu().numpy())
        less_list.append(less_outputs[:, 0].detach().cpu().numpy())
    # Average the per-fold scores into the global accumulators.
    MORE += np.concatenate(more_list)/len(config.train_fold)
    LESS += np.concatenate(less_list)/len(config.train_fold)
    # PRED += pred_list/len(config.train_fold)

# Visualise less-toxic vs more-toxic scores; points above the diagonal are
# correctly ranked pairs.
plt.figure(figsize=(12, 5))
plt.scatter(LESS, MORE)
plt.xlabel("less-toxic")
plt.ylabel("more-toxic")
plt.grid()
plt.show()
val_df["less_attack"] = LESS
val_df["more_attack"] = MORE
val_df["diff_attack"] = val_df["more_attack"] - val_df["less_attack"]
# Final metric: fraction of pairs where the "more toxic" text scored higher.
attack_score = val_df[val_df["diff_attack"]>0]["diff_attack"].count()/len(val_df)
print(f"Wiki Attack Score: {attack_score:.6f}")
```
github_jupyter
# Регрессия - последняя подготовка перед боем! > 🚀 В этой практике нам понадобятся: `numpy==1.21.2, pandas==1.3.3, matplotlib==3.4.3, scikit-learn==0.24.2, seaborn==0.11.2` > 🚀 Установить вы их можете с помощью команды: `!pip install numpy==1.21.2, pandas==1.3.3, matplotlib==3.4.3, scikit-learn==0.24.2, seaborn==0.11.2` # Содержание <a name="content"></a> * [Лирическое вступление](#Liricheskoe_vstuplenie) * [Первые реальные данные](#Pervye_real_nye_dannye) * [Анализ одной переменной (унивариантный - univariate)](#Analiz_odnoj_peremennoj_(univariantnyj_-_univariate)) * [Анализ нескольких переменных (мультивариантный - multivariate)](#Analiz_neskol_kih_peremennyh_(mul_tivariantnyj_-_multivariate)) * [LSTAT - MEDV](#LSTAT_-_MEDV) * [RM - MEDV](#RM_-_MEDV) * [Подготовка кода предобработки](#Podgotovka_koda_predobrabotki) * [fit()](#fit()) * [transform()](#transform()) * [Back to programming!](#Back_to_programming!) * [Заключение](#Zakljuchenie) * [Вопросы для закрепления](#Voprosy_dlja_zakreplenija) * [Полезные ссылки](#Poleznye_ssylki) ``` # Настройки для визуализации # Если используется темная тема - лучше текст сделать белым import matplotlib import numpy as np import pandas as pd import seaborn as sns import random TEXT_COLOR = 'black' matplotlib.rcParams['figure.figsize'] = (15, 10) matplotlib.rcParams['text.color'] = TEXT_COLOR matplotlib.rcParams['font.size'] = 14 matplotlib.rcParams['lines.markersize'] = 15 matplotlib.rcParams['axes.labelcolor'] = TEXT_COLOR matplotlib.rcParams['xtick.color'] = TEXT_COLOR matplotlib.rcParams['ytick.color'] = TEXT_COLOR sns.set_style('darkgrid') # Зафиксируем состояние случайных чисел RANDOM_SEED = 42 np.random.seed(RANDOM_SEED) random.seed(RANDOM_SEED) ``` ## Лирическое вступление <a name="intro"></a> И снова привет! К этому моменту мы многому научились и уже знаем немало! Тем не менее, много знаний не бывает, ведь мы приближаемся к первой боевой задаче! 
Да-да, скоро вам предстоит самостоятельно провести работу с набором данных! Правда, мы немного считерим, потому что в этой практике с этими данными частично познакомимся, но сделаем это частично, чтобы не забирать у вас всё веселье! Ранее мы много говорили о том, как учить модель машинного обучения, как разделять данные, как анализировать модель и т.д. В работе с данными эта часть зовётся "обучение и анализ модели". В этой практике мы поговорим о совершенно новой части в работе с данными и научимся данные анализировать. Зачем это нужно? Ну, просто обучить модель на данных - это зовётся **baseline**. **Baseline** как правило - это самое быстрое и простое решение, которое даёт результат! Вот, например, у нас есть данные о ценах на земли в городе. Задача - на основе этих данных предсказывать цены на другие участки земли. Самым простым решением будет взять сумму целевых значений (цен) и поделить на количество! Так мы получим среднее значение цены в данных и его можно постоянно предсказывать! Вот таким простым способом мы получили модель, которая всё время предсказывает постоянное значение. Да, у неё есть какая-то ошибка, да, это вообще не будет похоже на зависимость в данных, но не это важно! Важно то, что имея baseline, вы будете точно знать, относительно какого решения нужно улучшать вашу модель! Уже и MAE/RMSE есть с чем сравнить - одни плюсы! > Обратите внимание, что показатель R2 как раз в этом случае будет равень 0, так как значения больше нуля - а значит, модель лучше, чем простое предсказание среднего! > 🤓 **Baseline решение** - простое и быстро достижимое решение, используется для дальнейшей оценки улучшений предсказаний при работе с данными. Так вот к чему всё это? Сейчас мы пока что с вами научились строить baseline модели. А как научиться делать что-то лучше? Вот тут то и не хватает недостающей части, о которой мы с вами поговорим! И часть это зовется - **анализ данных**! Но зачем он нужен, если модель делает всё за нас? 
Учится на данных, регуляризацией мы убираем оверфит, на всякий проверим показатели на тестовой выборке - куда лучше? Поверьте, есть куда стремиться! В работе с реальными данными есть простое правило - не сложность модели определяет, кто будет круче, а качество и количество данных! > ⚠️ Ещё раз, данные важнее, чем модели! То есть, важно понимать, что происходит с моделью, оверфит это или нужна сложность модели побольше (недообучение). Но хорошее качество и количество данных могут дать намного больший прирост точности, так как шума и выбросов в них будет меньше, а зависимости более выражены. И как же тогда нам сделать данные качественнее, если вот у нас есть датасет, и сделать его больше мы не можем? Ответ прост - как можно лучше понять данные и предобработать, а для этого - проанализировать их в первую очередь! > ⚠️⚠️ Очень важный аспект - **понимание данных**. Если вы хорошо понимаете, что за данные вы имеете и что каждый признак означает, то высока вероятность, что вы лучше их обработаете и очистите! В таком случае, подводим **итог**! Создавать baseline модели на тех данных, что мы имеем - полезный навык. Но если мы хотим сделать нашу модель ещё круче и эффективнее, то нужно данные проанализировать и подготовить. > ⚠️ Все новые термины **обработка**, **очистка** и другие действия с данными относятся к общему понятию **подготовка данных** для модели. Baseline может строиться на неподготовленных данных и решать задачу (вероятнее всего плохо), подготовка данных нацелена на улучшение качества данных, чтобы модель, которая на них учится, выявила необходимые зависимости без влияния шума. > ⚠️ Для реализации хорошей **подготовки данных** необходимо провести **анализ данных**, чтобы данные лучше понять. Это всё слова, но пора к делу! Вы ещё увидите, почему анализ данных иногда бывает намного интереснее простого обучения модельки! ## Первые реальные данные <a name="real_data"></a> Настройтесь, сейчас мы с вами загрузим наши первые реальные данные и начнём с ними работать. 
Чувствуете это предвкушение? <p align="center"><img src="https://vk.com/sticker/1-2920-512-9" width=300/></p> Стоп, а где эти данные взять? Не переживайте, сегодня не вы одни занимаете наукой о данных, поэтому есть очень много ресурсов с разными данными, а мы постучимся на [Kaggle](https://www.kaggle.com/)! Для начала вам нужно там зарегистрироваться, если вы этого ещё не сделали! Дальше, нам нужно достать данные, которые нам сейчас нужны - мы воспользуемся [этим датасетом](https://www.kaggle.com/fedesoriano/the-boston-houseprice-data). После регистрации у вас будет возможность скачать CSV файл `boston.csv`. После этого всё зависит от того, где вы работаете. Если вы проходите практики на Google Colab, то вам нужно загрузить файл с данными на сам Colab (для этого есть меню слева). Если вы работаете локально, на своей машине (компьютере), то достаточно положить рядом с ноутбуком! > ✨ Если вы всё выполнили верно, то код дальше будет выполняться без проблем. Если нет - обратитесь к преподавателю за помощью! ``` df_src = pd.read_csv('boston.csv') ``` Когда данные успешно загружены, то важно первым делом посмотреть на размер данных и на сами данные! ``` df_src.shape df_src.head(10) df_src.info() # И конечно, сразу посмотреть на общие пропуски в данных df_src.isnull().sum() ``` Смотрите, пара действий, а мы уже видим некоторую информацию о данных. * Во-первых, у нас есть 14 переменных, из которых как минимум одну мы планируем предсказывать. * Во-вторых, во всём наборе данных есть всего 506 записей (примеров). Это немного, но хватит, чтобы много обсудить! Но здесь есть важная особенность, каждая колонка имеет название, но все они в виде аббревиатур! Это плохо, так как это затруднит разбор данных и может ухудшить понимание. 
Небольшой поиск по странице датасета и в интернете даёт как минимум два источника, в которых есть следующая информация о данных: - https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html#:~:text=The%20Boston%20Housing%20Dataset,the%20area%20of%20Boston%20Mass - https://scikit-learn.org/stable/datasets/toy_dataset.html#boston-house-prices-dataset Информация о колонках: - CRIM - per capita crime rate by town - ZN - proportion of residential land zoned for lots over 25,000 sq.ft. - INDUS - proportion of non-retail business acres per town - CHAS - Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) - NOX - nitric oxides concentration (parts per 10 million) - RM - average number of rooms per dwelling - AGE - proportion of owner-occupied units built prior to 1940 - DIS - weighted distances to five Boston employment centres - RAD - index of accessibility to radial highways - TAX - full-value property-tax rate per $10,000 - PTRATIO - pupil-teacher ratio by town - B - 1000(Bk - 0.63)^2 where Bk is the proportion of black people by town - LSTAT - % lower status of the population - MEDV - Median value of owner-occupied homes in $1000’s Отлично, какая-то информация есть и её можно перевести с английского, что даёт нам: - CRIM - уровень преступности на душу населения по городам - ZN - доля жилой земли, зонированной для участков площадью более 25 000 кв. футов. 
- INDUS - доля акров нетоварного бизнеса в городе
- CHAS - переменная-флаг приближенности к реке (= 1 если рядом с рекой; 0 в ином случае)
- NOX - концентрация оксидов азота (частей на 10 миллионов)
- RM - среднее количество комнат в одном жилом помещении
- AGE - доля квартир, занятых владельцами, построенных до 1940 года
- DIS - взвешенные расстояния до пяти бостонских центров занятости
- RAD - индекс доступности радиальных магистралей
- TAX - полная ставка налога на недвижимость в расчёте на каждые 10 000 долларов стоимости
- PTRATIO - соотношение числа учащихся и учителей по городам
- B - 1000(Bk - 0.63)^2, где Bk - доля чернокожего населения по городам
- LSTAT - процент бедности населения
- MEDV - средняя стоимость домов, занятых владельцами, в 1000 долларов США

Шикарно, какая-то информация есть и её можно перевести, что даёт нам понимание данных! Уже сейчас мы можем сформировать постановку задачи предсказания - нам нужно предсказывать **цену дома (MEDV)** по 13-ти имеющимся признакам. Не факт, что мы всеми признаками воспользуемся, но всё-таки это то, что мы сейчас имеем.

> Не бойтесь, работа с 13 переменными, когда мы вот только работали всего с одной - не так страшна, как кажется. Более того, когда мы строили полиномиальную регрессию 15-го порядка, то там у нас было аж 15 признаков!

Так с чего же начинается анализ данных? Самое простое - с анализа каждой переменной! Что мы хотим увидеть? В анализе одной переменной важно понять:
- что представляет из себя переменная
- есть ли у неё пропуски и как лучше их заполнить
- есть ли у переменной явные выбросы
- какое у переменной распределение и есть ли смещение
- и другие интересности, которые мы заметим =)

В этой практике мы пройдёмся по наиболее важным переменным, а вот в реальной задаче вам предстоит проанализировать каждую переменную! Так можно составить более полную картину данных!
> ⚠️ Этот список не исчерпывающий, но он сообщает, что любые странности и закономерности в данных важно выявить и проанализировать на предмет того, полезный ли эффект наблюдается или его лучше убрать, чтобы моделе было проще искать базовые зависимости в данных. ## Анализ одной переменной (унивариантный - univariate) <a name="uni"></a> Начнем с анализа под названием унивариантный. Он так называется, потому что мы анализируем каждую переменную по отдельности. Обычно, самым простым вариантом является построение распределения переменной, чтобы понять характер распределения. Здесь для примера мы возьмем переменную RM (среднее количество комнат в одном жилом помещении). ``` sns.displot(df_src['RM'], kde=True, height=7, aspect=1.5) ``` Что мы видим на графике? Распределение этой переменной близко к нормальному (Gauss-like - близко к Гауссовому). Пределы значений в диапазоне около [3; 9] комнат. Здесь важный акцент мы сделаем на "нормальности" распределения, так как бывают разные вариации нормальности. При анализе другой переменной мы это увидим. Тогда по этой переменной мы можем заключить следующее: * по таблице пропусков переменная пропусков не имеет * распределение близкое к нормальному * значения лежат в пределах, ожидаемых для описания этой переменной - количество комнат. Не сложно, правда? Другую переменную мы возьмём явно с интересным эффектом: ``` sns.displot(df_src['DIS'], kde=True, height=7, aspect=1.5) ``` Вот эту переменную уже сложнее назвать нормально распределённой. Она имеет явное **смещение влево**. Ещё это назвают **правый хвост**, так как правая часть похожа на хвост. Что делать с такими переменными? Ну, есть разные способы. Тут мы уже с вами говорим про методы модификации данных, а значит начинаем строить план обработки данных! 
Можно выделить два наиболее явных способа исправления распределения: - исправление с помощью логарифма (он исправляет левое смещение) - воспользоваться автоматизированными способами коррекции, например, [PowerTransformer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html) Первый способ мы попробуем сейчас, а вот со вторым вы можете разобраться самостоятельно, когда в следующей практике ринетесь в бой! ``` dis_log_col = np.log(df_src['DIS']) sns.displot(dis_log_col, kde=True, height=7, aspect=1.5) ``` Как видите, центр распределения сместился ближе к середине и само распределение стало больше похоже на нормальное, результат - успех! > 🔥 Не только в DS, но и в других областях, где вы модифицируете данные - всегда проверяйте результат и сравнивайте с ожиданиями! Это важно, так как без проверки промежуточного результата может появиться проблема, которая доставит много головной боли потом! > ⚠️ Исправление распределения очень важно для линейных моделей. Мы сейчас не заостряем внимание на этом, но в следующей самостоятельной практике обязательно сравните результаты с исправлением и без него! В результате, вывод по переменной: * пропусков не имеет * *распределение смещено, поэтому требуется исправление* Последний вывод важно записать в список дел, так как по результатам мы будм делать всю обработку данных единым образом. Давайте для примера возьмём ещё одну переменную, чтобы проанализировать нестандартное распределение: ``` sns.displot(df_src['CHAS'], kde=True, height=7, aspect=1.5) ``` Можно было бы сказать, что распределение смещено влево, но обратите внимание - в данных всего два значения: 0 и 1. Давайте это проверим: ``` df_src['CHAS'].unique() ``` Действительно, что же нам в таком случае делать? Да ничего, это распределение бимодальное, поэтому мы не будем пытаться его исправить. Вывод по переменной: * пропусков нет * распределение бимодальное Делать с этой переменной пока ничего не будем! 
Остальные переменные мы оставим за кадром, чтобы вам тоже было, с чем поработать! По результату анализа одной переменной делается вывод об основных особенностях каждой переменной. Мы с вами ещё научимся другим подходам анализа и многому интересному, но пока достаточно понимать следующие вещи: - имеет ли переменная пропуски (как их заполнять узнаем потом)? - понимаем ли мы суть переменной, сходится ли с описанием и логичные ли значения? - нужно ли корректировать распределение? ## Анализ нескольких переменных (мультивариантный - multivariate) <a name="multi"></a> Вот мы переходим к более вкусному анализу - зависимости между переменными! И начнем мы с определения **корреляций**! Мы уже много говорили о том, что в данных есть зависимости, но наблюдали мы их только на графиках. Как и во всех методах - хорошо бы иметь метод, который численно подтвердит наличие зависимости в данных! Есть он у меня для вас! Для примера мы возьмём пару переменных - полный анализ (все переменные) вы проведёте самостоятельно! ``` # Для примера выберем следующие признаки # Мы специально включили целевую переменную, чтобы показать, как проводить вместе в ней анализ features = ['CRIM', 'LSTAT', 'RM', 'MEDV'] correlation_mtrx = df_src[features].corr() correlation_mtrx ``` Таблица - это хорошо, но, как обычно, график лучше воспринимается =) ``` sns.heatmap(correlation_mtrx, annot=True, fmt='.2f') ``` Корреляция - это способ численно показать наличие зависимости между двумя переменными. Давайте попробуем проанализировать то, что мы видим здесь. С целевой переменной (MEDV) имеют близкую к высокой корреляция (считается, что высокая корреляция +/- 0.8-0.85 и выше по модулю) переменные RM и LSTAT. Это **может** означать, что эти переменные сильнее влияют на формирование цены, чем признак CRIM. Почему **может**? 
Да потому, что коэффициент корреляции - это лишь число, которое может не полностью отражать картину, поэтому такие выводы должны лишь заставлять задуматься, но ни в коем случае не делать конечные выводы лишь на основе корреляции! > 🤓 Корреляция всегда оценивается по модулю. Она может быть как высокой положительной, так и высокой отрицательной. Это для случая коэффициента Пирсона. Есть и другие коэффициенты, которые имеют диапазон [0; 1], но это уже совсем другая история =) Поглядите, что такое корреляция на более общем представлении разных ситуаций: <p align="center"><img src="https://raw.githubusercontent.com/kail4ek/ml_edu/master/assets/correlations.png" width=600/></p> > ⚠️ Высокая корреляция переменных между собой является эффектом **мультиколлинеарности признаков**. Это плохой эффект для модели, так как в случае сильной взаимосвязи переменных между собой модель может запутаться в расставлении весов независимым переменным. Они ведь не просто так зовутся независимыми! Одна из практик - в данных для предсказания оставлять одну из пары зависимых между собой переменных, а другую убирать из данных. По умолчанию, метод `.corr()` вычисляет коэффициент корреляции Пирсона. Этот тип коэффициента корреляции хорошо оценивает линейные зависимости. Попробуйте разобраться в документации, как оценить корреляцию по Спирману (Spearman) и выведите матрицу. Оцените, как изменились коэффициенты. Как изменился показатель на LSTAT-MEDV? Почему? ``` # TODO - выведите матрицу корреляции по Спирману и проанализируйте ее ``` Отлично, вот так незатейливо мы научились анализировать зависимости в данных без просмотра данных. На основе этого мы можем построить первоначальные выводы, но не посмотреть на данные (визуализировать их) - это сродне очень серьезной ошибке. Всегда важно по максимуму визуализировать данные и просматривать их. Так можно тщательнее провести анализ и узнать больше полезной информации о данных! 
Поэтому, давайте воспользуемся хитрым графиком для отображения зависимостей между данными:

```
sns.pairplot(df_src[features], diag_kind='auto', height=6)
```

Что мы видим на графике? По главной диагонали отображается распределение самой переменной, так как на 2d графике показывать точки переменной самой с собой - это будет просто линия. В отличных от диагональных ячейках располагаются графики распределения в плоскости одной переменной против другой.

Здесь сразу можно сделать два вывода:
- LSTAT-MEDV имеет нелинейную зависимость (видите, как замедляется уменьшение MEDV при увеличении LSTAT?)
- На графике RM-MEDV видны точки, которые очень "странно" лежат. Явно видно, что с увеличением RM MEDV растёт, но есть несколько точек, которые лежат как бы на прямой, вне зависимости от RM. Их нужно проанализировать!

Давайте перейдем к конкретному разбору!

### LSTAT - MEDV <a name="lstat_medv"></a>

Попробуем вывести точечный график переменных:

```
sns.scatterplot(x='LSTAT', y='MEDV', data=df_src)
```

Здесь явно выделяется нелинейная зависимость, поэтому мы в ходе предобработки сформируем новый признак - вторая степень от LSTAT. Это обусловлено этой явной нелинейностью. Запишем в планы!

### RM - MEDV <a name="rm_medv"></a>

Аналогично более подробно смотрим точечный график переменных:

```
sns.scatterplot(x='RM', y='MEDV', data=df_src)
```

Смотрите, у нас есть два типа потенциальных **выбросов**.
* Одни выбросы - лежат на прямой на уровне около MEDV ~= 50.
* Другие - выбиваются от общей зависимости в диапазонах: RM < 4 и (RM > 8 & MEDV < 30).

При обработке выбросов важно смотреть, что из себя представляют данные, поэтому выведем примеры и глянем на них:

```
outliers_1 = df_src[df_src['MEDV'] >= 50]
outliers_2 = df_src[(df_src['RM'] < 4) | ((df_src['RM'] > 8) & (df_src['MEDV'] < 30))]
outliers_1
outliers_2
```

Давайте посмотрим, выбросы по уровню цены = 50, которые очень нестандартно лежат на плоскости.
По данным явно не видно очевидной зависимости, поэтому трудно сразу сказать, что это явные выбросы. Как правило, выбросы имеют сильные искажения в данных, что видно и по другим переменным. Если всмотреться, то выбиваются именно точки, которые имеют RM < 7, а у них значение TAX = 666. Если построить распределение переменной TAX (вы это проделаете сами), то можно заметить, что значение 666 отстоит от основных данных, но таких записей с этим значением - аж 130, что сложно назвать выбросом. Тем не менее, это повторяется и в выбросах, которые отстают от основной группы точек, что наводит на мысль, что это всё-таки их обощает. Одно из предположений, которое можно сделать - **цензурирование данных**. Это подход, при котором в данных суммы и информация, которую важно закрыть, заменяется каким-то константным значением. Поэтому, при обработке, мы удалим эти данные, так как цензурирование искажает зависимости и это может сказаться на результатах работы. Давайте попробуем подчистить данные и посмотреть, как изменятся распределения точек на графиках: > ⚠️ Очистка данных - процесс очень выборочный, поэтому важно ещё раз всё перепроверять, чтобы не совершить ошибки, так как в результате данных становится меньше. > ⚠️ В ходе очистки удаляются записи данных - строки. ``` outliers_mask_1 = df_src['MEDV'] == 50 outliers_mask_2 = df_src['RM'] < 4 outliers_mask_3 = (df_src['RM'] > 8) & (df_src['MEDV'] < 30) outliers_mask = outliers_mask_1 | outliers_mask_2 | outliers_mask_3 df_cleaned = df_src.loc[~outliers_mask] sns.pairplot(df_cleaned[features], diag_kind='auto', height=6) ``` Как видите, график стал почище, а зависимость RM-MEDV стала более выраженной. Можем даже по-новой проверить корреляцию: > ⚠️ Если вы обратили внимание, что на графике CRIM-MEDV много точек лежит на значении CRIM=0 - молодцы! Внимательность - это отлично! 
В данном случае мы не рассматриваем их в качестве кандидатов на выбросы, так как их мало и нам ещё помогает **смысл переменной**: много домов с низким криминальным уровнем - это нормально. ``` sns.heatmap(df_cleaned[features].corr(), annot=True, fmt='.2f') ``` RM-MEDV ранее был 0.7, а теперь стал 0.73 и всё благодаря чистке данных! Как видите, как анализ одной переменной, так и анализ нескольких переменных не отличается чем-то сверх-научным. Как правило, данные достаточно посмотреть, пропустить через пару вычислений (как, например, корреляция) и уже можно составлять определённую картину. Также, в подготовке и очистке данных помогает понимание данных. Так, например, если бы в наших данных количество комнат (RM) имело бы значения -1, то мы понимали бы, что такого быть не может и тоже рассматривали бы это как выбросы. В результате, мы научились базовому анализу нескольких переменных (multivariate), рассмотрели, как можно детектировать выбросы и как оценивать зависимости численно - отличный результат, мы молодцы! ## Подготовка кода предобработки <a name="preproc"></a> Помимо того, что на каждом из этапов анализа проверяется своя подготовка, очистка и другая обработка данных - важно в конечном итоге сформировать единый код для предобработки данных, чтобы пользоваться было им удобно и он был более-менее универсален (была возможность применить его на новых данных). Давайте выделим два этапа: * очистка данных * предобработка Очистка делается для процесса обучения, чтобы модели предоставить более чистые данные без выбросов и лишнего шума. Предобработка делатся как для обучения, так и для обработки новых данных. > ⚠️ Помним, что конечная цель модели машинного обучения не просто обучиться и показать высокую метрику, а давать предсказания на новых данных и делать это хорошо. Так вот важно предобработку нормально оформить, чтобы потом не пришлось корячиться с кодом, когда надо будет его разворачивать в облаке =) Для этого нам поможет парадигма классов в Python! 
Но перед этим, мы быстренько оформим код очитки данных: ``` # TODO - напишите функцию clean_dataset(), который принимает DataFrame на вход и выдает его очищенным # NOTE - в функции надо выбрать выбросы той методикой, которую мы уже выработали и вернуть почищенный датасет # TEST _test_df = pd.DataFrame({ 'MEDV': [10, 20, 50, 50, 30, 10], 'RM': [5, 6, 7, 7, 3, 8], }) _test_result = clean_dataset(_test_df) pd.testing.assert_index_equal(pd.Index([0, 1, 5]), _test_result.index) print("Well done!") ``` Отлично, функция очистки написана и её мы применим только для нашего датасета, поэтому её универсальность не так важна! А теперь приступим к проработке класса для нашей собственной предобработки! Начнём с архитектуры, вот так будет выглядеть наш класс: ``` class DataPreprocessing: def __init__(self): pass def fit(self, df): pass def transform(self, df): return df ``` Вот и весь класс, ничего страшного =) Только, его методы (а-ля функции) ещё не реализованы, поэтому рано говорить о размерах кода =) Давайте обсудим, что мы уже написали и зачем нужны эти методы: ### fit() <a name="fit"></a> `.fit()` - это метод, который занимается сбором статистики с данных, чтобы их потом обработать. Собранную статистику мы будет хранить в атрибутах класса. Что такое *сбор статистики*? Всё просто. Давайте вспомним, как в прошлый раз масштабировали данные с помощью MinMaxScale. По сути, нам нужно вычислить минимум и максимум в данных и затем применить формулу с этими константами. А теперь вспомним, что нам надо масштабировать на обучающей выборке и выборке для теста. Давайте рассмотрим плохой вариант (*неправильный*): мы вычисляем мин-макс на обучающей выборке, допустим, получили (минимум = 10 и максимум = 100). Преобразовали обучающую выборку и всё ок. Теперь, берём тестовую и вычисляем то же самое (получаем, минимум = 20 и максимум = 105). Преобразовали тестовую выборку. А что дальше? 
Ну, модель обучится, ведь обучение - простая математика и предсказания будут как-то работать, но будет **концептуальная** ошибка! Именно в том, что модель учится на данных, ей приходит значение признака 1.0, а в исходных данных 1.0 ~ 100 (ведь максимум на обучающей = 100). Потом мы передаём тестовую и там тоже есть значение 1.0, но только на тестовой это означает 105. К чему это приводит? Модель ничего не заметит, сделает предсказание, а в нём будет ошибка! Ведь мы, хоть и не специально, начинаем модель путать, подавая данные, которые означают совсем другое, нежели на чём модель училась. Что же мы можем сделать? А что если, мы на обучающей выборке найдем минимум и максимум, запомним их и применим как к обучающей, так и тестовой выборке! Тогда, во всех данных (и даже в новых), 1.0 будет означать 100 и мы никого путать не будем! > 🤓 Да, в нашем случае на тестовой будут значения больше 1.0, но это не страшно! Главное для масштабирования - привести к одинаковым порядкам, а для правильной обработки - собрать статистику на обучающей выборке (train) и дальше применять её для трансформации как на обучающей, так и на тестовой выборке! Так вот мы и подошли к главному правилу в организации `fit()-transform()`: `fit()` всегда применяется только на train выборке! Эта функция собирает статистику, а её надо собирать только на обучающей выборке! На полной (train+test), не тестовой (test), а только на обучающей (train)! ### transform() <a name="transform"></a> Ну тут уже все проще. Все этапы обработки данных, что требуют сбор статистики - собирают в `fit()`, ну а дальше просто применяем всю обработку в `transform()`! Все просто! =) ## Back to programming! <a name="prog"></a> Отлично, мы разобрались, зачем нужен каждый метод! Давайте попробуем написать свой класс для предобработки! 
Реализуем следующую предобработку: - Выравнивание распределения для признака `DIS` с помощью логарифма - Нужно создать новый признак `DIS_log`, а старый удалить - Генерация полиномиального признака для `LSTAT` с названием `LSTAT_poly_2` - MinMaxScale - посмотрите на класс [MinMaxScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html) - Сделайте масштабирование всех признаков По сути, это небольшой набор того, как мы запланировали предобработать данные по результатам анализа! > 🔥 Объекты трансформеров из `sklearn` работают по аналогичному принципу, как мы с вами обсудили. Поэтому, при работе с ними можно сами объекты трансформеров создавать прямо в конструкторе нашего класса. `fit()` трансформеров вызывать в нашем методе `fit()`, ну и `transform()`, соответственно. ``` # TODO - реализуйте описанную предобработку class DataPreprocessing: def __init__(self): pass def fit(self, df): # Скопируем исходные данные, чтобы не изменять их df_copy = df.copy() # Здесь обратите внимание, что нужно сгенерировать полином и выровнять логарифмом, чтобы MinMaxScaler обучился и на них тоже pass def transform(self, df): # Возвращать transform() должен тоже DataFrame! return df # TEST _test_df = pd.DataFrame({'DIS': [2.3, 1.9, 0.4, 2.2], 'LSTAT': [0.1, 0.2, 0.3, 0.4], 'MORE_FEAT': [1, 2, 3, 4]}, index=[4, 6, 10, 12]) preproc = DataPreprocessing() preproc.fit(_test_df) _test_result = preproc.transform(_test_df) _test_expected = pd.DataFrame({ 'DIS_log': [1.0, 0.8907756387942631, 0.0, 0.9745873735075969], 'LSTAT': [0.0, 0.333, 0.666, 1.0], 'LSTAT_poly_2': [0.0, 0.2, 0.5333, 1.], 'MORE_FEAT': [0.0, 0.333, 0.666, 1.0] }, index=_test_df.index) pd.testing.assert_frame_equal(_test_result, _test_expected, check_like=True, atol=1e-3) print("Well done!") ``` Если вы прошли тест - значит вы большие молодцы!! 
В результате такой класс можно спокойно применять для подготовки данных для обучения модели и более того, для подготовки данных при поступлении новых! А это значит, мы ещё не обучили, но уже готовы предсказывать и показывать, как круто наша модель работает! Стремимся к высоким целям! ## Заключение <a name="conclusion"></a> В результате прохождения этой практики вы узнали очень важный факт (а может и несколько). **Анализ данных нужен и важен!** Конечно, мы только увидели пару приёмов, но в следующей практике, вы попробуете их в бою и увидите, что это действительно работает! ## Вопросы для закрепления <a name="qa"></a> А теперь пара вопросов, чтобы закрепить материал! 1. Зачем нужны классы в DS? 2. Чем полезна предобработка данных? 3. Опасно ли удалять какие-то данные из исходных? Когда можно такое делать? 4. На какой выборке применяется метод-fit? 5. На какой выборке применяется метод-transform? # Полезные ссылки <a name='links'></a> * [Linear Discriminant Analysis (LDA) от StatQuest](https://www.youtube.com/watch?v=azXCzI57Yfc) * [Basic Statistics for Data Science на Medium](https://medium.com/mlearning-ai/important-statistical-concepts-for-data-scientists-54e09106b75e) * [Quartiles for Beginners in DS на Medium](https://medium.com/@vinitasilaparasetty/quartiles-for-beginners-in-data-science-2ca5a640b07b) * [Understanding Value of Correlations in DS на Medium](https://medium.com/fintechexplained/did-you-know-the-importance-of-finding-correlations-in-data-science-1fa3943debc2) * [Correlation](https://luminousmen.com/post/data-science-correlation) * [Fundamentals of Statistics](https://towardsdatascience.com/fundamentals-of-statistics-for-data-scientists-and-data-analysts-69d93a05aae7)
github_jupyter
# Hypothesis Testing From lecture, we know that hypothesis testing is a critical tool in determining what the value of a parameter could be. We know that the basis of our testing has two attributes: **Null Hypothesis: $H_0$** **Alternative Hypothesis: $H_a$** The tests we have discussed in lecture are: * One Population Proportion * Difference in Population Proportions * One Population Mean * Difference in Population Means In this tutorial, I will introduce some functions that are extremely useful when calculating a t-statistic and p-value for a hypothesis test. Let's quickly review the following ways to calculate a test statistic for the tests listed above. The equation is: $$\frac{Best\ Estimate - Hypothesized\ Estimate}{Standard\ Error\ of\ Estimate}$$ We will use the examples from our lectures and use Python functions to streamline our tests. ``` import statsmodels.api as sm import numpy as np import pandas as pd ``` ### One Population Proportion #### Research Question In previous years 52% of parents believed that electronics and social media was the cause of their teenager’s lack of sleep. Do more parents today believe that their teenager’s lack of sleep is caused due to electronics and social media? **Population**: Parents with a teenager (age 13-18) **Parameter of Interest**: p **Null Hypothesis:** p = 0.52 **Alternative Hypothesis:** p > 0.52 1018 Parents 56% believe that their teenager’s lack of sleep is caused due to electronics and social media. ``` help(sm.stats.proportions_ztest) n = 1018 pnull = .52 phat = .56 sm.stats.proportions_ztest(phat * n, n, pnull) ``` ### Difference in Population Proportions #### Research Question Is there a significant difference between the population proportions of parents of black children and parents of Hispanic children who report that their child has had some swimming lessons? 
**Populations**: All parents of black children age 6-18 and all parents of Hispanic children age 6-18 **Parameter of Interest**: p1 - p2, where p1 = black and p2 = hispanic **Null Hypothesis:** p1 - p2 = 0 **Alternative Hypothesis:** p1 - p2 $\neq$ 0 247 Parents of Black Children 36.8% of parents report that their child has had some swimming lessons. 308 Parents of Hispanic Children 38.9% of parents report that their child has had some swimming lessons. ``` help(sm.stats.ttest_ind) n1 = 247 p1 = .37 n2 = 308 p2 = .39 population1 = np.random.binomial(1, p1, n1) population2 = np.random.binomial(1, p2, n2) sm.stats.ttest_ind(population1, population2) ``` ### One Population Mean #### Research Question Is the average cartwheel distance (in inches) for adults more than 80 inches? **Population**: All adults **Parameter of Interest**: $\mu$, population mean cartwheel distance. **Null Hypothesis:** $\mu$ = 80 **Alternative Hypothesis:** $\mu$ > 80 25 Adults $\mu = 82.46$ $\sigma = 15.06$ ``` df = pd.read_csv("Cartwheeldata.csv") df.head() n = len(df) mean = df["CWDistance"].mean() sd = df["CWDistance"].std() (n, mean, sd) help(sm.stats.proportions_ztest) sm.stats.ztest(df["CWDistance"], value = 80, alternative = "larger") ``` ### Difference in Population Means #### Research Question Considering adults in the NHANES data, do males have a significantly higher mean Body Mass Index than females? **Population**: Adults in the NHANES data. **Parameter of Interest**: $\mu_1 - \mu_2$, Body Mass Index. 
**Null Hypothesis:** $\mu_1 = \mu_2$ **Alternative Hypothesis:** $\mu_1 \neq \mu_2$ 2976 Females $\mu_1 = 29.94$ $\sigma_1 = 7.75$ 2759 Male Adults $\mu_2 = 28.78$ $\sigma_2 = 6.25$ $\mu_1 - \mu_2 = 1.16$ ``` url = "nhanes_2015_2016.csv" da = pd.read_csv(url) da.head() females = da[da["RIAGENDR"] == 2] male = da[da["RIAGENDR"] == 1] n1 = len(females) mu1 = females["BMXBMI"].mean() sd1 = females["BMXBMI"].std() (n1, mu1, sd1) n2 = len(male) mu2 = male["BMXBMI"].mean() sd2 = male["BMXBMI"].std() (n2, mu2, sd2) sm.stats.ztest(females["BMXBMI"].dropna(), male["BMXBMI"].dropna()) ```
github_jupyter
``` from systemtools.hayj import * from systemtools.basics import * from systemtools.file import * from systemtools.printer import * from systemtools.logger import * from annotator.annot import * from datatools.jsonutils import * from nlptools.tokenizer import * from datatools.htmltools import * from newssource.goodarticle.utils import * import numpy as np from sklearn.model_selection import GridSearchCV from sklearn.svm import SVR, LinearSVC from sklearn import linear_model from sklearn.model_selection import StratifiedKFold data = [] for file in sortedGlob("goodarticle*.json"): data += fromJsonFile(file) stopwords = set(fileToStrList("stopwords.txt")) startswithExcludes = set(fileToStrList("startswith-excludes.txt")) newData = [] for i in range(len(data)): data[i]["text"] = newsPreclean(data[i]["text"], startswithExcludes=startswithExcludes) if len(data[i]["text"]) > 0: newData.append(data[i]) data = newData bp(data, 2) print(len(data)) def basicFeatures\ ( text, longLine=140, shortLine=20, tooLongDocument=60000, stopwords={}, punct={',', ')', '...', "'", ';', '-', '!', ':', '?', '"', '.', '('}, logger=None, verbose=True, asDict=False, asNpArray=True, ): # Checking vars: if stopwords is None or len(stopwords) == 0 or punct is None or len(punct) == 0: logWarning("Please give a stopwords list and a punct list", logger, verbose=verbose) features = OrderedDict() # Too long document ? 
features["tooLongDocument"] = len(text) >= tooLongDocument # Len of the text: features["length"] = len(text) # The count of non-blank lines: lines = [e for e in text.split("\n") if e != ''] features["linesCount"] = len(lines) # The count of tokens: loweredText = text.lower() tokens = [e for e in text.split() if e != ''] loweredTokens = [e for e in loweredText.split() if e != ''] features["tokensCount"] = len(tokens) # Count of long lines, mean lines length, count of short lines: longLinesCount = 0 shortLinesCount = 0 meanLinesLength = 0 for line in lines: if len(line) >= longLine: longLinesCount += 1 if len(line) <= shortLine: shortLinesCount += 1 meanLinesLength += len(line) meanLinesLength = meanLinesLength / len(lines) features["longLinesCount"] = longLinesCount features["shortLinesCount"] = shortLinesCount features["meanLinesLength"] = meanLinesLength features["longLinesRatio"] = longLinesCount / len(lines) features["shortLinesRatio"] = shortLinesCount / len(lines) # The ratio of stopwords / punct: stopwordsAndPunct = stopwords.union(punct) c = len([e for e in loweredTokens if e in stopwordsAndPunct]) features["stopwordsPunctRatio"] = c / len(loweredTokens) # The mean overlap: nonSWPTokens = [e for e in loweredTokens if e not in stopwordsAndPunct] c = dict() for token in nonSWPTokens: if token not in c: c[token] = 0 c[token] += 1 theMean = 0 for token, count in c.items(): theMean += count theMean = theMean / len(c) features["nonSWPMeanOverlap"] = theMean # Ratio of only uppercased words: upperWordCount = len([e for e in tokens if hasLetter(e) and not hasLowerLetter(e)]) features["upperWordCount"] = upperWordCount features["upperWordRatio"] = upperWordCount / len(tokens) # Ratio of non words: nonWordCount = len([e for e in tokens if not hasLetter(e)]) features["nonWordCount"] = nonWordCount features["nonWordRatio"] = nonWordCount / len(tokens) # Ratio of html: htmlCharCount = len(text) - len(html2Text(text)) if htmlCharCount < 0: htmlCharCount = 0 
features["htmlCharCount"] = htmlCharCount features["htmlCharRatio"] = htmlCharCount / len(text) # Ratio of words that has at least on upper case: c = 0 for token in tokens: if hasUpperLetter(token): c += 1 features["hasUpperRatio"] = c / len(tokens) # Ratio of lines that start with a non word: c = 0 for line in lines: line = line.split() if len(line) > 0: if not hasLetter(line[0]): c += 1 features["lineStartWithNonWordRatio"] = c / len(lines) # Encoding prob count: encCount = 0 encCount += text.count("â") encCount += text.count("ï") encCount += text.count("U+") encCount += text.count("Ï") encCount += text.count("À") encCount += text.count("Á") encCount += text.count("Ã") encCount += text.count("�") encCount += text.count("—") features["encodingProbCount"] = encCount # Finally we return all features: if asDict: return features else: result = list(features.values()) if asNpArray: return np.array(result) else: return result def accuracy(predictions, y, thresholds=[0.25, 0.75]): assert len(predictions) == len(y) wellClassified = 0 for i in range(len(y)): prediction = predictions[i] currentPredictedClass = continuous2discret(prediction, thresholds) currentY = y[i] currentClass = continuous2discret(currentY, thresholds) if currentPredictedClass == currentClass: wellClassified += 1 return wellClassified / len(y) def continuous2discret(y, thresholds): currentClass = 0 for threshold in thresholds: if y <= threshold: return currentClass currentClass += 1 return currentClass for i, current in enumerate(data): if current["relevance"] == 0.0: text = current["text"] text = newsPreclean(text, startswithExcludes=startswithExcludes) bp(basicFeatures(text, stopwords=stopwords, asDict=True), 5) print() print(text) print() print() print() if i >= 3: break X = np.array([basicFeatures(current["text"], stopwords=stopwords) for current in data]) y = np.array([continuous2discret(current["relevance"], [0.51]) for current in data]) bp(X) bp(y) print(len(y)) if False: from sklearn.metrics 
import make_scorer scorer = make_scorer(accuracy, greater_is_better=True) # scorer(clf, X, y) param_grid = \ { 'loss': ['squared_hinge'], # 'hinge' 'penalty': ['l1', 'l2',], # l1, l2 'C': list(range(0, 20 + 1)), 'multi_class': ['ovr'], # , 'crammer_singer' 'dual': [False, True], 'random_state': [0], } clf = GridSearchCV(LinearSVC(), param_grid=param_grid, scoring='accuracy', cv=StratifiedKFold(n_splits=5, random_state=0, shuffle=True), n_jobs=cpuCount(), error_score=0.0) clf.fit(X, y) print("Best parameters set found on development set:") print() print(clf.best_params_) print(clf.best_score_) print() print("Grid scores on development set:") print() means = clf.cv_results_['mean_test_score'] stds = clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params)) best = clf.best_estimator_ predictions = best.predict(X) predictions wellClassified = 0 for i in range(len(predictions)): if predictions[i] == y[i]: wellClassified += 1 else: print("prediction: " + str(predictions[i])) print("true label: " + str(y[i])) print(data[i]["text"]) print() print() print() print() print(wellClassified / len(y)) bestParams = {'C': 6, 'dual': False, 'loss': 'squared_hinge', 'multi_class': 'ovr', 'penalty': 'l2', 'random_state': 0} clf = LinearSVC(**bestParams) clf.fit(X, y) best = clf import pickle serialize(clf, "best.pickle") best = deserialize("best.pickle") s ```
github_jupyter
``` %pylab inline import numpy as np import matplotlib.pyplot as plt # PyTorch imports import torch # This has neural network layer primitives that you can use to build things quickly import torch.nn as nn # This has things like activation functions and other useful nonlinearities from torch.nn import functional as F # This has various gradient descent algorithms import torch.optim # In order to take derivatives, we have to wrap things as a Variable or a Parameter. # Variables are things like inputs to the model # Parameters are things like weights # If you make a child class of nn.Module, it automatically keeps tracks of all parameters declared during # __init__ for you - really handy! from torch.autograd import Variable from torch.nn import Parameter from IPython import display import time ``` ## Generative Adversarial Networks Generative adversarial networks (GANs) are a method to learn to produce samples from high-dimensional distributions based only on a set of samples from that distribution. The basic idea is that you have two networks which are competing with eachother on a shared game. One network (the Generator) must create samples from the target distribution, while the other network (the Discriminator) must correctly predict whether a given sample came from the Generator or from the actual data set. For this game, the Nash equilibrium is for the Generator to produce samples exactly according to the probability density of the data distribution, and for the Discriminator to return the probability density of a given input sample. So a trained GAN in principle gives you both a way to sample from a distribution as well as a way to evaluate the local probability density around a sample. In practice, the Generator and Discriminator may not converge to the Nash equilibrium, but will often oscillate around it, overspecialize to sub-regions of the distribution ('mode collapse'), etc. 
As such, there are a large family of algorithms designed to improve the convergence properties of the basic setup. In this example, we'll just implement a basic GAN to reproduce some 2d distributions (so that the quality of the reconstruction can be easily checked). ``` # Some utility functions def toFloatVar(x): return Variable(torch.FloatTensor(x), requires_grad=False) def toLongVar(x): return Variable(torch.LongTensor(x), requires_grad=False) ``` ## Generator network First we'll specify the Generator. This network needs to produce a distribution of outcomes, not just an input-output relationship or single output, so we need to provide it a source of noise that it will transform into the target distribution. In essence, the Generator implements a transform from one probability distribution $p(z)$ to a target distribution (in a different set of variables) $q(x)$ - one sample at a time. So basically the procedure is, we sample a random $z$ from $p(z)$ (which will just be a high-dimensional Gaussian), then apply the network to get $x = G(z)$. 
``` class Generator(nn.Module): def __init__(self, noiseDimension = 16, hiddenDimension = 64, targetDimension = 2): super(Generator,self).__init__() self.layer1 = nn.Linear(noiseDimension, hiddenDimension) self.layer2 = nn.Linear(hiddenDimension, hiddenDimension) self.layer3 = nn.Linear(hiddenDimension, hiddenDimension) self.layer4 = nn.Linear(hiddenDimension, targetDimension) self.noiseDimension = noiseDimension # Each network will have its own optimizer, so we can train them at cross purposes to each-other self.optimizer = torch.optim.Adam(self.parameters(), lr = 1e-3) # For forward, we want to get samples based on specific values of the noise input def forward(self, x): z = F.relu(self.layer1(x)) z = F.relu(self.layer2(z)) z = F.relu(self.layer3(z)) z = self.layer4(z) return z # For convenience, lets also make a function that generates a batch of random samples def sample(self, N=100): z = toFloatVar(np.random.randn(N, self.noiseDimension)) return self.forward(z) ``` ## Discriminator Network The Discriminator network takes a sample either from the true dataset or from fakes made by the Generator, and should return a probability that the sample is real or fake. 
``` class Discriminator(nn.Module): def __init__(self, hiddenDimension = 64, targetDimension = 2): super(Discriminator,self).__init__() self.layer1 = nn.Linear(targetDimension, hiddenDimension) self.layer2 = nn.Linear(hiddenDimension, hiddenDimension) self.layer3 = nn.Linear(hiddenDimension, hiddenDimension) self.layer4 = nn.Linear(hiddenDimension, 1) # Each network will have its own optimizer, so we can train them at cross purposes to each-other self.optimizer = torch.optim.Adam(self.parameters(), lr = 1e-3) def forward(self, x): z = F.relu(self.layer1(x)) z = F.relu(self.layer2(z)) z = F.relu(self.layer3(z)) # Clamp for numerical stability z = torch.clamp( F.sigmoid(self.layer4(z)), 1e-6, 1-1e-6) return z ``` ## Training The training procedure involves two steps: training the Discriminator and training the Generator. We'll do these separately for clarity, despite that introducing a bit of redundancy. Training the discriminator: - Form a batch which contains 50% samples from true distribution and 50% samples from the generator - If $D()$ is the output of the discriminator and $x$ the true data, minimize the logistic loss: $L = -\log(D(x)) - \log(1-D(G(z)))$ - Update the discriminator weights only Training the generator: - Form a batch containing 100% samples from the generator - Apply the discriminator to get $D(G(z))$ - Update the generator to maximize the discriminator's loss: $L = \log(1-D(G(z)))$. 
``` def trainDiscriminator(data, generator, discriminator): fakes = generator.sample(N=data.shape[0]) # Zero the discriminator gradient discriminator.zero_grad() # Get the fake batch and true batch p_fakes = discriminator.forward(fakes) p_true = discriminator.forward(data) # Compute the loss loss = torch.mean(-torch.log(p_true)) + torch.mean(-torch.log(1-p_fakes)) # Update the discriminator weights only loss.backward() discriminator.optimizer.step() # Get the loss to follow training progress return loss.data.numpy().copy() # Training the generator doesn't require access to the dataset # Careful though - training to completion on a fixed discriminator leads to mode collapse # We have to train them together dynamically def trainGenerator(generator, discriminator): # Zero generator gradient generator.zero_grad() fakes = generator.sample(N=250) p_fakes = discriminator.forward(fakes) # Get the generator loss loss = torch.mean(torch.log(1-p_fakes)) # Update generator weights loss.backward() generator.optimizer.step() # Track generator loss for training return loss.data.numpy().copy() ``` ## Data distribution We'll learn a simple bimodal distribution to test the GAN ``` def generateData(N): # Generate which mode we're in x = np.random.randint(2,size=(N,1)) # Generate Gaussian fluctuations around the mode z = np.random.randn(N,2)*0.5 # Centers of the two modes centers = np.array([[-1.5,0.5], [0.6, 1.3]]) return centers[x[:,0]] + z data = generateData(250) plt.scatter(data[:,0],data[:,1]) plt.show() ``` ## Training the GAN ``` generator = Generator() discriminator = Discriminator() gen_loss = [] disc_loss = [] for epoch in range(1000): # It's often better for the discriminator to be slightly better than the generator for stability # So we'll use two steps here dl = trainDiscriminator(toFloatVar(data), generator, discriminator) dl = trainDiscriminator(toFloatVar(data), generator, discriminator) gl = trainGenerator(generator, discriminator) gen_loss.append(gl) 
disc_loss.append(dl) if epoch%5 == 0: samples = generator.sample(N=250) plt.clf() plt.subplot(1,2,1) plt.title("Generated Distribution") plt.scatter(data[:,0],data[:,1]) plt.scatter(samples[:,0],samples[:,1]) plt.xlim(-4,2.5) plt.ylim(-1.5,4) plt.subplot(1,2,2) plt.title("Training Loss") plt.plot(disc_loss,label="Discriminator") plt.plot(gen_loss,label="Generator") plt.legend() plt.gcf().set_size_inches((12,6)) display.clear_output(wait=True) display.display(plt.gcf()) time.sleep(0.01) ```
github_jupyter
# Advanced Tutorial: Creating Gold Annotation Labels with BRAT This is a short tutorial on how to use BRAT (Brat Rapid Annotation Tool), an online environment for collaborative text annotation. http://brat.nlplab.org/ ``` %load_ext autoreload %autoreload 2 %matplotlib inline import os # TO USE A DATABASE OTHER THAN SQLITE, USE THIS LINE # Note that this is necessary for parallel execution amongst other things... # os.environ['SNORKELDB'] = 'postgres:///snorkel-intro' from snorkel import SnorkelSession session = SnorkelSession() ``` ## Step 1: Define a `Candidate` Type We repeat our definition of the `Spouse` `Candidate` subclass from Parts II and III. ``` from snorkel.models import candidate_subclass, Document, Candidate Spouse = candidate_subclass('Spouse', ['person1', 'person2']) ``` ### a) Select an example `Candidate` and `Document` Candidates are divided into 3 splits, each mapped to a unique integer id: - 0: _training_ - 1: _development_ - 2: _testing_ In this tutorial, we'll load our training set candidates and create gold labels for a document using the BRAT interface ## Step 2: Launching BRAT BRAT runs as a separate server application. Snorkel will automatically download and configure a BRAT instance for you. When you first initialize this server, you need to provide your application's `Candidate` type. For this tutorial, we use the `Spouse` relation defined above, which consists of a pair of `PERSON` named entities connected by marriage. Currently, we only support 1 relation type per-application. ``` from snorkel.contrib.brat import BratAnnotator brat = BratAnnotator(session, Spouse, encoding='utf-8') ``` ### a) Initialize our document collection BRAT creates a local copy of all the documents and annotations found in a `split` set. We initialize a document collection by defining a unique set name, _spouse/train_, and then passing in our training set candidates via the `split` id. 
Annotations are stored as plain text files in [standoff](http://brat.nlplab.org/standoff.html) format. <img align="left" src="imgs/brat-login.jpg" width="200px" style="margin-right:50px"> After launching the BRAT annotator for the first time, you will need to login to begin editing annotations. Navigate your mouse to the upper right-hand corner of the BRAT interface (see Fig. 1) click 'login' and enter the following information: - **login**: _brat_ - **password**: _brat_ Advanced BRAT users can set up multiple annotator accounts by adding USER/PASSWORD key pairs to the `USER_PASSWORD` dictionary found in `snorkel/contrib/brat/brat-v1.3_Crunchy_Frog/config.py`. This is useful if you would like to keep track of multiple annotator judgements for later adjudication or use as labeling functions as per our tutorial on using [Snorkel for Crowdsourcing](https://github.com/HazyResearch/snorkel/blob/master/tutorials/crowdsourcing/Crowdsourced_Sentiment_Analysis.ipynb). ``` brat.init_collection("spouse/train", split=0) ``` We've already generated some BRAT annotations for you, so let's import an existing collection for purposes of this tutorial. ``` brat.import_collection("data/brat_spouse.zip", overwrite=True) ``` ### b) Launch BRAT Interface in a New Window Once our collection is initialized, we can view specific documents for annotation. The default mode is to generate an HTML link to a new BRAT browser window. Click this link to launch the annotator editor. Optionally, you can launch BRAT in an embedded window by calling: brat.view("spouse/train", doc, new_window=False) ``` doc_name = '5ede8912-59c9-4ba9-93df-c58cebb542b7' doc = session.query(Document).filter(Document.name==doc_name).one() brat.view("spouse/train", doc) ``` If you do not have a specific document to edit, you can optionally launch BRAT and use their file browser to navigate through all files found in the target collection. 
``` brat.view("spouse/train") ``` ## Step 3: Creating Gold Label Annotations ### a) Annotating Named Entities `Spouse` relations consist of 2 `PERSON` named entities. When annotating our validation documents, the first task is to identify our target entities. In this tutorial, we will annotate all `PERSON` mentions found in our example document, though for your application you may choose to only label those that participate in a true relation. <img align="right" src="imgs/brat-anno-dialog.jpg" width="400px" style="margin-left:50px"> Begin by selecting and highlighting the text corresponding to a `PERSON` entity. Once highlighted, an annotation dialog will appear on your screen (see image of the BRAT Annotation Dialog Window to the right). If this is correct, click ok. Repeat this for every entity you find in the document. **Annotation Guidelines** When developing gold label annotations, you should always discuss and agree on a set of _annotator guidelines_ to share with human labelers. These are the guidelines we used to label the `Spouse` relation: - **<span style="color:red">Do not</span>** include formal titles associated with professional roles e.g., _**Pastor** Jeff_, _**Prime Minister** Prayut Chan-O-Cha_ - Do include English honorifics unrelated to a professional role, e.g., _**Mr.** John Cleese_. - **<span style="color:red">Do not</span>** include family names/surnames that do not reference a single individual, e.g., _the Duggar family_. - Do include informal titles, stage names, fictional characters, and nicknames, e.g., _**Dog the Bounty Hunter**_ - Include possessives, e.g., _Anna**'s**_. ### b) Annotating Relations To annotate `Spouse` relations, we look through all pairs of `PERSON` entities found within a single sentence. BRAT identifies the bounds of each sentence and renders a numbered row in the annotation window (see the left-most column in the image below). 
<img align="right" src="imgs/brat-relation.jpg" width="500px" style="margin-left:50px"> Annotating relations is done through simple drag and drop. Begin by clicking and holding on a single `PERSON` entity and then drag that entity to its corresponding spouse entity. That is it! **Annotation Guidelines** - Restrict `PERSON` pairs to those found in the same sentence. - The order of `PERSON` arguments does not matter in this application. - **<span style="color:red">Do not</span>** include relations where a `PERSON` argument is wrong or otherwise incomplete. ## Step 4: Scoring Models using BRAT Labels ### a) Evaluating System Recall Creating gold validation data with BRAT is a critical evaluation step because it allows us to compute an estimate of our model's _true recall_. When we create labeled data over a candidate set created by Snorkel, we miss mentions of relations that our candidate extraction step misses. This causes us to overestimate the system's true recall. In the code below, we show how to map BRAT annotations to an existing set of Snorkel candidates and compute some associated metrics. ``` train_cands = session.query(Spouse).filter(Spouse.split == 0).order_by(Spouse.id).all() ``` ### b) Mapping BRAT Annotations to Snorkel Candidates We annotated a single document using BRAT to illustrate the difference in scores when we factor in the effects of candidate generation. ``` %time brat.import_gold_labels(session, "spouse/train", train_cands) ``` Our candidate extractor only captures 7/14 (50%) of true mentions in this document. Our real system's recall is likely even worse, since we won't correctly predict the label for all true candidates. ### c) Re-loading the Trained LSTM We'll load the LSTM model we trained in [Workshop_4_Discriminative_Model_Training.ipynb](Workshop_4_Discriminative_Model_Training.ipynb) and use it to predict marginals for our test candidates. 
``` test_cands = session.query(Spouse).filter(Spouse.split == 2).order_by(Spouse.id).all() from snorkel.learning.disc_models.rnn import reRNN lstm = reRNN(seed=1701, n_threads=None) lstm.load("spouse.lstm") marginals = lstm.marginals(test_cands) ``` ### d) Create a Subset of Test for Evaluation Our measures assume BRAT annotations are complete for the given set of documents! Rather than manually annotating the entire test set, we define a small subset of 10 test documents for hand labeling. We'll then compute the full, recall-corrected metrics for this subset. First, let's build a query to initialize this candidate collection. ``` doc_ids = set(open("data/brat_test_docs.tsv","rb").read().splitlines()) cid_query = [c.id for c in test_cands if c.get_parent().document.name in doc_ids] brat.init_collection("spouse/test-subset", cid_query=cid_query) brat.view("spouse/test-subset") ``` ### e) Comparing Unadjusted vs. Adjusted Scores ``` import matplotlib.pyplot as plt plt.hist(marginals, bins=20) plt.show() from snorkel.annotations import load_gold_labels L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1, load_as_array=True, zero_one=True) L_gold_test = load_gold_labels(session, annotator_name='gold', split=2, zero_one=True) ``` **Recall-uncorrected Score** If we don't account for candidates missed during extraction, our model score will overestimate real performance, as is the case for the model evaluation below. ``` brat.score(session, test_cands, marginals, "spouse/test-subset", recall_correction=False) ``` **Recall-corrected Score** Though this is a small sample of documents, we see how missing candidates can impact our real system score. ``` brat.score(session, test_cands, marginals, "spouse/test-subset") ``` This is the full model, evaluated on all our gold candidate labels. ``` tp, fp, tn, fn = lstm.error_analysis(session, test_cands, L_gold_test) ```
github_jupyter
... ***CURRENTLY UNDER DEVELOPMENT*** ... ## Validation of the total water level inputs required: * historical wave conditions * emulator output - synthetic wave conditions of TWL * emulator output - synthetic wave conditions of TWL with 3 scenarios of SLR in this notebook: * Comparison of the extreme distributions ``` #!/usr/bin/env python # -*- coding: utf-8 -*- # common import os import os.path as op # pip import numpy as np import xarray as xr from datetime import datetime import matplotlib.pyplot as plt # DEV: override installed teslakit import sys sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..', '..')) # teslakit from teslakit.database import Database from teslakit.climate_emulator import Climate_Emulator from teslakit.extremes import Peaks_Over_Threshold as POT from teslakit.util.time_operations import xds_reindex_daily from teslakit.plotting.extremes import Plot_ReturnPeriodValidation_CC from teslakit.plotting.estela import Plot_DWTs_Probs from teslakit.plotting.wts import Plot_Probs_WT_WT from teslakit.plotting.outputs import Plot_LevelVariables_Histograms ``` ## Database and Site parameters ``` # -------------------------------------- # Teslakit database p_data = r'/Users/albacid/Projects/TeslaKit_projects' # offshore db = Database(p_data) db.SetSite('ROI') # climate change - S1 db_S1 = Database(p_data) db_S1.SetSite('ROI_CC_S1') # climate change - S2 db_S2 = Database(p_data) db_S2.SetSite('ROI_CC_S2') # climate change - S3 db_S3 = Database(p_data) db_S3.SetSite('ROI_CC_S3') # -------------------------------------- # Load complete hourly data for extremes analysis # Historical HIST_C_h = db.Load_HIST_OFFSHORE(vns=['TWL'],decode_times=True) # Simulation (1000 yrs) SIM_C_h = db.Load_SIM_OFFSHORE_all(vns=['TWL'], decode_times=True, use_cftime=True) # Simulation climate change S1 (100 yrs) SIM_C_h_CChange_S1 = db_S1.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True) # Simulation climate change S2 (100 yrs) SIM_C_h_CChange_S2 = 
db_S2.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True) # Simulation climate change S3 (100 yrs) SIM_C_h_CChange_S3 = db_S3.Load_SIM_OFFSHORE_all(decode_times=True, use_cftime=True) # Keep first 100 years of simulation without climate change SIM_C_h = SIM_C_h.isel(time=slice(0, len(SIM_C_h_CChange_S1.time))) # 100 years ``` ## Level Variables (TWL) - Histograms ``` from teslakit.plotting.outputs import axplot_compare_histograms from teslakit.plotting.config import _faspect, _fsize import matplotlib.gridspec as gridspec # Plot TWL histogram comparison between historical and simulated data for different SLR scenarios data_fit = HIST_C_h['TWL'].values[:]; data_fit = data_fit[~np.isnan(data_fit)] data_sim = SIM_C_h['TWL'].sel(n_sim = 0).values[:]; data_sim = data_sim[~np.isnan(data_sim)] data_sim_1 = SIM_C_h_CChange_S1['TWL'].sel(n_sim = 0).values[:]; data_sim_1 = data_sim_1[~np.isnan(data_sim_1)] data_sim_2 = SIM_C_h_CChange_S2['TWL'].sel(n_sim = 0).values[:]; data_sim_2 = data_sim_2[~np.isnan(data_sim_2)] data_sim_3 = SIM_C_h_CChange_S3['TWL'].sel(n_sim = 0).values[:]; data_sim_3 = data_sim_3[~np.isnan(data_sim_3)] # plot figure fig = plt.figure(figsize=(_faspect*_fsize, _fsize*2/2.3)) gs = gridspec.GridSpec(2, 2) n_bins = np.linspace(np.nanmin([np.nanmin(data_fit), np.nanmin(data_sim_3)]),np.nanmax([np.nanmax(data_fit), np.nanmax(data_sim_3)]), 40) ax = plt.subplot(gs[0, 0]) axplot_compare_histograms(ax, data_fit, data_sim, ttl='TWL', n_bins=n_bins, color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7, label_1='Historical', label_2='Simulation') ax = plt.subplot(gs[0, 1]) axplot_compare_histograms(ax, data_sim, data_sim_1, ttl='TWL', n_bins=n_bins, color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7, label_1='Simulation', label_2='Simulation Climate Change S1') ax = plt.subplot(gs[1, 0]) axplot_compare_histograms(ax, data_sim, data_sim_2, ttl='TWL', n_bins=n_bins, color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7, 
label_1='Simulation', label_2='Simulation Climate Change S2') ax = plt.subplot(gs[1, 1]) axplot_compare_histograms(ax, data_sim, data_sim_3, ttl='TWL', n_bins=n_bins, color_1='white', color_2='skyblue', alpha_1=0.9, alpha_2=0.7, label_1='Simulation', label_2='Simulation Climate Change S3') ``` ## TWL - Annual Maxima for different SLR scenarios ``` # Plot TWL annual maxima # calculate Annual Maxima values for historical and simulated data hist_A = HIST_C_h['TWL'].groupby('time.year').max(dim='time') sim_A = SIM_C_h['TWL'].groupby('time.year').max(dim='time') ``` ### SLR S1 (intermediate low, +0.5m) ``` sim_B = SIM_C_h_CChange_S1['TWL'].groupby('time.year').max(dim='time') # Return Period historical vs. simulations Plot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose()); ``` ### SLR S2 (intermediate, +1m) ``` sim_B = SIM_C_h_CChange_S2['TWL'].groupby('time.year').max(dim='time') # Return Period historical vs. simulations Plot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose()); ``` ### SLR S3 (intermediate high, +1.5m) ``` sim_B = SIM_C_h_CChange_S3['TWL'].groupby('time.year').max(dim='time') # Return Period historical vs. simulations Plot_ReturnPeriodValidation_CC(hist_A, sim_A.transpose(), sim_B.transpose()); ```
github_jupyter
<small><small><i> All the IPython Notebooks in this lecture series by Dr. Milan Parmar are available @ **[GitHub](https://github.com/milaan9/02_Python_Datatypes)** </i></small></small> # Python Strings In this class you will learn to create, format, modify and delete strings in Python. Also, you will be introduced to various string operations and functions. ## What is String in Python? A string is a built-in type sequence of characters. It is used to handle **textual data** in python. Python **Strings are immutable sequences** of **Unicode** points. Creating Strings are simplest and easy to use in Python. A character is simply a symbol. For example, the English language has 26 characters. Computers do not deal with characters, they deal with numbers (binary). Even though you may see characters on your screen, internally it is stored and manipulated as a combination of 0s and 1s. This conversion of character to a number is called encoding, and the reverse process is decoding. ASCII and Unicode are some of the popular encodings used. In Python, a string is a sequence of Unicode characters. Unicode was introduced to include every character in all languages and bring uniformity in encoding. These Unicodes range from **$0_{hex}$** to **$10FFFF_{hex}$**. Normally, a Unicode is referred to by writing **"U+"** followed by its **hexadecimal** number. Thus strings in Python are a sequence of Unicode values. You can learn about Unicode from **[Python Unicode](https://docs.python.org/3.3/howto/unicode.html)**. <div> <img src="img/s0.png" width="600"/> </div> ## How to create a string in Python? Strings can be created by enclosing characters inside a **single quote** or **double-quotes**. Even **triple quotes** can be used in Python but generally used to represent multiline strings and docstrings. 
``` # Example: # defining strings in Python # all of the following are equivalent my_string = 'Hello' print(my_string) my_string = "Hello" print(my_string) my_string = '''Hello''' print(my_string) # triple quotes string can extend multiple lines my_string = """Hello, welcome to the world of Python""" print(my_string) a = "Hello," b= 'World!' print(a+b) print(a+" "+b) string1='World' string2='!' print('Hello,' + " " + string1 + string2) ``` ## How to access characters in a string? * In Python, Strings are stored as individual characters in a **contiguous memory location**. * The benefit of using String is that it can be accessed from both the **directions** (forward and backward). * Both forward as well as backward indexing are provided using Strings in Python. * Forward indexing starts with **`0,1,2,3,.... `** * Backward indexing starts with **`-1,-2,-3,-4,.... `** * Trying to access a character out of index range will raise an **`IndexError`**. The index must be an integer. We can't use floats or other types, this will result into **`IndexError`**. * Strings can be indexed with square brackets. Indexing starts from zero in Python. * We can access a range of items in a string by using the slicing operator **`:`**(colon). * And the **`len()`** function provides the length of a string ```python str[0] = 'P' = str[-6] , str[1] = 'Y' = str[-5] , str[2] = 'T' = str[-4] , str[3] = 'H' = str[-3] , str[4] = 'O' = str[-2] , # refers to the second last item str[5] = 'N' = str[-1]. # refers to the last item ``` <div> <img src="img/s3.png" width="300"/> </div> ``` # Accessing string characters in Python str = 'PYTHON' print('str = ', str) #first character print('str[0] = ', str[0]) #last character print('str[-1] = ', str[-1]) #slicing 2nd to 5th character print('str[1:5] = ', str[1:5]) #slicing 6th to 2nd last character print('str[5:-2] = ', str[3:-1]) ``` If we try to access an index out of the range or use numbers other than an integer, we will get errors. 
``` # Accessing string characters in Python str = 'PYTHON' print('str = ', str) # index must be in range print('str[15] = ', str[15]) # Accessing string characters in Python str = 'PYTHON' print('str = ', str) # index must be an integer print('str[1.50] = ', str[1.5]) s = '123456789' #Indexing strats from 0 to 8 print("The string '%s' string is %d characters long" %(s, len(s)) ) print('First character of',s,'is',s[0]) print('Last character of',s,'is',s[8]) print('Last character of',s,'is',s[len(s)-1]) # [9-1] = [8] is 9 ``` Negative indices can be used to start counting from the back ``` print('First character of',s,'is',s[-len(s)]) print('First character of',s,'is',s[(-9)]) print('Second character of',s,'is',s[(-8)]) print('Last character of',s,'is',s[-1]) ``` Finally a substring (range of characters) an be specified as using $a:b$ to specify the characters at index $a,a+1,\ldots,b-1$. Note that the last charcter is *not* included. ``` print("First three characters",s[0:3]) print("Next three characters",s[3:6]) ``` An empty beginning and end of the range denotes the beginning/end of the string: ``` s = '123456789' #Indexing strats from 0 to 8 print("First three characters", s[:3]) print("Last three characters", s[-3:]) # Here, we are creating a simple program to retrieve String in reverse as well as normal form. name="Milan" length=len(name) i=0 for n in range(-1,(-length-1),-1): print(name[i],"\t",name[n]) i+=1 ``` ## How to slice a string in Python? Python String **slice** can be defined as a **substring** which is the part of the string. Therefore further substring can be obtained from a string. There can be many forms to slice a string, as string can be accessed or indexed from both the direction and hence string can also be sliced from both the directions. Slicing can be best visualized by considering the index to be between the elements as shown below. If we want to access a range, we need the index that will slice the portion from the string. 
<div> <img src="img/s16.png" width="300"/> </div> **Syntax** of Slice Operator : ```python str[start : stop : step ] ``` other syntax of slice: ```python str[start : stop] # items start through stop-1 str[start : ] # items start through the rest of the array str[ : stop] # items from the beginning through stop-1 str[ : ] # a copy of the whole array ``` ``` # Example: s="Milan Python" print(s[6:10]) print(s[-12:-7]) print(s[-1: :-1]) #reversed all string print(s[2: 10: 2]) #step = 2 print(s[ : : -1]) #reversed all string print(s[ : 5]) #from 0 to 4 print(s[3 : ]) #from 3 to end of the string print(s[ : ]) #copy all string ``` **NOTE**: Both the operands passed for concatenation must be of same type, else it will show an error. ## Breaking appart strings When processing text, the ability to split strings appart is particularly useful. * `partition(separator)`: breaks a string into three parts based on a separator * `split()`: breaks string into words separated by white-space (optionally takes a separator as argument) * `join()`: joins the result of a split using string as separator ``` s = "one -> two -> three" print( s.partition("->") ) print( s.split() ) print( s.split(" -> ") ) print( ";".join( s.split(" -> ") ) ) "This will split all words into a list".split() ' '.join(['This', 'will', 'join', 'all', 'words', 'into', 'a', 'string']) 'Happy New Year'.find('ew') 'Happy New Year'.replace('Happy','Brilliant') ``` ## How to change or delete a string? Strings are immutable. This means that elements of a string cannot be changed once they have been assigned. We can simply reassign different strings to the same name. ``` my_string = 'python' my_string[5] = 'a' s='012345' sX=s[:2]+'X'+s[3:] # this creates a new string with 2 replaced by X print("creating new string",sX,"OK") sX=s.replace('2','X') # the same thing print(sX,"still OK") s[2] = 'X' # an error!!! ``` We cannot delete or remove characters from a string. 
But deleting the string entirely is possible using the **`del`** keyword. ``` my_string = 'python' del my_string[1] # deleting element of string generates error! my_string = 'python' del my_string # deleting whole string using 'del' keyword can delete it. my_string ``` ## Python Strings Operations There are many operations that can be performed with strings which makes it one of the most used data types in Python. To learn more about the data types available in Python visit: **[Python Data Types](https://github.com/milaan9/01_Python_Introduction/blob/main/009_Python_Data_Types.ipynb)**. To perform operation on string, Python provides basically 3 types of Operators that are given below. * Basic Operators/Concatenation of Two or More Strings. * Membership Operators. * Relational Operators. ### 1. Basic Operators for concatenation of two or more strings There are two types of basic operators in String **`+`** and **`*`**. The **`+`** (concatenation) operator can be used to concatenates two or more string literals together. The **`*`** (Replication) operator can be used to repeat the string for a given number of times. #### String Concatenation Operator (**`+`**) Joining of two or more strings into a single one is called concatenation. ``` # Example: str1="Hello" str2="World!" print(str1+str2) ``` | Expression | Output | |:----| :--- | | **`"10" + "50"`** | **"1050"** | | **`"hello" + "009"`** | **"hello009"** | | **`"hello99" + "world66" `** | **"hello99world66"** | >**Note:** Both the operands passed for concatenation must be of same type, else it will show an error. ``` # Example: print("HelloWorld"+99) ``` #### Python String Replication Operator (**`*`**) **Replication operator** uses two parameters for operation, One is the integer value and the other one is the String argument. The Replication operator is used to **repeat a string** number of times. The string will be repeated the number of times which is given by the **integer value**. 
| Expression | Output | |:----| :--- | | **`"ArcX" \* 2`** | **"ArcXArcX"** | | **`3 *'5'`** | **"555"** | | **`'@'* 5 `** | **"@@@@@"** | >**Note:**: We can use Replication operator in any way i.e., int **`*`** string or string **`*`** **`int`**. Both the parameters passed cannot be of same type. ``` # Example: print("HelloWorld" * 5) print(3 * "Python") print("Hello World! "*5) #note the space in between 'Hello' and 'World!' # Python String Operations str1 = 'Hello' str2 ='World!' # using + print('str1 + str2 = ', str1 + str2) # using * print('str1 * 3 =', str1 * 3) ``` If we want to concatenate strings in different lines, we can use parentheses **`()`**. ``` # two string literals together 'Hello ''World!' # using parentheses s = ('Hello ' 'World') s ``` ### Iterating Through a string We can iterate through a string using a **[for loop](https://github.com/milaan9/03_Python_Flow_Control/blob/main/005_Python_for_Loop.ipynb)**. Here is an example to count the number of 'l's in a string. ``` # Iterating through a string count = 0 for letter in 'Hello World': if(letter == 'l'): count += 1 print(count,'letters found') ``` ### 2. Python String Membership Operators Membership Operators are already discussed in the Operators section. Let see with context of String. There are two types of Membership operators : 1. **`in`** - "in" operator returns true if a character or the entire substring is present in the specified string, otherwise false. 2. **`not in`** - "not in" operator returns true if a character or entire substring does not exist in the specified string, otherwise false. ``` # Example: str1="HelloWorld" str2="Hello" str3="World" str4="Milan" print('Exmple of in operator ::') print(str2 in str1) print(str3 in str1) print(str4 in str1) print() print(str2 not in str1) print(str3 not in str1) print(str4 not in str1) >>> 'a' in 'program' True >>> 'at' not in 'battle' False ``` ### 3. 
Python Relational Operators All the comparison (relational) operators i.e., **(<, >, <=, >=, ==, !=)** are also applicable for strings. The Strings are compared based on the **ASCII value** or **Unicode**(i.e., dictionary Order). ``` # Example: print("HelloWorld"=="HelloWorld") print("helloWorld">="HelloWorld") print("H"<"h") ``` **Explanation:** The ASCII value of a is 97, b is 98, c is 99 and so on. The ASCII value of A is 65, B is 66, C is 67 and so on. The comparison between strings is done on the basis of ASCII value. The **`%`** operator is used to format a string inserting the value that comes after. It relies on the string containing a format specifier that identifies where to insert the value. The most common types of format specifiers are: - **`%s`** -> string - **`%d`** -> Integer - **`%f`** -> Float - **`%o`** -> Octal - **`%x`** -> Hexadecimal - **`%e`** -> exponential These will be very familiar to anyone who has ever written a C or Java program and follow nearly exactly the same rules as the **[printf() function](https://en.wikipedia.org/wiki/Printf_format_string)**. ``` print("Hello %s" % string1) print("Actual Number = %d" %19) print("Float of the number = %f" %19) print("Octal equivalent of the number = %o" %19) print("Hexadecimal equivalent of the number = %x" %19) print("Exponential equivalent of the number = %e" %19) ``` When referring to multiple variables parentheses are used. Values are inserted in the order they appear in the parentheses (more on tuples in the next section) ``` print("Hello %s %s. My name is Bond, you can call me %d" %(string1,string2,99)) ``` We can also specify the width of the field and the number of decimal places to be used.
For example: ``` print('Print width 10: |%10s|'%'x') print('Print width 10: |%-10s|'%'x') # left justified print("The number pi = %.1f to 1 decimal places"%3.1415) print("The number pi = %.2f to 2 decimal places"%3.1415) print("More space pi = %10.2f"%3.1415) print("Pad pi with 0 = %010.2f"%3.1415) # pad with zeros ``` ### Built-in functions to Work with Python Various built-in functions that work with sequence work with strings as well. Some of the commonly used ones are **`enumerate()`** and **`len()`**. The **[enumerate()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/018_Python_enumerate%28%29.ipynb)** function returns an enumerate object. It contains the index and value of all the items in the string as pairs. This can be useful for iteration. Similarly, **[len()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/040_Python_len%28%29.ipynb)** returns the length (number of characters) of the string. ``` str = 'cold' # enumerate() list_enumerate = list(enumerate(str)) print('list(enumerate(str) = ', list_enumerate) #character count print('len(str) = ', len(str)) ``` ## Python String Formatting ### Escape Sequence If we want to print a text like `He said, "What's there?"`, we can neither use single quotes nor double quotes. This will result in a SyntaxError as the text itself contains both single and double quotes. ``` print("He said, "What's there?"") ``` One way to get around this problem is to use triple quotes. Alternatively, we can use escape sequences. An escape sequence starts with a backslash and is interpreted differently. If we use a single quote to represent a string, all the single quotes inside the string must be escaped. Similar is the case with double quotes. Here is how it can be done to represent the above text. 
``` # using triple quotes print('''He said, "What's there?"''') # escaping single quotes print('He said, "What\'s there?"') # escaping double quotes print("He said, \"What's there?\"") ``` ### Here is a list of all the escape sequences supported by Python. | Escape Sequence | Description | |:----:| :--- | | **`\newline`** | Backslash and newline ignored | | **`\\`** | Backslash | | **`\'`** | Single quote | | **`\"`** | Double quote | | **`\a`** | ASCII Bell | | **`\b`** | ASCII Backspace | | **`\f`** | ASCII Formfeed | | **`\n`** | ASCII Linefeed | | **`\r`** | ASCII Carriage Return | | **`\t`** | ASCII Horizontal Tab | | **`\v`** | ASCII Vertical Tab | | **`\ooo`** | Character with octal value ooo | | **`\xHH`** | Character with hexadecimal value HH | ``` # Here are some examples print("C:\\Python32\\Lib") #C:\Python32\Lib print("This is printed\nin two lines") #This is printed #in two lines print("This is \x48\x45\x58 representation") #This is HEX representation ``` ### Raw String to ignore escape sequence Sometimes we may wish to ignore the escape sequences inside a string. To do this we can place **`r`** or **`R`** in front of the string. This will imply that it is a raw string and any escape sequence inside it will be ignored. ``` print("This is \x61 \ngood example") print(r"This is \x61 \ngood example") ``` ### The `format()` Method for Formatting Strings The **`format()`** method that is available with the string object is very versatile and powerful in formatting strings. Format strings contain curly braces **`{}`** as placeholders or replacement fields which get replaced. We can use positional arguments or keyword arguments to specify the order. 
``` # Python string format() method # default(implicit) order default_order = "{}, {} and {}".format('Allan','Bill','Cory') print('\n--- Default Order ---') print(default_order) # order using positional argument positional_order = "{1}, {0} and {2}".format('Allan','Bill','Cory') print('\n--- Positional Order ---') print(positional_order) # order using keyword argument keyword_order = "{s}, {b} and {j}".format(j='Allan',b='Bill',s='Cory') print('\n--- Keyword Order ---') print(keyword_order) ``` The **`format()`** method can have optional format specifications. They are separated from the field name using colon. For example, we can left-justify **`<`**, right-justify **`>`** or center **`^`** a string in the given space. We can also format integers as binary, hexadecimal, etc. and floats can be rounded or displayed in the exponent format. There are tons of formatting you can use. Visit here for all the **[string formatting available with the format()](https://github.com/milaan9/02_Python_Datatypes/blob/main/002_Python_String_Methods/009_Python_String_format%28%29.ipynb)** method. ``` # formatting integers "Binary representation of {0} is {0:b}".format(12) # formatting floats "Exponent representation: {0:e}".format(1966.365) # round off "One third is: {0:.3f}".format(1/3) # string alignment "|{:<10}|{:^10}|{:>10}|".format('bread','butter','jam') ``` ### Old style formatting We can even format strings like the old **`sprintf()`** style used in C programming language. We use the **`%`** operator to accomplish this. ``` x = 36.3456789 print('The value of x is %3.2f' %x) print('The value of x is %3.4f' %x) ``` ## Common Python String Methods There are numerous methods available with the string object. The **`format()`** method that we mentioned above is one of them. Strings can be tranformed by a variety of functions that are all methods on a string. That is they are called by putting the function name with a **`.`** after the string. 
They include: * Upper vs lower case: **`upper()`**, **`lower()`**, **`capitalize()`**, **`title()`** and **`swapcase()`**, **`join()`**, **`split()`**, **`find()`**, **`replace()`** etc, with mostly the obvious meaning. Note that `capitalize` makes the first letter of the string a capital only, while **`title`** selects upper case for the first letter of every word. * Padding strings: **`center(n)`**, **`ljust(n)`** and **`rjust(n)`** each place the string into a longer string of length n padded by spaces (centered, left-justified or right-justified respectively). **`zfill(n)`** works similarly but pads with leading zeros. * Stripping strings: Often we want to remove spaces, this is achieved with the functions **`strip()`**, **`lstrip()`**, and **`rstrip()`** which remove spaces from both ends, just the left, or just the right respectively. An optional argument can be used to list a set of other characters to be removed. Here is a complete list of all the **[built-in methods to work with Strings in Python](https://github.com/milaan9/02_Python_Datatypes/tree/main/002_Python_String_Methods)**. ``` # Example: s="heLLo wORLd!" print(s.capitalize(),"vs",s.title()) print("upper case: '%s'"%s.upper(),"lower case: '%s'"%s.lower(),"and swapped: '%s'"%s.swapcase()) print('|%s|' % "Hello World".center(30)) # center in 30 characters print('|%s|'% " lots of space ".strip()) # remove leading and trailing whitespace print('%s without leading/trailing d,h,L or ! = |%s|',s.strip("dhL!")) print("Hello World".replace("World","Class")) ``` #### Inspecting Strings There are also lots of ways to inspect or check strings.
Examples of a few of these are given here: * Checking the start or end of a string: **`startswith("string")`** and **`endswith("string")`** checks if it starts/ends with the string given as argument * Capitalisation: There are boolean counterparts for all forms of capitalisation, such as **`isupper()`**, **`islower()`** and **`istitle()`** * Character type: does the string only contain the characters: * 0-9: **`isdecimal()`**. Note there is also **`isnumeric()`** and **`isdigit()`** which are effectively the same function except for certain unicode characters * a-zA-Z: **`isalpha()`** or combined with digits: **`isalnum()`** * non-control code: **`isprintable()`** accepts anything except '\n' an other ASCII control codes * \t\n \r (white space characters): **`isspace()`** * Suitable as variable name: **`isidentifier()`** * Find elements of string: **`s.count(w)`** finds the number of times **`w`** occurs in **`s`**, while **`s.find(w)`** and **`s.rfind(w)`** find the first and last position of the string **`w`** in **`s`**. ``` # Example: s="Hello World" print("The length of '%s' is"%s,len(s),"characters") # len() gives length of the string s.startswith("Hello") and s.endswith("World") # check start/end # count strings print("There are %d 'l's but only %d World in %s" % (s.count('l'),s.count('World'),s)) print('"el" is at index',s.find('el'),"in",s) #index from 0 or -1 ``` ## Advanced string processing For more advanced string processing there are many libraries available in Python including for example: * **re** for regular expression based searching and splitting of strings * **html** for manipulating HTML format text * **textwrap** for reformatting ASCII text * ... and many more
github_jupyter
``` import pandas as pd import numpy as np visit = pd.read_csv("visitorCount.csv",dtype=str) a = visit.melt( id_vars=['time']) # a.to_csv("visitorMelt.csv") movement = pd.read_csv("movements.csv") movement = movement.astype('category') len(movement) stations = pd.read_csv("stations.csv") stations['double_count'] = False stations[stations['serial']=="000000007b5207b6"] any(movement[movement['hash']=='013c76b508f0d5d70b060e9f7248771ef4314b90b811f7b0b1734824'].groupby(['time']).size() > 1) # df = pd.DataFrame(columns=['Hash',"Serial"]) mat = np.array([]) for i in movement.groupby(['hash','serial']).size()[0:70].index: try: if(i[0] == a[0]): mat = np.append(mat,np.array(a)) mat = np.append(mat,np.array(i)) except: pass a = i mat = mat.reshape(int(len(mat)/2),2) mat for row in mat: if(any(movement[movement['hash']==row[0]].groupby(['time']).size() > 1)): a = stations[stations['serial']==row[1]].index[0] stations.at[a,'double_count'] = True # stations.to_csv("stations_dbl_count.csv") hashToAddress = dict(list(zip(stations['serial'].values,stations['address'].values))) hashToAddress from sklearn.preprocessing import normalize weather = pd.read_csv("Helsinki_weather_data.csv", dtype=str) weather['Time'] = weather['d'] + "/" + weather['m'] + "/" + weather['Year'] + " "+ weather['Time'] # normalize(weather[['Cloud amount (1/8)','Pressure (msl) (hPa)','Relative humidity (%)', # 'Precipitation intensity (mm/h)','Snow depth (cm)','Air temperature (degC)', # 'Dew-point temperature (degC)','Horizontal visibility (m)','Wind direction (deg)', # 'Gust speed (m/s)','Wind speed (m/s)']], axis=1).ravel() # weather[['Cloud amount (1/8)','Pressure (msl) (hPa)']] # weather.to_csv("weather_mod.csv") from datetime import datetime new = pd.DataFrame() weather['time'] = weather['Time'].apply(lambda x: datetime.strptime(x,'%d/%m/%Y %H:%M')) visit['time'] = visit['time'].apply(lambda x: datetime.strptime(x,'%d/%m/%Y %H:%M')) # sum(new['w_time'] == new['v_time']) 
weather.drop(columns='Time',inplace=True) weather.dtypes visit.rename(columns=hashToAddress,inplace=True) visit output = visit.set_index('time').join(weather.set_index('time'),how="left",rsuffix = "_").reset_index() output.to_csv('joined_Visit_Weather_updated.csv') output output accuracy = pd.DataFrame() for hashs in hashToAddress.keys(): try: a = pd.read_csv(hashs+"Prediction.csv") a['station'] = hashToAddress[hashs] a['Time'] = pd.date_range(start='19/8/2019', end='20/8/2019',freq="120s") accuracy = accuracy.append(a) # print(a.head()) except: pass accuracy.to_csv("accuracyPlot_updated.csv") accuracy pd.date_range(start='19/8/2019', end='20/8/2019',freq="120s") ```
github_jupyter
# Time series in Pastas *R.A. Collenteur, University of Graz, 2020* Time series are at the heart of time series analysis, and therefore need to be considered carefully when dealing with time series models. In this notebook more background information is provided on important characteristics of time series and how these may influence your modeling results. In general, Pastas depends heavily on Pandas for dealing with time series, but adds capabilities to deal with irregular time series and missing data. All time series should be provided to Pastas as `pandas.Series` with a `pandas.DatetimeIndex`. Internally these time series are stored in a `pastas.TimeSeries` object. The goal of this object is to validate the user-provided time series and enable resampling (changing frequencies) of the independent time series. The TimeSeries object also has capabilities to deal with missing data in the user-provided time series. As much of these operations occur internally, this notebook is meant to explain users what is happening and how to check for this. <div class="alert alert-info"> <b>Note</b> * The standard Pastas data type for a date is the `pandas.Timestamp`. * The standard Pastas data type for a sequence of dates is the `pandas.DatetimeIndex` with `pandas.Timestamp`. * The standard Pastas data type for a time series is a `pandas.Series` with a `pandas.DatetimeIndex` </div> ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import pastas as ps ps.show_versions() ``` ## Different types of time series Time series data may generally be defined as a set of data values observed at certain times, ordered in a way that the time indices are increasing. Many time series analysis method assume that the time step between the observations is regular, the time series has evenly-spaced observations. These evenly spaced time series may have missing data, but it will still be possible to lay the values on a time-grid with constant time steps. 
This is generally also assumed to be the case for the independent time series in hydrological studies. For example, the precipitation records may have some missing data but the precipitation is reported as the total rainfall over one day. In the case of missing data, we may impute a zero (no rain) or the rainfall amount from a nearby measurement station. Groundwater level time series generally do not share these characteristics with other hydrological time series, and are measured at irregular time intervals. This is especially true for historic time series that were measured by hand. The result is that the measurements cannot be laid on a regular time grid. The figure below graphically shows the difference between the three types of time series. ``` regular = pd.Series(index=pd.date_range("2000-01-01", "2000-01-10", freq="D"), data=np.ones(10)) missing_data = regular.copy() missing_data.loc[["2000-01-03", "2000-01-08"]] = np.nan index = [t + pd.Timedelta(np.random.rand()*24, unit="H") for t in missing_data.index] irregular = missing_data.copy() irregular.index = index fig, axes = plt.subplots(3,1, figsize=(6, 5), sharex=True, sharey=True) regular.plot(ax=axes[0], linestyle=" ", marker="o", x_compat=True) missing_data.plot(ax=axes[1], linestyle=" ", marker="o", x_compat=True) irregular.plot(ax=axes[2], linestyle=" ", marker="o", x_compat=True) for i, name in enumerate(["(a) Regular time steps", "(b) Missing Data", "(c) Irregular time steps"]): axes[i].grid() axes[i].set_title(name) plt.tight_layout() ``` ## Independent and dependent time series We can differentiate between two types of input time series for Pastas models: the dependent and independent time series. The dependent time series are those that we want to explain (e.g., the groundwater levels) and the independent time series are those that we use to explain the dependent time series (e.g., precipitation or evaporation). 
The requirements for these time series are different: - The dependent time series may be of any kind: regular, missing data or irregular. - The independent time series has to have regular time steps. In practice, this means that the time series provided to `pastas.Model` may be of any kind, and that the time series used by the stressmodels (e.g., `pastas.RechargeModel`) need to have regular time steps. The regular time steps are required to simulate contributions to the groundwater level fluctuations. As there are virtually no restrictions on the dependent time series, the remainder of this notebook will discuss primarily the independent time series. ## How does the TimeSeries object validate a time series? To ensure that a time series can be used for simulation, a number of things are checked and changed: 1. Make sure the values are floats. Values are changed to dtype=float if not. 2. Make sure the index is a `pandas.DatetimeIndex`. Index is changed if not. 3. Make sure the timestamps in the index are increasing. Index is sorted if not. 4. Make sure there are no nan-values at the start and end of a time series. 5. Determine the frequency of the time series. 6. Make sure there are no duplicate indices. Values are averaged if this is the case. 7. Remove or fill up nan-values, depending on the settings. For each of these steps an INFO message will be returned by Pastas to inform the user if a change is made. The first four steps generally do not have a large impact and are there to prevent some basic issues. Preferably, no changes are reported. ### Frequency of the input data Pastas tries to determine the frequency in step 5, and will **always** report the result. It is generally good practice to double-check if the reported frequency agrees with what you know about the time series. Pastas will also report if no frequency can be inferred. 
If no frequency is reported there is probably something wrong and the user should either fix the input time series or provide Pastas with more information. Below we consider a time series with precipitation data, measured every day. We will use `settings="prec"` as a shortcut for the settings to fill nans and resample. We will come back to those settings later. ``` rain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True) ps.TimeSeries(rain, settings="prec") ``` Pastas correctly reports the frequency and we can continue with this time series. Note that the input time series thus agrees with all the checks for the time series validation. Let's now introduce a nan-value and see what happens. ``` rain["1989-01-01"] = np.nan ps.TimeSeries(rain, settings="prec") ``` This also works fine. The frequency was inferred (stored as freq_original) and one nan-value was filled up with 0.0. Now we take the same time series, but drop the nan-value. ``` ps.TimeSeries(rain.dropna(), settings="prec") ``` The above result is probably not what we want. Pastas could not infer the frequency and therefore resorts to the `timestep_weighted_resample` method. Documentation for this method is available in utils.py. If we know the original frequency of the time series, we can tell this to Pastas through the `freq_original` argument. As we can see below, the user-provided frequency is used. ``` rain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True) rain["1989-01-01"] = np.nan ps.TimeSeries(rain.dropna(), settings="prec", freq_original="D") ``` The above example shows how to obtain the same or different result with four different methods. Some of these methods require good knowledge about the TimeSeries object and how it processes your time series. It is often preferred to provide Pastas with a better initial time series by resampling it yourself. 
This has the additional benefit that you are interacting more closely with the data. Most of the examples also follow this pattern. <div class="alert alert-info"> <b>Best practice</b> Try and modify your original time series such that Pastas returns a message that it was able to infer the frequency from the time series itself: **INFO: Inferred frequency for time series rain: freq=D** </div> ## Time series settings In the examples above we used the `settings` keyword when creating the TimeSeries. This is a shortcut method to select a number of settings from a predefined set of options. These predefined options can be accessed through `ps.rcParams["timeseries"]`: ``` pd.DataFrame.from_dict(ps.rcParams["timeseries"]) ``` Each column name is a valid option for the `settings` argument. The rows show the settings that may be chosen for changing the original time series. Once a TimeSeries is created, we can access the existing settings as follows: ``` ts = ps.TimeSeries(rain, settings="prec") ts.settings ``` This settings dictionary now includes both settings used to resample (sample_up, sample_down), extend (fill_before, fill_after), normalize (norm), and fill nans in the time series, but also dynamic settings such as the start and end date (tmin, tmax), the frequency (freq) and the time offset. To update these settings, the `update_series` method is available. For example, if we want to resample the above time series to a 7-day frequency and sum up the values we can use: ``` ts.update_series(freq="7D", sample_down="sum") ``` Because the original series are stored in the TimeSeries object as well, it is also possible to go back again. Changes made to the time series always start from the original validated time series again. For more information on the possible settings see the API-docs for the [TimeSeries and update_series method](https://pastas.readthedocs.io/en/latest/api/timeseries.html) on the documentation website. 
## An example with a Pastas Model By now you may be wondering why all these settings exist in the first place. The main reason (apart from validating the user-provided time series) is to change the time step of the simulation of the independent time series. It may also be used to extend the time series in time. Below we load some time series, visualize them and create a Pastas model with precipitation and evaporation to explain the groundwater level fluctuations. It is generally recommended to plot your time series for a quick visual check of the input data. ``` head = pd.read_csv("../examples/data/B32C0639001.csv", parse_dates=['date'], index_col='date', squeeze=True) rain = pd.read_csv('../examples/data/rain_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True) evap = pd.read_csv('../examples/data/evap_nb1.csv', parse_dates=['date'], index_col='date', squeeze=True) fig, axes = plt.subplots(3,1, figsize=(10,6), sharex=True) head.plot(ax=axes[0], x_compat=True, linestyle=" ", marker=".") evap.plot(ax=axes[1], x_compat=True) rain.plot(ax=axes[2], x_compat=True) axes[0].set_ylabel("Head [m]") axes[1].set_ylabel("Evap [mm/d]") axes[2].set_ylabel("Rain [mm/d]") plt.xlim("1985", "2005"); ml = ps.Model(head) rch = ps.rch.Linear() rm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=ps.Gamma, name="rch") ml.add_stressmodel(rm) ml.solve(noise=True, tmin="1990", report="basic") ``` ## What is the model freq? The output below shows that the time series have frequencies of `freq=D`. The fit report also shows a frequency of `freq=D`. The frequency reported in the fit_report is the time step of the simulation for the independent time series, and is internally passed on to the stressmodels. The user-provided independent time series are stored in the stressmodel object and can be accessed as follows. 
``` ml.stressmodels["rch"].stress ``` If we want to change the resample method, for example we want to sum the precipitation and evaporation when sampling down (e.g., daily to weekly) we may do the following: ``` for stress in ml.stressmodels["rch"].stress: stress.update_series(sample_down="sum") ``` After changing the methods for sampling down, we now solve the model with a simulation time step of 14 days. The precipitation and evaporation are then summed up over 14 day intervals, before being translated to a groundwater fluctuation using a response function. ``` ml.settings ml.solve(freq="14D", tmin="1980", report="basic") ml.plots.results(figsize=(10,6), tmin="1970"); ml.stressmodels["rch"].stress[1].update_series(tmin="1960") ml.stressmodels["rch"].stress[1].settings ``` Another method to obtain the settings of the time series used in a stressmodel is as follows: ``` ml.get_stressmodel_settings("rch") ``` ## Warnings Because the TimeSeries object is a relatively complicated object that can potentially change model results, extra care has to be taken in some cases. Below is a number of outstanding warnings and the related GitHub issues. <div class="alert alert-warning"> <b>A note on dependent time series</b> The dependent time series (stored as `ml.oseries`) are also stored in a TimeSeries object and therefore have the same capabilities. Usage of these methods on the dependent time series is however experimental and not recommended for real world use cases. See also [Issue #68](https://github.com/pastas/pastas/issues/68) and [Discussion #199](https://github.com/pastas/pastas/discussions/199) </div> <div class="alert alert-warning"> <b>A note on monthly data</b> Monthly data is strictly irregular data, and poses additional challenges when resampling to regular frequencies. Pastas does not differentiate between monthly data reported at months end (`freq=M`) or months beginning (`freq=MS`) and the default settings are selected for `freq=M`. 
There may also be issues with extending the time series. See also [Issue #239](https://github.com/pastas/pastas/issues/239) </div>
github_jupyter
``` #import the necessary modules %matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas as pd import scipy import sklearn import itertools from itertools import cycle import os.path as op import timeit import json import math import multiprocessing as m_proc m_proc.cpu_count() # Import MDAnalysis import MDAnalysis as mda import statsmodels as stats from MDAnalysis.analysis import polymer, distances, rdf import matplotlib.font_manager as font_manager from polymer_MD_analysis import pers_length, get_rg_pers_poly, bavg_pers_cnt ``` ## PEG/dmso system analysis ### N = 6 PEG/DMSO ``` # For the right Rg calculation using MD Analysis, use trajactory without pbc n6_peg_dmso = mda.Universe("n6peg_dmso/n6pegonly_dmso.pdb", "n6peg_dmso/nodmso_n6peg.xtc") n6_peg_dmso.trajectory len(n6_peg_dmso.trajectory) #Select the polymer heavy atoms peg_n6dmso = n6_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H") crv_n6peg_dmso = pers_length(peg_n6dmso,6) crv_n6peg_dmso com_bond = np.zeros(shape=(1,18000)) count = 0 for ts in n6_peg_dmso.trajectory[0:18000]: n6_mon1_dmso = n6_peg_dmso.select_atoms("resid 1") n6_mon2_dmso = n6_peg_dmso.select_atoms("resid 2") oo_len = mda.analysis.distances.distance_array(n6_mon1_dmso.center_of_mass(), n6_mon2_dmso.center_of_mass(), box=n6_peg_dmso.trajectory.ts.dimensions) com_bond[0, count] = oo_len count += 1 com_bond lb_avg_pn6 = np.mean(com_bond) lb_avg_pn6 np.std(com_bond) ``` ### Radius of Gyration vs. 
time N = 6 PEG/dmso ``` n6peg_rgens_dmso, cor_n6peg_dmso, N6peg_cos_dmso, rgdmso_n6peg = get_rg_pers_poly(peg_n6dmso, n6_peg_dmso, 0, 18000) n6peg_rgens_dmso[0].shape cor_n6peg_dmso[3] N6peg_cos_dmso rgdmso_n6peg np.std(n6peg_rgens_dmso) trj_len = np.arange(18000) #trj_len += 1 trj_len plt.figure(figsize=(7,7)) plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01) plt.xlabel(r'Time [ns]', fontsize=15) plt.ylabel(r'$R_{g}$ [nm]', fontsize=15) plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69') plt.tick_params(labelsize=14) plt.legend(['N = 6 in DMSO'], frameon=False, fontsize=14) #plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial') plt.xlim(0,180) plt.ylim(0.2,2) ``` #### Correlation values at each arc length for the whole 180 ns trajectory, N = 6 PEG/dmso ``` # x values blen_dmso = cor_n6peg_dmso[3]*lb_avg_pn6 #nt_tt[0] = 0 blen_dmso # Error prop. into natural log std deviation mk_n6p_dmso = cor_n6peg_dmso[1]/cor_n6peg_dmso[0] mk_n6p_dmso plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='b', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') # All the points give the best fits for N = 6 peg in water n6_blkspeg_dmso , n6peg_lpdmso = bavg_pers_cnt(5, peg_n6dmso, n6_peg_dmso, lb_avg_pn6, 5, 3000 , 18000) n6_blkspeg_dmso n6peg_lpdmso n6peg_lpdmso[2] np.mean(n6peg_lpdmso[3]) def line_fit(slope, x): return slope*x blen_dmso gg_n6peg_dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_dmso) gg_n6peg_dmso ``` ### Block averaged Radius of gyration and persistence length, N = 6 PEG/DMSO ``` np.mean(n6_blkspeg_dmso["Avg persistence length"]) np.std(n6_blkspeg_dmso["Avg persistence length"]) np.mean(n6_blkspeg_dmso["Avg Radius of gyration"]) np.std(n6_blkspeg_dmso["Avg Radius of gyration"]) plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, 
ecolor='black') plt.plot(blen_dmso, gg_n6peg_dmso, color='#1F2E69') plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01) plt.xlabel(r'Bond Length', fontsize=15) plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15) #plt.ylim(-1.9,0) font = font_manager.FontProperties(family='Arial', style='normal', size='14') plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font) plt.tick_params(labelsize=14) #plt.text(0.5, -6.94,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69') rgpeg_olig_dmso = pd.DataFrame(data=n6_blkspeg_dmso["Avg Radius of gyration"], columns=['$R_{g}$ [Angstrom] N = 6 PEG DMSO']) rgpeg_olig_dmso pers_pegt_dmso = pd.DataFrame(data=n6_blkspeg_dmso["Avg persistence length"], columns=[r"$L_{p}$ [Angstrom] N = 6 PEG DMSO "]) pers_pegt_dmso ``` ### N = 8 PEG/DMSO ``` # For the right Rg calculation using MD Analysis, use trajactory without pbc n8_peg_dmso = mda.Universe("n8peg_dmso/n8pegonly_dmso.pdb", "n8peg_dmso/nodmso_n8peg.xtc") n8_peg_dmso.trajectory len(n8_peg_dmso.trajectory) #Select the polymer heavy atoms peg_n8dmso = n8_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H") crv_n8peg_dmso = pers_length(peg_n8dmso,8) crv_n8peg_dmso com_bond_n8dmso = np.zeros(shape=(1,18000)) count = 0 for ts in n8_peg_dmso.trajectory[0:18000]: n8_mon1_dmso = n8_peg_dmso.select_atoms("resid 1") n8_mon2_dmso = n8_peg_dmso.select_atoms("resid 2") oo_len = mda.analysis.distances.distance_array(n8_mon1_dmso.center_of_mass(), n8_mon2_dmso.center_of_mass(), box=n8_peg_dmso.trajectory.ts.dimensions) com_bond_n8dmso[0, count] = oo_len count += 1 com_bond lb_avg_pn6 np.std(com_bond) np.mean(com_bond_n8dmso) np.std(com_bond_n8dmso) ``` ### Radius of Gyration vs. 
time N = 8 PEG/dmso ``` n8peg_rgens_dmso, cor_n8peg_dmso, N8peg_cos_dmso, rgdmso_n8peg = get_rg_pers_poly(peg_n8dmso, n8_peg_dmso, 0, 18000) n8peg_rgens_dmso[0].shape cor_n8peg_dmso[3] N8peg_cos_dmso rgdmso_n8peg np.std(n8peg_rgens_dmso) trj_len = np.arange(18000) #trj_len += 1 trj_len plt.figure(figsize=(7,7)) plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01) plt.xlabel(r'Time [ns]', fontsize=15) plt.ylabel(r'$R_{g}$ [nm]', fontsize=15) plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69') plt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED') plt.tick_params(labelsize=14) plt.legend(['N = 6 in DMSO','N = 8 in DMSO'], frameon=False, fontsize=14) #plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial') plt.xlim(0,180) plt.ylim(0.2,2) ``` #### Correlation values at each arc length for the whole 180 ns trajectory, N = 8 PEG/dmso ``` # x values blen_n8dmso = cor_n8peg_dmso[3]*lb_avg_pn6 #nt_tt[0] = 0 blen_n8dmso # Error prop. 
into natural log std deviation mk_n8p_dmso = cor_n8peg_dmso[1]/cor_n8peg_dmso[0] mk_n8p_dmso plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.legend(['N = 6 in DMSO','N = 8 in DMSO'], frameon=False, fontsize=14) # All the points give the best fits for N = 6 peg in water n8_blkspeg_dmso , n8peg_lpdmso = bavg_pers_cnt(5, peg_n8dmso, n8_peg_dmso, lb_avg_pn6, 5, 3000 , 18000) n8_blkspeg_dmso n8peg_lpdmso n8peg_lpdmso[2] np.mean(n8peg_lpdmso[3]) blen_dmso blen_n8dmso gg_n8peg_dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n8dmso) gg_n6peg_n8dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n8dmso) gg_n8peg_dmso ``` ### Block averaged Radius of gyration and persistence length, N = 8 PEG/DMSO ``` np.mean(n8_blkspeg_dmso["Avg persistence length"]) np.std(n8_blkspeg_dmso["Avg persistence length"]) np.mean(n8_blkspeg_dmso["Avg Radius of gyration"]) np.std(n8_blkspeg_dmso["Avg Radius of gyration"]) plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.plot(blen_n8dmso, gg_n6peg_n8dmso, color='#1F2E69') plt.plot(blen_n8dmso, gg_n8peg_dmso, color='#4C80ED') plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01) plt.xlabel(r'Bond Length', fontsize=15) plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15) plt.ylim(-6,1) plt.xlim(0,30) #plt.ylim(-1.9,0) font = font_manager.FontProperties(family='Arial', style='normal', size='14') #plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 
$\AA$'], loc=3, frameon=0, fontsize=14, prop=font) plt.tick_params(labelsize=14) plt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69') plt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\AA$ ± 2.49 $\AA$', fontsize=15, color='#4C80ED') rgpeg_olig_dmso[r"$R_{g}$ [Angstrom] N = 8 PEG DMSO "] = n8_blkspeg_dmso["Avg Radius of gyration"] rgpeg_olig_dmso pers_pegt_dmso[r"$L_{p}$ [Angstrom] N = 8 PEG DMSO "] = n8_blkspeg_dmso["Avg persistence length"] pers_pegt_dmso ``` ### N = 10 PEG/DMSO ``` # For the right Rg calculation using MD Analysis, use trajactory without pbc n10_peg_dmso = mda.Universe("n10peg_dmso/n10pegonly_dmso.pdb", "n10peg_dmso/nodmso_n10peg.xtc") n10_peg_dmso.trajectory len(n10_peg_dmso.trajectory) #Select the polymer heavy atoms peg_n10dmso = n10_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H") crv_n10peg_dmso = pers_length(peg_n10dmso,10) crv_n10peg_dmso com_bond_n10dmso = np.zeros(shape=(1,18000)) count = 0 for ts in n10_peg_dmso.trajectory[0:18000]: n10_mon1_dmso = n10_peg_dmso.select_atoms("resid 1") n10_mon2_dmso = n10_peg_dmso.select_atoms("resid 2") oo_len = mda.analysis.distances.distance_array(n10_mon1_dmso.center_of_mass(), n10_mon2_dmso.center_of_mass(), box=n10_peg_dmso.trajectory.ts.dimensions) com_bond_n10dmso[0, count] = oo_len count += 1 com_bond lb_avg_pn6 np.std(com_bond) np.mean(com_bond_n10dmso) np.std(com_bond_n10dmso) ``` ### Radius of Gyration vs. 
time N = 10 PEG/dmso ``` n10peg_rgens_dmso, cor_n10peg_dmso, N10peg_cos_dmso, rgdmso_n10peg = get_rg_pers_poly(peg_n10dmso, n10_peg_dmso, 0, 18000) n10peg_rgens_dmso[0].shape cor_n10peg_dmso[3] N10peg_cos_dmso rgdmso_n10peg np.std(n10peg_rgens_dmso) trj_len = np.arange(18000) #trj_len += 1 trj_len plt.figure(figsize=(7,7)) plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01) plt.xlabel(r'Time [ns]', fontsize=15) plt.ylabel(r'$R_{g}$ [nm]', fontsize=15) plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69') plt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED') plt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC') plt.tick_params(labelsize=14) plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO' ], frameon=False, fontsize=14) #plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial') plt.xlim(0,180) plt.ylim(0.2,2) ``` #### Correlation values at each arc length for the whole 180 ns trajectory, N = 10 PEG/dmso ``` # x values blen_n10dmso = cor_n10peg_dmso[3]*lb_avg_pn6 #nt_tt[0] = 0 blen_n10dmso # Error prop. 
into natural log std deviation mk_n10p_dmso = cor_n10peg_dmso[1]/cor_n10peg_dmso[0] mk_n10p_dmso plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO'], frameon=False, fontsize=14) # All the points give the best fits for N = 6 peg in water n10_blkspeg_dmso , n10peg_lpdmso = bavg_pers_cnt(5, peg_n10dmso, n10_peg_dmso, lb_avg_pn6, 5, 3000 , 18000) n10_blkspeg_dmso n10peg_lpdmso n10peg_lpdmso[2] np.mean(n10peg_lpdmso[3]) blen_dmso blen_n10dmso gg_n10peg_dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n10dmso) gg_n6peg_n10dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n10dmso) gg_n8peg_n10dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n10dmso) gg_n10peg_dmso ``` ### Block averaged Radius of gyration and persistence length, N = 10 PEG/DMSO ``` np.mean(n10_blkspeg_dmso["Avg persistence length"]) np.std(n10_blkspeg_dmso["Avg persistence length"]) np.mean(n10_blkspeg_dmso["Avg Radius of gyration"]) np.std(n10_blkspeg_dmso["Avg Radius of gyration"]) plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.plot(blen_n10dmso, gg_n6peg_n10dmso, 
color='#1F2E69') plt.plot(blen_n10dmso, gg_n8peg_n10dmso, color='#4C80ED') plt.plot(blen_n10dmso, gg_n10peg_dmso, color='#8C52FC') plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01) plt.xlabel(r'Bond Length', fontsize=15) plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15) plt.ylim(-6,1) plt.xlim(0,30) #plt.ylim(-1.9,0) font = font_manager.FontProperties(family='Arial', style='normal', size='14') #plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font) plt.tick_params(labelsize=14) plt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69') plt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\AA$ ± 2.49 $\AA$', fontsize=15, color='#4C80ED') plt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\AA$ ± 2.31 $\AA$', fontsize=15, color='#8C52FC') rgpeg_olig_dmso[r"$R_{g}$ [Angstrom] N = 10 PEG DMSO "] = n10_blkspeg_dmso["Avg Radius of gyration"] rgpeg_olig_dmso pers_pegt_dmso[r"$L_{p}$ [Angstrom] N = 10 PEG DMSO "] = n10_blkspeg_dmso["Avg persistence length"] pers_pegt_dmso ``` ### N = 20 PEG/DMSO ``` # For the right Rg calculation using MD Analysis, use trajactory without pbc n20_peg_dmso = mda.Universe("n20peg_dmso/n20pegonly_dmso.pdb", "n20peg_dmso/nodmso_n20peg.xtc") n20_peg_dmso.trajectory len(n20_peg_dmso.trajectory) #Select the polymer heavy atoms peg_n20dmso = n20_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H") crv_n20peg_dmso = pers_length(peg_n20dmso,20) crv_n20peg_dmso com_bond_n20dmso = np.zeros(shape=(1,18000)) count = 0 for ts in n20_peg_dmso.trajectory[0:18000]: n20_mon1_dmso = n20_peg_dmso.select_atoms("resid 1") n20_mon2_dmso = n20_peg_dmso.select_atoms("resid 2") oo_len = mda.analysis.distances.distance_array(n20_mon1_dmso.center_of_mass(), n20_mon2_dmso.center_of_mass(), box=n20_peg_dmso.trajectory.ts.dimensions) com_bond_n20dmso[0, count] = oo_len count += 1 com_bond lb_avg_pn6 np.std(com_bond) 
np.mean(com_bond_n20dmso) np.std(com_bond_n20dmso) ``` ### Radius of Gyration vs. time N = 20 PEG/dmso ``` n20peg_rgens_dmso, cor_n20peg_dmso, N20peg_cos_dmso, rgdmso_n20peg = get_rg_pers_poly(peg_n20dmso, n20_peg_dmso, 0, 18000) n20peg_rgens_dmso[0].shape cor_n20peg_dmso[3] N20peg_cos_dmso rgdmso_n20peg np.std(n20peg_rgens_dmso) plt.figure(figsize=(7,7)) plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01) plt.xlabel(r'Time [ns]', fontsize=15) plt.ylabel(r'$R_{g}$ [nm]', fontsize=15) plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69') plt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED') plt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC') plt.plot(trj_len/100, n20peg_rgens_dmso[0]/10,linewidth=2, color='#8B7F47') plt.tick_params(labelsize=14) plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO'], frameon=False, fontsize=14) #plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial') plt.xlim(0,180) plt.ylim(0.2,2) ``` #### Correlation values at each arc length for the whole 180 ns trajectory, N = 20 PEG/dmso ``` # x values blen_n20dmso = cor_n20peg_dmso[3]*lb_avg_pn6 #nt_tt[0] = 0 blen_n20dmso # Error prop. 
into natural log std deviation mk_n20p_dmso = cor_n20peg_dmso[1]/cor_n20peg_dmso[0] mk_n20p_dmso plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO'], frameon=False, fontsize=14) # All the points give the best fits for N = 6 peg in water n20_blkspeg_dmso , n20peg_lpdmso = bavg_pers_cnt(5, peg_n20dmso, n20_peg_dmso, lb_avg_pn6, 5, 3000 , 18000) n20_blkspeg_dmso n20peg_lpdmso n20peg_lpdmso[2] np.mean(n20peg_lpdmso[3]) blen_dmso blen_n20dmso gg_n20peg_dmso = line_fit(np.mean(n20peg_lpdmso[2]),blen_n20dmso) gg_n6peg_n20dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n20dmso) gg_n8peg_n20dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n20dmso) gg_n10peg_n20dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n20dmso) gg_n20peg_dmso ``` ### Block averaged Radius of gyration and persistence length, N = 20 PEG/DMSO ``` np.mean(n20_blkspeg_dmso["Avg persistence length"]) np.std(n20_blkspeg_dmso["Avg persistence length"]) np.mean(n20_blkspeg_dmso["Avg Radius of gyration"]) np.std(n20_blkspeg_dmso["Avg Radius of gyration"]) plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o', 
capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.plot(blen_n20dmso[:15], gg_n6peg_n20dmso[:15], color='#1F2E69') plt.plot(blen_n20dmso[:15], gg_n8peg_n20dmso[:15], color='#4C80ED') plt.plot(blen_n20dmso[:15], gg_n10peg_n20dmso[:15], color='#8C52FC') plt.plot(blen_n20dmso[:15], gg_n20peg_dmso[:15], color='#8B7F47') plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01) plt.xlabel(r'Bond Length', fontsize=15) plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15) plt.ylim(-6,1) plt.xlim(0,70) #plt.ylim(-1.9,0) font = font_manager.FontProperties(family='Arial', style='normal', size='14') #plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font) plt.tick_params(labelsize=14) plt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69') plt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\AA$ ± 2.49 $\AA$', fontsize=15, color='#4C80ED') plt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\AA$ ± 2.31 $\AA$', fontsize=15, color='#8C52FC') plt.text(0.5, -4.90,r'$N_{PEG}$ = 20: $L_{p}$ = 22.9 $\AA$ ± 1.21 $\AA$', fontsize=15, color='#8B7F47') rgpeg_olig_dmso[r"$R_{g}$ [Angstrom] N = 20 PEG DMSO "] = n20_blkspeg_dmso["Avg Radius of gyration"] rgpeg_olig_dmso pers_pegt_dmso[r"$L_{p}$ [Angstrom] N = 20 PEG DMSO "] = n20_blkspeg_dmso["Avg persistence length"] pers_pegt_dmso ``` ### N = 30 PEG/DMSO ``` # For the right Rg calculation using MD Analysis, use trajactory without pbc n30_peg_dmso = mda.Universe("n30peg_dmso/n30pegonly_dmso.pdb", "n30peg_dmso/nodmso_n30peg.xtc") n30_peg_dmso.trajectory len(n30_peg_dmso.trajectory) #Select the polymer heavy 
atoms peg_n30dmso = n30_peg_dmso.select_atoms("resname sPEG PEG tPEG and not type H") crv_n30peg_dmso = pers_length(peg_n30dmso,30) crv_n30peg_dmso com_bond_n30dmso = np.zeros(shape=(1,18000)) count = 0 for ts in n30_peg_dmso.trajectory[0:18000]: n30_mon1_dmso = n30_peg_dmso.select_atoms("resid 1") n30_mon2_dmso = n30_peg_dmso.select_atoms("resid 2") oo_len = mda.analysis.distances.distance_array(n30_mon1_dmso.center_of_mass(), n30_mon2_dmso.center_of_mass(), box=n30_peg_dmso.trajectory.ts.dimensions) com_bond_n30dmso[0, count] = oo_len count += 1 com_bond lb_avg_pn6 np.std(com_bond) np.mean(com_bond_n30dmso) np.std(com_bond_n30dmso) ``` ### Radius of Gyration vs. time N = 30 PEG/dmso ``` n30peg_rgens_dmso, cor_n30peg_dmso, N30peg_cos_dmso, rgdmso_n30peg = get_rg_pers_poly(peg_n30dmso, n30_peg_dmso, 0, 18000) n30peg_rgens_dmso[0].shape cor_n30peg_dmso[3] N30peg_cos_dmso rgdmso_n30peg np.std(n30peg_rgens_dmso) plt.figure(figsize=(7,7)) plt.title(r'PEG Radius of Gyration', fontsize=18, y=1.01) plt.xlabel(r'Time [ns]', fontsize=15) plt.ylabel(r'$R_{g}$ [nm]', fontsize=15) plt.plot(trj_len/100, n6peg_rgens_dmso[0]/10,linewidth=2, color='#1F2E69') plt.plot(trj_len/100, n8peg_rgens_dmso[0]/10,linewidth=2, color='#4C80ED') plt.plot(trj_len/100, n10peg_rgens_dmso[0]/10,linewidth=2, color='#8C52FC') plt.plot(trj_len/100, n20peg_rgens_dmso[0]/10,linewidth=2, color='#8B7F47') plt.plot(trj_len/100, n30peg_rgens_dmso[0]/10,linewidth=2, color='#63ACBE') plt.tick_params(labelsize=14) plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO','N = 30 in DMSO'], frameon=False, fontsize=14) #plt.text(127, 0.96,r'N = 6 in water', fontsize=18, color='#1F2E69', family='Arial') plt.xlim(0,180) plt.ylim(0.2,3) ``` #### Correlation values at each arc length for the whole 180 ns trajectory, N = 30 PEG/dmso ``` # x values blen_n30dmso = cor_n30peg_dmso[3]*lb_avg_pn6 #nt_tt[0] = 0 blen_n30dmso # Error prop. 
into natural log std deviation mk_n30p_dmso = cor_n30peg_dmso[1]/cor_n30peg_dmso[0] mk_n30p_dmso plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n30dmso, np.log(cor_n30peg_dmso[0]), yerr=mk_n30p_dmso, color='#63ACBE', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.legend(['N = 6 in DMSO','N = 8 in DMSO','N = 10 in DMSO','N = 20 in DMSO','N = 30 in DMSO'], frameon=False, fontsize=14) # All the points give the best fits for N = 6 peg in water n30_blkspeg_dmso , n30peg_lpdmso = bavg_pers_cnt(5, peg_n30dmso, n30_peg_dmso, lb_avg_pn6, 5, 3000 , 18000) n30_blkspeg_dmso n30peg_lpdmso n30peg_lpdmso[2] np.mean(n30peg_lpdmso[3]) blen_dmso blen_n30dmso gg_n30peg_dmso = line_fit(np.mean(n30peg_lpdmso[2]),blen_n30dmso) gg_n6peg_n30dmso = line_fit(np.mean(n6peg_lpdmso[2]),blen_n30dmso) gg_n8peg_n30dmso = line_fit(np.mean(n8peg_lpdmso[2]),blen_n30dmso) gg_n10peg_n30dmso = line_fit(np.mean(n10peg_lpdmso[2]),blen_n30dmso) gg_n20peg_n30dmso = line_fit(np.mean(n20peg_lpdmso[2]),blen_n30dmso) gg_n30peg_dmso ``` ### Block averaged Radius of gyration and persistence length, N = 30 PEG/DMSO ``` np.mean(n30_blkspeg_dmso["Avg persistence length"]) np.std(n30_blkspeg_dmso["Avg persistence length"]) np.mean(n30_blkspeg_dmso["Avg Radius of gyration"]) np.std(n30_blkspeg_dmso["Avg Radius of gyration"]) plt.figure(figsize=(7,7)) plt.errorbar(blen_dmso, 
np.log(cor_n6peg_dmso[0]), yerr=mk_n6p_dmso, color='#1F2E69', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n8dmso, np.log(cor_n8peg_dmso[0]), yerr=mk_n8p_dmso, color='#4C80ED', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n10dmso, np.log(cor_n10peg_dmso[0]), yerr=mk_n10p_dmso, color='#8C52FC', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n20dmso, np.log(cor_n20peg_dmso[0]), yerr=mk_n20p_dmso, color='#8B7F47', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.errorbar(blen_n30dmso, np.log(cor_n30peg_dmso[0]), yerr=mk_n30p_dmso, color='#63ACBE', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.plot(blen_n20dmso[:15], gg_n6peg_n30dmso[:15], color='#1F2E69') plt.plot(blen_n20dmso[:15], gg_n8peg_n30dmso[:15], color='#4C80ED') plt.plot(blen_n20dmso[:15], gg_n10peg_n30dmso[:15], color='#8C52FC') plt.plot(blen_n20dmso[:15], gg_n20peg_n30dmso[:15], color='#8B7F47') plt.plot(blen_n30dmso[:15], gg_n30peg_dmso[:15], color='#63ACBE') plt.title(r'Ensemble Averaged ln(Cosine $\theta$) in DMSO', fontsize=15, y=1.01) plt.xlabel(r'Bond Length', fontsize=15) plt.ylabel(r'ln$\left< Cos(\theta)\right >$', fontsize=15) plt.ylim(-6,1) plt.xlim(0,90) #plt.ylim(-1.9,0) font = font_manager.FontProperties(family='Arial', style='normal', size='14') #plt.legend([r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.60 $\AA$'], loc=3, frameon=0, fontsize=14, prop=font) plt.tick_params(labelsize=14) plt.text(0.5, -5.9,r'$N_{PEG}$ = 6: $L_{p}$ = 18.8 $\AA$ ± 3.61 $\AA$', fontsize=15, color='#1F2E69') plt.text(0.5, -5.58,r'$N_{PEG}$ = 8: $L_{p}$ = 22.1 $\AA$ ± 2.49 $\AA$', fontsize=15, color='#4C80ED') plt.text(0.5, -5.23,r'$N_{PEG}$ = 10: $L_{p}$ = 21.2 $\AA$ ± 2.31 $\AA$', fontsize=15, color='#8C52FC') plt.text(0.5, -4.90,r'$N_{PEG}$ = 20: $L_{p}$ = 22.9 $\AA$ ± 1.21 $\AA$', fontsize=15, color='#8B7F47') plt.text(0.5, -4.50,r'$N_{PEG}$ = 30: 
$L_{p}$ = 24.2 $\AA$ ± 1.25 $\AA$', fontsize=15, color='#63ACBE') rgpeg_olig_dmso[r"$R_{g}$ [Angstrom] N = 30 PEG DMSO "] = n30_blkspeg_dmso["Avg Radius of gyration"] rgpeg_olig_dmso pers_pegt_dmso[r"$L_{p}$ [Angstrom] N = 30 PEG DMSO "] = n30_blkspeg_dmso["Avg persistence length"] pers_pegt_dmso rgpeg_olig_dmso.to_pickle("PEG_dmso_Rg.pkl") pers_pegt_dmso.to_pickle("PEG_dmso_Lp.pkl") ``` ### Fluory Exponent, PEG/DMSO systems ``` n_peg = np.array([6,8,10,20,30]) rg_npeg_dmso = np.array([np.mean(n6_blkspeg_dmso["Avg Radius of gyration"]) ,np.mean(n8_blkspeg_dmso["Avg Radius of gyration"]),np.mean(n10_blkspeg_dmso["Avg Radius of gyration"]) ,np.mean(n20_blkspeg_dmso["Avg Radius of gyration"]),np.mean(n30_blkspeg_dmso["Avg Radius of gyration"])]) rg_npeg_dmso rgdmso_npeg_std = np.array([np.std(np.log10(n6_blkspeg_dmso["Avg Radius of gyration"])) ,np.std(np.log10(n8_blkspeg_dmso["Avg Radius of gyration"])) ,np.std(np.log10(n10_blkspeg_dmso["Avg Radius of gyration"])) ,np.std(np.log10(n20_blkspeg_dmso["Avg Radius of gyration"])) ,np.std(np.log10(n30_blkspeg_dmso["Avg Radius of gyration"]))]) rgdmso_npeg_std n_peg np.log10(rg_npeg_dmso) np.log10(n_peg) # From fitting all points, I get best fit from sklearn.linear_model import LinearRegression model_vdmso = LinearRegression(fit_intercept=True) model_vdmso.fit(np.log10(n_peg).reshape(-1,1), np.log10(rg_npeg_dmso)) # Slope here is in nanometers print("Model slope: ", model_vdmso.coef_[0]) print("Model intercept:", model_vdmso.intercept_) gg_dmso = model_vdmso.predict(np.log10(n_peg.reshape(-1,1))) gg_dmso print("Mean Std Error:", sklearn.metrics.mean_squared_error(np.log10(rg_npeg_dmso), gg_dmso)) print("R2 score:", sklearn.metrics.r2_score(np.log10(rg_npeg_dmso), gg_dmso)) # Residuals between the true y data and model y data resid_vdmso = np.log10(rg_npeg_dmso) - gg_dmso resid_vdmso # How to calculate Sum((Xi - avg(X))^2): X values are the bond length values nt_ttace = np.log10(n_peg) nt_ttace -= np.mean(nt_ttace) nhui_ace 
= nt_ttace**2 np.sum(nhui_ace) # t-value with 95 % confidence intervals scipy.stats.t.ppf(0.975, 4) # How to calculate 95% confidence interval for the slope flc_vdmso = scipy.stats.t.ppf(0.975, 4)*np.sqrt((np.sum(resid_vdmso**2)/len(resid_vdmso))/(np.sum(nhui_ace))) flc_vdmso plt.figure(figsize=(7,7)) plt.errorbar(np.log10(n_peg), np.log10(rg_npeg_dmso), yerr=rgdmso_npeg_std, color='#A58262', linestyle="None",marker='o', capsize=5, capthick=1, ecolor='black') plt.plot(np.log10(n_peg), gg_dmso, color='#A58262') plt.title(r'Fluory Exponent', fontsize=15) plt.xlabel(r'Log($N_{PEG}$)', fontsize=15) plt.ylabel(r'Log($R_{g}$)', fontsize=15) plt.tick_params(labelsize=14) plt.text(1.1, 0.75, r'$v_{DMSO}$ = 0.63 ± 0.02', fontsize=15, color='#A58262') ```
github_jupyter
# Data Cleaning And Feature Engineering
* The data is very dirty, so it must be cleaned before analysis.
* Many missing values are represented by -1 and have to be fixed (this is very important).
```
import pandas as pd

# Load the raw scraped job-postings data.
data = pd.read_csv('original_data.csv')
data.head()
data.shape

# Drop exact duplicate rows.
data = data.drop_duplicates(data.columns)
data.shape
```
# Salary column
```
# Drop rows with no salary provided (encoded as the string '-1').
data = data[data['Salary Estimate'] != '-1']
data.shape
data.head(20)

# Normalise the salary text: strip the rupee sign, expand 'K' to '000'
# and drop thousands separators.
data['Salary Estimate'] = data['Salary Estimate'].apply(
    lambda x: x.replace('₹', '').replace('K', '000').replace(',', ''))
data.head()
data.dtypes
data['Salary Estimate'][0:50]

# Flag columns: 1 if the salary is quoted hourly / monthly, else 0.
data['hourly'] = data['Salary Estimate'].apply(lambda x: 1 if '/hr' in x.lower() else 0)
data['monthly'] = data['Salary Estimate'].apply(lambda x: 1 if '/mo' in x.lower() else 0)

# Remove the '/hr' and '/mo' suffixes now that they are flagged.
data['Salary Estimate'] = data['Salary Estimate'].apply(
    lambda x: x.lower().replace('/hr', '').replace('/mo', ''))

# Lower bound of the salary range, kept in case it is needed later.
data['min_salary'] = data['Salary Estimate'].apply(lambda x: x.split('-')[0])

# Checkpoint the partially cleaned data.
data.to_csv('clean.csv', index=False)
df = pd.read_csv('clean.csv')


def avg_salary(x):
    """Return the midpoint of a 'low-high' salary range, or the single value."""
    bounds = x.split('-')
    if len(bounds) > 1:
        return (float(bounds[1]) + float(bounds[0])) / 2
    return float(bounds[0])


df['avg_salary'] = df['Salary Estimate'].apply(avg_salary)
df.head()
df.shape

# Annualise: hourly salaries * ~2000 working hours/year, monthly salaries * 12.
df['avg_salary'] = df.apply(lambda x: x.avg_salary * 2000 if x.hourly == 1 else x.avg_salary, axis=1)
df['avg_salary'] = df.apply(lambda x: x.avg_salary * 12 if x.monthly == 1 else x.avg_salary, axis=1)
```
# Company Name Column
```
# The scraped company name has extra text appended after a newline; keep only the name.
df['Company Name'] = df['Company Name'].apply(lambda x: x.split('\n')[0])
df.head()
```
# Founded column
```
data[data['Founded'] == -1]
# Company age in years; the -1 missing marker is left as-is here and
# converted to NaN later by the global replace below.
df['company_age'] = df.Founded.apply(lambda x: x if x < 1 else 2020 - x)
```
# job description Column
```
import numpy as np


def clean_des(x):
    """Replace newlines with spaces; non-string (missing) descriptions become NaN."""
    try:
        return x.replace('\n', ' ')
    except AttributeError:
        return np.NaN


df['Job Description'] = df['Job Description'].apply(clean_des)
df.tail()
```
# Job Title Column
```
df['Job Title'].value_counts()


def title_simplifier(title):
    """Map the ~282 raw job titles onto a small set of canonical roles."""
    title = title.lower()
    if 'data scientist' in title or 'data science' in title:
        return 'data scientist'
    elif 'data engineer' in title:
        return 'data engineer'
    elif 'analyst' in title:
        return 'analyst'
    elif 'machine learning' in title:
        return 'machine learning engineer'
    elif 'manager' in title:
        return 'manager'
    elif 'director' in title:
        return 'director'
    else:
        return 'other'


# Simplify titles: the raw column has ~282 unique values describing mostly the same work.
df['job_title_simplified'] = df['Job Title'].apply(title_simplifier)
df['job_title_simplified'].value_counts()

# Number of listed competitors; '-1' means the field was not provided.
df['number_competitors'] = df['Competitors'].apply(lambda x: len(x.split(',')) if x != '-1' else 'not provided')
df.head()
```
# Revenue Column
* Exploring the revenue column as it can be an important feature in the analysis.
```
# Replace the -1 placeholder with NaN (a proper missing-value marker).
df = df.replace(to_replace=-1, value=np.nan)

# Work on a copy so the original Revenue column is preserved.
df['revenue'] = df['Revenue']
df.head()
df['revenue'] = df['revenue'].apply(lambda x: x.replace('Unknown / Non-Applicable', '-1'))
```
### cleaning revenue column.
```
# Strip every non-numeric token (currency symbol, '+', 'INR', '()', 'billion').
df['revenue'] = df['revenue'].apply(
    lambda x: x.replace('₹', '').replace('+', '').replace('INR', '').replace('()', '').replace('billion', ''))

# Flag: 1 if the revenue is quoted in millions, else 0 (billions).
df['Revenue_million'] = df['revenue'].apply(lambda x: 1 if 'million' in x.lower() else 0)

df['revenue'] = df['revenue'].apply(lambda x: x.replace('million', ''))
df['revenue'] = df['revenue'].apply(lambda x: x.replace('to', '-'))
```
### Making another column for the average revenue, since the original holds ranges but the analysis needs a single value.
```
def avg_revenue(x):
    """Return the midpoint of a revenue range; non-numeric values ('-1') become NaN."""
    bounds = x.split('-')
    if len(bounds) > 1:
        # '-1' rows split into ['', '1'] and fail the float conversion;
        # catch only ValueError instead of a bare except.
        try:
            return (float(bounds[1]) + float(bounds[0])) / 2
        except ValueError:
            return np.nan
    return float(bounds[0])


df['avg_revenue'] = df['revenue'].apply(avg_revenue)

# Make the unit uniform: values flagged as millions are divided by 1000.
df['avg_revenue'] = df.apply(lambda x: x.avg_revenue / 1000 if x.Revenue_million == 1 else x.avg_revenue, axis=1)

# Percentage of NaN data in every column.
round((100 * df.isnull().sum()) / len(df.index), 2)
```
#### avg_revenue has about 47% missing values. Columns with more than 30% missing are usually dropped, but revenue may be an important column for the analysis, so the missing values are filled using an advanced technique (KNN imputation) instead.
#### Because these values are imputed, any analysis around revenue (e.g. its effect on salary) should be treated with caution.
```
# Import the advanced imputation technique.
from sklearn.impute import KNNImputer

pd.set_option('display.max_rows', None)

X = df.drop(['Company Name', 'Competitors', 'Headquarters', 'Industry', 'Job Description',
             'Job Title', 'Location', 'Founded', 'revenue', 'Salary Estimate', 'Sector',
             'Size', 'Type of ownership', 'hourly', 'monthly', 'min_salary', 'Revenue',
             'company_age', 'Rating', 'avg_salary', 'job_title_simplified',
             'number_competitors', 'Revenue_million'], axis=1)
X

imputer = KNNImputer(n_neighbors=3)
# fit_transform returns a 2-D ndarray; select the imputed avg_revenue column
# explicitly instead of assigning the whole 2-D result to a single column
# (that assignment breaks whenever X has more than one column).
imputed = imputer.fit_transform(X)
df['avg_revenue'] = imputed[:, X.columns.get_loc('avg_revenue')]
df['avg_revenue'] = round(df['avg_revenue'])
df.head()
df.columns

# Drop the helper columns that are no longer needed.
df2 = df.drop(columns=['hourly', 'monthly', 'min_salary', 'number_competitors',
                       'revenue', 'Revenue_million'])
df2.head()
df2.to_csv('final_cleaned_data.csv', index=False)
```
github_jupyter
# Chapter 3: Deep Learning Libraries

This chapter discusses the important libraries and frameworks that one needs to get started in artificial intelligence. We'll cover the basic functions of the three most popular deep learning frameworks: Tensorflow, Pytorch, and Keras, and show you how to get up and running in each of these frameworks as we will be utilizing them in the following chapters. We'll touch upon computing for Artificial Intelligence, and discuss how GPUs and other advanced memory units can improve AI. Lastly, we'll discuss the fundamentals of two popular cloud computing frameworks for deep learning, AWS and Google Cloud.
```
import numpy as np
```
## TensorFlow Basics
```
import tensorflow as tf

## Define two constants
x = tf.constant(2)
y = tf.constant(2)

## Multiply the constants
product = tf.multiply(x, y)

## Initializer for the variables in the default graph
## (tf.initialize_all_variables is long deprecated; use the TF1 replacement).
init = tf.global_variables_initializer()

## In Tensorflow, we must first initialize a session object
sess = tf.Session()
sess.run(init)

## Run the session
print(sess.run(product))

## Close the session
sess.close()
```
Creating a new graph
```
my_graph = tf.Graph()

## Build ops inside the graph we just created (the original referenced an
## undefined name `new_graph` here, which raised a NameError).
with my_graph.as_default():
    x = tf.constant(2)
    y = tf.constant(2)
```
Scopes:
```
with tf.name_scope("my_scope"):
    ## Define two constants
    const1 = tf.constant([4])
    const2 = tf.constant([5])
    ## Multiply the constants
    product = tf.multiply(const1, const2)
```
## Keras Basics

As Keras is designed as a model-level library, it does not contain methods for doing basic operations as PyTorch or base TensorFlow does. Instead, it utilizes TensorFlow as a backend. As such, its basic operations are the same as basic TensorFlow operations:
```
import keras.backend as K

x = K.constant(5)
y = K.constant(6)
product = x * y
```
## PyTorch
```
import torch

x = torch.IntTensor([4])
y = torch.IntTensor([5])
product = x * y
```
It's easy to switch between numpy and pytorch
```
## Create a numpy array
numpy_array = np.random.randn(10, 10)

## Convert the numpy array to a pytorch tensor
pytorch_tensor = torch.from_numpy(numpy_array)

## Convert it back to Numpy
numpy_again = pytorch_tensor.numpy()
```
Pytorch tensors can be manipulated in a way that is similar to numpy
```
tensor = torch.FloatTensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

## print the third element of the 2nd row of the tensor
print(tensor[1][2])

## replace the second value of the first row
tensor[0][1] = 1
print(tensor)
```
Like TensorFlow, PyTorch runs on the concept of variables, which are values that are intended to change and be updated during training processes
```
from torch.autograd import Variable

## Create a tensor
tensor_two = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])

## Convert it to a variable
variable = Variable(tensor_two)
variable.data
```
## TensorFlow Logging
```
my_list = []

## Iterate through the available GPUs
for device in ['/gpu:0', '/gpu:1']:
    ## Utilize the TensorFlow device manager
    with tf.device(device):
        x = tf.constant([1, 2, 3], shape=[1, 3])
        ## `shape` is a keyword argument (the original `shape [3,1]` was a
        ## syntax error — the '=' was missing).
        y = tf.constant([1, 2, 3], shape=[3, 1])
        my_list.append(tf.matmul(x, y))

with tf.device('/cpu:0'):
    sum_operation = tf.add(x, y)

## Run everything through a session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess.run(sum_operation)
```
github_jupyter
<table> <tr> <td><img src='SystemLink_icon.png' /></td> <td ><h1><strong>NI SystemLink Python API</strong></h1></td> </tr> </table>

## Test Monitor Service Example
***
The Test Monitor Service API provides functions to create, update, delete and query Test results and Test steps.
***
# Prerequisites
- The **NI SystemLink Server Test Module** needs to be installed in order to run this example
- The **NI SystemLink Client** needs to be installed on a system which has TestStand installed and is registered to the SystemLink server. Configure the SystemLink TestStand plugin reporting to enable publishing test results.
- Before you run this example, TestStand mock test results are needed:
    - From **TestStand** open the **'Computer Motherboard Test Sequence.seq'**:
        - Go to Help -> Find Examples and follow the instructions to open the Examples workspace (Examples.tsw)
        - From the Workspace tab, expand **Demos** and select **Computer Motherboard Test**. Open one of the sequence files, based on your language of choice
    - Run the sequence at least 10 times
    - Make sure you fail several tests, on different components

# Summary
This notebook uses the Test Monitor Service API to import test and step results into Python. The data is used to do custom analytics.
- Get all the test results that were created from the 'Computer Motherboard Test Sequence.seq'
- Create a Pandas Dataframe with the information we want to process for each test
- Plot pass vs. fail tests
- Visualize test run vs. test duration
- Pareto graph (step type)
***
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from systemlink.testmonclient import TestMonitorClient, testmon_messages

testmonclient = TestMonitorClient(service_name='TestMonitorClient')


def get_dataframe_from_results(results):
    """Build a DataFrame with one row per test result, for later analysis."""
    return pd.concat([pd.DataFrame({'status': result.status.status_name,
                                    'startedAt': result.started_at,
                                    'updatedAt': result.updated_at,
                                    'programName': result.program_name,
                                    'id': result.id,
                                    'systemId': result.system_id,
                                    'operator': result.operator,
                                    'serialNumber': result.serial_number,
                                    'totalTimeInSeconds': result.total_time_in_seconds,
                                    }, index=[idx])
                      for idx, result in enumerate(results)])


# Only query test results that belong to the 'Computer Motherboard Test Sequence.seq' test program
query = testmon_messages.ResultQuery(None, None, None, ['Computer Motherboard Test Sequence.seq'],
                                     None, None, None, None, None, None, None, None, None)
results, _ = testmonclient.query_results(query)
df_results = get_dataframe_from_results(results)

# Show the first elements of the dataframe, which holds the data we will use for further analysis
df_results[:2]
```
# Bar Plot of Test Results
Group the tests results by pass/fail. Create a bar plot to visualize the test runs by result.
```
# Visualize tests results (pass/fail)
bar_width = 0.4
opacity = 0.4

res = df_results.groupby('status').count()
failed = res['id']['Failed']
passed = res['id']['Passed']

plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(7, 7))
plt.bar(1, passed, bar_width, alpha=opacity, color='b', label='Pass')
plt.bar(1.5, failed, bar_width, alpha=opacity, color='r', label='Fail')
plt.xticks([1, 1.5], ['Pass', 'Fail'], size='15')
plt.ylabel('Runs', size='15')
plt.title('Total Runs: ' + str(passed + failed), weight='bold', size='15')
plt.show()
```
# Plot Test Run vs. Duration
Visualize the test runs vs. duration, with red/green color indicating pass/fail.
```
# Visualize test failures vs duration
result_idx = np.arange(df_results.shape[0])
df_time = df_results[['totalTimeInSeconds', 'status']]
color = ['r' if status == 'Failed' else 'g' for status in df_time['status']]

fig = plt.figure(figsize=(10, 7))
# alpha must be numeric — the original passed the string '0.5', which
# raises a TypeError in matplotlib.
plt.scatter(result_idx, df_time['totalTimeInSeconds'], s=150, c=color, alpha=0.5)
plt.title('Test Results - Duration', weight='bold', size='15')
plt.xlabel('Test Runs', size='15')
plt.ylabel('Time (seconds)', size='15')
plt.show()
```
# Pareto distribution
Get a Pandas Dataframe with all the step failures. Visualize the failures in a Pareto graph, which helps visualize the failure distribution, by step type.
```
# Pareto distribution of step failures visualization


def get_failed_steps_dataframe(steps):
    """Build a DataFrame with one row per failed (non-sequence-call) step."""
    failed_steps = [step for step in steps
                    if step.status.status_name == 'Failed' and step.step_type != 'SequenceCall']
    return pd.concat([pd.DataFrame({'name': step.name,
                                    'id': step.step_id,
                                    'totalTimeInSeconds': step.total_time_in_seconds,
                                    }, index=[idx])
                      for idx, step in enumerate(failed_steps)])


results_ids = [result.id for result in results]
step_query = testmon_messages.StepQuery(None, None, None, results_ids,
                                        None, None, None, None, None, None)
steps, _ = testmonclient.query_steps(step_query)
steps_df = get_failed_steps_dataframe(steps)

# Failure counts per step name, most frequent first (Pareto ordering).
res = steps_df.groupby('name').count()
res = res.sort_values('id', ascending=False)

fig, ax1 = plt.subplots()
fig.set_size_inches(15, 7)
plt.title('Failures by Test', weight='bold', size='15')
plt.ylabel('Number of Runs', size='15')
plt.xlabel('Test Type', size='15')
ax1.get_xaxis().set_ticks([])

# Create the Pareto chart bars
previous_val = 0
cumulative = []
for idx, row in res.iterrows():
    val = row['id']
    cumulative.append(val + previous_val)
    previous_val = val + previous_val
    ax1.bar(idx, val, bar_width, alpha=opacity, label=idx)

# Add a legend. Each bar already carries its label, so let matplotlib build
# the legend — the original passed raw (unsorted, duplicated) step names,
# which did not match the sorted bar order.
ax1.legend(loc='upper right')

# Cumulative line, in percentage. Convert the Python list to an ndarray
# first — the original divided a list by an int, which raises a TypeError.
cumulative_percentage = np.array(cumulative) / cumulative[-1] * 100
ax2 = ax1.twinx()
ax2.set_ylim([0, 100])
ax2.plot(cumulative_percentage)
plt.ylabel('Failure Percentage', size='15')
plt.show()
```
github_jupyter
<a href="https://colab.research.google.com/github/satyajitghana/PadhAI-Course/blob/master/13_OverfittingAndRegularization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error, log_loss
from tqdm import tqdm_notebook
import seaborn as sns
sns.set()
from sklearn.preprocessing import OneHotEncoder
from sklearn.datasets import load_iris
from numpy.linalg import norm

# Shared colormap for all scatter plots, and a fixed seed for reproducibility.
my_cmap = 'inferno'
np.random.seed(0)
```
## Generate data
```
iris=load_iris()
data = iris.data[:, :2] # take only the first two features
labels = iris.target
plt.scatter(data[:,0], data[:,1], c=labels, cmap=my_cmap)
plt.show()
print("Data shape",data.shape)
print("Labels shape",labels.shape)
```
## Multi class classification
```
# Stratified 80/20 train/validation split of the 2-feature iris data.
X_train, X_val, Y_train, Y_val = train_test_split(data, labels, stratify=labels, random_state=0,test_size=0.2)
print(X_train.shape, X_val.shape, labels.shape)
# One-hot encode the integer class labels for the softmax output layer.
enc = OneHotEncoder()
y_OH_train = enc.fit_transform(np.expand_dims(Y_train,1)).toarray()
y_OH_val = enc.fit_transform(np.expand_dims(Y_val,1)).toarray()
print(y_OH_train.shape, y_OH_val.shape)
```
## FF Class
```
class FFNetwork:
    """Feed-forward network (2 inputs -> num_hidden -> 3 classes, softmax output)
    trained with full-batch gradient descent, with optional L2 regularization.

    NOTE(review): get_accuracy (and the module-level print_accuracy below) read
    the notebook globals `model`, `X_train`, `X_val`, `Y_train`, `Y_val` rather
    than `self` — this only works in the notebook's global scope.
    """

    def __init__(self, num_hidden=2, init_method = 'xavier', activation_function = 'sigmoid', leaky_slope = 0.1):
        """Build layer sizes and initialise weights ('random', 'he' or 'xavier')."""
        self.params={}
        self.num_layers=2
        self.layer_sizes = [2, num_hidden, 3]
        self.activation_function = activation_function
        self.leaky_slope = leaky_slope
        # Fixed seed so every initialisation method is comparable across runs.
        np.random.seed(0)
        if init_method == "random":
            for i in range(1,self.num_layers+1):
                self.params["W"+str(i)] = np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])
                self.params["B"+str(i)] = np.random.randn(1,self.layer_sizes[i])
        elif init_method == "he":
            # He init: scale by sqrt(2 / fan_in).
            for i in range(1,self.num_layers+1):
                self.params["W"+str(i)] = np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])*np.sqrt(2/self.layer_sizes[i-1])
                self.params["B"+str(i)] = np.random.randn(1,self.layer_sizes[i])
        elif init_method == "xavier":
            # Xavier init: scale by sqrt(1 / fan_in).
            for i in range(1,self.num_layers+1):
                self.params["W"+str(i)]=np.random.randn(self.layer_sizes[i-1],self.layer_sizes[i])*np.sqrt(1/self.layer_sizes[i-1])
                self.params["B"+str(i)]=np.random.randn(1,self.layer_sizes[i])
        # Buffers for gradients and (momentum-style) update state; the v_*/m_*
        # entries are initialised here but plain GD in fit() does not use them.
        self.gradients={}
        self.update_params={}
        self.prev_update_params={}
        for i in range(1,self.num_layers+1):
            self.update_params["v_w"+str(i)]=0
            self.update_params["v_b"+str(i)]=0
            self.update_params["m_b"+str(i)]=0
            self.update_params["m_w"+str(i)]=0
            self.prev_update_params["v_w"+str(i)]=0
            self.prev_update_params["v_b"+str(i)]=0

    def forward_activation(self, X):
        """Apply the configured activation element-wise to pre-activations X."""
        if self.activation_function == "sigmoid":
            return 1.0/(1.0 + np.exp(-X))
        elif self.activation_function == "tanh":
            return np.tanh(X)
        elif self.activation_function == "relu":
            return np.maximum(0,X)
        elif self.activation_function == "leaky_relu":
            return np.maximum(self.leaky_slope*X,X)

    def grad_activation(self, X):
        """Derivative of the activation, expressed in terms of its OUTPUT X
        (e.g. sigmoid' = s*(1-s)), as used by grad() on the hidden output H1."""
        if self.activation_function == "sigmoid":
            return X*(1-X)
        elif self.activation_function == "tanh":
            return (1-np.square(X))
        elif self.activation_function == "relu":
            return 1.0*(X>0)
        elif self.activation_function == "leaky_relu":
            d=np.zeros_like(X)
            d[X<=0]=self.leaky_slope
            d[X>0]=1
            return d

    def get_accuracy(self):
        """Return (train_accuracy, val_accuracy).

        NOTE(review): uses the globals `model`, `X_train`, `X_val`, `Y_train`,
        `Y_val` — presumably `model` is always the instance being fitted.
        """
        Y_pred_train = model.predict(X_train)
        Y_pred_train = np.argmax(Y_pred_train,1)
        Y_pred_val = model.predict(X_val)
        Y_pred_val = np.argmax(Y_pred_val,1)
        accuracy_train = accuracy_score(Y_pred_train, Y_train)
        accuracy_val = accuracy_score(Y_pred_val, Y_val)
        return accuracy_train,accuracy_val

    def softmax(self, X):
        """Row-wise softmax. NOTE(review): no max-subtraction, so large logits
        can overflow np.exp — acceptable for this small teaching example."""
        exps = np.exp(X)
        return exps / np.sum(exps, axis=1).reshape(-1,1)

    def forward_pass(self, X, params = None):
        """Forward propagate X; caches A1/H1/A2/H2 on self for grad()."""
        if params is None:
            params = self.params
        self.A1 = np.matmul(X, params["W1"]) + params["B1"] # (N, 2) * (2, 2) -> (N, 2)
        self.H1 = self.forward_activation(self.A1) # (N, 2)
        self.A2 = np.matmul(self.H1, params["W2"]) + params["B2"] # (N, 2) * (2, 2) -> (N, 2)
        self.H2 = self.softmax(self.A2) # (N, 2)
        return self.H2

    def grad(self, X, Y, params = None):
        """Backpropagation; softmax + cross-entropy gives dA2 = H2 - Y directly.
        Gradients are stored in self.gradients, keyed dW1/dB1/dW2/dB2."""
        if params is None:
            params = self.params
        self.forward_pass(X, params)
        m = X.shape[0]
        self.gradients["dA2"] = self.H2 - Y # (N, 4) - (N, 4) -> (N, 4)
        self.gradients["dW2"] = np.matmul(self.H1.T, self.gradients["dA2"]) # (2, N) * (N, 4) -> (2, 4)
        self.gradients["dB2"] = np.sum(self.gradients["dA2"], axis=0).reshape(1, -1) # (N, 4) -> (1, 4)
        self.gradients["dH1"] = np.matmul(self.gradients["dA2"], params["W2"].T) # (N, 4) * (4, 2) -> (N, 2)
        self.gradients["dA1"] = np.multiply(self.gradients["dH1"], self.grad_activation(self.H1)) # (N, 2) .* (N, 2) -> (N, 2)
        self.gradients["dW1"] = np.matmul(X.T, self.gradients["dA1"]) # (2, N) * (N, 2) -> (2, 2)
        self.gradients["dB1"] = np.sum(self.gradients["dA1"], axis=0).reshape(1, -1) # (N, 2) -> (1, 2)

    def fit(self, X, Y, epochs=1, algo= "GD",l2_norm=False, lambda_val=0.8, display_loss=False, eta=1):
        """Full-batch gradient descent for `epochs` epochs; plots the
        train/validation accuracy curves, and (optionally) loss vs. average
        weight magnitude.

        NOTE(review): the `algo` parameter is accepted but never used — only
        plain GD is implemented here.
        """
        train_accuracies={}
        val_accuracies={}
        if display_loss:
            loss = []
            weight_mag = []
        for num_epoch in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
            m = X.shape[0]
            self.grad(X, Y)
            for i in range(1,self.num_layers+1):
                if l2_norm:
                    # L2 weight decay: shrink W by eta*lambda/m before the
                    # gradient step (biases are not regularised).
                    self.params["W"+str(i)] -= (eta * lambda_val)/m * self.params["W"+str(i)] + eta * (self.gradients["dW"+str(i)]/m)
                else:
                    self.params["W"+str(i)] -= eta * (self.gradients["dW"+str(i)]/m)
                self.params["B"+str(i)] -= eta * (self.gradients["dB"+str(i)]/m)
            train_accuracy,val_accuracy=self.get_accuracy()
            train_accuracies[num_epoch]=train_accuracy
            val_accuracies[num_epoch]=val_accuracy
            if display_loss:
                Y_pred = self.predict(X)
                loss.append(log_loss(np.argmax(Y, axis=1), Y_pred))
                # Average magnitude over all 18 parameters (hence the /18).
                weight_mag.append((norm(self.params["W1"]) + norm(self.params["W2"]) + norm(self.params["B1"]) + norm(self.params["B2"]))/18)
        # Accuracy curves, plus reference lines at 0.9 and 0.33 (chance level).
        plt.plot(list(train_accuracies.values()),label="Train accuracy")
        plt.plot(list(val_accuracies.values()),label="Validation accuracy")
        plt.plot(np.ones((epochs, 1))*0.9)
        plt.plot(np.ones((epochs, 1))*0.33)
        plt.xlabel('Epochs')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.show()
        if display_loss:
            # Twin-axis plot: log loss (left, red) vs weight magnitude (right, blue).
            fig, ax1 = plt.subplots()
            color = 'tab:red'
            ax1.set_xlabel('epochs')
            ax1.set_ylabel('Log Loss', color=color)
            ax1.plot(loss, '-o', color=color)
            ax1.tick_params(axis='y', labelcolor=color)
            ax2 = ax1.twinx()
            color = 'tab:blue'
            ax2.set_ylabel('Weight Magnitude', color=color) # we already handled the x-label with ax1
            ax2.plot(weight_mag, '-*', color=color)
            ax2.tick_params(axis='y', labelcolor=color)
            fig.tight_layout()
            plt.show()

    def predict(self, X):
        """Return the softmax class probabilities for X as a squeezed ndarray."""
        Y_pred = self.forward_pass(X)
        return np.array(Y_pred).squeeze()


def print_accuracy():
    """Print train/validation accuracy of the global `model` on the global split."""
    Y_pred_train = model.predict(X_train)
    Y_pred_train = np.argmax(Y_pred_train,1)
    Y_pred_val = model.predict(X_val)
    Y_pred_val = np.argmax(Y_pred_val,1)
    accuracy_train = accuracy_score(Y_pred_train, Y_train)
    accuracy_val = accuracy_score(Y_pred_val, Y_val)
    print("Training accuracy", round(accuracy_train, 4))
    print("Validation accuracy", round(accuracy_val, 4))
    # Disabled scatter of misclassified training points (kept for manual use).
    if False:
        plt.scatter(X_train[:,0], X_train[:,1], c=Y_pred_train, cmap=my_cmap, s=15*(np.abs(np.sign(Y_pred_train-Y_train))+.1))
        plt.show()


# Increase capacity step by step to provoke overfitting.
model = FFNetwork(num_hidden=1)
model.fit(X_train, y_OH_train, epochs=100, eta=0.1)
print_accuracy()

model = FFNetwork(num_hidden=2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, display_loss=False)
print_accuracy()

model = FFNetwork(num_hidden=4)
model.fit(X_train, y_OH_train, epochs=400, eta=0.25, display_loss=False)
print_accuracy()

model = FFNetwork(num_hidden=8)
model.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=False)
print_accuracy()

model = FFNetwork(num_hidden=32)
model.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=False)
print_accuracy()

model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)
print_accuracy()
```
## Add L2 Regularization
```
# Sweep the regularization strength lambda_val.
model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=0.1, display_loss=True)
print_accuracy()

model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=1, display_loss=True)
print_accuracy()

model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=5, display_loss=True)
print_accuracy()

model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=True, lambda_val=10, display_loss=True)
print_accuracy()
```
## Add noise to training data set
```
# Baseline without noise.
model = FFNetwork(num_hidden=64)
model.fit(X_train, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)
print_accuracy()

# Multiplicative Gaussian input noise as an implicit regularizer.
for noise_fraction in [0.01, 0.05, 0.1, 0.15, 0.18, 0.2]:
    print(noise_fraction)
    X_train_noisy = X_train * (1 - noise_fraction*np.random.randn(X_train.shape[0], X_train.shape[1]))
    model = FFNetwork(num_hidden=64)
    model.fit(X_train_noisy, y_OH_train, epochs=2000, eta=0.1, l2_norm=False)
    print_accuracy()
```
## Early stopping
```
# Same model, fewer epochs — stop before the validation curve degrades.
model = FFNetwork(num_hidden=32)
model.fit(X_train, y_OH_train, epochs=500, eta=0.2, display_loss=True)
print_accuracy()

model = FFNetwork(num_hidden=32)
model.fit(X_train, y_OH_train, epochs=100, eta=0.2, display_loss=True)
print_accuracy()
```
github_jupyter
# Marginal Gaussianization * Author: J. Emmanuel Johnson * Email: jemanjohnson34@gmail.com In this demonstration, we will show how we can do the marginal Gaussianization on a 2D dataset using the Histogram transformation and Inverse CDF Gaussian distribution. ``` import os, sys cwd = os.getcwd() # sys.path.insert(0, f"{cwd}/../") sys.path.insert(0, "/home/emmanuel/code/rbig") from rbig.data import ToyData from rbig.transform.gaussianization import MarginalGaussianization # from rbig.transform.gaussianization import HistogramGaussianization, KDEGaussianization from rbig.transform import InverseGaussCDF import numpy as np from scipy import stats # Plot Functions import matplotlib.pyplot as plt import seaborn as sns sns.reset_defaults() #sns.set_style('whitegrid') #sns.set_context('talk') sns.set_context(context='talk',font_scale=0.7) %matplotlib inline %load_ext autoreload %autoreload 2 ``` ## Data For this example, we are looking at a 2D dataset. ``` def plot_2d_joint(data, color='blue', title='Original Data'): fig = plt.figure(figsize=(5, 5)) g = sns.jointplot(x=data[:, 0], y=data[:, 1], kind='hex', color=color) plt.xlabel('X') plt.ylabel('Y') plt.suptitle(title) plt.tight_layout() plt.show() def plot_prob(data, probs, title='Probabilities'): fig, ax = plt.subplots() h = ax.scatter(data[:, 0], data[:, 1], s=1, c=probs, cmap='Reds') ax.set_xlabel('X') ax.set_ylabel('Y') cbar = plt.colorbar(h, ) ax.set_title(title) plt.show() seed = 123 rng = np.random.RandomState(seed=seed) dataset = 'rbig' n_samples = 10_000 n_features = 2 noise = 0.25 random_state=1 clusters = 2 data = ToyData( dataset=dataset, n_samples=n_samples, n_features=n_features, noise=noise, random_state=random_state, clusters=clusters, ).generate_samples() X = data[:, 0] Y = data[:, 1] plot_2d_joint(data, title='Original Data') ``` ## Uniformization Transformation ``` from rbig.transform.uniformization import HistogramUniformization, KDEUniformization, MarginalUniformization # from rbig.density.histogram 
import ScipyHistogram, QuantileHistogram # from rbig.den ``` #### Initialize Uniformization Algorithm ``` # INITIALIZE UNIFORMIZATION ALGORITHM #=== # uniform_clf = HistogramUniformization(bins=100, support_extension=10, alpha=1e-4, n_quantiles=None) uniform_clf = KDEUniformization(n_quantiles=50, method='fft') # density_clf = KDEScipy(n_quantiles=50, bw_method='scott', support_extension=10) # density_clf = KDESklearn(n_quantiles=100, support_extension=10) ``` #### Add it to Marginal Transformation Algorithm ``` mg_uniformizer = MarginalUniformization(uniform_clf) mg_uniformizer.fit(data) X_trans = mg_uniformizer.transform(data) plot_2d_joint(X_trans, title='Transformed Data') data_approx = mg_uniformizer.inverse_transform(X_trans) plot_2d_joint(data_approx, title='Transformed Data') X_ldj = mg_uniformizer.log_abs_det_jacobian(data) plot_2d_joint(X_ldj, title='Transformed Data') plot_2d_joint(np.exp(X_ldj), title='Transformed Data') plot_prob(data, X_ldj.sum(-1), title='Log Probabilities') plot_prob(data, np.exp(X_ldj.sum(-1)), title='Probabilities') ``` ## Marginal Gaussinization ``` from rbig.transform.uniformization import HistogramUniformization, KDEUniformization, MarginalUniformization from rbig.transform.gaussianization import MarginalGaussianization uniform_clf = HistogramUniformization(bins=100, support_extension=10, alpha=1e-4, n_quantiles=None) uniform_clf = KDEUniformization(n_quantiles=50, method='fft', ) mg_gaussianizer = MarginalGaussianization(uniform_clf) mg_gaussianizer.fit(data) X_trans = mg_gaussianizer.transform(data) plot_2d_joint(X_trans, title='Transformed Data') data_approx = mg_gaussianizer.inverse_transform(X_trans) plot_2d_joint(data_approx, title='Transformed Data') X_ldj = mg_gaussianizer.log_abs_det_jacobian(data) plot_2d_joint(X_ldj, title='Transformed Data') plot_2d_joint(np.exp(X_ldj), title='Transformed Data') X_lprob = mg_gaussianizer.score_samples(data) plot_prob(data, X_lprob, title='Log Probabilities') plot_prob(data, 
np.exp(X_lprob), title='Probabilities') ``` ### Negative Log Likelihood ``` X_nll = mg_gaussianizer.score(data,) print(f"Negative Log-Likelihood Score: {X_nll:.4f}") ``` ## Marginal Histogram Transformation So, for this transformation, we are going to transform our data from the current distribution to a marginally Gaussian distribution and then perform a rotation. In theory, if we do enough of these, we will eventually convert to a Gaussian distribution. ``` # parameters nbins = 1_000 # number of bins to do the histogram transform alpha = 1e-05 # adds some regularization (noise) support_extension = 10 # initialize the transformer mg_transformer = HistogramGaussianization( nbins=nbins, alpha=alpha ) # fit the transformer to the data mg_transformer.fit(data); ``` ### 1. Forward Transformation For this transformation, we will be applying the following: $$\Psi(\mathbf{x}) = \Phi^{-1}(\mathbf{x})$$ where $\Phi^{-1}(\cdot)$ is the inverse CDF of the Gaussian distribution. ``` data_trans = mg_transformer.transform(data) plot_2d_joint(data_trans, title='Transformed Data') ``` So clearly we can see that the transformation works. Both of the marginals are Gaussian distributed.. ### 2. Inverse Transformation For this step, we will apply the inverse transformation: $$\Psi^{-1}(\mathbf{x}) = \Phi \left( \mathbf{x} \right)$$ where $\Phi(\cdot)$ is the CDF of the Gaussian distribution. ``` data_approx = mg_transformer.inverse_transform(data_trans) # check that its more or less equal np.testing.assert_array_almost_equal(data_approx, data, decimal=1e-5) ``` We see that this transformation is very close to the original. In fact, it's close to approximately 1e-5 decimal places. The errors will definitely stem from the boundaries. 
``` # Plot results plot_2d_joint(data_approx, title='Inverse Transformed Data') ``` ## Log Absolute Determinant Jacobian Using the derivative of inverse-functions theorem, we can calculate the derivative like so: $$\nabla_\mathbf{x} \Phi^{-1}(\mathbf{x}) = \frac{1}{\phi (\Phi^{-1} (x)) }$$ where $\phi(\cdot)$ is the PDF of the Gaussian distribution. Taking the log of these terms gives us: $$ \log \nabla_\mathbf{x} \Phi^{-1}(\mathbf{x}) = - \log \phi (\Phi^{-1} (x))$$ ``` X_slogdet = mg_transformer.log_abs_det_jacobian(data) print(X_slogdet.min(), X_slogdet.max()) print(np.exp(X_slogdet).min(), np.exp(X_slogdet).max()) # plot the gradients plot_2d_joint(np.exp(X_slogdet), title='Jacobian Data') ``` ## Log Probability $$\log p_\theta(\mathbf{x}) = \log p_\theta \left( \mathbf{z} \right) + \log \left| \nabla_\mathbf{x} \mathbf{z} \right|$$ where $\mathbf{z} = \Psi(\mathbf{x})$ ``` # score samples log_prob = mg_transformer.score_samples(data) # score samples log_prob = mg_transformer.score_samples(data) plot_prob(data, log_prob, title='Log Probabilities') ``` ## Probability This is the same as above but without the log scale: $$p_\theta(\mathbf{x}) = p_\theta \left( \mathbf{z} \right) \left| \nabla_\mathbf{x} \mathbf{z} \right|$$ where $\mathbf{z} = \Psi(\mathbf{x})$ ``` plot_prob(data, np.exp(log_prob), title='Probabilities') ``` ## Negative Log-Likelihood We need to take the expected value (mean) of all log probabilities. $$\text{nll} = \frac{1}{N} \sum_{n=1}^{N} \log p_\theta(\mathbf{x})$$ ``` score = mg_transformer.score(data) print(f"Negative Log-Likelihood Score: {score:.4f}") ```
github_jupyter
``` !pip install -q --upgrade jax jaxlib from __future__ import print_function, division import jax.numpy as np from jax import grad, jit, vmap from jax import random key = random.PRNGKey(0) ``` # The Autodiff Cookbook *alexbw@, mattjj@* JAX has a pretty general automatic differentiation system. In this notebook, we'll go through a whole bunch of neat autodiff ideas that you can cherry pick for your own work, starting with the basics. ## Gradients ### Starting with `grad` You can differentiate a function with `grad`: ``` grad_tanh = grad(np.tanh) print(grad_tanh(2.0)) ``` `grad` takes a function and returns a function. If you have a Python function `f` that evaluates the mathematical function $f$, then `grad(f)` is a Python function that evaluates the mathematical function $\nabla f$. That means `grad(f)(x)` represents the value $\nabla f(x)$. Since `grad` operates on functions, you can apply it to its own output to differentiate as many times as you like: ``` print(grad(grad(np.tanh))(2.0)) print(grad(grad(grad(np.tanh)))(2.0)) ``` Let's look at computing gradients with `grad` in a linear logistic regression model. First, the setup: ``` def sigmoid(x): return 0.5 * (np.tanh(x / 2) + 1) # Outputs probability of a label being true. def predict(W, b, inputs): return sigmoid(np.dot(inputs, W) + b) # Build a toy dataset. inputs = np.array([[0.52, 1.12, 0.77], [0.88, -1.08, 0.15], [0.52, 0.06, -1.30], [0.74, -2.49, 1.39]]) targets = np.array([True, True, False, True]) # Training loss is the negative log-likelihood of the training examples. def loss(W, b): preds = predict(W, b, inputs) label_probs = preds * targets + (1 - preds) * (1 - targets) return -np.sum(np.log(label_probs)) # Initialize random model coefficients key, W_key, b_key = random.split(key, 3) W = random.normal(W_key, (3,)) b = random.normal(b_key, ()) ``` Use the `grad` function with its `argnums` argument to differentiate a function with respect to positional arguments. 
``` # Differentiate `loss` with respect to the first positional argument: W_grad = grad(loss, argnums=0)(W, b) print('W_grad', W_grad) # Since argnums=0 is the default, this does the same thing: W_grad = grad(loss)(W, b) print('W_grad', W_grad) # But we can choose different values too, and drop the keyword: b_grad = grad(loss, 1)(W, b) print('b_grad', b_grad) # Including tuple values W_grad, b_grad = grad(loss, (0, 1))(W, b) print('W_grad', W_grad) print('b_grad', b_grad) ``` This `grad` API has a direct correspondence to the excellent notation in Spivak's classic *Calculus on Manifolds* (1965), also used in Sussman and Wisdom's [*Structure and Interpretation of Classical Mechanics*](http://mitpress.mit.edu/sites/default/files/titles/content/sicm_edition_2/book.html) (2015) and their [*Functional Differential Geometry*](https://mitpress.mit.edu/books/functional-differential-geometry) (2013). Both books are open-access. See in particular the "Prologue" section of *Functional Differential Geometry* for a defense of this notation. Essentially, when using the `argnums` argument, if `f` is a Python function for evaluating the mathematical function $f$, then the Python expression `grad(f, i)` evaluates to a Python function for evaluating $\partial_i f$. ### Differentiating with respect to nested lists, tuples, and dicts Differentiating with respect to standard Python containers just works, so use tuples, lists, and dicts (and arbitrary nesting) however you like. ``` def loss2(params_dict): preds = predict(params_dict['W'], params_dict['b'], inputs) label_probs = preds * targets + (1 - preds) * (1 - targets) return -np.sum(np.log(label_probs)) print(grad(loss2)({'W': W, 'b': b})) ``` You can [register your own container types](https://github.com/google/jax/issues/446#issuecomment-467105048) to work with not just `grad` but all the JAX transformations (`jit`, `vmap`, etc.). 
### Evaluate a function and its gradient using `value_and_grad` Another convenient function is `value_and_grad` for efficiently computing both a function's value as well as its gradient's value: ``` from jax import value_and_grad loss_value, Wb_grad = value_and_grad(loss, (0, 1))(W, b) print('loss value', loss_value) print('loss value', loss(W, b)) ``` ### Checking against numerical differences A great thing about derivatives is that they're straightforward to check with finite differences: ``` # Set a step size for finite differences calculations eps = 1e-4 # Check b_grad with scalar finite differences b_grad_numerical = (loss(W, b + eps / 2.) - loss(W, b - eps / 2.)) / eps print('b_grad_numerical', b_grad_numerical) print('b_grad_autodiff', grad(loss, 1)(W, b)) # Check W_grad with finite differences in a random direction key, subkey = random.split(key) vec = random.normal(subkey, W.shape) unitvec = vec / np.sqrt(np.vdot(vec, vec)) W_grad_numerical = (loss(W + eps / 2. * unitvec, b) - loss(W - eps / 2. * unitvec, b)) / eps print('W_dirderiv_numerical', W_grad_numerical) print('W_dirderiv_autodiff', np.vdot(grad(loss)(W, b), unitvec)) ``` JAX provides a simple convenience function that does essentially the same thing, but checks up to any order of differentiation that you like: ``` from jax.test_util import check_grads check_grads(loss, (W, b), order=2) # check up to 2nd order derivatives ``` ### Hessian-vector products with `grad`-of-`grad` One thing we can do with higher-order `grad` is build a Hessian-vector product function. (Later on we'll write an even more efficient implementation that mixes both forward- and reverse-mode, but this one will use pure reverse-mode.) A Hessian-vector product function can be useful in a [truncated Newton Conjugate-Gradient algorithm](https://en.wikipedia.org/wiki/Truncated_Newton_method) for minimizing smooth convex functions, or for studying the curvature of neural network training objectives (e.g. 
[1](https://arxiv.org/abs/1406.2572), [2](https://arxiv.org/abs/1811.07062), [3](https://arxiv.org/abs/1706.04454), [4](https://arxiv.org/abs/1802.03451)). For a scalar-valued function $f : \mathbb{R}^n \to \mathbb{R}$, the Hessian at a point $x \in \mathbb{R}^n$ is written as $\partial^2 f(x)$. A Hessian-vector product function is then able to evaluate $\qquad v \mapsto \partial^2 f(x) \cdot v$ for any $v \in \mathbb{R}^n$. The trick is not to instantiate the full Hessian matrix: if $n$ is large, perhaps in the millions or billions in the context of neural networks, then that might be impossible to store. Luckily, `grad` already gives us a way to write an efficient Hessian-vector product function. We just have to use the identity $\qquad \partial^2 f (x) v = \partial [x \mapsto \partial f(x) \cdot v] = \partial g(x)$, where $g(x) = \partial f(x) \cdot v$ is a new scalar-valued function that dots the gradient of $f$ at $x$ with the vector $v$. Notice that we're only ever differentiating scalar-valued functions of vector-valued arguments, which is exactly where we know `grad` is efficient. In JAX code, we can just write this: ``` def hvp(f, x, v): return grad(lambda x: np.vdot(grad(f)(x), v)) ``` This example shows that you can freely use lexical closure, and JAX will never get perturbed or confused. We'll check this implementation a few cells down, once we see how to compute dense Hessian matrices. We'll also write an even better version that uses both forward-mode and reverse-mode. 
## Jacobians and Hessians using `jacfwd` and `jacrev` You can compute full Jacobian matrices using the `jacfwd` and `jacrev` functions: ``` from jax import jacfwd, jacrev # Isolate the function from the weight matrix to the predictions f = lambda W: predict(W, b, inputs) J = jacfwd(f)(W) print("jacfwd result, with shape", J.shape) print(J) J = jacrev(f)(W) print("jacrev result, with shape", J.shape) print(J) ``` These two functions compute the same values (up to machine numerics), but differ in their implementation: `jacfwd` uses forward-mode automatic differentiation, which is more efficient for "tall" Jacobian matrices, while `jacrev` uses reverse-mode, which is more efficient for "wide" Jacobian matrices. For matrices that are near-square, `jacfwd` probably has an edge over `jacrev`. You can also use `jacfwd` and `jacrev` with container types: ``` def predict_dict(params, inputs): return predict(params['W'], params['b'], inputs) J_dict = jacrev(predict_dict)({'W': W, 'b': b}, inputs) for k, v in J_dict.items(): print("Jacobian from {} to logits is".format(k)) print(v) ``` For more details on forward- and reverse-mode, as well as how to implement `jacfwd` and `jacrev` as efficiently as possible, read on! Using a composition of two of these functions gives us a way to compute dense Hessian matrices: ``` def hessian(f): return jacfwd(jacrev(f)) H = hessian(f)(W) print("hessian, with shape", H.shape) print(H) ``` This shape makes sense: if we start with a function $f : \mathbb{R}^n \to \mathbb{R}^m$, then at a point $x \in \mathbb{R}^n$ we expect to get the shapes * $f(x) \in \mathbb{R}^m$, the value of $f$ at $x$, * $\partial f(x) \in \mathbb{R}^{m \times n}$, the Jacobian matrix at $x$, * $\partial^2 f(x) \in \mathbb{R}^{m \times n \times n}$, the Hessian at $x$, and so on. To implement `hessian`, we could have used `jacrev(jacrev(f))` or `jacrev(jacfwd(f))` or any other composition of the two. But forward-over-reverse is typically the most efficient. 
That's because in the inner Jacobian computation we're often differentiating a function with a wide Jacobian (maybe like a loss function $f : \mathbb{R}^n \to \mathbb{R}$), while in the outer Jacobian computation we're differentiating a function with a square Jacobian (since $\nabla f : \mathbb{R}^n \to \mathbb{R}^n$), which is where forward-mode wins out. ## How it's made: two foundational autodiff functions ### Jacobian-Vector products (JVPs, aka forward-mode autodiff) JAX includes efficient and general implementations of both forward- and reverse-mode automatic differentiation. The familiar `grad` function is built on reverse-mode, but to explain the difference in the two modes, and when each can be useful, we need a bit of math background. #### JVPs in math Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}^m$, the Jacobian matrix of $f$ evaluated at an input point $x \in \mathbb{R}^n$, denoted $\partial f(x)$, is often thought of as a matrix in $\mathbb{R}^m \times \mathbb{R}^n$: $\qquad \partial f(x) \in \mathbb{R}^{m \times n}$. But we can also think of $\partial f(x)$ as a linear map, which maps the tangent space of the domain of $f$ at the point $x$ (which is just another copy of $\mathbb{R}^n$) to the tangent space of the codomain of $f$ at the point $f(x)$ (a copy of $\mathbb{R}^m$): $\qquad \partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$. This map is called the [pushforward map](https://en.wikipedia.org/wiki/Pushforward_(differential)) of $f$ at $x$. The Jacobian matrix is just the matrix for this linear map in a standard basis. If we don't commit to one specific input point $x$, then we can think of the function $\partial f$ as first taking an input point and returning the Jacobian linear map at that input point: $\qquad \partial f : \mathbb{R}^n \to \mathbb{R}^n \to \mathbb{R}^m$. 
In particular, we can uncurry things so that given input point $x \in \mathbb{R}^n$ and a tangent vector $v \in \mathbb{R}^n$, we get back an output tangent vector in $\mathbb{R}^m$. We call that mapping, from $(x, v)$ pairs to output tangent vectors, the *Jacobian-vector product*, and write it as $\qquad (x, v) \mapsto \partial f(x) v$ #### JVPs in JAX code Back in Python code, JAX's `jvp` function models this transformation. Given a Python function that evaluates $f$, JAX's `jvp` is a way to get a Python function for evaluating $(x, v) \mapsto (f(x), \partial f(x) v)$. ``` from jax import jvp # Isolate the function from the weight matrix to the predictions f = lambda W: predict(W, b, inputs) key, subkey = random.split(key) v = random.normal(subkey, W.shape) # Push forward the vector `v` along `f` evaluated at `W` y, u = jvp(f, (W,), (v,)) ``` In terms of Haskell-like type signatures, we could write ```haskell jvp :: (a -> b) -> a -> T a -> (b, T b) ``` where we use `T a` to denote the type of the tangent space for `a`. In words, `jvp` takes as arguments a function of type `a -> b`, a value of type `a`, and a tangent vector value of type `T a`. It gives back a pair consisting of a value of type `b` and an output tangent vector of type `T b`. The `jvp`-transformed function is evaluated much like the original function, but paired up with each primal value of type `a` it pushes along tangent values of type `T a`. For each primitive numerical operation that the original function would have applied, the `jvp`-transformed function executes a "JVP rule" for that primitive that both evaluates the primitive on the primals and applies the primitive's JVP at those primal values. That evaluation strategy has some immediate implications about computational complexity: since we evaluate JVPs as we go, we don't need to store anything for later, and so the memory cost is independent of the depth of the computation. 
In addition, the FLOP cost of the `jvp`-transformed function is about 2x the cost of just evaluating the function. Put another way, for a fixed primal point $x$, we can evaluate $v \mapsto \partial f(x) \cdot v$ for about the same cost as evaluating $f$. That memory complexity sounds pretty compelling! So why don't we see forward-mode very often in machine learning? To answer that, first think about how you could use a JVP to build a full Jacobian matrix. If we apply a JVP to a one-hot tangent vector, it reveals one column of the Jacobian matrix, corresponding to the nonzero entry we fed in. So we can build a full Jacobian one column at a time, and to get each column costs about the same as one function evaluation. That will be efficient for functions with "tall" Jacobians, but inefficient for "wide" Jacobians. If you're doing gradient-based optimization in machine learning, you probably want to minimize a loss function from parameters in $\mathbb{R}^n$ to a scalar loss value in $\mathbb{R}$. That means the Jacobian of this function is a very wide matrix: $\partial f(x) \in \mathbb{R}^{1 \times n}$, which we often identify with the Gradient vector $\nabla f(x) \in \mathbb{R}^n$. Building that matrix one column at a time, with each call taking a similar number of FLOPs to evaluating the original function, sure seems inefficient! In particular, for training neural networks, where $f$ is a training loss function and $n$ can be in the millions or billions, this approach just won't scale. To do better for functions like this, we just need to use reverse-mode. ### Vector-Jacobian products (VJPs, aka reverse-mode autodiff) Where forward-mode gives us back a function for evaluating Jacobian-vector products, which we can then use to build Jacobian matrices one column at a time, reverse-mode is a way to get back a function for evaluating vector-Jacobian products (equivalently Jacobian-transpose-vector products), which we can use to build Jacobian matrices one row at a time. 
#### VJPs in math Let's again consider a function $f : \mathbb{R}^n \to \mathbb{R}^m$. Starting from our notation for JVPs, the notation for VJPs is pretty simple: $\qquad (x, v) \mapsto v \partial f(x)$, where $v$ is an element of the cotangent space of $f$ at $x$ (isomorphic to another copy of $\mathbb{R}^m$). When being rigorous, we should think of $v$ as a linear map $v : \mathbb{R}^m \to \mathbb{R}$, and when we write $v \partial f(x)$ we mean function composition $v \circ \partial f(x)$, where the types work out because $\partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$. But in the common case we can identify $v$ with a vector in $\mathbb{R}^m$ and use the two almost interchangeably, just like we might sometimes flip between "column vectors" and "row vectors" without much comment. With that identification, we can alternatively think of the linear part of a VJP as the transpose (or adjoint conjugate) of the linear part of a JVP: $\qquad (x, v) \mapsto \partial f(x)^\mathsf{T} v$. For a given point $x$, we can write the signature as $\qquad \partial f(x)^\mathsf{T} : \mathbb{R}^m \to \mathbb{R}^n$. The corresponding map on cotangent spaces is often called the [pullback](https://en.wikipedia.org/wiki/Pullback_(differential_geometry)) of $f$ at $x$. The key for our purposes is that it goes from something that looks like the output of $f$ to something that looks like the input of $f$, just like we might expect from a transposed linear function. #### VJPs in JAX code Switching from math back to Python, the JAX function `vjp` can take a Python function for evaluating $f$ and give us back a Python function for evaluating the VJP $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$. 
``` from jax import vjp # Isolate the function from the weight matrix to the predictions f = lambda W: predict(W, b, inputs) y, vjp_fun = vjp(f, W) key, subkey = random.split(key) u = random.normal(subkey, y.shape) # Pull back the covector `u` along `f` evaluated at `W` v = vjp_fun(u) ``` In terms of Haskell-like type signatures, we could write ```haskell vjp :: (a -> b) -> a -> (b, CT b -> CT a) ``` where we use `CT a` to denote the type for the cotangent space for `a`. In words, `vjp` takes as arguments a function of type `a -> b` and a point of type `a`, and gives back a pair consisting of a value of type `b` and a linear map of type `CT b -> CT a`. This is great because it lets us build Jacobian matrices one row at a time, and the FLOP cost for evaluating $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$ is only about twice the cost of evaluating $f$. In particular, if we want the gradient of a function $f : \mathbb{R}^n \to \mathbb{R}$, we can do it in just one call. That's how `grad` is efficient for gradient-based optimization, even for objectives like neural network training loss functions on millions or billions of parameters. There's a cost, though: though the FLOPs are friendly, memory scales with the depth of the computation. Also, the implementation is traditionally more complex than that of forward-mode, though JAX has some tricks up its sleeve (that's a story for a future notebook!). For more on how reverse-mode works, see [this tutorial video from the Deep Learning Summer School in 2017](http://videolectures.net/deeplearning2017_johnson_automatic_differentiation/). ## Hessian-vector products using both forward- and reverse-mode In a previous section, we implemented a Hessian-vector product function just using reverse-mode: ``` def hvp(f, x, v): return grad(lambda x: np.vdot(grad(f)(x), v)) ``` That's efficient, but we can do even better and save some memory by using forward-mode together with reverse-mode. 
Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}$ to differentiate, a point $x \in \mathbb{R}^n$ at which to linearize the function, and a vector $v \in \mathbb{R}^n$, the Hessian-vector product function we want is $(x, v) \mapsto \partial^2 f(x) v$ Consider the helper function $g : \mathbb{R}^n \to \mathbb{R}^n$ defined to be the derivative (or gradient) of $f$, namely $g(x) = \partial f(x)$. All we need is its JVP, since that will give us $(x, v) \mapsto \partial g(x) v = \partial^2 f(x) v$. We can translate that almost directly into code: ``` from jax import jvp, grad # forward-over-reverse def hvp(f, primals, tangents): return jvp(grad(f), primals, tangents)[1] ``` Even better, since we didn't have to call `np.dot` directly, this `hvp` function works with arrays of any shape and with arbitrary container types (like vectors stored as nested lists/dicts/tuples), and doesn't even have a dependence on `jax.numpy`. Here's an example of how to use it: ``` def f(X): return np.sum(np.tanh(X)**2) key, subkey1, subkey2 = random.split(key, 3) X = random.normal(subkey1, (30, 40)) V = random.normal(subkey2, (30, 40)) ans1 = hvp(f, (X,), (V,)) ans2 = np.tensordot(hessian(f)(X), V, 2) print(np.allclose(ans1, ans2, 1e-4, 1e-4)) ``` Another way you might consider writing this is using reverse-over-forward: ``` # reverse-over-forward def hvp_revfwd(f, primals, tangents): g = lambda primals: jvp(f, primals, tangents)[1] return grad(g)(primals) ``` That's not quite as good, though, because forward-mode has less overhead than reverse-mode, and since the outer differentiation operator here has to differentiate a larger computation than the inner one, keeping forward-mode on the outside works best: ``` # reverse-over-reverse, only works for single arguments def hvp_revrev(f, primals, tangents): x, = primals v, = tangents return grad(lambda x: np.vdot(grad(f)(x), v))(x) print("Forward over reverse") %timeit -n10 -r3 hvp(f, (X,), (V,)) print("Reverse over forward") 
%timeit -n10 -r3 hvp_revfwd(f, (X,), (V,)) print("Reverse over reverse") %timeit -n10 -r3 hvp_revrev(f, (X,), (V,)) print("Naive full Hessian materialization") %timeit -n10 -r3 np.tensordot(hessian(f)(X), V, 2) ``` ## Composing VJPs, JVPs, and `vmap` ### Jacobian-Matrix and Matrix-Jacobian products Now that we have `jvp` and `vjp` transformations that give us functions to push-forward or pull-back single vectors at a time, we can use JAX's [`vmap` transformation](https://github.com/google/jax#auto-vectorization-with-vmap) to push and pull entire bases at once. In particular, we can use that to write fast matrix-Jacobian and Jacobian-matrix products. ``` # Isolate the function from the weight matrix to the predictions f = lambda W: predict(W, b, inputs) # Pull back the covectors `m_i` along `f`, evaluated at `W`, for all `i`. # First, use a list comprehension to loop over rows in the matrix M. def loop_mjp(f, x, M): y, vjp_fun = vjp(f, x) return np.vstack([vjp_fun(mi) for mi in M]) # Now, use vmap to build a computation that does a single fast matrix-matrix # multiply, rather than an outer loop over vector-matrix multiplies. 
def vmap_mjp(f, x, M): y, vjp_fun = vjp(f, x) return vmap(vjp_fun)(M) key = random.PRNGKey(0) num_covecs = 128 U = random.normal(key, (num_covecs,) + y.shape) loop_vs = loop_mjp(f, W, M=U) print('Non-vmapped Matrix-Jacobian product') %timeit -n10 -r3 loop_mjp(f, W, M=U) print('\nVmapped Matrix-Jacobian product') vmap_vs = vmap_mjp(f, W, M=U) %timeit -n10 -r3 vmap_mjp(f, W, M=U) assert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Matrix-Jacobian Products should be identical' def loop_jmp(f, x, M): # jvp immediately returns the primal and tangent values as a tuple, # so we'll compute and select the tangents in a list comprehension return np.vstack([jvp(f, (W,), (si,))[1] for si in S]) def vmap_jmp(f, x, M): _jvp = lambda s: jvp(f, (W,), (s,))[1] return vmap(_jvp)(M) num_vecs = 128 S = random.normal(key, (num_vecs,) + W.shape) loop_vs = loop_jmp(f, W, M=S) print('Non-vmapped Jacobian-Matrix product') %timeit -n10 -r3 loop_jmp(f, W, M=S) vmap_vs = vmap_jmp(f, W, M=S) print('\nVmapped Jacobian-Matrix product') %timeit -n10 -r3 vmap_jmp(f, W, M=S) assert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Jacobian-Matrix products should be identical' ``` ### The implementation of `jacfwd` and `jacrev` Now that we've seen fast Jacobian-matrix and matrix-Jacobian products, it's not hard to guess how to write `jacfwd` and `jacrev`. We just use the same technique to push-forward or pull-back an entire standard basis (isomorphic to an identity matrix) at once. ``` from jax import jacrev as builtin_jacrev def our_jacrev(f): def jacfun(x): y, vjp_fun = vjp(f, x) # Use vmap to do a matrix-Jacobian product. # Here, the matrix is the Euclidean basis, so we get all # entries in the Jacobian at once. J, = vmap(vjp_fun, in_axes=0)(np.eye(len(y))) return J return jacfun assert np.allclose(builtin_jacrev(f)(W), our_jacrev(f)(W)), 'Incorrect reverse-mode Jacobian results!' 
from jax import jacfwd as builtin_jacfwd def our_jacfwd(f): def jacfun(x): _jvp = lambda s: jvp(f, (x,), (s,))[1] Jt =vmap(_jvp, in_axes=1)(np.eye(len(x))) return np.transpose(Jt) return jacfun assert np.allclose(builtin_jacfwd(f)(W), our_jacfwd(f)(W)), 'Incorrect forward-mode Jacobian results!' ``` Interestingly, [Autograd](https://github.com/hips/autograd) couldn't do this. Our [implementation of reverse-mode `jacobian` in Autograd](https://github.com/HIPS/autograd/blob/96a03f44da43cd7044c61ac945c483955deba957/autograd/differential_operators.py#L60) had to pull back one vector at a time with an outer-loop `map`. Pushing one vector at a time through the computation is much less efficient than batching it all together with `vmap`. Another thing that Autograd couldn't do is `jit`. Interestingly, no matter how much Python dynamism you use in your function to be differentiated, we could always use `jit` on the linear part of the computation. For example: ``` def f(x): try: if x < 3: return 2 * x ** 3 else: raise ValueError except ValueError: return np.pi * x y, f_vjp = vjp(f, 4.) print(jit(f_vjp)(1.)) ``` ## Complex numbers and differentiation JAX is great at complex numbers and differentiation. To support both [holomorphic and non-holomorphic differentiation](https://en.wikipedia.org/wiki/Holomorphic_function), JAX follows [Autograd's convention](https://github.com/HIPS/autograd/blob/master/docs/tutorial.md#complex-numbers) for encoding complex derivatives. Consider a complex-to-complex function $f: \mathbb{C} \to \mathbb{C}$ that we break down into its component real-to-real functions: ``` def f(z): x, y = real(z), imag(z) return u(x, y), v(x, y) * 1j ``` That is, we've decomposed $f(z) = u(x, y) + v(x, y) i$ where $z = x + y i$. We define `grad(f)` to correspond to ``` def grad_f(z): x, y = real(z), imag(z) return grad(u, 0)(x, y) + grad(u, 1)(x, y) * 1j ``` In math symbols, that means we define $\partial f(z) \triangleq \partial_0 u(x, y) + \partial_1 u(x, y)$. 
So we throw out $v$, ignoring the complex component function of $f$ entirely! This convention covers three important cases: 1. If `f` evaluates a holomorphic function, then we get the usual complex derivative, since $\partial_0 u = \partial_1 v$ and $\partial_1 u = - \partial_0 v$. 2. If `f` is evaluates the real-valued loss function of a complex parameter `x`, then we get a result that we can use in gradient-based optimization by taking steps in the direction of the conjugate of `grad(f)(x)`. 3. If `f` evaluates a real-to-real function, but its implementation uses complex primitives internally (some of which must be non-holomorphic, e.g. FFTs used in convolutions) then we get the same result that an implementation that only used real primitives would have given. By throwing away `v` entirely, this convention does not handle the case where `f` evaluates a non-holomorphic function and you want to evaluate all of $\partial_0 u$, $\partial_1 u$, $\partial_0 v$, and $\partial_1 v$ at once. But in that case the answer would have to contain four real values, and so there's no way to express it as a single complex number. You should expect complex numbers to work everywhere in JAX. Here's differentiating through a Cholesky decomposition of a complex matrix: ``` A = np.array([[5., 2.+3j, 5j], [2.-3j, 7., 1.+7j], [-5j, 1.-7j, 12.]]) def f(X): L = np.linalg.cholesky(X) return np.sum((L - np.sin(L))**2) grad(f)(A) ``` For primitives' JVP rules, writing the primals as $z = a + bi$ and the tangents as $t = c + di$, we define the Jacobian-vector product $t \mapsto \partial f(z) \cdot t$ as $t \mapsto \begin{matrix} \begin{bmatrix} 1 & 1 \end{bmatrix} \\ ~ \end{matrix} \begin{bmatrix} \partial_0 u(a, b) & -\partial_0 v(a, b) \\ - \partial_1 u(a, b) i & \partial_1 v(a, b) i \end{bmatrix} \begin{bmatrix} c \\ d \end{bmatrix}$. See Chapter 4 of [Dougal's PhD thesis](https://dougalmaclaurin.com/phd-thesis.pdf) for more details. 
# More advanced autodiff In this notebook, we worked through some easy, and then progressively more complicated, applications of automatic differentiation in JAX. We hope you now feel that taking derivatives in JAX is easy and powerful. There's a whole world of other autodiff tricks and functionality out there. Topics we didn't cover, but hope to in a "Advanced Autodiff Cookbook" include: - Gauss-Newton Vector Products, linearizing once - Custom VJPs and JVPs - Efficient derivatives at fixed-points - Estimating the trace of a Hessian using random Hessian-vector products. - Forward-mode autodiff using only reverse-mode autodiff. - Taking derivatives with respect to custom data types. - Checkpointing (binomial checkpointing for efficient reverse-mode, not model snapshotting). - Optimizing VJPs with Jacobian pre-accumulation.
github_jupyter
# Multivariate Resemblance Analysis (MRA) Dataset A In this notebook the multivariate resemblance analysis of Dataset A is performed for all STDG approaches. ``` #import libraries import warnings warnings.filterwarnings("ignore") import numpy as np import pandas as pd from matplotlib import pyplot as plt import os print('Libraries imported!!') #define directory of functions and actual directory HOME_PATH = '' #home directory of the project FUNCTIONS_DIR = 'EVALUATION FUNCTIONS/RESEMBLANCE' ACTUAL_DIR = os.getcwd() #change directory to functions directory os.chdir(HOME_PATH + FUNCTIONS_DIR) #import functions for univariate resemblance analisys from multivariate_resemblance import get_numerical_correlations from multivariate_resemblance import plot_correlations from multivariate_resemblance import get_categorical_correlations from multivariate_resemblance import compute_mra_score #change directory to actual directory os.chdir(ACTUAL_DIR) print('Functions imported!!') ``` ## 1. Read real and synthetic datasets In this part real and synthetic datasets are read. 
``` #Define global variables DATA_TYPES = ['Real','GM','SDV','CTGAN','WGANGP'] SYNTHESIZERS = ['GM','SDV','CTGAN','WGANGP'] FILEPATHS = {'Real' : HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/A_Diabetes_Data_Real_Train.csv', 'GM' : HOME_PATH + 'SYNTHETIC DATASETS/GM/A_Diabetes_Data_Synthetic_GM.csv', 'SDV' : HOME_PATH + 'SYNTHETIC DATASETS/SDV/A_Diabetes_Data_Synthetic_SDV.csv', 'CTGAN' : HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/A_Diabetes_Data_Synthetic_CTGAN.csv', 'WGANGP' : HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/A_Diabetes_Data_Synthetic_WGANGP.csv'} categorical_columns = ['gender','age','admission_type_id','discharge_disposition_id','admission_source_id','max_glu_serum', 'A1Cresult','change','diabetesMed','readmitted'] data = dict() #iterate over all datasets filepaths and read each dataset for name, path in FILEPATHS.items() : data[name] = pd.read_csv(path) for col in categorical_columns : data[name][col] = data[name][col].astype('category') data ``` ## 2. Plot PPC matrixes and calculate matrixes norms ``` #compute correlation matrixes for all datasets cors_numerical = dict() norms_numerical = dict() for name in DATA_TYPES : cors_numerical[name], norms_numerical[name] = get_numerical_correlations(data[name]) norms_numerical fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(15, 2.5)) axs_idxs = range(6) idx = dict(zip(DATA_TYPES,axs_idxs)) for name_idx, name in enumerate(DATA_TYPES) : ax = axs[idx[name]] matrix = cors_numerical[name] if name_idx != len(DATA_TYPES) - 1: plot_correlations(matrix, ax, color_bar=False) else: plot_correlations(matrix, ax, color_bar=True) if name_idx > 0: ax.set_yticks([]) if name == 'Real' : ax.set_title(name) else : score = compute_mra_score(cors_numerical['Real'], matrix) ax.set_title(name + ' (' + str(score) + ')') fig.savefig('MULTIVARIATE RESEMBLANCE RESULTS/PPC_Matrices.svg', bbox_inches='tight') ``` ## 3. 
Plot correlations for categorical variables and calculate matrix norms
github_jupyter
``` from qiskit.tools.jupyter import * from qiskit import IBMQ IBMQ.load_account() #provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main') provider=IBMQ.get_provider(hub='ibm-q-research', group='uni-maryland-1', project='main') backend = provider.get_backend('ibmq_armonk') backend_config = backend.configuration() assert backend_config.open_pulse, "Backend doesn't support Pulse" dt = backend_config.dt print(f"Sampling time: {dt*1e9} ns") # The configuration returns dt in seconds, so multiply by # 1e9 to get nanoseconds backend_defaults = backend.defaults() import numpy as np # unit conversion factors -> all backend properties returned in SI (Hz, sec, etc) GHz = 1.0e9 # Gigahertz MHz = 1.0e6 # Megahertz us = 1.0e-6 # Microseconds ns = 1.0e-9 # Nanoseconds # We will find the qubit frequency for the following qubit. qubit = 0 # The sweep will be centered around the estimated qubit frequency. center_frequency_Hz = backend_defaults.qubit_freq_est[qubit] # The default frequency is given in Hz # warning: this will change in a future release print(f"Qubit {qubit} has an estimated frequency of {center_frequency_Hz / GHz} GHz.") # scale factor to remove factors of 10 from the data scale_factor = 1e-14 # We will sweep 40 MHz around the estimated frequency frequency_span_Hz = 20 * MHz # in steps of 1 MHz. 
frequency_step_Hz = 1 * MHz # We will sweep 20 MHz above and 20 MHz below the estimated frequency frequency_min = center_frequency_Hz - frequency_span_Hz / 2 frequency_max = center_frequency_Hz + frequency_span_Hz / 2 # Construct an np array of the frequencies for our experiment frequencies_GHz = np.arange(frequency_min / GHz, frequency_max / GHz, frequency_step_Hz / GHz) print(f"The sweep will go from {frequency_min / GHz} GHz to {frequency_max / GHz} GHz \ in steps of {frequency_step_Hz / MHz} MHz.") # number of shots for our experiments NUM_SHOTS = 1024 # samples need to be multiples of 16 def get_closest_multiple_of_16(num): return int(num + 8 ) - (int(num + 8 ) % 16) from qiskit import pulse # This is where we access all of our Pulse features! from qiskit.pulse import Play # This Pulse module helps us build sampled pulses for common pulse shapes from qiskit.pulse import library as pulse_lib # Drive pulse parameters (us = microseconds) drive_sigma_us = 0.075 # This determines the actual width of the gaussian drive_samples_us = drive_sigma_us*8 # This is a truncating parameter, because gaussians don't have # a natural finite length drive_sigma = get_closest_multiple_of_16(drive_sigma_us * us /dt) # The width of the gaussian in units of dt drive_samples = get_closest_multiple_of_16(drive_samples_us * us /dt) # The truncating parameter in units of dt drive_amp = 0.05 # Drive pulse samples drive_pulse = pulse_lib.gaussian(duration=drive_samples, sigma=drive_sigma, amp=drive_amp, name='freq_sweep_excitation_pulse') # Find out which group of qubits need to be acquired with this qubit meas_map_idx = None for i, measure_group in enumerate(backend_config.meas_map): if qubit in measure_group: meas_map_idx = i break assert meas_map_idx is not None, f"Couldn't find qubit {qubit} in the meas_map!" 
inst_sched_map = backend_defaults.instruction_schedule_map measure = inst_sched_map.get('measure', qubits=backend_config.meas_map[meas_map_idx]) ### Collect the necessary channels drive_chan = pulse.DriveChannel(qubit) meas_chan = pulse.MeasureChannel(qubit) acq_chan = pulse.AcquireChannel(qubit) # Create the base schedule # Start with drive pulse acting on the drive channel schedule = pulse.Schedule(name='Frequency sweep') schedule += Play(drive_pulse, drive_chan) # The left shift `<<` is special syntax meaning to shift the start time of the schedule by some duration schedule += measure << schedule.duration # Create the frequency settings for the sweep (MUST BE IN HZ) frequencies_Hz = frequencies_GHz*GHz schedule_frequencies = [{drive_chan: freq} for freq in frequencies_Hz] schedule.draw(label=True) from qiskit import assemble num_shots_per_frequency = 1024 frequency_sweep_program = assemble(schedule, backend=backend, meas_level=1, meas_return='avg', shots=num_shots_per_frequency, schedule_los=schedule_frequencies) job = backend.run(frequency_sweep_program) print(job.job_id()) from qiskit.tools.monitor import job_monitor job_monitor(job) frequency_sweep_results = job.result(timeout=120) # timeout parameter set to 120 seconds import matplotlib.pyplot as plt sweep_values = [] for i in range(len(frequency_sweep_results.results)): # Get the results from the ith experiment res = frequency_sweep_results.get_memory(i)*scale_factor # Get the results for `qubit` from this experiment sweep_values.append(res[qubit]) plt.scatter(frequencies_GHz, np.real(sweep_values), color='black') # plot real part of sweep values plt.xlim([min(frequencies_GHz), max(frequencies_GHz)]) plt.xlabel("Frequency [GHz]") plt.ylabel("Measured signal [a.u.]") plt.show() from scipy.optimize import curve_fit def fit_function(x_values, y_values, function, init_params): fitparams, conv = curve_fit(function, x_values, y_values, init_params) y_fit = function(x_values, *fitparams) return fitparams, y_fit 
fit_params, y_fit = fit_function(frequencies_GHz, np.real(sweep_values), lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C, [-5, 4.975, 1, 5] # initial parameters for curve_fit ) plt.scatter(frequencies_GHz, np.real(sweep_values), color='black') plt.plot(frequencies_GHz, y_fit, color='red') plt.xlim([min(frequencies_GHz), max(frequencies_GHz)]) plt.xlabel("Frequency [GHz]") plt.ylabel("Measured Signal [a.u.]") plt.show() A, rough_qubit_frequency, B, C = fit_params rough_qubit_frequency = rough_qubit_frequency*GHz # make sure qubit freq is in Hz print(f"We've updated our qubit frequency estimate from " f"{round(backend_defaults.qubit_freq_est[qubit] / GHz, 8)} GHz to {round(rough_qubit_frequency/GHz, 8)} GHz.") # This experiment uses these values from the previous experiment: # `qubit`, # `measure`, and # `rough_qubit_frequency`. # Rabi experiment parameters num_rabi_points = 50 # Drive amplitude values to iterate over: 50 amplitudes evenly spaced from 0 to 0.75 drive_amp_min = 0 drive_amp_max = 0.75 drive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points) # Build the Rabi experiments: # A drive pulse at the qubit frequency, followed by a measurement, # where we vary the drive amplitude each time. 
rabi_schedules = [] for drive_amp in drive_amps: rabi_pulse = pulse_lib.gaussian(duration=drive_samples, amp=drive_amp, sigma=drive_sigma, name=f"Rabi drive amplitude = {drive_amp}") this_schedule = pulse.Schedule(name=f"Rabi drive amplitude = {drive_amp}") this_schedule += Play(rabi_pulse, drive_chan) # Reuse the measure instruction from the frequency sweep experiment this_schedule += measure << this_schedule.duration rabi_schedules.append(this_schedule) rabi_schedules[-1].draw(label=True) # Assemble the schedules into a Qobj num_shots_per_point = 1024 rabi_experiment_program = assemble(rabi_schedules, backend=backend, meas_level=1, meas_return='avg', shots=num_shots_per_point, schedule_los=[{drive_chan: rough_qubit_frequency}] * num_rabi_points) print(job.job_id()) job = backend.run(rabi_experiment_program) job_monitor(job) rabi_results = job.result(timeout=120) # center data around 0 def baseline_remove(values): return np.array(values) - np.mean(values) rabi_values = [] for i in range(num_rabi_points): # Get the results for `qubit` from the ith experiment rabi_values.append(rabi_results.get_memory(i)[qubit]*scale_factor) rabi_values = np.real(baseline_remove(rabi_values)) plt.xlabel("Drive amp [a.u.]") plt.ylabel("Measured signal [a.u.]") plt.scatter(drive_amps, rabi_values, color='black') # plot real part of Rabi values plt.show() fit_params, y_fit = fit_function(drive_amps, rabi_values, lambda x, A, B, drive_period, phi: (A*np.cos(2*np.pi*x/drive_period - phi) + B), [4, -4, 0.4, 2*np.pi]) plt.scatter(drive_amps, rabi_values, color='black') plt.plot(drive_amps, y_fit, color='red') drive_period = fit_params[2] # get period of rabi oscillation plt.axvline(drive_period/2, color='red', linestyle='--') plt.axvline(drive_period, color='red', linestyle='--') plt.annotate("", xy=(drive_period, 0), xytext=(drive_period/2,0), arrowprops=dict(arrowstyle="<->", color='red')) plt.annotate("$\pi$", xy=(drive_period/2-0.03, 0.1), color='red') plt.xlabel("Drive amp [a.u.]", 
fontsize=15) plt.ylabel("Measured signal [a.u.]", fontsize=15) plt.show() pi_amp_01 = abs(drive_period / 2) print(f"Pi Amplitude = {pi_amp_01}") pi_pulse_01 = pulse_lib.gaussian(duration=drive_samples, amp=pi_amp_01, sigma=drive_sigma, name='pi_pulse_01') # Create the two schedules # Ground state schedule zero_schedule = pulse.Schedule(name="zero schedule") zero_schedule |= measure # Excited state schedule one_schedule = pulse.Schedule(name="one schedule") one_schedule |= pulse.Play(pi_pulse_01, drive_chan) one_schedule |= measure << one_schedule.duration zero_schedule.draw(label=True) one_schedule.draw(label=True) # Assemble the schedules into a program IQ_01_program = assemble([zero_schedule, one_schedule], backend=backend, meas_level=1, meas_return='single', shots=NUM_SHOTS, schedule_los=[{drive_chan: rough_qubit_frequency}] * 2) IQ_01_job = backend.run(IQ_01_program) print(IQ_01_job.job_id()) job_monitor(IQ_01_job) def get_job_data(job, average): """Retrieve data from a job that has already run. Args: job (Job): The job whose data you want. average (bool): If True, gets the data assuming data is an average. If False, gets the data assuming it is for single shots. Return: list: List containing job result data. """ job_results = job.result(timeout=120) # timeout parameter set to 120 s result_data = [] for i in range(len(job_results.results)): if average: # get avg data result_data.append(job_results.get_memory(i)[qubit]*scale_factor) else: # get single data result_data.append(job_results.get_memory(i)[:, qubit]*scale_factor) return result_data def get_closest_multiple_of_16(num): """Compute the nearest multiple of 16. Needed because pulse enabled devices require durations which are multiples of 16 samples. 
""" return (int(num) - (int(num)%16)) # Get job data (single); split for zero and one IQ_01_data = get_job_data(IQ_01_job, average=False) zero_data = IQ_01_data[0] one_data = IQ_01_data[1] def IQ_01_plot(x_min, x_max, y_min, y_max): """Helper function for plotting IQ plane for |0>, |1>. Limits of plot given as arguments.""" # zero data plotted in blue plt.scatter(np.real(zero_data), np.imag(zero_data), s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\rangle$') # one data plotted in red plt.scatter(np.real(one_data), np.imag(one_data), s=5, cmap='viridis', c='red', alpha=0.5, label=r'$|1\rangle$') # Plot a large dot for the average result of the zero and one states. mean_zero = np.mean(zero_data) # takes mean of both real and imaginary parts mean_one = np.mean(one_data) plt.scatter(np.real(mean_zero), np.imag(mean_zero), s=200, cmap='viridis', c='black',alpha=1.0) plt.scatter(np.real(mean_one), np.imag(mean_one), s=200, cmap='viridis', c='black',alpha=1.0) plt.xlim(x_min, x_max) plt.ylim(y_min,y_max) plt.legend() plt.ylabel('I [a.u.]', fontsize=15) plt.xlabel('Q [a.u.]', fontsize=15) plt.title("0-1 discrimination", fontsize=15) x_min = -25 x_max = -5 y_min = -25 y_max = 0 IQ_01_plot(x_min, x_max, y_min, y_max) def reshape_complex_vec(vec): """Take in complex vector vec and return 2d array w/ real, imag entries. This is needed for the learning. 
Args: vec (list): complex vector of data Returns: list: vector w/ entries given by (real(vec], imag(vec)) """ length = len(vec) vec_reshaped = np.zeros((length, 2)) for i in range(len(vec)): vec_reshaped[i]=[np.real(vec[i]), np.imag(vec[i])] return vec_reshaped # Create IQ vector (split real, imag parts) zero_data_reshaped = reshape_complex_vec(zero_data) one_data_reshaped = reshape_complex_vec(one_data) IQ_01_data = np.concatenate((zero_data_reshaped, one_data_reshaped)) print(IQ_01_data.shape) # verify IQ data shape from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import train_test_split # construct vector w/ 0's and 1's (for testing) state_01 = np.zeros(NUM_SHOTS) # shots gives number of experiments state_01 = np.concatenate((state_01, np.ones(NUM_SHOTS))) print(len(state_01)) # Shuffle and split data into training and test sets IQ_01_train, IQ_01_test, state_01_train, state_01_test = train_test_split(IQ_01_data, state_01, test_size=0.5) # Set up the LDA LDA_01 = LinearDiscriminantAnalysis() LDA_01.fit(IQ_01_train, state_01_train) # test on some simple data print(LDA_01.predict([[0,0], [10, 0]])) # Compute accuracy score_01 = LDA_01.score(IQ_01_test, state_01_test) print(score_01) # Plot separatrix on top of scatter def separatrixPlot(lda, x_min, x_max, y_min, y_max, shots): nx, ny = shots, shots xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx), np.linspace(y_min, y_max, ny)) Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()]) Z = Z[:, 1].reshape(xx.shape) plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='black') IQ_01_plot(x_min, x_max, y_min, y_max) separatrixPlot(LDA_01, x_min, x_max, y_min, y_max, NUM_SHOTS) ``` ## Discriminating the |0⟩, |1⟩ and |2⟩ states ``` from qiskit.pulse.library import Waveform ## replaced by Wafeform after 0.25.0 def apply_sideband(pulse, freq): """Apply a sinusoidal sideband to this pulse at frequency freq. Args: pulse (SamplePulse): The pulse of interest. 
freq (float): LO frequency for which we want to apply the sweep. Return: SamplePulse: Pulse with a sideband applied (oscillates at difference between freq and cal_qubit_freq). """ # time goes from 0 to dt*drive_samples, sine arg of form 2*pi*f*t t_samples = np.linspace(0, dt*drive_samples, drive_samples) sine_pulse = np.sin(2*np.pi*(freq-rough_qubit_frequency)*t_samples) # no amp for the sine # create sample pulse w/ sideband applied # Note: need to make sq_pulse.samples real, multiply elementwise sideband_pulse = Waveform(np.multiply(np.real(pulse.samples), sine_pulse), name='sideband_pulse') return sideband_pulse def create_excited_freq_sweep_program(freqs, drive_power): """Builds a program that does a freq sweep by exciting the |1> state. This allows us to obtain the 1->2 frequency. We get from the |0> to |1> state via a pi pulse using the calibrated qubit frequency. To do the frequency sweep from |1> to |2>, we use a sideband method by tacking a sine factor onto the sweep drive pulse. Args: freqs (np.ndarray(dtype=float)): Numpy array of frequencies to sweep. drive_power (float) : Value of drive amplitude. Raises: ValueError: Thrown if use more than 75 frequencies; currently, an error will be thrown on the backend if you try more than 75 frequencies. Returns: Qobj: Program for freq sweep experiment. """ if len(freqs) > 75: raise ValueError("You can only run 75 schedules at a time.") print(f"The frequency sweep will go from {freqs[0] / GHz} GHz to {freqs[-1]/ GHz} GHz \ using {len(freqs)} frequencies. 
The drive power is {drive_power}.") base_12_pulse = pulse_lib.gaussian(duration=drive_samples, sigma=drive_sigma, amp=drive_power, name='base_12_pulse') schedules = [] for jj, freq in enumerate(freqs): # add sideband to gaussian pulse freq_sweep_12_pulse = apply_sideband(base_12_pulse, freq) # add commands to schedule schedule = pulse.Schedule(name="Frequency = {}".format(freq)) # Add 0->1 pulse, freq sweep pulse and measure schedule |= pulse.Play(pi_pulse_01, drive_chan) schedule |= pulse.Play(freq_sweep_12_pulse, drive_chan) << schedule.duration schedule |= measure << schedule.duration # shift measurement to after drive pulses schedules.append(schedule) num_freqs = len(freqs) # draw a schedule display(schedules[-1].draw(channels=[drive_chan, meas_chan], label=True, scale=1.0)) # assemble freq sweep program # Note: LO is at cal_qubit_freq for each schedule; accounted for by sideband excited_freq_sweep_program = assemble(schedules, backend=backend, meas_level=1, meas_return='avg', shots=NUM_SHOTS, schedule_los=[{drive_chan: rough_qubit_frequency}] * num_freqs) return excited_freq_sweep_program # sweep 400 MHz below 0->1 frequency to catch the 1->2 frequency num_freqs = 75 excited_sweep_freqs = rough_qubit_frequency + np.linspace(-400*MHz, 30*MHz, num_freqs) excited_freq_sweep_program = create_excited_freq_sweep_program(excited_sweep_freqs, drive_power=0.3) # Plot an example schedule to make sure it's valid excited_freq_sweep_job = backend.run(excited_freq_sweep_program) print(excited_freq_sweep_job.job_id()) job_monitor(excited_freq_sweep_job) # Get job data (avg) excited_freq_sweep_data = get_job_data(excited_freq_sweep_job, average=True) # Note: we are only plotting the real part of the signal plt.scatter(excited_sweep_freqs/GHz, excited_freq_sweep_data, color='black') plt.xlim([min(excited_sweep_freqs/GHz)+0.01, max(excited_sweep_freqs/GHz)]) # ignore min point (is off) plt.xlabel("Frequency [GHz]", fontsize=15) plt.ylabel("Measured Signal [a.u.]", fontsize=15) 
plt.title("1->2 Frequency Sweep (first pass)", fontsize=15) plt.show() from scipy.optimize import curve_fit from scipy.signal import find_peaks # Prints out relative maxima frequencies in output_data; height gives lower bound (abs val) def rel_maxima(freqs, output_data, height): """ Prints out relative maxima frequencies in output_data (can see peaks); height gives upper bound (abs val). Be sure to set the height properly or the peak will be ignored! Args: freqs (list): frequency list output_data (list): list of resulting signals height (float): upper bound (abs val) on a peak Returns: list: List containing relative maxima frequencies """ peaks, _ = find_peaks(output_data, height) print("Freq. dips: ", freqs[peaks]) return freqs[peaks] maxima = rel_maxima(excited_sweep_freqs, np.real(excited_freq_sweep_data), 18) approx_12_freq = maxima # smaller range refined sweep num_freqs = 75 refined_excited_sweep_freqs = approx_12_freq + np.linspace(-20*MHz, 20*MHz, num_freqs) refined_excited_freq_sweep_program = create_excited_freq_sweep_program(refined_excited_sweep_freqs, drive_power=0.3) refined_excited_freq_sweep_job = backend.run(refined_excited_freq_sweep_program) print(refined_excited_freq_sweep_job.job_id()) job_monitor(refined_excited_freq_sweep_job) # Get the refined data (average) refined_excited_freq_sweep_data = get_job_data(refined_excited_freq_sweep_job, average=True) # do fit in Hz (refined_excited_sweep_fit_params, refined_excited_sweep_y_fit) = fit_function(refined_excited_sweep_freqs, refined_excited_freq_sweep_data, lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C, [-12, 4.624*GHz, 0.05*GHz, 3*GHz] # initial parameters for curve_fit ) # Note: we are only plotting the real part of the signal plt.scatter(refined_excited_sweep_freqs/GHz, refined_excited_freq_sweep_data, color='black') plt.plot(refined_excited_sweep_freqs/GHz, refined_excited_sweep_y_fit, color='red') plt.xlim([min(refined_excited_sweep_freqs/GHz), 
max(refined_excited_sweep_freqs/GHz)]) plt.xlabel("Frequency [GHz]", fontsize=15) plt.ylabel("Measured Signal [a.u.]", fontsize=15) plt.title("1->2 Frequency Sweep (refined pass)", fontsize=15) plt.show() _, qubit_12_freq, _, _ = refined_excited_sweep_fit_params print(f"Our updated estimate for the 1->2 transition frequency is " f"{round(qubit_12_freq/GHz, 7)} GHz.") # experimental configuration num_rabi_points = 75 # number of experiments (ie amplitudes to sweep out) # Drive amplitude values to iterate over: 75 amplitudes evenly spaced from 0 to 1.0 drive_amp_min = 0 drive_amp_max = 1.0 drive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points) # Create schedule rabi_12_schedules = [] # loop over all drive amplitudes for ii, drive_amp in enumerate(drive_amps): base_12_pulse = pulse_lib.gaussian(duration=drive_samples, sigma=drive_sigma, amp=drive_amp, name='base_12_pulse') # apply sideband at the 1->2 frequency rabi_12_pulse = apply_sideband(base_12_pulse, qubit_12_freq) # add commands to schedule schedule = pulse.Schedule(name='Rabi Experiment at drive amp = %s' % drive_amp) schedule |= pulse.Play(pi_pulse_01, drive_chan) # 0->1 schedule |= pulse.Play(rabi_12_pulse, drive_chan) << schedule.duration # 1->2 Rabi pulse schedule |= measure << schedule.duration # shift measurement to after drive pulse rabi_12_schedules.append(schedule) # Assemble the schedules into a program # Note: The LO frequency is at cal_qubit_freq to support the 0->1 pi pulse; # it is modified for the 1->2 pulse using sidebanding rabi_12_expt_program = assemble(rabi_12_schedules, backend=backend, meas_level=1, meas_return='avg', shots=NUM_SHOTS, schedule_los=[{drive_chan: rough_qubit_frequencyqubit_frequencyqubit_frequencyqubit_frequencyqubit_frequency_qubit_frequency_qubit_frequency}] * num_rabi_points) rabi_12_job = backend.run(rabi_12_expt_program) print(rabi_12_job.job_id()) job_monitor(rabi_12_job) # Get the job data (average) rabi_12_data = get_job_data(rabi_12_job, 
average=True) print(len(rabi_12_data)*0.6) # Note: We only plot the real part of the signal. rabi_12_data = np.real(baseline_remove(rabi_12_data)) (rabi_12_fit_params, rabi_12_y_fit) = fit_function(drive_amps[:45], rabi_12_data[:45], lambda x, A, B, drive_12_period, phi: (A*np.cos(2*np.pi*x/drive_12_period - phi) + B), [2, -2.5, 0.4,0.5]) plt.scatter(drive_amps, rabi_12_data, color='black') plt.plot(drive_amps[:45], rabi_12_y_fit, color='red') drive_12_period = rabi_12_fit_params[2] # account for phi in computing pi amp pi_amp_12 = (drive_12_period/2/np.pi) *(np.pi+rabi_12_fit_params[3]) plt.axvline(pi_amp_12, color='red', linestyle='--') plt.axvline(pi_amp_12+drive_12_period/2, color='red', linestyle='--') plt.annotate("", xy=(pi_amp_12+drive_12_period/2, 0), xytext=(pi_amp_12,0), arrowprops=dict(arrowstyle="<->", color='red')) plt.annotate("$\pi$", xy=(pi_amp_12-0.03, 0.1), color='red') plt.xlabel("Drive amp [a.u.]", fontsize=15) plt.ylabel("Measured signal [a.u.]", fontsize=15) plt.title('Rabi Experiment (1->2)', fontsize=20) plt.show() print(f"Our updated estimate for the 1->2 transition frequency is " f"{round(qubit_12_freq/GHz, 7)} GHz.") print(f"Pi Amplitude (1->2) = {pi_amp_12}") pi_pulse_12 = pulse_lib.gaussian(duration=drive_samples, amp=pi_amp_12, sigma=drive_sigma, name='pi_pulse_12') # make sure this pulse is sidebanded pi_pulse_12 = apply_sideband(pi_pulse_12, qubit_12_freq) # Create the three schedules # Ground state schedule zero_schedule = pulse.Schedule(name="zero schedule") zero_schedule |= measure # Excited state schedule one_schedule = pulse.Schedule(name="one schedule") one_schedule |= pulse.Play(pi_pulse_01, drive_chan) one_schedule |= measure << one_schedule.duration # Excited state schedule two_schedule = pulse.Schedule(name="two schedule") two_schedule |= pulse.Play(pi_pulse_01, drive_chan) two_schedule |= pulse.Play(pi_pulse_12, drive_chan) << two_schedule.duration two_schedule |= measure << two_schedule.duration # Assemble the schedules 
into a program IQ_012_program = assemble([zero_schedule, one_schedule, two_schedule], backend=backend, meas_level=1, meas_return='single', shots=NUM_SHOTS, schedule_los=[{drive_chan: rough_qubit_frequency}] * 3) # Assemble the schedules into a program IQ_012_program = assemble([zero_schedule, one_schedule, two_schedule], backend=backend, meas_level=1, meas_return='single', shots=NUM_SHOTS, schedule_los=[{drive_chan: rough_qubit_frequency}] * 3) IQ_012_job = backend.run(IQ_012_program) print(IQ_012_job.job_id()) job_monitor(IQ_012_job) # Get job data (single); split for zero, one and two IQ_012_data = get_job_data(IQ_012_job, average=False) zero_data = IQ_012_data[0] one_data = IQ_012_data[1] two_data = IQ_012_data[2] def IQ_012_plot(x_min, x_max, y_min, y_max): """Helper function for plotting IQ plane for 0, 1, 2. Limits of plot given as arguments.""" # zero data plotted in blue plt.scatter(np.real(zero_data), np.imag(zero_data), s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\rangle$') # one data plotted in red plt.scatter(np.real(one_data), np.imag(one_data), s=5, cmap='viridis', c='red', alpha=0.5, label=r'$|1\rangle$') # two data plotted in green plt.scatter(np.real(two_data), np.imag(two_data), s=5, cmap='viridis', c='green', alpha=0.5, label=r'$|2\rangle$') # Plot a large dot for the average result of the 0, 1 and 2 states. 
mean_zero = np.mean(zero_data) # takes mean of both real and imaginary parts mean_one = np.mean(one_data) mean_two = np.mean(two_data) plt.scatter(np.real(mean_zero), np.imag(mean_zero), s=200, cmap='viridis', c='black',alpha=1.0) plt.scatter(np.real(mean_one), np.imag(mean_one), s=200, cmap='viridis', c='black',alpha=1.0) plt.scatter(np.real(mean_two), np.imag(mean_two), s=200, cmap='viridis', c='black',alpha=1.0) plt.xlim(x_min, x_max) plt.ylim(y_min,y_max) plt.legend() plt.ylabel('I [a.u.]', fontsize=15) plt.xlabel('Q [a.u.]', fontsize=15) plt.title("0-1-2 discrimination", fontsize=15) x_min = -10 x_max = 20 y_min = 10 y_max = 35 IQ_012_plot(x_min, x_max, y_min, y_max) # Create IQ vector (split real, imag parts) zero_data_reshaped = reshape_complex_vec(zero_data) one_data_reshaped = reshape_complex_vec(one_data) two_data_reshaped = reshape_complex_vec(two_data) IQ_012_data = np.concatenate((zero_data_reshaped, one_data_reshaped, two_data_reshaped)) print(IQ_012_data.shape) # verify IQ data shape # construct vector w/ 0's, 1's and 2's (for testing) state_012 = np.zeros(NUM_SHOTS) # shots gives number of experiments state_012 = np.concatenate((state_012, np.ones(NUM_SHOTS))) state_012 = np.concatenate((state_012, 2*np.ones(NUM_SHOTS))) print(len(state_012)) # Shuffle and split data into training and test sets IQ_012_train, IQ_012_test, state_012_train, state_012_test = train_test_split(IQ_012_data, state_012, test_size=0.5) # Set up the LDA LDA_012 = LinearDiscriminantAnalysis() LDA_012.fit(IQ_012_train, state_012_train) # test on some simple data print(LDA_012.predict([[0, 0], [-10, 0], [-15, -5]])) # Compute accuracy score_012 = LDA_012.score(IQ_012_test, state_012_test) print(score_012) IQ_012_plot(x_min, x_max, y_min, y_max) separatrixPlot(LDA_012, x_min, x_max, y_min, y_max, NUM_SHOTS) from sklearn.neural_network import MLPClassifier IQ_012_plot(x_min, x_max, y_min, y_max) NN_012 = MLPClassifier(solver='lbfgs',max_iter=1000) NN_012.fit(IQ_012_train, 
state_012_train) separatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS) print(NN_012.score(IQ_012_test, state_012_test)) from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis IQ_012_plot(x_min, x_max, y_min, y_max) QDA_012 = QuadraticDiscriminantAnalysis() QDA_012.fit(IQ_012_train, state_012_train) scoreq_012 = QDA_012.score(IQ_012_test, state_012_test) print(scoreq_012) from sklearn.neural_network import MLPClassifier IQ_012_plot(x_min, x_max, y_min, y_max) NN_012 = MLPClassifier(hidden_layer_sizes = (100,100,),solver='lbfgs',max_iter=1000) NN_012.fit(IQ_012_train, state_012_train) separatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS) print(NN_012.score(IQ_012_test, state_012_test)) ``` ### Qudit Gates Class Currently we only have a few gate operations supported: xcyc (cyclic shift), axcyc (acyclic shift), x01, x12,x02, h01, h12, and h02 gates. ``` from qiskit.pulse import * from qiskit.pulse import library as pulse_lib class QuditGates: def __init__(self, pi_amp_01, pi_amp_12, trans_freq_01, trans_freq_12, chan, dt): # self.sched = sched self.dt = dt self.pi_amp_01 = pi_amp_01 self.pi_amp_12 = pi_amp_12 self.chan = chan self.trans_freq_01 = trans_freq_01 self.trans_freq_12 = trans_freq_12 drive_sigma = 0.075*1e-6 self.drive_sigma_dt = int(drive_sigma/dt) - (int(drive_sigma/dt%16)) self.drive_samples_dt = 8*self.drive_sigma_dt self.pi_pul_01 = pulse_lib.gaussian(duration=self.drive_samples_dt, amp=self.pi_amp_01, sigma=self.drive_sigma_dt, ) self.base_pulse = pulse_lib.gaussian(duration=self.drive_samples_dt, sigma=self.drive_sigma_dt, amp=self.pi_amp_12) # pi_pulse_12 = apply_sideband_n(base_pulse, trans_freq_12) def apply_sideband_n(self, base_pulse, freq): t_samples = np.linspace(0, dt*self.drive_samples_dt, num=self.drive_samples_dt) sine_pulse = np.sin(2*np.pi*(freq-self.trans_freq_01)*t_samples) sideband_pulse = Waveform(np.multiply(np.real(base_pulse.samples), sine_pulse), name='sideband_pulse') return sideband_pulse def 
xcyc(self): '''This the gate corresponding to the operator |n> --> |(n+1) mod 3>.''' pi_pul_12 = self.apply_sideband_n(self.base_pulse, trans_freq_12) with build() as xcyc_pulse: play(pi_pul_12, self.chan) play(self.pi_pul_01, self.chan) sched = Schedule() sched += xcyc_pulse return sched def xacyc(self): '''This the gate corresponding to the operator |n> --> |(n-1) mod 3>.''' pi_pul_12 = self.apply_sideband_n(self.base_pulse, trans_freq_12) with build as xacyc_pulse: play(self.pi_pul_01, self.chan) play(pi_pul_12, self.chan) sched = Schedule() sched += xacyc_pulse return sched def x01(self, chan): '''This the gate corresponding to the operator |0> --> |1> and |1> --> |0>.''' with pulse.build as x01_pulse: pulse.play(self.pi_pul_01, chan) sched = Schedule() return sched += x01_pulse def x12(self, chan): '''This the gate corresponding to the operator |1> --> |2> and |2> --> |1>.''' with pulse.build as x12_pulse: pulse.play(self.pi_pul_12, chan) sched = Schedule() return sched += x12_pulse def x02(self, chan): '''This the gate corresponding to the operator |0> --> |2> and |2> --> |0>.''' with pulse.build as x02_pulse: pulse.play(self.pi_pul_01, chan) pulse.play(self.pi_pul_12, chan) pulse.play(self.pi_pul_01, chan) sched = Schedule() return sched += x02_pulse def h01(self): qc = QuantumCircuit(1) qc.h(0) with build(self.backend) as h01_pulse: call(qc) sched = Schedule() sched += h01_pulse return sched def h12(self): qc = QuantumCircuit(1) qc.h(0) with build(self.backend) as h12_pulse: call(qc) sched = Schedule() sched += h12_pulse return sched def h02(self): qc = QuantumCircuit(1) qc.h(0) with build(self.backend) as h02_pulse: call(qc) sched = Schedule() sched += Play(self.pi_pul_01,self.chan) sched += h02_pulse sched += Play(self.pi_pul_01,self.chan) return sched from qiskit.pulse import Schedule trans_freq_01=4971800930 trans_freq_12=4623781600 pi_amp_01=0.14096173612665405 pi_amp_12=0.2493075855271005 gate=QuditGates(pi_amp_01, pi_amp_12, trans_freq_01, 
trans_freq_12, drive_chan, dt) cyclic_shift_sched = Schedule() cyclic_shift_sched += gate.xcyc() inst_sched_map = backend_defaults.instruction_schedule_map measure = inst_sched_map.get('measure', qubits=backend_config.meas_map[meas_map_idx]) cyclic_shift_sched |= measure << cyclic_shift_sched.duration cyclic_shift_program = assemble(cyclic_shift_sched, backend=backend, meas_level=1, meas_return='single', shots=NUM_SHOTS, schedule_los=[{drive_chan: rough_qubit_frequency}]) cyclic_shift_sched.draw(label=True) cyclic_shift_job = backend.run(cyclic_shift_program) job_monitor(cyclic_shift_job) cyclic_shift_data = get_job_data(cyclic_shift_job, average=False) cyclic_shift_data cyclic_shift_reshaped=reshape_complex_vec(cyclic_shift_data[0]) def IQ_012_measure_plot(x_min, x_max, y_min, y_max): """Helper function for plotting IQ plane for 0, 1, 2. Limits of plot given as arguments.""" # measured data plotted in blue plt.scatter(np.real(cyclic_shift_data), np.imag(cyclic_shift_data), s=5, cmap='viridis', c='blue', alpha=0.5, label=r'$|0\rangle$') mean_data = np.mean(cyclic_shift_data) # takes mean of both real and imaginary parts plt.scatter(np.real(mean_data), np.imag(mean_data), s=200, cmap='viridis', c='black',alpha=1.0) plt.xlim(x_min, x_max) plt.ylim(y_min,y_max) plt.legend() plt.ylabel('I [a.u.]', fontsize=15) plt.xlabel('Q [a.u.]', fontsize=15) plt.title("0-1-2 discrimination of measured data", fontsize=15) x_min = -30 x_max = 30 y_min = -60 y_max = 30 IQ_012_measure_plot(x_min, x_max, y_min, y_max) output=NN_012.predict(cyclic_shift_reshaped) separatrixPlot(NN_012, x_min, x_max, y_min, y_max, NUM_SHOTS) print(output) unique, counts = np.unique(output, return_counts=True) dict(zip(unique, counts)) ``` The discriminator seems to be away from the measure data of the cyclic shift. This problem needs to be solved in the future. 
### Compare to LDA discriminator ``` x_min = -30 x_max = 30 y_min = -60 y_max = 30 IQ_012_measure_plot(x_min, x_max, y_min, y_max) output_LDA=LDA_012.predict(cyclic_shift_reshaped) separatrixPlot(LDA_012, x_min, x_max, y_min, y_max, NUM_SHOTS) unique, counts = np.unique(output_LDA, return_counts=True) dict(zip(unique, counts)) ``` It has the same issue of classification of the measured data.
github_jupyter
# eICU Experiments ``` import tensorflow as tf import numpy as np import h5py from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import tensorflow_probability as tfp import sklearn from sklearn import metrics import seaborn as sns import random ``` Follow Read-me instruction to download the medical data. After having downloaded the data in '../data/eICU_data.h5', upload the data: ``` def get_data(test=True): hf = h5py.File('../data/eICU_data.h5', 'r') data_total = np.array(hf.get('x')) endpoints_total = np.array(hf.get('y')) hf.close() data_train, data_val, y_train, endpoints_total_val = train_test_split(data_total[:int(len(data_total) * 0.85)], endpoints_total[:int(len(data_total) * 0.85)], test_size=0.20, random_state=42) if test: data_val = data_total[int(len(data_total) * 0.85):] endpoints_total_val = endpoints_total[int(len(data_total) * 0.85):] return data_train, data_val, y_train, endpoints_total_val def batch_generator(data_train, data_val, endpoints_total_val, batch_size, mode="train"): while True: if mode == "train": for i in range(len(data_train) // batch_size): time_series = data_train[i * batch_size: (i + 1) * batch_size] yield time_series, i elif mode == "val": for i in range(len(data_val) // batch_size): time_series = data_val[i * batch_size: (i + 1) * batch_size] time_series_endpoint = endpoints_total_val[i * batch_size: (i + 1) * batch_size] yield time_series, time_series_endpoint, i else: raise ValueError("The mode has to be in {train, val}") ``` Insert the name of the job in ex_name: ``` ex_name="hyperopt_LSTM_20_16-16_2020-02-17_35a17" ``` Get the data: ``` batch_size=128 modelpath = "../models/{}/{}".format(ex_name, ex_name) data_train, data_val, endpoints_total_train, endpoints_total_val = get_data(test=True) ``` ## Create heat-maps, trajectories and probability distributions ``` som_dim = [16,16] latent_dim=10 val_gen = batch_generator(data_train, data_val, endpoints_total_val, 300, mode="val") num_batches = 
len(data_val) // 300 tf.reset_default_graph() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.import_meta_graph(modelpath+".meta") saver.restore(sess, modelpath) graph = tf.get_default_graph() k = graph.get_tensor_by_name("k/k:0") z_e = graph.get_tensor_by_name("z_e_sample/z_e:0") x = graph.get_tensor_by_name("inputs/x:0") is_training = graph.get_tensor_by_name("is_training/is_training:0") graph = tf.get_default_graph() z_e_p = graph.get_tensor_by_name("prediction/next_state/input_lstm:0") q = graph.get_tensor_by_name("q/distribution/q:0") embeddings = graph.get_tensor_by_name("embeddings/embeddings:0") reconstruction = graph.get_tensor_by_name("reconstruction_e/x_hat:0") print("Evaluation...") test_k_all = [] labels_val_all = [] z_e_all=[] z_q_all = [] qq = [] for i in range(num_batches): batch_data, batch_labels, ii = next(val_gen) f_dic = {x: batch_data} test_k_all.extend(sess.run(k, feed_dict=f_dic)) labels_val_all.extend(batch_labels) z_q_all.extend(sess.run(q, feed_dict=f_dic)) qq.extend(sess.run(q, feed_dict=f_dic)) z_e_all.extend(sess.run(z_e, feed_dict=f_dic)) labels_val_all = np.array(labels_val_all) k_all = np.array(test_k_all) qq = np.array(qq) labels_val_all = np.reshape(labels_val_all, (-1, labels_val_all.shape[-1])) NMI_24 = metrics.normalized_mutual_info_score(labels_val_all[:, 3], k_all) NMI_12 = metrics.normalized_mutual_info_score(labels_val_all[:, 2], k_all) NMI_6 = metrics.normalized_mutual_info_score(labels_val_all[:, 1], k_all) NMI_1 = metrics.normalized_mutual_info_score(labels_val_all[:, 0], k_all) embb = sess.run(embeddings, feed_dict={x: data_val}) ``` Get the labels: ``` labels_12 = labels_val_all[:,2] labels_1 = labels_val_all[:,0] labels_6 = labels_val_all[:,1] labels_24 = labels_val_all[:,3] hosp_disc_1 = labels_val_all[:,4] hosp_disc_6 = labels_val_all[:,5] hosp_disc_12 = labels_val_all[:,6] hosp_disc_24 = labels_val_all[:,7] u_disc_1 = labels_val_all[:,8] u_disc_6 = labels_val_all[:,9] 
u_disc_12 = labels_val_all[:,10] u_disc_24 = labels_val_all[:, 11] labels_1 = labels_1.astype(int) labels_6 = labels_6.astype(int) labels_12 = labels_12.astype(int) labels_24 = labels_24.astype(int) hosp_disc_12 = hosp_disc_12.astype(int) hosp_disc_24 = hosp_disc_24.astype(int) hosp_disc_1 = hosp_disc_1.astype(int) hosp_disc_6 = hosp_disc_6.astype(int) ``` ### Moran Index ``` sd = som_dim[0]*som_dim[1] mean = np.sum(labels_val_all[:, 0]) / len(labels_val_all[:, 0]) ones = np.ones((len(np.reshape(k_all, (-1))))) clust_matr1 = np.zeros(som_dim[0]*som_dim[1]) labels= labels_val_all[:, 0] for i in range(som_dim[0]*som_dim[1]): dd = np.sum(ones[np.where(np.reshape(k_all, (-1))==i)]) if dd == 0: s1 = 0 else: s1 = np.sum(labels[np.where(np.reshape(k_all, (-1))==i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)]) clust_matr1[i] = s1 k = np.arange(0,sd) k1 = k // som_dim[0] k2 = k % som_dim[0] W = np.zeros((sd,sd)) for i in range(sd): for j in range(sd): d1 = np.abs((k1[i] - k1[j])) d2 = np.abs((k2[i] - k2[j])) d1 = min(som_dim[0]-d1, d1) d2 = min(som_dim[0]-d2, d2) W[i,j] = np.exp(-(d1+d2)) if i==j: W[i,j]=0 M = 0 N_n = 0 for i in range(sd): for j in range(sd): M += (clust_matr1[i] -mean)*(clust_matr1[j] -mean)* W[i,j] for i in range(sd): N_n += (clust_matr1[i]-mean)**2 W_n = np.sum(W) I = M * sd / (N_n*W_n) print(I) ``` ### APACHE score heatmap: ``` labels = labels_1 ones = np.ones((len(np.reshape(k_all, (-1))))) clust_matr1 = np.zeros(som_dim[0]*som_dim[1]) clust_matr2 = np.zeros(som_dim[0]*som_dim[1]) for i in range(som_dim[0]*som_dim[1]): s1 = np.sum(labels[np.where(np.reshape(k_all, (-1))==i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)]) clust_matr1[i] = s1 clust_matr1 = np.reshape(clust_matr1, (som_dim[0],som_dim[1])) ax = sns.heatmap(clust_matr1, cmap="YlGnBu") plt.show() ``` ### Trajectories ``` T = [] S = [] for i in range(1000): h = np.reshape(u_disc_1, (-1,72)) if np.max(h[i]) == 1: T.append(i) else: S.append(i) ind_r = 
np.random.random_integers(0, 50, 10) ind_s = np.random.random_integers(0, 50, 10) T = np.array(T) S = np.array(S) a = np.concatenate([S[ind_s], T[ind_r]]) k_all.shape labels = labels_1 it = 0 k_all = np.reshape(k_all, (-1,72)) fig, ax = plt.subplots(5, 4, figsize=(50,43)) ones = np.ones((len(np.reshape(k_all, (-1))))) clust_matr1 = np.zeros(som_dim[0]*som_dim[1]) clust_matr2 = np.zeros(som_dim[0]*som_dim[1]) for i in range(som_dim[0]*som_dim[1]): s1 = np.sum(labels[np.where(np.reshape(k_all, (-1)) == i)]) / np.sum(ones[np.where(np.reshape(k_all, (-1))==i)]) clust_matr1[i] = s1 clust_matr1 = np.reshape(clust_matr1, (som_dim[0],som_dim[1])) for t in a: #fig, ax = plt.subplots(figsize=(10,7.5)) if it > 9: c = "r" #print(t) else: c = "g" cc = it % 4 rr = it // 4 g = sns.heatmap(clust_matr1, cmap="YlGnBu",ax=ax[rr][cc]) k_1 = k_all[t] // som_dim[1] k_2 = k_all[t] % som_dim[1] ax[rr][cc].plot(k_2[:] + 0.5, k_1[:] + 0.5, color=c, linewidth=4) ax[rr][cc].scatter(k_2[0] + 0.5, k_1[0] + 0.5, color=c, s=200, label='Start') ax[rr][cc].scatter(k_2[1:-1] + 0.5, k_1[1:-1] + 0.5, color=c, linewidth=5, marker='.') ax[rr][cc].scatter(k_2[-1] + 0.5, k_1[-1] + 0.5, color=c, s=500, linewidth=4, marker='x', label='End') ax[rr][cc].legend(loc=2, prop={'size': 20}) it +=1 plt.show() ``` Probability distribution over trajectory: ``` qq.shape prob_q = np.reshape(qq, (-1, 72, som_dim[0]*som_dim[1])) i = np.random.randint(0, 50) #Randomly sampled patient it = 0 fig, ax = plt.subplots(2, 3, figsize=(50,25)) k_all = np.reshape(k_all, (-1,72)) for t in [0, 17, 40, 57, 64, 71]: cc = it % 3 rr = it // 3 k_1 = k_all[i] // som_dim[1] k_2 = k_all[i] % som_dim[1] c = "black" g1 = sns.heatmap(np.reshape(prob_q[i, t], (som_dim[0],som_dim[1])), cmap='Reds', alpha=1, ax=ax[rr][cc]) ax[rr][cc].plot(k_2[:] + 0.5, k_1[:] + 0.5, color=c, linewidth=6) ax[rr][cc].scatter(k_2[0] + 0.5, k_1[0] + 0.5, color=c, s=800, label='Start') ax[rr][cc].scatter(k_2[1:-1] + 0.5, k_1[1:-1] + 0.5, color=c, linewidth=10, 
marker='.') ax[rr][cc].scatter(k_2[-1] + 0.5, k_1[-1] + 0.5, color=c, s=1200, linewidth=10, marker='x', label='End') ax[rr][cc].legend(loc=2, prop={'size': 30}) ax[rr][cc].set_title("Time-step = {}".format(it*14), fontsize=40) it +=1 plt.show() ``` ## Unrolling future time-steps and prediction ``` def z_dist_flat(z_e, embeddings): """Computes the distances between the encodings and the embeddings.""" emb = np.reshape(embeddings, (som_dim[0]*som_dim[1], -1)) z = np.reshape(z_e, (z_e.shape[0], 1, latent_dim)) z = np.tile(z, [1,som_dim[0]*som_dim[1], 1]) z_dist = np.square(z-emb) z_dist_red = np.sum(z_dist, axis=-1) return z_dist_red val_gen = batch_generator(data_train, data_val, endpoints_total_val, 300, mode="val") tf.reset_default_graph() num_batches = len(data_val) // 300 latent_dim = 20 num_pred = 6 som = 16*16 max_n_step = 72 with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.import_meta_graph(modelpath+".meta") saver.restore(sess, modelpath) graph = tf.get_default_graph() k = graph.get_tensor_by_name("k/k:0") z_e = graph.get_tensor_by_name("z_e_sample/z_e:0") next_z_e = graph.get_tensor_by_name("prediction/next_z_e:0") x = graph.get_tensor_by_name("inputs/x:0") is_training = graph.get_tensor_by_name("is_training/is_training:0") graph = tf.get_default_graph() init_1 = graph.get_tensor_by_name("prediction/next_state/init_state:0") z_e_p = graph.get_tensor_by_name("prediction/next_state/input_lstm:0") state1 = graph.get_tensor_by_name("prediction/next_state/next_state:0") q = graph.get_tensor_by_name("q/distribution/q:0") embeddings = graph.get_tensor_by_name("embeddings/embeddings:0") z_p = graph.get_tensor_by_name('reconstruction_e/decoder/z_e:0') reconstruction = graph.get_tensor_by_name("reconstruction_e/x_hat:0") print("Evaluation...") training_dic = {is_training: True, z_e_p: np.zeros((max_n_step * len(data_val), latent_dim)), init_1: np.zeros((2, batch_size, 100)), z_p: np.zeros((max_n_step * len(data_val), 
latent_dim))} k_all = [] z_e_all=[] z_q_all = [] qq = [] x_rec = [] for i in range(num_batches): batch_data, batch_labels, ii = next(val_gen) f_dic = {x: batch_data} k_all.extend(sess.run(k, feed_dict=f_dic)) z_q_all.extend(sess.run(q, feed_dict=f_dic)) z_e_all.extend(sess.run(z_e, feed_dict=f_dic)) qq.extend(sess.run(q, feed_dict=f_dic)) f_dic.update(training_dic) x_rec.extend(sess.run(reconstruction, feed_dict=f_dic)) z_e_all = np.array(z_e_all) k_all = np.array(k_all) qq = np.array(qq) x_rec = np.array(x_rec) z_e_all = z_e_all.reshape((-1, max_n_step, latent_dim)) k_all = k_all.reshape((-1, max_n_step)) t = 72-num_pred embeddings = sess.run(embeddings, feed_dict={x: data_val[:, :t, :]}) embeddings = np.reshape(embeddings,(-1, latent_dim)) z_e_o = z_e_all[:, :t, :] k_o = k_all[:, :t] k_eval=[] next_z_e_o = [] state1_o =[] for i in range(num_batches): batch_data, batch_labels, ii = next(val_gen) batch_data=batch_data[:, :t, :] f_dic = {x: batch_data} f_dic.update(training_dic) next_z_e_o.extend(sess.run(next_z_e, feed_dict=f_dic)) if i == 0: state1_o = sess.run(state1, feed_dict=f_dic) else: state1_o = np.concatenate([state1_o, sess.run(state1, feed_dict=f_dic)], axis=1) next_z_e_o = np.array(next_z_e_o) state1_o = np.array(state1_o) next_z_e_o_all = np.reshape(next_z_e_o[:, -1, :], (-1,1,latent_dim)) next_z_e_o = next_z_e_o[:, -1, :] k_next = np.argmin(z_dist_flat(next_z_e_o, embeddings), axis=-1) k_o = np.concatenate([k_o, np.expand_dims(k_next,1)], axis=1) z_e_o = np.concatenate([z_e_o, np.expand_dims(next_z_e_o, 1)], axis=1) f_dic = {x: np.zeros((len(data_val),1, 98)), is_training: False, z_e_p: np.zeros((1 * len(data_val), latent_dim)), z_p: next_z_e_o, init_1: np.zeros((2, batch_size, 100))} x_pred_hat = np.reshape(sess.run(reconstruction, feed_dict=f_dic), (-1, 1, 98)) for i in range(num_pred-1): print(i) inp = data_val[:1500, (t + i), :] f_dic = {x: np.reshape(inp, (inp.shape[0],1,inp.shape[1]))} val_dic = {is_training: False, z_e_p: next_z_e_o, init_1: 
state1_o, z_p: np.zeros((max_n_step * len(inp), latent_dim))} f_dic.update(val_dic) next_z_e_o = sess.run(next_z_e, feed_dict=f_dic) state1_o = sess.run(state1, feed_dict=f_dic) next_z_e_o_all = np.concatenate([next_z_e_o_all, next_z_e_o], axis=1) k_next = np.argmin(z_dist_flat(next_z_e_o, embeddings), axis=-1) k_o = np.concatenate([k_o, np.expand_dims(k_next,1)], axis=1) z_e_o = np.concatenate([z_e_o, next_z_e_o], axis=1) next_z_e_o = np.reshape(next_z_e_o, (-1, latent_dim)) f_dic = {x: np.zeros((len(data_val),1, 98)), is_training: False, z_e_p: np.zeros((max_n_step * len(data_val), latent_dim)), z_p: next_z_e_o, init_1: np.zeros((2, batch_size, 100))} final_x = sess.run(reconstruction, feed_dict=f_dic) x_pred_hat = np.concatenate([x_pred_hat, np.reshape(final_x, (-1, 1, 98))], axis = 1) f_dic = {x: np.zeros((1500,1, 98)), is_training: False, z_e_p: np.zeros((max_n_step * 1500, latent_dim)), z_p: z_e_all[:, t-1, :], init_1: np.zeros((2, batch_size, 100))} final_x = sess.run(reconstruction, feed_dict=f_dic) sklearn.metrics.mean_squared_error(np.reshape(x_pred_hat, (-1, 98)), np.reshape(data_val[:1500, -num_pred:], (-1, 98))) ``` Accuracy of unrolled state: ``` k_true = np.reshape(k_all[:, -num_pred:], (-1)) k_pred = np.reshape(k_o[:, -num_pred:], (-1)) tot = 0 acc = 0 for i in range(len(k_true)): tot += 1 if k_true[i] == k_pred[i]: acc += 1 acc = acc / tot acc ```
github_jupyter
# TensorFlow Tutorial Welcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow: - Initialize variables - Start your own session - Train algorithms - Implement a Neural Network Programing frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code. ## 1 - Exploring the Tensorflow Library To start, you will import the library: ``` import math import numpy as np import h5py import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.python.framework import ops from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict %matplotlib inline np.random.seed(1) ``` Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example. $$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$ ``` y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36. y = tf.constant(39, name='y') # Define y. Set to 39 loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss init = tf.global_variables_initializer() # When init is run later (session.run(init)), # the loss variable will be initialized and ready to be computed with tf.Session() as session: # Create a session and print the output session.run(init) # Initializes the variables print(session.run(loss)) # Prints the loss ``` Writing and running programs in TensorFlow has the following steps: 1. 
Create Tensors (variables) that are not yet executed/evaluated. 2. Write operations between those Tensors. 3. Initialize your Tensors. 4. Create a Session. 5. Run the Session. This will run the operations you'd written above. Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value. Now let us look at an easy example. Run the cell below: ``` a = tf.constant(2) b = tf.constant(10) c = tf.multiply(a,b) print(c) ``` As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it. ``` sess = tf.Session() print(sess.run(c)) ``` Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**. Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later. To specify values for a placeholder, you can pass in values by using a "feed dictionary" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session. ``` # Change the value of x in the feed_dict x = tf.placeholder(tf.int64, name = 'x') print(sess.run(2 * x, feed_dict = {x: 3})) sess.close() ``` When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session. 
Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph. ### 1.1 - Linear function Lets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector. **Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1): ```python X = tf.constant(np.random.randn(3,1), name = "X") ``` You might find the following functions helpful: - tf.matmul(..., ...) to do a matrix multiplication - tf.add(..., ...) to do an addition - np.random.randn(...) to initialize randomly ``` # GRADED FUNCTION: linear_function def linear_function(): """ Implements a linear function: Initializes W to be a random tensor of shape (4,3) Initializes X to be a random tensor of shape (3,1) Initializes b to be a random tensor of shape (4,1) Returns: result -- runs the session for Y = WX + b """ np.random.seed(1) ### START CODE HERE ### (4 lines of code) X = np.random.randn(3, 1) W = np.random.randn(4, 3) b = np.random.randn(4, 1) Y = tf.add(tf.matmul(W, X), b) ### END CODE HERE ### # Create the session using tf.Session() and run it with sess.run(...) on the variable you want to calculate ### START CODE HERE ### sess = tf.Session() result = sess.run( Y ) ### END CODE HERE ### # close the session sess.close() return result print( "result = " + str(linear_function())) ``` *** Expected Output ***: <table> <tr> <td> **result** </td> <td> [[-2.15657382] [ 2.95891446] [-1.08926781] [-0.84538042]] </td> </tr> </table> ### 1.2 - Computing the sigmoid Great! You just implemented a linear function. 
Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input. You will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session. ** Exercise **: Implement the sigmoid function below. You should use the following: - `tf.placeholder(tf.float32, name = "...")` - `tf.sigmoid(...)` - `sess.run(..., feed_dict = {x: z})` Note that there are two typical ways to create and use sessions in tensorflow: **Method 1:** ```python sess = tf.Session() # Run the variables initialization (if needed), run the operations result = sess.run(..., feed_dict = {...}) sess.close() # Close the session ``` **Method 2:** ```python with tf.Session() as sess: # run the variables initialization (if needed), run the operations result = sess.run(..., feed_dict = {...}) # This takes care of closing the session for you :) ``` ``` # GRADED FUNCTION: sigmoid def sigmoid(z): """ Computes the sigmoid of z Arguments: z -- input value, scalar or vector Returns: results -- the sigmoid of z """ ### START CODE HERE ### ( approx. 4 lines of code) # Create a placeholder for x. Name it 'x'. x = tf.placeholder(tf.float32,name="x") # compute sigmoid(x) sigmoid = tf.sigmoid(x) # Create a session, and run it. Please use the method 2 explained above. # You should use a feed_dict to pass z's value to x. 
with tf.Session() as sess: # Run session and call the output "result" result = sess.run( sigmoid, feed_dict={x:z} ) ### END CODE HERE ### return result print ("sigmoid(0) = " + str(sigmoid(0))) print ("sigmoid(12) = " + str(sigmoid(12))) ``` *** Expected Output ***: <table> <tr> <td> **sigmoid(0)** </td> <td> 0.5 </td> </tr> <tr> <td> **sigmoid(12)** </td> <td> 0.999994 </td> </tr> </table> <font color='blue'> **To summarize, you how know how to**: 1. Create placeholders 2. Specify the computation graph corresponding to operations you want to compute 3. Create the session 4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values. ### 1.3 - Computing the Cost You can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m: $$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$ you can do it in one line of code in tensorflow! **Exercise**: Implement the cross entropy loss. The function you will use is: - `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)` Your code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes $$- \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log \sigma(z^{[2](i)}) + (1-y^{(i)})\log (1-\sigma(z^{[2](i)})\large )\small\tag{2}$$ ``` # GRADED FUNCTION: cost def cost(logits, labels): """     Computes the cost using the sigmoid cross entropy          Arguments:     logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)     labels -- vector of labels y (1 or 0) Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels" in the TensorFlow documentation. 
So logits will feed into z, and labels into y.          Returns:     cost -- runs the session of the cost (formula (2)) """ ### START CODE HERE ### # Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines) z = tf.placeholder(tf.float32, name="z") y = tf.placeholder(tf.float32, name="y") # Use the loss function (approx. 1 line) cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y) # Create a session (approx. 1 line). See method 1 above. sess = tf.Session() # Run the session (approx. 1 line). cost = sess.run(cost, feed_dict={ z:logits, y:labels }) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return cost logits = sigmoid(np.array([0.2,0.4,0.7,0.9])) cost = cost(logits, np.array([0,0,1,1])) print ("cost = " + str(cost)) ``` ** Expected Output** : <table> <tr> <td> **cost** </td> <td> [ 1.00538719 1.03664088 0.41385433 0.39956614] </td> </tr> </table> ### 1.4 - Using One Hot encodings Many times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows: <img src="images/onehot.png" style="width:600px;height:150px;"> This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code: - tf.one_hot(labels, depth, axis) **Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this. ``` # GRADED FUNCTION: one_hot_matrix def one_hot_matrix(labels, C): """ Creates a matrix where the i-th row corresponds to the ith class number and the jth column corresponds to the jth training example. So if example j had a label i. 
Then entry (i,j) will be 1. Arguments: labels -- vector containing the labels C -- number of classes, the depth of the one hot dimension Returns: one_hot -- one hot matrix """ ### START CODE HERE ### # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line) C = tf.constant( C, name='C' ) # Use tf.one_hot, be careful with the axis (approx. 1 line) one_hot_matrix = tf.one_hot( labels, C, axis=0 ) # Create the session (approx. 1 line) sess = tf.Session() # Run the session (approx. 1 line) one_hot = sess.run(one_hot_matrix) # Close the session (approx. 1 line). See method 1 above. sess.close() ### END CODE HERE ### return one_hot labels = np.array([1,2,3,0,2,1]) one_hot = one_hot_matrix(labels, C = 4) print ("one_hot = " + str(one_hot)) ``` **Expected Output**: <table> <tr> <td> **one_hot** </td> <td> [[ 0. 0. 0. 1. 0. 0.] [ 1. 0. 0. 0. 0. 1.] [ 0. 1. 0. 0. 1. 0.] [ 0. 0. 1. 0. 0. 0.]] </td> </tr> </table> ### 1.5 - Initialize with zeros and ones Now you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively. **Exercise:** Implement the function below to take in a shape and to return an array (of the shape's dimension of ones). - tf.ones(shape) ``` # GRADED FUNCTION: ones def ones(shape): """ Creates an array of ones of dimension shape Arguments: shape -- shape of the array you want to create Returns: ones -- array containing only ones """ ### START CODE HERE ### # Create "ones" tensor using tf.ones(...). (approx. 1 line) ones = tf.ones(shape) # Create the session (approx. 1 line) sess = tf.Session() # Run the session to compute 'ones' (approx. 1 line) ones = sess.run(ones) # Close the session (approx. 1 line). See method 1 above. 
sess.close() ### END CODE HERE ### return ones print ("ones = " + str(ones([3]))) ``` **Expected Output:** <table> <tr> <td> **ones** </td> <td> [ 1. 1. 1.] </td> </tr> </table> # 2 - Building your first neural network in tensorflow In this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model: - Create the computation graph - Run the graph Let's delve into the problem you'd like to solve! ### 2.0 - Problem statement: SIGNS Dataset One afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language. - **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number). - **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number). Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs. Here are examples for each number, and how an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolutoion to 64 by 64 pixels. <img src="images/hands.png" style="width:800px;height:350px;"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center> Run the following code to load the dataset. ``` # Loading the dataset X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() ``` Change the index below and run the cell to visualize some examples in the dataset. 
``` # Example of a picture index = 0 plt.imshow(X_train_orig[index]) print ("y = " + str(np.squeeze(Y_train_orig[:, index]))) ``` As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so. ``` # Flatten the training and test images X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T # Normalize image vectors X_train = X_train_flatten/255. X_test = X_test_flatten/255. # Convert training and test labels to one hot matrices Y_train = convert_to_one_hot(Y_train_orig, 6) Y_test = convert_to_one_hot(Y_test_orig, 6) print ("number of training examples = " + str(X_train.shape[1])) print ("number of test examples = " + str(X_test.shape[1])) print ("X_train shape: " + str(X_train.shape)) print ("Y_train shape: " + str(Y_train.shape)) print ("X_test shape: " + str(X_test.shape)) print ("Y_test shape: " + str(Y_test.shape)) ``` **Note** that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing. **Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one. **The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes. ### 2.1 - Create placeholders Your first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session. 
**Exercise:** Implement the function below to create the placeholders in tensorflow. ``` # GRADED FUNCTION: create_placeholders def create_placeholders(n_x, n_y): """ Creates the placeholders for the tensorflow session. Arguments: n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288) n_y -- scalar, number of classes (from 0 to 5, so -> 6) Returns: X -- placeholder for the data input, of shape [n_x, None] and dtype "float" Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float" Tips: - You will use None because it let's us be flexible on the number of examples you will for the placeholders. In fact, the number of examples during test/train is different. """ ### START CODE HERE ### (approx. 2 lines) X = tf.placeholder(tf.float32, [n_x, None], name="X") Y = tf.placeholder(tf.float32, [n_y, None], name="Y") ### END CODE HERE ### return X, Y X, Y = create_placeholders(12288, 6) print ("X = " + str(X)) print ("Y = " + str(Y)) ``` **Expected Output**: <table> <tr> <td> **X** </td> <td> Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1) </td> </tr> <tr> <td> **Y** </td> <td> Tensor("Placeholder_2:0", shape=(10, ?), dtype=float32) (not necessarily Placeholder_2) </td> </tr> </table> ### 2.2 - Initializing the parameters Your second task is to initialize the parameters in tensorflow. **Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use: ```python W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1)) b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer()) ``` Please use `seed = 1` to make sure your results match ours. 
``` # GRADED FUNCTION: initialize_parameters def initialize_parameters(): """ Initializes parameters to build a neural network with tensorflow. The shapes are: W1 : [25, 12288] b1 : [25, 1] W2 : [12, 25] b2 : [12, 1] W3 : [6, 12] b3 : [6, 1] Returns: parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3 """ tf.set_random_seed(1) # so that your "random" numbers match ours ### START CODE HERE ### (approx. 6 lines of code) W1 = tf.get_variable("W1", [25, 12288], initializer = tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable("b1", [25, 1], initializer = tf.zeros_initializer()) W2 = tf.get_variable("W2", [12, 25], initializer = tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable("b2", [12, 1], initializer = tf.zeros_initializer()) W3 = tf.get_variable("W3", [6, 12], initializer = tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable("b3", [6, 1], initializer = tf.zeros_initializer()) ### END CODE HERE ### parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2, "W3": W3, "b3": b3} return parameters tf.reset_default_graph() with tf.Session() as sess: parameters = initialize_parameters() print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ``` **Expected Output**: <table> <tr> <td> **W1** </td> <td> < tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref > </td> </tr> <tr> <td> **b1** </td> <td> < tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref > </td> </tr> <tr> <td> **W2** </td> <td> < tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref > </td> </tr> <tr> <td> **b2** </td> <td> < tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref > </td> </tr> </table> As expected, the parameters haven't been evaluated yet. ### 2.3 - Forward propagation in tensorflow You will now implement the forward propagation module in tensorflow. 
The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are: - `tf.add(...,...)` to do an addition - `tf.matmul(...,...)` to do a matrix multiplication - `tf.nn.relu(...)` to apply the ReLU activation **Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`! ``` # GRADED FUNCTION: forward_propagation def forward_propagation(X, parameters): """ Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X -- input dataset placeholder, of shape (input size, number of examples) parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3" the shapes are given in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit """ # Retrieve the parameters from the dictionary "parameters" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ### (approx. 
5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, X), b1) # Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3 ### END CODE HERE ### return Z3 tf.reset_default_graph() with tf.Session() as sess: X, Y = create_placeholders(12288, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) print("Z3 = " + str(Z3)) ``` **Expected Output**: <table> <tr> <td> **Z3** </td> <td> Tensor("Add_2:0", shape=(6, ?), dtype=float32) </td> </tr> </table> You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to brackpropagation. ### 2.4 Compute cost As seen before, it is very easy to compute the cost using: ```python tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...)) ``` **Question**: Implement the cost function below. - It is important to know that the "`logits`" and "`labels`" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you. - Besides, `tf.reduce_mean` basically does the summation over the examples. ``` # GRADED FUNCTION: compute_cost def compute_cost(Z3, Y): """ Computes the cost Arguments: Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples) Y -- "true" labels vector placeholder, same shape as Z3 Returns: cost - Tensor of the cost function """ # to fit the tensorflow requirement for tf.nn.softmax_cross_entropy_with_logits(...,...) 
logits = tf.transpose(Z3) labels = tf.transpose(Y) ### START CODE HERE ### (1 line of code) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)) ### END CODE HERE ### return cost tf.reset_default_graph() with tf.Session() as sess: X, Y = create_placeholders(12288, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) cost = compute_cost(Z3, Y) print("cost = " + str(cost)) ``` **Expected Output**: <table> <tr> <td> **cost** </td> <td> Tensor("Mean:0", shape=(), dtype=float32) </td> </tr> </table> ### 2.5 - Backward propagation & parameter updates This is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model. After you compute the cost function. You will create an "`optimizer`" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate. For instance, for gradient descent the optimizer would be: ```python optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost) ``` To make the optimization you would do: ```python _ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ``` This computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs. **Note** When coding, we often use `_` as a "throwaway" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable). ### 2.6 - Building the model Now, you will bring it all together! **Exercise:** Implement the model. You will be calling the functions you had previously implemented. 
``` def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001, num_epochs = 1500, minibatch_size = 32, print_cost = True): """ Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size = 12288, number of training examples = 1080) Y_train -- test set, of shape (output size = 6, number of training examples = 1080) X_test -- training set, of shape (input size = 12288, number of training examples = 120) Y_test -- test set, of shape (output size = 6, number of test examples = 120) learning_rate -- learning rate of the optimization num_epochs -- number of epochs of the optimization loop minibatch_size -- size of a minibatch print_cost -- True to print the cost every 100 epochs Returns: parameters -- parameters learnt by the model. They can then be used to predict. """ ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables tf.set_random_seed(1) # to keep consistent results seed = 3 # to keep consistent results (n_x, m) = X_train.shape # (n_x: input size, m : number of examples in the train set) n_y = Y_train.shape[0] # n_y : output size costs = [] # To keep track of the cost # Create Placeholders of shape (n_x, n_y) ### START CODE HERE ### (1 line) X, Y = create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize parameters ### START CODE HERE ### (1 line) parameters = initialize_parameters() ### END CODE HERE ### # Forward propagation: Build the forward propagation in the tensorflow graph ### START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ### # Cost function: Add cost function to tensorflow graph ### START CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. 
### START CODE HERE ### (1 line) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the variables init = tf.global_variables_initializer() # Start the session to compute the tensorflow graph with tf.Session() as sess: # Run the initialization sess.run(init) # Do the training loop for epoch in range(num_epochs): epoch_cost = 0. # Defines a cost related to an epoch num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs the graph on a minibatch. # Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y). ### START CODE HERE ### (1 line) _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost / num_minibatches # Print the cost every epoch if print_cost == True and epoch % 100 == 0: print ("Cost after epoch %i: %f" % (epoch, epoch_cost)) if print_cost == True and epoch % 5 == 0: costs.append(epoch_cost) # plot the cost plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations (per tens)') plt.title("Learning rate =" + str(learning_rate)) plt.show() # lets save the parameters in a variable parameters = sess.run(parameters) print ("Parameters have been trained!") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train})) print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test})) return parameters ``` Run the following cell to train your model! On our machine it takes about 5 minutes. 
Your "Cost after epoch 100" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes! ``` parameters = model(X_train, Y_train, X_test, Y_test) ``` **Expected Output**: <table> <tr> <td> **Train Accuracy** </td> <td> 0.999074 </td> </tr> <tr> <td> **Test Accuracy** </td> <td> 0.716667 </td> </tr> </table> Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy. **Insights**: - Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting. - Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters. ### 2.7 - Test with your own image (optional / ungraded exercise) Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the following code 4. Run the code and check if the algorithm is right! ``` import scipy from PIL import Image from scipy import ndimage ## START CODE HERE ## (PUT YOUR IMAGE NAME) my_image = "thumbs_up.jpg" ## END CODE HERE ## # We preprocess your image to fit your algorithm. 
fname = "images/" + my_image image = np.array(ndimage.imread(fname, flatten=False)) my_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T my_image_prediction = predict(my_image, parameters) plt.imshow(image) print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction))) ``` You indeed deserved a "thumbs-up" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any "thumbs-up", so the model doesn't know how to deal with it! We call that a "mismatched data distribution" and it is one of the various of the next course on "Structuring Machine Learning Projects". <font color='blue'> **What you should remember**: - Tensorflow is a programming framework used in deep learning - The two main object classes in tensorflow are Tensors and Operators. - When you code in tensorflow you have to take the following steps: - Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...) - Create a session - Initialize the session - Run the session to execute the graph - You can execute the graph multiple times as you've seen in model() - The backpropagation and optimization is automatically done when running the session on the "optimizer" object.
github_jupyter
# 1D Variability hypothesis testing for HBEC IFN experiment ``` import scanpy as sc import seaborn as sns import pandas as pd import matplotlib.pyplot as plt import numpy as np import scipy.stats as stats from pybedtools import BedTool import pickle as pkl %matplotlib inline import sys sys.path.append('/home/ssm-user/Github/scrna-parameter-estimation/dist/memento-0.0.6-py3.8.egg') sys.path.append('/home/ssm-user/Github/misc-seq/miscseq/') import encode import memento data_path = '/data_volume/memento/hbec/' ``` ### Read the processed RNA data Focus on the club and bc/club cells and type I interferons for now. Encode the timestamps to integers. ``` adata = sc.read(data_path + 'HBEC_type_I_filtered_counts_deep.h5ad') adata = adata[:, ~adata.var.index.str.startswith('MT-')].copy() # adata.obs['cell_type'] = adata.obs['cell_type'].apply(lambda x: x if x != 'basal/club' else 'bc') # adata.obs['cell_type'] = adata.obs['cell_type'].apply(lambda x: x if x != 'ionocyte/tuft' else 'ion-tuft') ``` ``` converter = {'basal/club':'BC', 'basal':'B', 'ciliated':'C', 'goblet':'G', 'ionocyte/tuft':'IT', 'neuroendo':'N'} adata.obs['ct'] = adata.obs['cell_type'].apply(lambda x: converter[x]) ``` ### Setup memento ``` def assign_q(batch): if batch == 0: return 0.387*0.25 elif batch == 1: return 0.392*0.25 elif batch == 2: return 0.436*0.25 else: return 0.417*0.25 adata.obs['q'] = adata.obs['batch'].apply(assign_q) memento.setup_memento(adata, q_column='q') ``` ### Run memento for each subset, comparing to control ``` cts = ['C', 'B', 'BC'] tps = ['3', '6', '9', '24', '48'] stims = ['alpha', 'beta', 'gamma', 'lambda'] import os done_files = os.listdir(data_path + 'binary_test_latest/') for ct in cts: for tp in tps: for stim in stims: fname = '{}_{}_{}.h5ad'.format('-'.join(ct), stim, tp) if fname in done_files: print('Skipping', fname) continue print('starting', ct, tp, stim) adata_stim = adata.copy()[ adata.obs.ct.isin([ct]) & \ adata.obs.stim.isin(['control', stim]) & \ 
adata.obs.time.isin(['0',tp]), :].copy() time_converter={0:0, int(tp):1} adata_stim.obs['time_step'] = adata_stim.obs['time'].astype(int).apply(lambda x: time_converter[x]) memento.create_groups(adata_stim, label_columns=['time_step', 'donor']) memento.compute_1d_moments(adata_stim, min_perc_group=.9) memento.ht_1d_moments( adata_stim, formula_like='1 + time_step + donor', treatment_col='time_step', num_boot=10000, verbose=1, num_cpus=93, resampling='permutation', approx=True) adata_stim.write(data_path + 'binary_test_latest/{}_{}_{}.h5ad'.format(ct, stim, tp)) ```
github_jupyter
``` %matplotlib notebook import numpy as np import matplotlib.pyplot as plt ``` # Utilizando un modelo pre-entrenado [`torchvision.models`](https://pytorch.org/vision/stable/models.html) ofrece una serie de modelos famosos de la literatura de *deep learning* Por defecto el modelo se carga con pesos aleatorios Si indicamos `pretrained=True` se descarga un modelo entrenado Se pueden escoger modelos para clasificar, localizar y segmentar ## Modelo para clasificar imágenes torchvision tiene una basta cantidad de modelos para clasificar incluyendo distintas versiones de VGG, ResNet, AlexNet, GoogLeNet, DenseNet, entre otros Cargaremos un modelo [resnet18](https://arxiv.org/pdf/1512.03385.pdf) [pre-entrenado](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.resnet18) en [ImageNet](http://image-net.org/) ``` from torchvision import models model = models.resnet18(pretrained=True, progress=True) model.eval() ``` Los modelos pre-entrenados esperan imágenes con - tres canales (RGB) - al menos 224x224 píxeles - píxeles entre 0 y 1 (float) - normalizadas con normalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ``` img from PIL import Image import torch from torchvision import transforms img = Image.open("img/dog.jpg") my_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]) # Las clases con probabilidad más alta son probs = torch.nn.Softmax(dim=1)(model.forward(my_transform(img).unsqueeze(0))) best = probs.argsort(descending=True) display(best[0, :10], probs[0, best[0, :10]]) ``` ¿A qué corresponde estas clases? 
Clases de ImageNet: https://gist.github.com/ageitgey/4e1342c10a71981d0b491e1b8227328b ## Modelo para detectar entidades en imágenes Adicional a los modelos de clasificación torchvision también tiene modelos para - Detectar entidades en una imagen: Faster RCNN - Hacer segmentación por instancia: Mask RCNN - Hacer segmentación semántica: FCC, DeepLab - Clasificación de video A continuación probaremos la [Faster RCNN](https://arxiv.org/abs/1506.01497) para hace detección Este modelo fue pre-entrenado en la base de datos [COCO](https://cocodataset.org/) El modelo retorna un diccionario con - 'boxes': Los bounding box de las entidades - 'labels': La etiqueta de la clase más probable de la entidad - 'score': La probabilidad de la etiqueta ``` model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True) model.eval() transform = transforms.ToTensor() img = Image.open("img/pelea.jpg") # No require normalización de color img_tensor = transform(img) result = model(img_tensor.unsqueeze(0))[0] def filter_results(result, threshold=0.9): mask = result['scores'] > threshold bbox = result['boxes'][mask].detach().cpu().numpy() lbls = result['labels'][mask].detach().cpu().numpy() return bbox, lbls from PIL import ImageFont, ImageDraw #fnt = ImageFont.truetype("arial.ttf", 20) label2name = {1: 'persona', 2: 'bicicleta', 3: 'auto', 4: 'moto', 8: 'camioneta', 18: 'perro'} def draw_rectangles(img, bbox, lbls): draw = ImageDraw.Draw(img) for k in range(len(bbox)): if lbls[k] in label2name.keys(): draw.rectangle(bbox[k], fill=None, outline='white', width=2) draw.text([int(d) for d in bbox[k][:2]], label2name[lbls[k]], fill='white') bbox, lbls = filter_results(result) img = Image.open("img/pelea.jpg") draw_rectangles(img, bbox, lbls) display(img) ``` # Transferencia de Aprendizaje A continuación usaremos la técnicas de transferencia de aprendizaje para aprender un clasificador de imágenes para un fragmento de la base de datos food 5k El objetivo es clasificar si la imagen corresponde a 
comida o no Guardamos las imagenes con la siguiente estructura de carpetas ``` !ls img/food5k/ !ls img/food5k/train !ls img/food5k/valid ``` Con esto podemos usar `torchvision.datasets.ImageFolder` para crear los dataset de forma muy sencilla Dado que usaremos un modelo preentrenado debemos transformar entregar las imágenes en tamaño 224x224 y con color normalizado Usaremos también aumentación de datos en el conjunto de entrenamiento ``` from torchvision import datasets train_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) valid_transforms = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) train_dataset = datasets.ImageFolder('img/food5k/train', transform=train_transforms) valid_dataset = datasets.ImageFolder('img/food5k/valid', transform=valid_transforms) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=256, shuffle=False) for image, label in train_loader: break fig, ax = plt.subplots(1, 6, figsize=(9, 2), tight_layout=True) for i in range(6): ax[i].imshow(image[i].permute(1,2,0).numpy()) ax[i].axis('off') ax[i].set_title(label[i].numpy()) ``` Usaremos el modelo ResNet18 ``` model = models.resnet18(pretrained=True, progress=True) # model = models.squeezenet1_1(pretrained=True, progress=True) display(model) ``` En este caso re-entrenaremos sólo la última capa: `fc` Las demás capas las congelaremos Para congelar una capa simplemente usamos `requires_grad=False` en sus parámetros Cuando llamemos `backward` no se calculará gradiente para estas capas ``` #Congelamos todos los parámetros for param in model.parameters(): param.requires_grad = False # La reemplazamos por una 
nueva capa de salida model.fc = torch.nn.Linear(model.fc.in_features , 2) # Para resnet #model.classifier = torch.nn.Sequential(torch.nn.Dropout(p=0.5, inplace=False), # torch.nn.Conv2d(512, 2, kernel_size=(1, 1), stride=(1, 1)), # torch.nn.ReLU(inplace=True), # torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))) # Para Squeezenet criterion = torch.nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) for epoch in range(10): for x, y in train_loader: optimizer.zero_grad() yhat = model.forward(x) loss = criterion(yhat, y) loss.backward() optimizer.step() epoch_loss = 0.0 for x, y in valid_loader: yhat = model.forward(x) loss = criterion(yhat, y) epoch_loss += loss.item() print(f"{epoch}, {epoch_loss:0.4f}, {torch.sum(yhat.argmax(dim=1) == y).item()/100}") targets, predictions = [], [] for mbdata, label in valid_loader: logits = model.forward(mbdata) predictions.append(logits.argmax(dim=1).detach().numpy()) targets.append(label.numpy()) predictions = np.concatenate(predictions) targets = np.concatenate(targets) from sklearn.metrics import confusion_matrix, classification_report cm = confusion_matrix(targets, predictions) display(cm) print(classification_report(targets, predictions)) ``` ¿Cómo se compara lo anterior a entrenar una arquitectura convolucional desde cero? A modo de ejemplo se adapta la arquitectura Lenet5 para aceptar imágenes a color de 224x224 ¿Cuánto desempeño se obtiene entrenando la misma cantidad de épocas? 
def forward(self, x): z = self.features(x) #print(z.shape) # Esto es de tamaño Mx32x6x6 z = z.view(-1, 32*6*6) # Esto es de tamaño Mx1152 return self.classifier(z)
los parámetros: Probar varios entrenamientos desde inicios aleatorios distintos - Si el modelo se sobreajusta pronto - Disminuir complejidad - Incorporar regularización: Aumentación de datos, decaimiento de pesos, Dropout - Si quiero aprovechar un modelo preentrenado - Transferencia de aprendizaje - [Zoológico de modelos](https://modelzoo.co/) - [Papers with code](https://paperswithcode.com/) Estrategia agil > Desarrolla rápido e itera: Empieza simple. Propón una solución, impleméntala, entrena y evalua. Analiza las fallas, modifica e intenta de nuevo Mucho exito en sus desarrollos futuros!
github_jupyter
In locan there are functions available to deal with file types according to the constant enum `FileType`:
The separator, e.g. a tab '\t', can be provided as *sep*.
github_jupyter
``` # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Vertex Pipelines: AutoML tabular regression pipelines using google-cloud-pipeline-components <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/notebooks/official/pipelines/google_cloud_pipeline_components_automl_tabular.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/notebooks/official/pipelines/google_cloud_pipeline_components_automl_tabular.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> <td> <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/notebooks/official/pipelines/google_cloud_pipeline_components_automl_tabular.ipynb"> Open in Google Cloud Notebooks </a> </td> </table> <br/><br/><br/> ## Overview This notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML tabular regression workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines). 
- Create an `Endpoint` resource. - Deploy the `Model` resource to the `Endpoint` resource.
``` ! pip3 install $USER_FLAG kfp google-cloud-pipeline-components --upgrade ```
When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com) 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`. ``` PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. - Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations) ``` REGION = "us-central1" # @param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. 
**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via OAuth.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME ``` ### Set up variables Next, set up some variables used throughout the tutorial. ### Import libraries and define constants ``` import google.cloud.aiplatform as aip ``` #### Vertex AI constants Setup up the following constants for Vertex AI: - `API_ENDPOINT`: The Vertex AI API service endpoint for `Dataset`, `Model`, `Job`, `Pipeline` and `Endpoint` services. ``` # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) ``` #### Vertex Pipelines constants Setup up the following constants for Vertex Pipelines: ``` PIPELINE_ROOT = "{}/pipeline_root/cal_housing".format(BUCKET_NAME) ``` Additional imports. ``` import kfp from google_cloud_pipeline_components import aiplatform as gcc_aip ``` ## Initialize Vertex SDK for Python Initialize the Vertex SDK for Python for your project and corresponding bucket. ``` aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME) ``` ## Define AutoML tabular regression model pipeline that uses components from `google_cloud_pipeline_components` Next, you define the pipeline. Create and deploy an AutoML tabular regression `Model` resource using a `Dataset` resource. ``` TRAIN_FILE_NAME = "california_housing_train.csv" ! 
gsutil cp gs://aju-dev-demos-codelabs/sample_data/california_housing_train.csv {PIPELINE_ROOT}/data/ gcs_csv_path = f"{PIPELINE_ROOT}/data/{TRAIN_FILE_NAME}" @kfp.dsl.pipeline(name="automl-tab-training-v2") def pipeline(project: str = PROJECT_ID): dataset_create_op = gcc_aip.TabularDatasetCreateOp( project=project, display_name="housing", gcs_source=gcs_csv_path ) training_op = gcc_aip.AutoMLTabularTrainingJobRunOp( project=project, display_name="train-automl-cal_housing", optimization_prediction_type="regression", optimization_objective="minimize-rmse", column_transformations=[ {"numeric": {"column_name": "longitude"}}, {"numeric": {"column_name": "latitude"}}, {"numeric": {"column_name": "housing_median_age"}}, {"numeric": {"column_name": "total_rooms"}}, {"numeric": {"column_name": "total_bedrooms"}}, {"numeric": {"column_name": "population"}}, {"numeric": {"column_name": "households"}}, {"numeric": {"column_name": "median_income"}}, {"numeric": {"column_name": "median_house_value"}}, ], dataset=dataset_create_op.outputs["dataset"], target_column="median_house_value", ) deploy_op = gcc_aip.ModelDeployOp( # noqa: F841 model=training_op.outputs["model"], project=project, machine_type="n1-standard-4", ) ``` ## Compile the pipeline Next, compile the pipeline. ``` from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="tabular regression_pipeline.json".replace(" ", "_"), ) ``` ## Run the pipeline Next, run the pipeline. ``` DISPLAY_NAME = "cal_housing_" + TIMESTAMP job = aip.PipelineJob( display_name=DISPLAY_NAME, template_path="tabular regression_pipeline.json".replace(" ", "_"), pipeline_root=PIPELINE_ROOT, ) job.run() ``` Click on the generated link to see your run in the Cloud Console. 
<!-- It should look something like this as it is running: <a href="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" target="_blank"><img src="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" width="40%"/></a> --> In the UI, many of the pipeline DAG nodes will expand or collapse when you click on them. Here is a partially-expanded view of the DAG (click image to see larger version). <a href="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" target="_blank"><img src="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" width="40%"/></a> # Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial -- *Note:* this is auto-generated and not all resources may be applicable for this tutorial: - Dataset - Pipeline - Model - Endpoint - Batch Job - Custom Job - Hyperparameter Tuning Job - Cloud Storage Bucket ``` delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True try: if delete_model and "DISPLAY_NAME" in globals(): models = aip.Model.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) model = models[0] aip.Model.delete(model) print("Deleted model:", model) except Exception as e: print(e) try: if delete_endpoint and "DISPLAY_NAME" in globals(): endpoints = aip.Endpoint.list( filter=f"display_name={DISPLAY_NAME}_endpoint", order_by="create_time" ) endpoint = endpoints[0] endpoint.undeploy_all() aip.Endpoint.delete(endpoint.resource_name) print("Deleted endpoint:", endpoint) except Exception as e: print(e) if delete_dataset and "DISPLAY_NAME" in globals(): if "tabular" == 
"tabular": try: datasets = aip.TabularDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.TabularDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "image": try: datasets = aip.ImageDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.ImageDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "text": try: datasets = aip.TextDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.TextDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "video": try: datasets = aip.VideoDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.VideoDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) try: if delete_pipeline and "DISPLAY_NAME" in globals(): pipelines = aip.PipelineJob.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) pipeline = pipelines[0] aip.PipelineJob.delete(pipeline.resource_name) print("Deleted pipeline:", pipeline) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME ```
github_jupyter
``` import azureml from azureml.core import Workspace, Experiment, Datastore, Environment from azureml.core.runconfig import RunConfiguration from azureml.data.datapath import DataPath, DataPathComputeBinding from azureml.data.data_reference import DataReference from azureml.core.compute import ComputeTarget, AmlCompute from azureml.core.compute_target import ComputeTargetException from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter from azureml.pipeline.steps import PythonScriptStep, EstimatorStep from azureml.widgets import RunDetails from azureml.train.estimator import Estimator import os print("Azure ML SDK Version: ", azureml.core.VERSION) ``` # Setup Variables ``` os.environ['STORAGE_ACCOUNT_KEY'] = 'YourAccountKeyHere' datastorename='seerdata' datastorepath='hardware' containername='seer-container' storageaccountname='aiml50setupstorage' storageaccountkey=os.environ.get('STORAGE_ACCOUNT_KEY') computetarget='twtcluster' ``` # Register/Reference a Datastore ``` # workspace ws = Workspace.from_config( path='./azureml-config.json') print(ws.datastores) # See if that datastore already exists and unregister it if so try: datastore = ws.datastores[datastorename] print ('Unregistering existing datastore') datastore.unregister() except: print ('Data store doesn\'t exist, no need to remove') finally: # register the datastore datastore = Datastore.register_azure_blob_container(workspace=ws, datastore_name=datastorename, container_name=containername, account_name=storageaccountname, account_key=storageaccountkey, create_if_not_exists=True) print('Datastore registered: ', datastore) # data datastore = ws.datastores['seerdata'] datareference = DataReference(datastore=datastore, data_reference_name="seerdata", path_on_datastore=datastorepath) ``` # Create Compute Resources ``` try: cpu_cluster = ComputeTarget(workspace=ws, name=computetarget) print('Found existing cluster, use it.') except ComputeTargetException: compute_config = 
AmlCompute.provisioning_configuration( vm_size='STANDARD_NC6', min_nodes=1, max_nodes=4) cpu_cluster = ComputeTarget.create(ws, computetarget, compute_config) cpu_cluster.wait_for_completion(show_output=True) compute = ws.compute_targets[computetarget] print('Compute registered: ', compute) ``` # Define Pipeline! The following will be created and then run: 1. Pipeline Parameters 2. Data Process Step 3. Training Step 4. Model Registration Step 5. Pipeline registration 6. Submit the pipeline for execution ## Pipeline Parameters We need to tell the Pipeline what it needs to learn to see! ``` datapath = DataPath(datastore=datastore, path_on_datastore=datastorepath) data_path_pipeline_param = (PipelineParameter(name="data", default_value=datapath), DataPathComputeBinding(mode='mount')) print(data_path_pipeline_param) # Configuration for data prep and training steps dataprepEnvironment = Environment.from_pip_requirements('dataprepenv', 'requirements-dataprepandtraining.txt') dataprepRunConfig = RunConfiguration() dataprepRunConfig.environment = dataprepEnvironment ``` ## Data Process Step ``` seer_tfrecords = PipelineData( "tfrecords_set", datastore=datastore, is_directory=True ) prepStep = PythonScriptStep( 'parse.py', source_directory='.', name='Data Preparation', compute_target=compute, arguments=["--source_path", data_path_pipeline_param, "--target_path", seer_tfrecords], runconfig=dataprepRunConfig, inputs=[data_path_pipeline_param], outputs=[seer_tfrecords] ) print(prepStep) ``` ## Training Step ``` seer_training = PipelineData( "train", datastore=datastore, is_directory=True ) train = Estimator(source_directory='.', compute_target=compute, entry_script='train.py', pip_requirements_file='requirements-dataprepandtraining.txt') trainStep = EstimatorStep( name='Model Training', estimator=train, estimator_entry_script_arguments=["--source_path", seer_tfrecords, "--target_path", seer_training, "--epochs", 5, "--batch", 10, "--lr", 0.001], inputs=[seer_tfrecords], 
outputs=[seer_training], compute_target=compute ) print(trainStep) ``` # Register Model Step ``` registerEnvironment = Environment.from_pip_requirements('registerenv', 'requirements-registration.txt') registerRunConfig = RunConfiguration() registerRunConfig.environment = registerEnvironment seer_model = PipelineData( "model", datastore=datastore, is_directory=True ) registerStep = PythonScriptStep( 'register.py', source_directory='.', name='Model Registration', arguments=["--source_path", seer_training, "--target_path", seer_model], inputs=[seer_training], outputs=[seer_model], compute_target=compute, runconfig=registerRunConfig ) print(registerStep) ``` ## Create and publish the Pipeline ``` pipeline = Pipeline(workspace=ws, steps=[prepStep, trainStep, registerStep]) published_pipeline = pipeline.publish( name="Seer Pipeline", description="Transfer learned image classifier. Uses folders as labels.") # Submit the pipeline to be run pipeline_run = Experiment(ws, 'seer',).submit(published_pipeline) print('Run created with ID: ', pipeline_run.id) RunDetails(pipeline_run).show() ```
github_jupyter
# 6.7 门控循环单元(GRU) ## 6.7.2 读取数据集 ``` import numpy as np import torch from torch import nn, optim import torch.nn.functional as F import sys sys.path.append("..") import d2lzh_pytorch as d2l device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') (corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_jay_lyrics() print(torch.__version__, device) ``` ## 6.7.3 从零开始实现 ### 6.7.3.1 初始化模型参数 ``` num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size print('will use', device) def get_params(): def _one(shape): ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32) return torch.nn.Parameter(ts, requires_grad=True) def _three(): return (_one((num_inputs, num_hiddens)), _one((num_hiddens, num_hiddens)), torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32), requires_grad=True)) W_xz, W_hz, b_z = _three() # 更新门参数 W_xr, W_hr, b_r = _three() # 重置门参数 W_xh, W_hh, b_h = _three() # 候选隐藏状态参数 # 输出层参数 W_hq = _one((num_hiddens, num_outputs)) b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True) return nn.ParameterList([W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]) ``` ### 6.7.3.2 定义模型 ``` def init_gru_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: Z = torch.sigmoid(torch.matmul(X, W_xz) + torch.matmul(H, W_hz) + b_z) R = torch.sigmoid(torch.matmul(X, W_xr) + torch.matmul(H, W_hr) + b_r) H_tilda = torch.tanh(torch.matmul(X, W_xh) + R * torch.matmul(H, W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = torch.matmul(H, W_hq) + b_q outputs.append(Y) return outputs, (H,) ``` ### 6.7.3.3 训练模型并创作歌词 ``` num_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e2, 1e-2 pred_period, pred_len, prefixes = 40, 50, ['分开', 
'不分开'] d2l.train_and_predict_rnn(gru, get_params, init_gru_state, num_hiddens, vocab_size, device, corpus_indices, idx_to_char, char_to_idx, False, num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes) ``` ## 6.7.4 简洁实现 ``` lr = 1e-2 gru_layer = nn.GRU(input_size=vocab_size, hidden_size=num_hiddens) model = d2l.RNNModel(gru_layer, vocab_size).to(device) d2l.train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device, corpus_indices, idx_to_char, char_to_idx, num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes) ```
github_jupyter
A Jupyter Notebook is an interactive environment for writing and running code.
``` 2 + 2 # If we want to write text in a code cell we have to comment it out with '#'. # Next we assign some variables. a = 2 b = 2 c = a + b print("a is", a) print("b is", b) print("a + b =", c) ``` # Displaying an Image ``` # import packages from tifffile import imread from matplotlib import pyplot as plt img = imread('imgs/t000.tif') plt.figure(figsize=(10, 10)) plt.imshow(img, cmap='gray') ``` # Python Basics ## If Statement The if statement allows us to branch code and act on different conditions. The statement has the following logic: ``` if condition_0: code_0 elif condition_1: code_1 else: code_2 ``` `code_0` is executed if `condition_0` holds true. If `condition_0` is false `condition_1` is evaluated and `code_1` is executed if `condition_1` is true. If both conditions evaluate to false `code_2` is executed. __Note:__ `elif` and `else` are optional. ``` # Assign value to number number = 3 # Test if the number is negative, zero or positive if number < 0: print("{} is a negative number.".format(number)) elif number == 0: print("The number is zero.") else: print("{} is a positive number.".format(number)) # The following code is outside of the if statement and always executed. print("Done") ``` ## Functions In Python we can define functions which can be reused in our code. It is good practice to define a function if we want to reuse the same code multiple times! ``` def categorize_number(number): """ Prints to standard output if the number is negative, zero or positive. Parameter: ---------- number: The number to categorize. """ if number < 0: print("{} is a negative number.".format(number)) elif number == 0: print("The number is zero.") else: print("{} is a positive number.".format(number)) categorize_number(number=-2) ``` ## Lists In Python we can easily define a list. ``` numbers = [-1, 0, 1, 2] type(numbers) ``` ## For Loop If we want to apply some code (e.g. a function) to all elements of a list we can use a for loop. 
``` for number in numbers: print("Currently processing number = {}.".format(number)) categorize_number(number) ``` ## Range A typical use case is that we want to get all numbers from 0 up to a given number e.g. 100. Luckily we don't have to type all 100 numbers into a list to iterate over them. We can just use the `range`-function which is part of Python. ``` for i in range(100): categorize_number(number=i) import this ```
github_jupyter
# Check Cell Population Heterogeneity ## Libraries ``` import MySQLdb import pandas import numpy as np from matplotlib import pylab as plt import os import seaborn as sns from scipy.stats import mannwhitneyu as mw from scipy import stats import operator from sklearn.preprocessing import StandardScaler,RobustScaler from sklearn.decomposition import PCA from scipy import stats import operator ``` ## Routine Functions ``` def ensure_dir(file_path): ''' Ensure the directory component of file_path exists, creating it if missing. :param file_path: path whose parent directory should exist :return: None ''' directory = os.path.dirname(file_path) #dirname is '' for a bare filename; os.makedirs('') would raise, so guard on a non-empty directory if directory and not os.path.exists(directory): os.makedirs(directory) # Effect size def cohen_d(x, y): nx = len(x) ny = len(y) dof = nx + ny - 2 return (np.mean(x) - np.mean(y)) / np.sqrt( ((nx - 1) * np.std(x, ddof=1) ** 2 + (ny - 1) * np.std(y, ddof=1) ** 2) / dof) # Some Easy Outlier detection def reject_outliers_2(data, m=6.): d = np.abs(data - np.median(data)) mdev = np.median(d) s = d / (mdev if mdev else 1.) #return s < m return [data[i] for i in range(0, len(data)) if s[i] < m] ``` ## Load list of significant perturbations - Load all significant perturbations - Load drug decay - Load list of images that are excluded - Load list of features to investigate ### Significant perturbations ``` #Save significant perturbations significant_perturbations = [] #open the file indicating which drug perturbations are significant in a matter of mahalanobis distance to DMSO fp = open('../data/Investigate_CellularHeterogeneity/Single_Perturbation_Significance.csv') fp.next() #go through whole file for line in fp: #split row tmp = line.strip().split(',') #check if mahalanobis distance larger than 7 try: batch1_significance = float(tmp[1]) batch2_significance = float(tmp[3]) if batch1_significance > 7: significant_perturbations.append((tmp[0]+'_Batch1',batch1_significance)) if batch2_significance > 7: significant_perturbations.append((tmp[0]+'_Batch2',batch2_significance)) except: continue #sort all perturbations 
and take the top 10 significant_perturbations.sort(key = operator.itemgetter(1), reverse = True) significant_perturbations = significant_perturbations[0:10] print significant_perturbations ``` ### Drug Decay ``` # Both thresholds need to be true to set a drug as decayed during experiment; threshold_decay is steepness and threshold_MaxDifference absolute difference threshold_decay = 0.05 threshold_MaxDifference = 0.3 # Load all the drug decay regressions # Created by checking the single drug responses over the different plates (there is a temporal context between plate 1 and 123) # One is interested both in the decay as well as the maximum change e.g. if gradient between 0.1 to 0.2, still ok # Create a dic that tells about the status of drug decay i.e. True if drug WORKED CORRECTLY path = '../data/Investigate_CellularHeterogeneity/DrugDecay_Combined.csv' fp = open(path) fp.next() drug_decay = {} batch1_Failed = 0 batch2_Failed = 0 for line in fp: tmp = line.strip().split(',') batch1_decay = float(tmp[1]) batch1_diff = float(tmp[2]) batch2_decay = float(tmp[3]) batch2_diff = float(tmp[4]) batch1_Status = True if batch1_decay >= threshold_decay and batch1_diff >= threshold_MaxDifference: batch1_Status = False batch1_Failed += 1 batch2_Status = True if batch2_decay >= threshold_decay and batch2_diff >= threshold_MaxDifference: batch2_Status = False batch2_Failed += 1 drug_decay[tmp[0]] = {'Batch1':batch1_Status,'Batch2':batch2_Status} fp.close() print 'Number of drugs that decayed in batch1: %d' %batch1_Failed print 'Number of drugs that decayed in batch2: %d' %batch2_Failed ``` ### Load selected features ``` selected_Features = [] fp = open('../data/Investigate_CellularHeterogeneity/Selected_Features.csv') for line in fp: selected_Features.append(line.strip()[7:]) print 'Number of features: %d' %len(selected_Features) ``` ### Load Problematic Images ``` problematic_images = {'Batch1':[],'Batch2':[]} batches = ['1','2'] for batch_ in batches: fp = 
open('../data/Investigate_CellularHeterogeneity/BadImages/Batch'+batch_+'.csv','r') for line in fp: tmp = line.strip().split(',') problematic_images['Batch'+batch_].append(tmp[0]) ``` ## Actual Analysis ### Load corresponding images ``` # establish link db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) ########### # DRUGS # ########### #this will contain all the image numbers that are associated with a specific drug (only singles!) Image_Number_For_Drugs = {} #go through the list of all significant perturbers for entry in significant_perturbations: drug,batch_ = entry[0].split('_') batch_ = batch_[5] # check if the drug is not decayed if drug_decay[drug]['Batch'+batch_] == True: #SQL string string = 'select ImageNumber,Image_Metadata_Plate from DPN1018Batch'+batch_+'Per_Image where Image_Metadata_ID_A like "'+drug+'" and Image_Metadata_ID_B like "DMSO";' #Extract data via pandas ImageNumbers = pandas.read_sql(string, con=db) #go through all rows for line in ImageNumbers.iterrows(): #extract ImageNumber and PlateNumber Drug_ImageNumber = line[1][0] Drug_PlateNumber = line[1][1] #add to dictionary if entry[0] not in Image_Number_For_Drugs: Image_Number_For_Drugs[entry[0]] = {Drug_PlateNumber:[Drug_ImageNumber]} elif Drug_PlateNumber not in Image_Number_For_Drugs[entry[0]]: Image_Number_For_Drugs[entry[0]][Drug_PlateNumber] = [Drug_ImageNumber] else: Image_Number_For_Drugs[entry[0]][Drug_PlateNumber].append(Drug_ImageNumber) ########### # DMSO # ########### # this will contain imagenumbers for DMSO Image_Number_For_DMSO = {} for batch_ in ['1','2']: #SQL string string = 'select ImageNumber,Image_Metadata_Plate from DPN1018Batch'+batch_+'Per_Image where Image_Metadata_ID_A like "DMSO" and Image_Metadata_ID_B like "None";' #Extract data via pandas ImageNumbers = pandas.read_sql(string, con=db) #go through all rows for line in ImageNumbers.iterrows(): #extract ImageNumber and PlateNumber Drug_ImageNumber = line[1][0] Drug_PlateNumber = 
line[1][1] #add to dictionary if batch_ not in Image_Number_For_DMSO: Image_Number_For_DMSO[batch_] = {Drug_PlateNumber:[Drug_ImageNumber]} elif Drug_PlateNumber not in Image_Number_For_DMSO[batch_]: Image_Number_For_DMSO[batch_][Drug_PlateNumber] = [Drug_ImageNumber] else: Image_Number_For_DMSO[batch_][Drug_PlateNumber].append(Drug_ImageNumber) db.close() ``` ### Defintions - drug colors - feature colors ``` # define color code for individual significant drugs (static) drug_colors = {'CLOUD031':'#8dd3c7','CLOUD053':'#ffffb3','CLOUD057':'#bebada','CLOUD089':'#fb8072','CLOUD112':'#80b1d3','CLOUD117':'#fdb462','CLOUD077':'#b3de69','CLOUD103':'#fccde5', 'CLOUD115':'#c51b8a','CLOUD129':'#bc80bd','DMSO':'grey'} feature_colors = {'AreaShape':'#D53D48', #red 'Intensity':'#BDCA27', 'RadialDistribution':'#BDCA27', #green 'Other':'grey', #grey 'Texture':'#F8B301', #orange 'Granularity':'#3AB9D1'} #blue #create the string for selecting all features selected_feature_string = ','.join(selected_Features) ## EXTRACT DMSO #### # Establish connections db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) #define plate and batch plate = 1315101 batch_ = '2' # create SQL string images_dmso = Image_Number_For_DMSO[batch_][plate] imageNumberString_dmso = ','.join([str(x) for x in images_dmso]) string = 'select ImageNumber,ObjectNumber,'+selected_feature_string+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_dmso+');' # Extract only selected features (all DMSO cells) DMSO_allFeatures = pandas.read_sql(string, con=db) DMSO_allFeatures['Label'] = 'DMSO' DMSO_allFeatures = DMSO_allFeatures.dropna() db.close() DMSO_allFeatures.head() ## EXTRACT Drugs #### # Establish connections db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) # Get all drugs for a choosen plate images_drugs = [] image_to_drug = {} for key in Image_Number_For_Drugs: for current_plate in Image_Number_For_Drugs[key]: if 
current_plate == plate: images_drugs.extend(Image_Number_For_Drugs[key][current_plate]) for img in Image_Number_For_Drugs[key][current_plate]: image_to_drug[img] = key.split('_')[0] # Create SQL string imageNumberString_drug = ','.join([str(x) for x in images_drugs]) string = 'select ImageNumber,ObjectNumber,'+selected_feature_string+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_drug+');' # Extract only selected features (all DMSO cells) Drug_allFeatures = pandas.read_sql(string, con=db) Drug_allFeatures['Label'] = 'Drug' for key in image_to_drug: Drug_allFeatures.loc[Drug_allFeatures['ImageNumber'] == key,['Label']] = image_to_drug[key] Drug_allFeatures = Drug_allFeatures.dropna() db.close() Drug_allFeatures.head() ``` #### Perform Scaling pooled scaling ``` DMSO_and_Drugs = pandas.concat([DMSO_allFeatures,Drug_allFeatures]) DMSO_and_Drugs_allFeatures_scaled = DMSO_and_Drugs.copy() #scaler = RobustScaler() scaler = StandardScaler() DMSO_and_Drugs_allFeatures_scaled[selected_Features] = scaler.fit_transform(DMSO_and_Drugs[selected_Features]) DMSO_and_Drugs_allFeatures_scaled.head() ``` ### Plot results for DMSO and selected drugs (Distributions) ``` sns.set_style("whitegrid", {'axes.grid' : False}) make_plots = True #check that folder exists ensure_dir('../results/Investigate_CellularHeterogeneity/Penetrance_PooledScaled/DMSO/') #sns.set() is_normal = 0 #go through all selected features for f in selected_Features: #extract DMSO values for this specific feature feature_values = DMSO_and_Drugs_allFeatures_scaled.loc[DMSO_and_Drugs_allFeatures_scaled['Label'] == 'DMSO'][f].values #Test for normality are essentiality useless (for small datasets chance of not enough power, while for large dataset everything gets rejected as non normal) pvals = [] for i in range(0,1000): pval_normal = stats.normaltest(np.random.choice(feature_values,50))[1] pvals.append(pval_normal) #pval_normal2 = stats.shapiro(feature_values)[1] if np.mean(pvals) >= 
0.05: is_normal += 1 if make_plots: plt.hist(feature_values,bins=100, color='grey',density=True) plt.title(f + 'Normal: %.2f' % np.mean(pvals)) plt.savefig('../results/Investigate_CellularHeterogeneity/Penetrance_PooledScaled/DMSO/'+f+'.pdf') plt.close() print len(selected_Features) print is_normal ensure_dir('../results/Investigate_CellularHeterogeneity/Penetrance_PooledScaled/Drugs/') # Find drugs name all_drugs = list(set(image_to_drug.values())) #go through all selected features for f in selected_Features: #extract the DMSO values feature_values_DMSO = DMSO_and_Drugs_allFeatures_scaled.loc[DMSO_and_Drugs_allFeatures_scaled['Label'] == 'DMSO'][f].values for drug in all_drugs: #extract drug values feature_values = DMSO_and_Drugs_allFeatures_scaled.loc[DMSO_and_Drugs_allFeatures_scaled['Label'] == drug][f].values #overlay the two distributions plt.hist(feature_values_DMSO,bins='doane', color='grey', alpha=0.5, density=True) plt.hist(feature_values,bins='doane', color=drug_colors[drug], alpha=0.5, density=True) plt.savefig('../results/Investigate_CellularHeterogeneity/Penetrance_PooledScaled/Drugs/'+f+'_'+drug+'.pdf') plt.close() #colors for features feature_type_colors = [] compartment_type_colors = [] # contains KS results feature_results = [] # contains percentile results feature_results_effect = [] for f in selected_Features: compartment,featuretype,_ = f.split('_')[0:3] if featuretype in feature_colors.keys(): feature_type_colors.append(feature_colors[featuretype]) else: feature_type_colors.append(feature_colors['Other']) if compartment == 'Cells': compartment_type_colors.append('#a6611a') else: compartment_type_colors.append('#018571') #Get DMSO values for specific feature feature_values_DMSO = DMSO_and_Drugs_allFeatures_scaled.loc[DMSO_and_Drugs_allFeatures_scaled['Label'] == 'DMSO'][f].values #Define the top5 , top95 percentiles low_5 = np.percentile(feature_values_DMSO,5) top_95 = np.percentile(feature_values_DMSO,95) #temporary results (each row contains 
one feature - all drugs) tmp = [] tmp2 = [] #go through all drugs for drug in all_drugs: # Get Drug values for specific feature feature_values_drug = DMSO_and_Drugs_allFeatures_scaled.loc[DMSO_and_Drugs_allFeatures_scaled['Label'] == drug][f].values #Number of significant cells tmp2.append(len([x for x in feature_values_drug if x < low_5 or x > top_95])/float(len(feature_values_drug))) #Compare curves tmp.append(stats.ks_2samp(feature_values_drug,feature_values_DMSO)[0]) #add results to overall results lists feature_results.append(tmp) feature_results_effect.append(tmp2) #sns.set() sns.clustermap(data=feature_results, xticklabels=all_drugs,yticklabels=selected_Features, row_colors=[feature_type_colors,compartment_type_colors]) #sns.set(font_scale=0.5) plt.savefig('../results/Investigate_CellularHeterogeneity/Penetrance_PooledScaled/Clustermap_KS_Test.pdf') plt.close() #sns.set() sns.clustermap(data=feature_results_effect, xticklabels=all_drugs,yticklabels=selected_Features, row_colors=[feature_type_colors,compartment_type_colors]) sns.set(font_scale=5.5) plt.savefig('../results/Investigate_CellularHeterogeneity/Penetrance_PooledScaled/Clustermap_Percentiles.pdf') plt.close() sns.set() plt.scatter(feature_results,feature_results_effect) plt.plot([0,1],[0,1],ls='--',c='grey') plt.xlabel('Penetrance') plt.ylabel('Effect') plt.savefig('../results/Investigate_CellularHeterogeneity/Penetrance_PooledScaled/Penetrance_vs_Effect.pdf') plt.close() ``` ### Make PCA (all features) ``` db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) plate = 1315101 batch_ = '2' images_dmso = Image_Number_For_DMSO[batch_][plate] imageNumberString_dmso = ','.join([str(x) for x in images_dmso]) string = 'select * from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_dmso+');' DMSO_allFeatures = pandas.read_sql(string, con=db) DMSO_allFeatures['Label'] = 'DMSO' for entry in list(Image_Number_For_Drugs.keys()): print entry drug,batch_ = 
entry.split('_') batch_ = batch_[5] images_drug = Image_Number_For_Drugs[entry][plate] imageNumberString_drug = ','.join([str(x) for x in images_drug]) string = 'select * from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_drug+');' drug_allFeatures = pandas.read_sql(string, con=db) drug_allFeatures['Label'] = 'Drug' # Put both dataframes together DMSO_drug_allFeatures = pandas.concat([drug_allFeatures,DMSO_allFeatures]) to_remove = [x for x in DMSO_drug_allFeatures.columns if 'Location' in x or 'Center' in x] DMSO_drug_allFeatures = DMSO_drug_allFeatures.drop(to_remove, axis=1) DMSO_drug_allFeatures = DMSO_drug_allFeatures.dropna() y = DMSO_drug_allFeatures['Label'].values x = DMSO_drug_allFeatures.iloc[:,3:-1].values # Standardizing the features x = StandardScaler().fit_transform(x) pca = PCA(n_components=2) Drug_DMSO_Fit = pca.fit_transform(x) pca_drug = [] pca_DMSO = [] for label,element in zip(y,list(Drug_DMSO_Fit)): if label == 'Drug': pca_drug.append(element) else: pca_DMSO.append(element) pca_drug = np.array(pca_drug) pca_DMSO = np.array(pca_DMSO) ensure_dir('../results/Investigate_CellularHeterogeneity/'+drug+'/') #plt.scatter(pca_drug[:,0],pca_drug[:,1], alpha=0.4) #plt.scatter(pca_DMSO[:,0],pca_DMSO[:,1], alpha=0.4) #plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/Scatter_AllFeatures.pdf') #plt.show() #plt.close() upper = 99.5 lower = 0.5 x_min = min([np.percentile(pca_drug[:,0],lower),np.percentile(pca_DMSO[:,0],lower)]) x_max = max([np.percentile(pca_drug[:,0],upper),np.percentile(pca_DMSO[:,0],upper)]) y_min = min([np.percentile(pca_drug[:,1],lower),np.percentile(pca_DMSO[:,1],lower)]) y_max = max([np.percentile(pca_drug[:,1],upper),np.percentile(pca_DMSO[:,1],upper)]) #bw = 1.5 sns.kdeplot(pca_drug[:,0],pca_drug[:,1],shade_lowest=False, alpha=0.5) sns.kdeplot(pca_DMSO[:,0],pca_DMSO[:,1],shade_lowest=False, alpha=0.5) plt.xlim([x_min,x_max]) plt.ylim([y_min,y_max]) 
plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/ContourPlot_AllFeatures.pdf') plt.close() sns.jointplot(pca_drug[:,0],pca_drug[:,1], kind='kde', bw = 'scott', color=drug_colors[drug], shade_lowest=False, alpha=0.5, xlim=[x_min,x_max], ylim=[y_min,y_max]) plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/JoinPlot_Drug_AllFaetures.pdf') plt.close() sns.jointplot(pca_DMSO[:,0],pca_DMSO[:,1], kind='kde', bw = 'scott', color="#D4D4D4", shade_lowest=False,alpha=0.5, xlim=[x_min,x_max], ylim=[y_min,y_max]) plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/JoinPlot_DMSO_AllFaetures.pdf') plt.close() ``` ### Make Violin plot selected features ``` db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) #features = ['Cells_Intensity_StdIntensity_MitoTracker','Cells_Granularity_1_BetaTubulin','Nuclei_AreaShape_MaximumRadius','Cells_AreaShape_MaxFeretDiameter'] features = selected_Features plate = 1315101 #batch_ = 2 drug_feature_results_to_plot = {} for entry in Image_Number_For_Drugs: drug,batch_ = entry.split('_') batch_ = batch_[5] drug_feature_results_to_plot[entry] = {} print drug images_drug = Image_Number_For_Drugs[entry][plate] imageNumberString_drug = ','.join([str(x) for x in images_drug]) images_dmso = Image_Number_For_DMSO[batch_][plate] imageNumberString_dmso = ','.join([str(x) for x in images_dmso]) for feature in features: ensure_dir('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/') string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_drug+');' result_drug = list(pandas.read_sql(string, con=db)[feature].values) result_drug = reject_outliers_2([x for x in result_drug if str(x) != 'nan'],6) string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_dmso+');' result_dmso = list(pandas.read_sql(string, 
con=db)[feature].values) result_dmso = reject_outliers_2([x for x in result_dmso if str(x) != 'nan'],6) drug_feature_results_to_plot[entry][feature] = {'Drug':result_drug, 'DMSO':result_dmso} db.close() #drug_colors = {'CLOUD031':'#8dd3c7','CLOUD053':'#ffffb3','CLOUD057':'#bebada','CLOUD089':'#fb8072','CLOUD112':'#80b1d3','CLOUD117':'#fdb462','CLOUD077':'#b3de69','CLOUD103':'#fccde5', # 'CLOUD115':'#d9d9d9','CLOUD129':'#bc80bd','DMSO':'grey',} for feature in features: data = [] drug_names = [] for entry in list(Image_Number_For_Drugs.keys()): drug,batch_ = entry.split('_') drug_names.append((drug,np.median(drug_feature_results_to_plot[entry][feature]['Drug']))) data.append((drug_feature_results_to_plot[entry][feature]['Drug'],np.median(drug_feature_results_to_plot[entry][feature]['Drug']))) #print data data.sort(key = operator.itemgetter(1)) drug_names.sort(key = operator.itemgetter(1)) data = [x[0] for x in data] drug_names = [x[0] for x in drug_names] data.append(drug_feature_results_to_plot[entry][feature]['DMSO']) drug_names.append('DMSO') Percent_95 = np.percentile(drug_feature_results_to_plot[entry][feature]['DMSO'],90) Percent_5 = np.percentile(drug_feature_results_to_plot[entry][feature]['DMSO'],10) my_pal = {0: drug_colors[drug_names[0]], 1: drug_colors[drug_names[1]], 2:drug_colors[drug_names[2]], 3:drug_colors[drug_names[3]],4:drug_colors[drug_names[4]],5:drug_colors[drug_names[5]], 6:drug_colors[drug_names[6]],7:drug_colors[drug_names[7]],8:drug_colors[drug_names[8]] ,9:drug_colors[drug_names[9]],10:drug_colors[drug_names[10]]} #sns.violinplot(data=data,scale='width',bw='scott', palette='Paired', orient='h') sns.violinplot(data=data,scale='width',bw='scott', palette=my_pal, orient='h') plt.axvline(Percent_95,ls='--',color='grey') plt.axvline(Percent_5,ls='--',color='grey') plt.yticks(range(0,len(data)+1),drug_names, fontsize=5) plt.ylabel('Treatment', fontsize=5) plt.xticks(fontsize=5) plt.xlabel(feature, fontsize=5) #sns.swarmplot(data=data) 
plt.savefig('../results/Investigate_CellularHeterogeneity/Final/'+str(feature)+'_Violin.pdf') #plt.show() plt.close() ``` ### Analyse Features for selected Drugs ``` fp_out = open('../results/Investigate_CellularHeterogeneity/Result_Overview.csv','w') fp_out.write('Batch,Drug,Plate,Feature,Cohens"D,Abs(CohenD),Coefficient_Variation,KS_Normality,MW_PVal\n') #selected_Features = ['Cells_Intensity_StdIntensity_MitoTracker','Cells_Granularity_12_BetaTubulin','Nuclei_AreaShape_MaximumRadius','Cells_AreaShape_MaxFeretDiameter'] selected_Features = ['Cells_AreaShape_FormFactor','Nuclei_AreaShape_MaxFeretDiameter','Cells_Granularity_1_BetaTubulin','Nuclei_Granularity_8_DAPI','Cells_Intensity_StdIntensity_MitoTracker','Nuclei_Intensity_IntegratedIntensity_DAPI'] db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) #print Image_Number_For_Drugs for entry in Image_Number_For_Drugs: print entry drug,batch_ = entry.split('_') batch_ = batch_[5] #plates = list(Image_Number_For_Drugs[entry].keys()) plates = [1315101] for plate in plates: images_drug = Image_Number_For_Drugs[entry][plate] imageNumberString_drug = ','.join([str(x) for x in images_drug]) images_dmso = Image_Number_For_DMSO[batch_][plate] imageNumberString_dmso = ','.join([str(x) for x in images_dmso]) for feature in selected_Features: ensure_dir('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/') string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_drug+');' result_drug = list(pandas.read_sql(string, con=db)[feature].values) result_drug = reject_outliers_2([x for x in result_drug if str(x) != 'nan'],6) string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_dmso+');' result_dmso = list(pandas.read_sql(string, con=db)[feature].values) result_dmso = reject_outliers_2([x for x in result_dmso if str(x) != 'nan'],6) 
#sns.violinplot(data=[result_drug,result_dmso],bw=0.5, cut=50) #plt.show() cd = cohen_d(result_drug,result_dmso) mw_Pval = min([1,mw(result_drug,result_dmso)[1] * (len(selected_Features) * len(list(Image_Number_For_Drugs[entry])) * 2)]) coev_var = np.std(result_drug)/np.mean(result_drug) #KS_Normality = stats.kstest(result_drug, 'norm')[1] KS_Normality = stats.shapiro(result_drug)[1] fp_out.write(batch_+','+drug+','+str(plate)+','+feature+','+str(cd)+','+str(abs(cd))+','+str(coev_var)+','+str(KS_Normality)+','+str(mw_Pval)+'\n') #continue #bins = 14 prettier plt.hist(result_drug, bins = 20, color = drug_colors[drug], alpha=0.3, density=True) plt.hist(result_dmso, bins = 20, color = 'grey', alpha=0.3,density=True) plt.xlim([min([np.percentile(result_drug,1),np.percentile(result_dmso,1)]),max([np.percentile(result_drug,99),np.percentile(result_dmso,99)])]) plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/'+str(plate)+'_Hist.pdf') #plt.show() plt.close() plt.boxplot([result_drug,result_dmso], whis = 1.5, showfliers = True) plt.xticks([1,2],[drug,'DMSO']) plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/'+str(plate)+'_Box.pdf') #plt.show() plt.close() db.close() ``` ### Load actual cells ``` fp_out = open('../results/Investigate_CellularHeterogeneity/Result_Overview.csv','w') fp_out.write('Batch,Drug,Plate,Feature,Cohens"D,Abs(CohenD),Coefficient_Variation,KS_Normality,MW_PVal\n') #selected_Features = ['Cells_Intensity_StdIntensity_MitoTracker','Cells_Granularity_12_BetaTubulin','Nuclei_AreaShape_MaximumRadius','Cells_AreaShape_MaxFeretDiameter'] db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) #print Image_Number_For_Drugs for entry in Image_Number_For_Drugs: print entry drug,batch_ = entry.split('_') batch_ = batch_[5] plates = list(Image_Number_For_Drugs[entry].keys()) #print plates for plate in plates: images_drug = Image_Number_For_Drugs[entry][plate] 
imageNumberString_drug = ','.join([str(x) for x in images_drug]) images_dmso = Image_Number_For_DMSO[batch_][plate] imageNumberString_dmso = ','.join([str(x) for x in images_dmso]) for feature in selected_Features: ensure_dir('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/') string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_drug+');' result_drug = list(pandas.read_sql(string, con=db)[feature].values) result_drug = reject_outliers_2([x for x in result_drug if str(x) != 'nan'],6) string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_dmso+');' result_dmso = list(pandas.read_sql(string, con=db)[feature].values) result_dmso = reject_outliers_2([x for x in result_dmso if str(x) != 'nan'],6) #sns.violinplot(data=[result_drug,result_dmso],bw=0.5, cut=50) #plt.show() cd = cohen_d(result_drug,result_dmso) mw_Pval = min([1,mw(result_drug,result_dmso)[1] * (len(selected_Features) * len(list(Image_Number_For_Drugs[entry])) * 2)]) coev_var = np.std(result_drug)/np.mean(result_drug) #KS_Normality = stats.kstest(result_drug, 'norm')[1] KS_Normality = stats.shapiro(result_drug)[1] fp_out.write(batch_+','+drug+','+str(plate)+','+feature+','+str(cd)+','+str(abs(cd))+','+str(coev_var)+','+str(KS_Normality)+','+str(mw_Pval)+'\n') #continue #bins = 14 prettier plt.hist(result_drug, bins = 20, color = '#3AB9D1', alpha=0.3, density=True) plt.hist(result_dmso, bins = 20, color = 'grey', alpha=0.3,density=True) plt.xlim([min([np.percentile(result_drug,1),np.percentile(result_dmso,1)]),max([np.percentile(result_drug,99),np.percentile(result_dmso,99)])]) plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/'+str(plate)+'_Hist.pdf') #plt.show() plt.close() plt.boxplot([result_drug,result_dmso], whis = 1.5, showfliers = True) plt.xticks([1,2],[drug,'DMSO']) 
plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/'+str(plate)+'_Box.pdf') #plt.show() plt.close() db.close() ``` ### Choose specific features / plate ``` db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) features = ['Cells_Intensity_StdIntensity_MitoTracker','Cells_Granularity_12_BetaTubulin','Nuclei_AreaShape_MaximumRadius','Cells_AreaShape_MaxFeretDiameter'] plate = 1315111 #batch_ = 2 drug_feature_results_to_plot = {} for entry in Image_Number_For_Drugs: drug,batch_ = entry.split('_') batch_ = batch_[5] drug_feature_results_to_plot[entry] = {} print drug images_drug = Image_Number_For_Drugs[entry][plate] imageNumberString_drug = ','.join([str(x) for x in images_drug]) images_dmso = Image_Number_For_DMSO[batch_][plate] imageNumberString_dmso = ','.join([str(x) for x in images_dmso]) for feature in features: ensure_dir('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/') string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_drug+');' result_drug = list(pandas.read_sql(string, con=db)[feature].values) result_drug = reject_outliers_2([x for x in result_drug if str(x) != 'nan'],6) string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018Batch'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_dmso+');' result_dmso = list(pandas.read_sql(string, con=db)[feature].values) result_dmso = reject_outliers_2([x for x in result_dmso if str(x) != 'nan'],6) drug_feature_results_to_plot[entry][feature] = {'Drug':result_drug, 'DMSO':result_dmso} db.close() for feature in features: data = [] drug_names = [] for entry in list(Image_Number_For_Drugs.keys()): drug,batch_ = entry.split('_') drug_names.append(drug) data.append(drug_feature_results_to_plot[entry][feature]['Drug']) data.append(drug_feature_results_to_plot[entry][feature]['DMSO']) drug_names.append('DMSO') Percent_95 = 
np.percentile(drug_feature_results_to_plot[entry][feature]['DMSO'],95) Percent_5 = np.percentile(drug_feature_results_to_plot[entry][feature]['DMSO'],5) sns.violinplot(data=data,scale='width') plt.axhline(Percent_95,ls='--',color='grey') plt.axhline(Percent_5,ls='--',color='grey') plt.xticks(range(0,len(data)+1),drug_names, fontsize=5) plt.xlabel('Treatment') plt.ylabel(feature) #sns.swarmplot(data=data) plt.savefig('../results/Investigate_CellularHeterogeneity/Final/'+str(feature)+'_Violin.pdf') #plt.show() plt.close() Image_Number_For_Drugs = {'Batch1':{},'Batch2':{}} db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) batches = ['1','2'] for batch_ in batches: for drug in significant_perturbations['Batch'+batch_]: if drug == 'DMSO': string = 'select ImageNumber,Image_Metadata_Plate from DPN1018Batch'+batch_+'Per_Image where Image_Metadata_ID_A like "DMSO" and Image_Metadata_ID_B like "None";' ImageNumbers = pandas.read_sql(string, con=db) for line in ImageNumbers.iterrows(): Drug_ImageNumber = line[1][0] Drug_PlateNumber = line[1][1] if drug not in Image_Number_For_Drugs['Batch'+batch_]: Image_Number_For_Drugs['Batch'+batch_][drug] = {Drug_PlateNumber:[Drug_ImageNumber]} elif Drug_PlateNumber not in Image_Number_For_Drugs['Batch'+batch_][drug]: Image_Number_For_Drugs['Batch'+batch_][drug][Drug_PlateNumber] = [Drug_ImageNumber] else: Image_Number_For_Drugs['Batch'+batch_][drug][Drug_PlateNumber].append(Drug_ImageNumber) elif drug_decay[drug]['Batch'+batch_] == True: string = 'select ImageNumber,Image_Metadata_Plate from DPN1018Batch'+batch_+'Per_Image where Image_Metadata_ID_A like "'+drug+'" and Image_Metadata_ID_B like "DMSO";' ImageNumbers = pandas.read_sql(string, con=db) #print(ImageNumbers) for line in ImageNumbers.iterrows(): Drug_ImageNumber = line[1][0] Drug_PlateNumber = line[1][1] if drug not in Image_Number_For_Drugs['Batch'+batch_]: Image_Number_For_Drugs['Batch'+batch_][drug] = {Drug_PlateNumber:[Drug_ImageNumber]} 
elif Drug_PlateNumber not in Image_Number_For_Drugs['Batch'+batch_][drug]: Image_Number_For_Drugs['Batch'+batch_][drug][Drug_PlateNumber] = [Drug_ImageNumber] else: Image_Number_For_Drugs['Batch'+batch_][drug][Drug_PlateNumber].append(Drug_ImageNumber) db.close() fp_out = open('../results/Investigate_CellularHeterogeneity/Result_Overview.csv','w') fp_out.write('Batch,Drug,Plate,Feature,Cohens"D,Abs(CohenD),Coefficient_Variation,KS_Normality,MW_PVal\n') db = MySQLdb.connect("menchelabdb.int.cemm.at","root","cqsr4h","ImageAnalysisDDI" ) for batch_ in Image_Number_For_Drugs: print batch_ for drug in Image_Number_For_Drugs[batch_]: for plate in list(Image_Number_For_Drugs[batch_][drug])[0:1]: images_drug = Image_Number_For_Drugs[batch_][drug][plate] imageNumberString_drug = ','.join([str(x) for x in images_drug]) images_dmso = Image_Number_For_Drugs[batch_]['DMSO'][plate] imageNumberString_dmso = ','.join([str(x) for x in images_dmso]) for feature in selected_Features[0:2]: ensure_dir('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/') string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_drug+');' result_drug = list(pandas.read_sql(string, con=db)[feature].values) result_drug = [x for x in result_drug if str(x) != 'nan'] string = 'select ImageNumber,ObjectNumber,'+feature+' from DPN1018'+batch_+'Per_Object where ImageNumber in ('+imageNumberString_dmso+');' result_dmso = list(pandas.read_sql(string, con=db)[feature].values) result_dmso = [x for x in result_dmso if str(x) != 'nan'] #sns.violinplot(data=[result_drug,result_dmso],bw=0.5, cut=50) #plt.show() cd = cohen_d(result_drug,result_dmso) mw_Pval = min([1,mw(result_drug,result_dmso)[1] * (len(selected_Features) * len(list(Image_Number_For_Drugs[batch_][drug])) * 2)]) coev_var = np.std(result_drug)/np.mean(result_drug) #KS_Normality = stats.kstest(result_drug, 'norm')[1] KS_Normality = stats.shapiro(result_drug)[1] 
fp_out.write(batch_+','+drug+','+str(plate)+','+feature+','+str(cd)+','+str(abs(cd))+','+str(coev_var)+','+str(KS_Normality)+','+str(mw_Pval)+'\n') #continue #bins = 14 prettier plt.hist(result_drug, bins = 20, color = '#3AB9D1', alpha=0.3, density=True) plt.hist(result_dmso, bins = 20, color = 'grey', alpha=0.3,density=True) plt.xlim([min([np.percentile(result_drug,1),np.percentile(result_dmso,1)]),max([np.percentile(result_drug,99),np.percentile(result_dmso,99)])]) plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/'+str(plate)+'_Hist.pdf') #plt.show() plt.close() plt.boxplot([result_drug,result_dmso], whis = 1.5, showfliers = False) plt.xticks([1,2],[drug,'DMSO']) plt.savefig('../results/Investigate_CellularHeterogeneity/'+drug+'/'+feature+'/'+str(plate)+'_Box.pdf') #plt.show() plt.close() fp_out.close() ```
github_jupyter
# Large Scale Kernel Ridge Regression ``` import sys sys.path.insert(0, '/Users/eman/Documents/code_projects/kernellib') sys.path.insert(0, '/home/emmanuel/code/kernellib') import numpy as np from kernellib.large_scale import RKSKernelRidge, KernelRidge as RKernelRidge from kernellib.utils import estimate_sigma, r_assessment from sklearn.model_selection import GridSearchCV import matplotlib.pyplot as plt %matplotlib inline %load_ext autoreload %autoreload 2 ``` #### Sample Data ``` seed = 123 rng = np.random.RandomState(seed) n_train, n_test = 10000, 1000 d_dimensions = 1 noise = 0.1 xtrain = rng.randn(n_train, d_dimensions) ytrain = np.sin(xtrain) + noise * rng.randn(n_train, d_dimensions) xtest = rng.randn(n_test, d_dimensions) ytest = np.sin(xtest) + noise * rng.randn(n_test, d_dimensions) # training n_components = 10 alpha = 1e-3 sigma = estimate_sigma(xtrain) ``` ## Random Kitchen Sinks Regression In this method, I implement the Random Kitchen Sinks algorithm found [here](https://people.eecs.berkeley.edu/~brecht/kitchensinks.html) and [here](https://people.eecs.berkeley.edu/~brecht/kitchensinks.html). I don't try and transform the problem into a matrix approximation and then fit it into the KRR framework. This is largely because the RKS algorithm that they implement use complex values that need to be present in solving and transforming the data. If the complex values are taken out before the transformation, the results are garbage. Furthermore, some experiments that I ran (see below) show that the RKS as a transformer do not approximate the kernel matrix very well. So therefore, this algorithm comes as is. It's a shame that you cannot write the function as a transformer but the phenomenal results that you obtain make it worth it in my opinion. 
``` rks_model = RKSKernelRidge(n_components=n_components, alpha=alpha, sigma=sigma, random_state=seed) rks_model.fit(xtrain, ytrain) y_pred = rks_model.predict(xtest) r_assessment(y_pred, ytest, verbose=1); %timeit rks_model.fit(xtrain, ytrain); %timeit rks_model.predict(xtest); fig, ax = plt.subplots() xplot = np.linspace(xtrain.min(), xtest.max(), 100)[:, np.newaxis] yplot = rks_model.predict(xplot) ax.scatter(xtrain, ytrain, color='r', label='Training Data') ax.plot(xplot, yplot, color='k', linewidth=2, label='Predictions') ax.legend() ax.set_title('Random Kitchen Sinks Approximation') plt.show() ``` #### Cross Validation Compatibility ``` sigmaMin = np.log10(sigma*0.1); sigmaMax = np.log10(sigma*10); sigmas = np.logspace(sigmaMin,sigmaMax,20); param_grid = { 'n_components': [1, 5, 10, 25], 'alpha': [1e0, 1e-1, 1e-2, 1e-3], 'sigma': sigmas } n_jobs = 24 cv = 3 rks_grid_model = GridSearchCV(RKSKernelRidge(random_state=seed), param_grid=param_grid, n_jobs=n_jobs, cv=cv, verbose=1) rks_grid_model.fit(xtrain, ytrain); y_pred = rks_grid_model.predict(xtest) r_assessment(y_pred, ytest) fig, ax = plt.subplots() xplot = np.linspace(xtrain.min(), xtest.max(), 100)[:, np.newaxis] yplot = rks_grid_model.predict(xplot) ax.scatter(xtrain, ytrain, color='r', label='Training Data') ax.plot(xplot, yplot, color='k', linewidth=2, label='Predictions') ax.legend() ax.set_title('Random Kitchen Sinks Approximation w/ Grid Search') plt.show() ``` ## Nystrom Approximation ``` approximation = 'nystrom' nys_model = RKernelRidge(n_components=n_components, alpha=alpha, sigma=sigma, kernel='rbf', random_state=seed, approximation=approximation) nys_model.fit(xtrain, ytrain); y_pred = nys_model.predict(xtest) r_assessment(y_pred, ytest, verbose=1); %timeit nys_model.fit(xtrain, ytrain); %timeit nys_model.predict(xtest); fig, ax = plt.subplots() xplot = np.linspace(xtrain.min(), xtest.max(), 100)[:, np.newaxis] yplot = nys_model.predict(xplot) ax.scatter(xtrain, ytrain, color='r', 
label='Training Data') ax.plot(xplot, yplot, color='k', linewidth=2, label='Predictions') ax.legend() ax.set_title('Nystrom Approximation') plt.show() ``` ### Nystrom w/ Grid Search ``` sigmaMin = np.log10(sigma*0.1); sigmaMax = np.log10(sigma*10); sigmas = np.logspace(sigmaMin,sigmaMax,20); param_grid = { 'kernel': ['rbf'], 'n_components': [1, 5, 10, 25], 'alpha': [1e0, 1e-1, 1e-2, 1e-3], 'sigma': sigmas } n_jobs = 24 cv = 3 nys_grid_model = GridSearchCV(RKernelRidge(random_state=seed, approximation=approximation), param_grid=param_grid, n_jobs=n_jobs, cv=cv, verbose=1) nys_grid_model.fit(xtrain, ytrain); r_assessment(y_pred, ytest, verbose=1); print('Best sigma:', nys_grid_model.best_estimator_.sigma) print('Best alpha:',nys_grid_model.best_estimator_.alpha) print('Best Number of features:', nys_grid_model.best_estimator_.n_components) print('Best Kernel:', nys_grid_model.best_estimator_.kernel) fig, ax = plt.subplots() xplot = np.linspace(xtrain.min(), xtest.max(), 100)[:, np.newaxis] yplot = nys_grid_model.predict(xplot) ax.scatter(xtrain, ytrain, color='r', label='Training Data') ax.plot(xplot, yplot, color='k', linewidth=2, label='Predictions') ax.legend() ax.set_title('Nystrom Approximation w/ Grid Search') plt.show() ``` ## Randomized Nystrom Matrix Approximation ``` approximation = 'rnystrom' k_rank = 10 rnys_model = RKernelRidge(n_components=n_components, alpha=alpha, sigma=sigma, kernel='rbf', random_state=seed, approximation=approximation, k_rank=k_rank) rnys_model.fit(xtrain, ytrain); y_pred = rnys_model.predict(xtest) r_assessment(y_pred, ytest, verbose=1); %timeit rnys_model.fit(xtrain, ytrain); %timeit rnys_model.predict(xtest); fig, ax = plt.subplots() xplot = np.linspace(xtrain.min(), xtest.max(), 100)[:, np.newaxis] yplot = rnys_model.predict(xplot) ax.scatter(xtrain, ytrain, color='r', label='Training Data') ax.plot(xplot, yplot, color='k', linewidth=2, label='Predictions') ax.legend() ax.set_title('Randomized Nystrom Approximation') plt.show() 
``` ## Random Fourier Features Approximation ``` approximation = 'rff' rff_model = RKernelRidge(n_components=n_components, alpha=alpha, sigma=sigma, kernel='rbf', random_state=seed, approximation=approximation) rff_model.fit(xtrain, ytrain); y_pred = rff_model.predict(xtest) r_assessment(y_pred, ytest, verbose=1); %timeit rff_model.fit(xtrain, ytrain); %timeit rff_model.predict(xtest); fig, ax = plt.subplots() xplot = np.linspace(xtrain.min(), xtest.max(), 100)[:, np.newaxis] yplot = rff_model.predict(xplot) ax.scatter(xtrain, ytrain, color='r', label='Training Data') ax.plot(xplot, yplot, color='k', linewidth=2, label='Predictions') ax.legend() ax.set_title('Random Fourier Features') plt.show() ``` ### Fast Food ``` approximation = 'fastfood' fastfood_model = RKernelRidge(n_components=n_components, alpha=alpha, sigma=sigma, kernel='rbf', random_state=seed, approximation=approximation, trade_off='mem') fastfood_model.fit(xtrain, ytrain); y_pred = fastfood_model.predict(xtest) r_assessment(y_pred, ytest, verbose=1); %timeit fastfood_model.fit(xtrain, ytrain); %timeit fastfood_model.predict(xtest); fig, ax = plt.subplots() xplot = np.linspace(xtrain.min(), xtest.max(), 100)[:, np.newaxis] yplot = fastfood_model.predict(xplot) ax.scatter(xtrain, ytrain, color='r', label='Training Data') ax.plot(xplot, yplot, color='k', linewidth=2, label='Predictions') ax.legend() ax.set_title('Fast Food') plt.show() ``` ### Timing Comparison #### Number of Features ``` from sklearn.datasets import make_low_rank_matrix import seaborn; seaborn.set() m_range = (2 ** (np.arange(12.3, 20))).astype(int) print(m_range.shape, m_range.min(), m_range.max()) from sklearn.datasets import make_regression print(t1.average, t1.stdev) %%time t_rks = list() t_nys = list() t_rnys = list() t_rbf = list() t_rff = list() # training n_components = 50 alpha = 1e-3 gamma = 1.0 for m in m_range: xtrain, ytrain = make_regression(n_samples=m, n_features=2000, n_informative=200, n_targets=1, effective_rank=50, 
noise=0.2, random_state=seed) print(xtrain.shape) # ------------------------------- # Random Kitchen Sinks) # ------------------------------- rks_model = RKSKernelRidge(n_components=n_components, alpha=alpha, gamma=gamma, random_state=seed) t1 = %timeit -oq rks_model.fit(xtrain, ytrain) # ------------------------------ # Nystrom # ------------------------------ approximation = 'nystrom' nys_model = RKernelRidge(n_components=n_components, alpha=alpha, gamma=gamma, kernel='rbf', random_state=seed, approximation=approximation) t2 = %timeit -oq nys_model.fit(xtrain, ytrain); # ---------------------------- # Randomized Nystrom # ---------------------------- approximation = 'rnystrom' k_rank = n_components rnys_model = RKernelRidge(n_components=n_components, alpha=alpha, gamma=gamma, kernel='rbf', random_state=seed, approximation=approximation, k_rank=k_rank) t3 = %timeit -oq rnys_model.fit(xtrain, ytrain); # ----------------------------------- # RBF Sampler (Random Kitchen Sinks) # ----------------------------------- approximation = 'rks' rks_model = RKernelRidge(n_components=n_components, alpha=alpha, gamma=gamma, kernel='rbf', random_state=seed, approximation=approximation) t4 = %timeit -oq rks_model.fit(xtrain, ytrain); # ----------------------------- # Random Fourier Features # ----------------------------- approximation = 'rff' rff_model = RKernelRidge(n_components=n_components, alpha=alpha, gamma=gamma, kernel='rbf', random_state=seed, approximation=approximation) t5 = %timeit -oq rff_model.fit(xtrain, ytrain); t_rks.append(t1.best) t_nys.append(t2.best) t_rnys.append(t3.best) t_rbf.append(t4.best) t_rff.append(t5.best) plt.loglog(m_range, t_rks, label='Random Kitchen Sinks') plt.loglog(m_range, t_rff, label='Random Fourier Features') plt.loglog(m_range, t_nys, label='Nystrom') plt.loglog(m_range, t_rnys, label='Randomized Nystrom') plt.loglog(m_range, t_rbf, label='RBF Sampler') plt.legend(loc='upper left') plt.xlabel('Number of Elements') plt.ylabel('Execution 
Time (secs)'); plt.plot(m_range, t_rks, label='Random Kitchen Sinks') plt.plot(m_range, t_rff, label='Random Fourier Features') plt.plot(m_range, t_nys, label='Nystrom') plt.plot(m_range, t_rnys, label='Randomized Nystrom') plt.plot(m_range, t_rbf, label='RBF Sampler') plt.legend(loc='upper left') plt.xlabel('Number of Elements') plt.ylabel('Execution Time (secs)'); ```
github_jupyter
<a href="https://www.skills.network/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01"><img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DL0120ENedX/labs/Template%20for%20Instructional%20Hands-on%20Labs/images/IDSNlogo.png" width="400px" align="center"></a> <h1 align="center"><font size="5">RESTRICTED BOLTZMANN MACHINES</font></h1> <h3>Introduction</h3> <b>Restricted Boltzmann Machine (RBM):</b> RBMs are shallow neural nets that learn to reconstruct data by themselves in an unsupervised fashion. <h4>Why are RBMs important?</h4> An RBM are a basic form of autoencoder. It can automatically extract <b>meaningful</b> features from a given input. <h4>How does it work?</h4> RBM is a 2 layer neural network. Simply, RBM takes the inputs and translates those into a set of binary values that represents them in the hidden layer. Then, these numbers can be translated back to reconstruct the inputs. Through several forward and backward passes, the RBM will be trained, and a trained RBM can reveal which features are the most important ones when detecting patterns. <h4>What are the applications of an RBM?</h4> RBM is useful for <a href='http://www.cs.utoronto.ca/~hinton/absps/netflixICML.pdf?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01'> Collaborative Filtering</a>, dimensionality reduction, classification, regression, feature learning, topic modeling and even <b>Deep Belief Networks</b>. <h4>Is RBM a generative or Discriminative model?</h4> RBM is a generative model. 
Let me explain it by first looking at what is different between discriminative and generative models:
<h2>Table of Contents</h2> <ol> <li><a href="https://#ref1">Initialization</a></li> <li><a href="https://#ref2">RBM layers</a></li> <li><a href="https://#ref3">What RBM can do after training?</a></li> <li><a href="https://#ref4">How to train the model?</a></li> <li><a href="https://#ref5">Learned features</a></li> </ol> <p></p> </div> <br> <hr> <a id="ref1"></a> <h3>Initialization</h3> First, we have to load the utility file which contains different utility functions that are not connected in any way to the networks presented in the tutorials, but rather help in processing the outputs into a more understandable way. ``` import urllib.request with urllib.request.urlopen("https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork/labs/Week4/data/utils.py") as url: response = url.read() target = open('utils.py', 'w') target.write(response.decode('utf-8')) target.close() ``` <h2>Installing TensorFlow </h2> We will installing TensorFlow version 2.2.0 and its required prerequistes. Also installing pillow\... ``` !pip install grpcio==1.24.3 !pip install tensorflow==2.2.0 !pip install pillow==8.1.0 ``` <b>Notice:</b> This notebook has been created with TensorFlow version 2.2, and might not work with other versions. Therefore we check: ``` import tensorflow as tf from IPython.display import Markdown, display def printmd(string): display(Markdown('# <span style="color:red">'+string+'</span>')) if not tf.__version__ == '2.2.0': printmd('<<<<<!!!!! ERROR !!!! please upgrade to TensorFlow 2.2.0, or restart your Kernel (Kernel->Restart & Clear Output)>>>>>') ``` Now, we load in all the packages that we use to create the net including the TensorFlow package: ``` import tensorflow as tf import numpy as np from PIL import Image from utils import tile_raster_images import matplotlib.pyplot as plt %matplotlib inline ``` <hr> <a id="ref2"></a> <h3>RBM layers</h3> An RBM has two layers. 
The first layer of the RBM is called the <b>visible</b> (or input layer). Imagine that our toy example, has only vectors with 7 values, so the visible layer must have $V=7$ input nodes. The second layer is the <b>hidden</b> layer, which has $H$ neurons in our case. Each hidden node takes on values of either 0 or 1 (i.e., $h_i = 1$ or $h_i$ = 0), with a probability that is a logistic function of the inputs it receives from the other $V$ visible units, called for example, $p(h_i = 1)$. For our toy sample, we'll use 2 nodes in the hidden layer, so $H = 2$. <center><img src="https://ibm.box.com/shared/static/eu26opvcefgls6vnwuo29uwp0nudmokh.png" alt="RBM Model" style="width: 400px;"></center> Each node in the first layer also has a <b>bias</b>. We will denote the bias as $v\_{bias}$, and this single value is shared among the $V$ visible units. The <b>bias</b> of the second is defined similarly as $h\_{bias}$, and this single value among the $H$ hidden units. ``` v_bias = tf.Variable(tf.zeros([7]), tf.float32) h_bias = tf.Variable(tf.zeros([2]), tf.float32) ``` We have to define weights among the input layer and hidden layer nodes. In the weight matrix, the number of rows are equal to the input nodes, and the number of columns are equal to the output nodes. We define a tensor $\mathbf{W}$ of shape = (7,2), where the number of visible neurons = 7, and the number of hidden neurons = 2. ``` W = tf.constant(np.random.normal(loc=0.0, scale=1.0, size=(7, 2)).astype(np.float32)) ``` <hr> <a id="ref3"></a> <h3>What RBM can do after training?</h3> Think of RBM as a model that has been trained based on images of a dataset of many SUV and sedan cars. Also, imagine that the RBM network has only two hidden nodes, where one node encodes the weight and, and the other encodes the size. In a sense, the different configurations represent different cars, where one is an SUV and the other is Sedan. 
In a training process, through many forward and backward passes, the RBM adjust its weights to send a stronger signal to either the SUV node (0, 1) or the sedan node (1, 0) in the hidden layer, given the pixels of images. Now, given an SUV in hidden layer, which distribution of pixels should we expect? RBM can give you 2 things. First, it encodes your images in hidden layer. Second, it gives you the probability of observing a case, given some hidden values. <h3>The Inference Process</h3> RBM has two phases: <ul> <li>Forward Pass</li> <li>Backward Pass or Reconstruction</li> </ul> <b>Phase 1) Forward pass:</b> Input one training sample (one image) $\mathbf{x}$ through all visible nodes, and pass it to all hidden nodes. Processing happens in each node in the hidden layer. This computation begins by making stochastic decisions about whether to transmit that input or not (i.e. to determine the state of each hidden layer). First, the probability vector is computed using the input feature vector $\mathbf{x}$, the weight matrix $\mathbf{W}$, and the bias term $h\_{bias}$, as $$p({h_j}|\mathbf x)= \sigma( \sum\_{i=1}^V W\_{ij} x_i + h\_{bias} )$$, where $\sigma(z) = (1+e^{-z})^{-1}$ is the logistic function. So, what does $p({h_j})$ represent? It is the <b>probability distribution</b> of the hidden units. That is, RBM uses inputs $x_i$ to make predictions about hidden node activations. For example, imagine that the hidden node activation values are \[0.51 0.84] for the first training item. It tells you that the conditional probability for each hidden neuron for Phase 1 is: $$p(h\_{1} = 1|\mathbf{v}) = 0.51$$ $$p(h\_{2} = 1|\mathbf{v}) = 0.84$$ As a result, for each row in the training set, vector of probabilities is generated. In TensorFlow, this is referred to as a `tensor` with a shape of (1,2). 
We then turn unit $j$ with probability $p(h\_{j}|\mathbf{v})$, and turn it off with probability $1 - p(h\_{j}|\mathbf{v})$ by generating a uniform random number vector $\mathbf{\xi}$, and comparing it to the activation probability as <center>If $\xi_j>p(h_{j}|\mathbf{v})$, then $h_j=1$, else $h_j=0$.</center> Therefore, the conditional probability of a configuration of $\mathbf{h}$ given $\mathbf{v}$ (for a training sample) is: $$p(\mathbf{h} \mid \mathbf{v}) = \prod\_{j=1}^H p(h_j \mid \mathbf{v})$$ where $H$ is the number of hidden units. Before we go further, let's look at a toy example for one case out of all input. Assume that we have a trained RBM, and a very simple input vector, such as \[1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0].\ Let's see what the output of forward pass would look like: ``` X = tf.constant([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]], tf.float32) v_state = X print ("Input: ", v_state) h_bias = tf.constant([0.1, 0.1]) print ("hb: ", h_bias) print ("w: ", W) # Calculate the probabilities of turning the hidden units on: h_prob = tf.nn.sigmoid(tf.matmul(v_state, W) + h_bias) #probabilities of the hidden units print ("p(h|v): ", h_prob) # Draw samples from the distribution: h_state = tf.nn.relu(tf.sign(h_prob - tf.random.uniform(tf.shape(h_prob)))) #states print ("h0 states:", h_state) ``` <b>Phase 2) Backward Pass (Reconstruction):</b> The RBM reconstructs data by making several forward and backward passes between the visible and hidden layers. So, in the second phase (i.e. reconstruction phase), the samples from the hidden layer (i.e. $\mathbf h$) becomes the input in the backward pass. The same weight matrix and visible layer biases are used to passed to the sigmoid function. The reproduced output is a reconstruction which is an approximation of the original input. 
``` vb = tf.constant([0.1, 0.2, 0.1, 0.1, 0.1, 0.2, 0.1]) print ("b: ", vb) v_prob = tf.nn.sigmoid(tf.matmul(h_state, tf.transpose(W)) + vb) print ("p(vi∣h): ", v_prob) v_state = tf.nn.relu(tf.sign(v_prob - tf.random.uniform(tf.shape(v_prob)))) print ("v probability states: ", v_state) ``` RBM learns a probability distribution over the input, and then, after being trained, the RBM can generate new samples from the learned probability distribution. As you know, <b>probability distribution</b>, is a mathematical function that provides the probabilities of occurrence of different possible outcomes in an experiment. The (conditional) probability distribution over the visible units v is given by $$p(\mathbf{v} \mid \mathbf{h}) = \prod\_{i=1}^V p(v_i \mid \mathbf{h}),$$ where, $$p(v_i \mid \mathbf{h}) = \sigma\left(\sum\_{j=1}^H W\_{ji} h_j + v\_{bias} \right)$$ so, given current state of hidden units and weights, what is the probability of generating \[1. 0. 0. 1. 0. 0. 0.] in reconstruction phase, based on the above <b>probability distribution</b> function? ``` inp = X print("input X:" , inp.numpy()) print("probablity vector:" , v_prob[0].numpy()) v_probability = 1 for elm, p in zip(inp[0],v_prob[0]) : if elm ==1: v_probability *= p else: v_probability *= (1-p) print("probability of generating X: " , v_probability.numpy()) ``` How similar are vectors $\mathbf{x}$ and $\mathbf{v}$? Of course, the reconstructed values most likely will not look anything like the input vector, because our network has not been trained yet. Our objective is to train the model in such a way that the input vector and reconstructed vector to be same. Therefore, based on how different the input values look to the ones that we just reconstructed, the weights are adjusted. <hr> <h2>MNIST</h2> We will be using the MNIST dataset to practice the usage of RBMs. The following cell loads the MNIST dataset. 
``` #loading training and test data mnist = tf.keras.datasets.mnist (trX, trY), (teX, teY) = mnist.load_data() # showing an example of the Flatten class and operation from tensorflow.keras.layers import Flatten flatten = Flatten(dtype='float32') trX = flatten(trX/255.0) trY = flatten(trY/255.0) ``` Lets look at the dimension of the images. MNIST images have 784 pixels, so the visible layer must have 784 input nodes. For our case, we'll use 50 nodes in the hidden layer, so i = 50. ``` vb = tf.Variable(tf.zeros([784]), tf.float32) hb = tf.Variable(tf.zeros([50]), tf.float32) ``` Let $\mathbf W$ be the Tensor of 784x50 (784 - number of visible neurons, 50 - number of hidden neurons) that represents weights between the neurons. ``` W = tf.Variable(tf.zeros([784,50]), tf.float32) ``` Lets define the visible layer: ``` v0_state = tf.Variable(tf.zeros([784]), tf.float32) #testing to see if the matrix product works tf.matmul( [v0_state], W) ``` Now, we can define hidden layer: ``` #computing the hidden nodes probability vector and checking shape h0_prob = tf.nn.sigmoid(tf.matmul([v0_state], W) + hb) #probabilities of the hidden units print("h0_state shape: " , tf.shape(h0_prob)) #defining a function to return only the generated hidden states def hidden_layer(v0_state, W, hb): h0_prob = tf.nn.sigmoid(tf.matmul([v0_state], W) + hb) #probabilities of the hidden units h0_state = tf.nn.relu(tf.sign(h0_prob - tf.random.uniform(tf.shape(h0_prob)))) #sample_h_given_X return h0_state h0_state = hidden_layer(v0_state, W, hb) print("first 15 hidden states: ", h0_state[0][0:15]) ``` Now, we define reconstruction part: ``` def reconstructed_output(h0_state, W, vb): v1_prob = tf.nn.sigmoid(tf.matmul(h0_state, tf.transpose(W)) + vb) v1_state = tf.nn.relu(tf.sign(v1_prob - tf.random.uniform(tf.shape(v1_prob)))) #sample_v_given_h return v1_state[0] v1_state = reconstructed_output(h0_state, W, vb) print("hidden state shape: ", h0_state.shape) print("v0 state shape: ", v0_state.shape) 
print("v1 state shape: ", v1_state.shape) ``` <h3>What is the objective function?</h3> <b>Goal</b>: Maximize the likelihood of our data being drawn from that distribution <b>Calculate error:</b>\ In each epoch, we compute the "error" as a sum of the squared difference between step 1 and step n, e.g the error shows the difference between the data and its reconstruction. <b>Note:</b> tf.reduce_mean computes the mean of elements across dimensions of a tensor. ``` def error(v0_state, v1_state): return tf.reduce_mean(tf.square(v0_state - v1_state)) err = tf.reduce_mean(tf.square(v0_state - v1_state)) print("error" , err.numpy()) ``` <a id="ref4"></a> <h3>Training the Model</h3> <b>Warning...</b> The following part is math-heavy, but you can skip it if you just want to run the cells in the next section. As mentioned, we want to give a high probability to the input data we train on. So, in order to train an RBM, we have to maximize the product of probabilities assigned to all rows $\mathbf{v}$ (images) in the training set $\mathbf{V}$ (a matrix, where each row of it is treated as a visible vector $\mathbf{v}$) $$\arg \max_W \prod\_{\mathbf{v}\in\mathbf{V}\_T} p(\mathbf{v})$$ which is equivalent to maximizing the expectation of the log probability, given as $$\arg\max_W\left\[ \mathbb{E} \left(\prod\_{\mathbf v\in \mathbf V}\text{log} \left(p(\mathbf v)\right) \right) \right].$$ So, we have to update the weights $W\_{ij}$ to increase $p(\mathbf{v})$ for all $\mathbf{v}$ in our training data during training. So we have to calculate the derivative: $$\frac{\partial \log p(\mathbf v)}{\partial W\_{ij}}$$ This cannot be easily done by typical <b>gradient descent (SGD)</b>, so we can use another approach, which has 2 steps: <ol> <li>Gibbs Sampling</li> <li>Contrastive Divergence</li> </ol> <h3>Gibbs Sampling</h3> <h4>Gibbs Sampling Step 1</h4> Given an input vector $\mathbf{v}$, we are using $p(\mathbf{h}|\mathbf{v})$ to predict the hidden values $\mathbf{h}$. 
$$p({h_j}|\mathbf v)= \sigma\left(\sum_{i=1}^V W_{ij} v_i + h_{bias} \right)$$ The samples are generated from this distribution by generating the uniform random variate vector $\mathbf{\xi} \sim U[0,1]$ of length $H$ and comparing to the computed probabilities as <center>If $\xi_j<p(h_{j}|\mathbf{v})$, then $h_j=1$, else $h_j=0$.</center> <h4>Gibbs Sampling Step 2</h4> Then, knowing the hidden values, we use $p(\mathbf v| \mathbf h)$ for reconstructing new input values $\mathbf v$. $$p({v_i}|\mathbf h)= \sigma\left(\sum\_{j=1}^H W^{T}\_{ij} h_j + v\_{bias} \right)$$ The samples are generated from this distribution by generating a uniform random variate vector $\mathbf{\xi} \sim U\[0,1]$ of length $V$ and comparing to the computed probabilities as <center>If $\xi_i<p(v_{i}|\mathbf{h})$, then $v_i=1$, else $v_i=0$.</center> Let vectors $\mathbf v_k$ and $\mathbf h_k$ be for the $k$th iteration. In general, the $k$th state is generated as: <b>Iteration</b> $k$: $$\mathbf v\_{k-1} \Rightarrow p(\mathbf h\_{k-1}|\mathbf v\_{k-1})\Rightarrow \mathbf h\_{k-1}\Rightarrow p(\mathbf v\_{k}|\mathbf h\_{k-1})\Rightarrow \mathbf v_k$$ <h3>Contrastive Divergence (CD-k)</h3> The update of the weight matrix is done during the Contrastive Divergence step. Vectors v0 and vk are used to calculate the activation probabilities for hidden values h0 and hk. The difference between the outer products of those probabilities with input vectors v0 and vk results in the update matrix: $$\Delta \mathbf W_k =\mathbf v\_{k-1} \otimes \mathbf h\_{k-1} - \mathbf v_k \otimes \mathbf h_k$$ Contrastive Divergence is actually a matrix of values that is computed and used to adjust values of the $\mathbf W$ matrix. Changing $\mathbf W$ incrementally leads to training of the $\mathbf W$ values.
Then, on each step (epoch), $\mathbf W$ is updated using the following: $$\mathbf W_k = \mathbf W\_{k-1} + \alpha \* \Delta \mathbf W_k$$ Reconstruction steps: <ul> <li> Get one data point from data set, like <i>x</i>, and pass it through the following steps:</li> <b>Iteration</b> $k=1$: Sampling (starting with input image) $$\mathbf x = \mathbf v\_0 \Rightarrow p(\mathbf h\_0|\mathbf v\_0)\Rightarrow \mathbf h\_0 \Rightarrow p(\mathbf v\_1|\mathbf h\_0)\Rightarrow \mathbf v\_1$$\ followed by the CD-k step $$\Delta \mathbf W\_1 =\mathbf v\_{0} \otimes \mathbf h\_{0} - \mathbf v\_1 \otimes \mathbf h\_1$$\ $$\mathbf W\_1 = \mathbf W\_{0} + \alpha \* \Delta \mathbf W\_1$$ <li> $\mathbf v_1$ is the reconstruction of $\mathbf x$ (sent to the next iteration).</li> <b>Iteration</b> $k=2$: Sampling (starting with $\mathbf v\_1$) $$\mathbf v\_1 \Rightarrow p(\mathbf h\_1|\mathbf v\_1)\Rightarrow \mathbf h\_1\Rightarrow p(\mathbf v\_2|\mathbf h\_1)\Rightarrow \mathbf v\_2$$ followed by the CD-k step $$\Delta \mathbf W\_2 =\mathbf v\_{1} \otimes \mathbf h\_{1} - \mathbf v\_2 \otimes \mathbf h\_2$$\ $$\mathbf W\_2 = \mathbf W\_{1} + \alpha \* \Delta \mathbf W\_2$$ <li> $\mathbf v_2$ is the reconstruction of $\mathbf v_1$ (sent to the next iteration).</li> <b>Iteration</b> $k=K$: Sampling (starting with $\mathbf v\_{K-1}$) $$\mathbf v\_{K-1} \Rightarrow p(\mathbf h\_{K-1}|\mathbf v\_{K-1})\Rightarrow \mathbf h\_{K-1}\Rightarrow p(\mathbf v_K|\mathbf h\_{K-1})\Rightarrow \mathbf v_K$$ followed by the CD-k step $$\Delta \mathbf W_K =\mathbf v\_{K-1} \otimes \mathbf h\_{K-1} - \mathbf v_K \otimes \mathbf h_K$$\ $$\mathbf W_K = \mathbf W\_{K-1} + \alpha \* \Delta \mathbf W_K$$ <b>What is $\alpha$?</b>\ Here, alpha is some small step size, and is also known as the "learning rate". $K$ is adjustable, and good performance can be achieved with $K=1$, so that we just take one set of sampling steps per image.
``` h1_prob = tf.nn.sigmoid(tf.matmul([v1_state], W) + hb) h1_state = tf.nn.relu(tf.sign(h1_prob - tf.random.uniform(tf.shape(h1_prob)))) #sample_h_given_X ``` Lets look at the error of the first run: ``` print("error: ", error(v0_state, v1_state)) #Parameters alpha = 0.01 epochs = 1 batchsize = 200 weights = [] errors = [] batch_number = 0 K = 1 #creating datasets train_ds = \ tf.data.Dataset.from_tensor_slices((trX, trY)).batch(batchsize) for epoch in range(epochs): for batch_x, batch_y in train_ds: batch_number += 1 for i_sample in range(batchsize): for k in range(K): v0_state = batch_x[i_sample] h0_state = hidden_layer(v0_state, W, hb) v1_state = reconstructed_output(h0_state, W, vb) h1_state = hidden_layer(v1_state, W, hb) delta_W = tf.matmul(tf.transpose([v0_state]), h0_state) - tf.matmul(tf.transpose([v1_state]), h1_state) W = W + alpha * delta_W vb = vb + alpha * tf.reduce_mean(v0_state - v1_state, 0) hb = hb + alpha * tf.reduce_mean(h0_state - h1_state, 0) v0_state = v1_state if i_sample == batchsize-1: err = error(batch_x[i_sample], v1_state) errors.append(err) weights.append(W) print ( 'Epoch: %d' % epoch, "batch #: %i " % batch_number, "of %i" % int(60e3/batchsize), "sample #: %i" % i_sample, 'reconstruction error: %f' % err) ``` Let's take a look at the errors at the end of each batch: ``` plt.plot(errors) plt.xlabel("Batch Number") plt.ylabel("Error") plt.show() ``` What is the final weight matrix $W$ after training? ``` print(W.numpy()) # a weight matrix of shape (50,784) ``` <a id="ref5"></a> <h3>Learned features</h3> We can take each hidden unit and visualize the connections between that hidden unit and each element in the input vector. In our case, we have 50 hidden units. Lets visualize those. Let's plot the current weights: <b>tile_raster_images</b> helps in generating an easy to grasp image from a set of samples or weights. 
It transforms the <b>uw</b> (with one flattened image per row of size 784), into an array (of size $28\times28$) in which images are reshaped and laid out like tiles on a floor. ``` tile_raster_images(X=W.numpy().T, img_shape=(28, 28), tile_shape=(5, 10), tile_spacing=(1, 1)) import matplotlib.pyplot as plt from PIL import Image %matplotlib inline image = Image.fromarray(tile_raster_images(X=W.numpy().T, img_shape=(28, 28) ,tile_shape=(5, 10), tile_spacing=(1, 1))) ### Plot image plt.rcParams['figure.figsize'] = (18.0, 18.0) imgplot = plt.imshow(image) imgplot.set_cmap('gray') ``` Each tile in the above visualization corresponds to a vector of connections between a hidden unit and visible layer's units. Let's look at one of the learned weights corresponding to one of hidden units for example. In this particular square, the gray color represents weight = 0, and the whiter it is, the more positive the weights are (closer to 1). Conversely, the darker pixels are, the more negative the weights. The positive pixels will increase the probability of activation in hidden units (after multiplying by input/visible pixels), and negative pixels will decrease the probability of a unit hidden to be 1 (activated). So, why is this important? So we can see that this specific square (hidden unit) can detect a feature (e.g. a "/" shape) and if it exists in the input. ``` from PIL import Image image = Image.fromarray(tile_raster_images(X =W.numpy().T[10:11], img_shape=(28, 28),tile_shape=(1, 1), tile_spacing=(1, 1))) ### Plot image plt.rcParams['figure.figsize'] = (4.0, 4.0) imgplot = plt.imshow(image) imgplot.set_cmap('gray') ``` Let's look at the reconstruction of an image now. Imagine that we have a destructed image of figure 3. 
Lets see if our trained network can fix it: First we plot the image: ``` !wget -O destructed3.jpg https://ibm.box.com/shared/static/vvm1b63uvuxq88vbw9znpwu5ol380mco.jpg img = Image.open('destructed3.jpg') img ``` Now let's pass this image through the neural net: ``` # convert the image to a 1d numpy array sample_case = np.array(img.convert('I').resize((28,28))).ravel().reshape((1, -1))/255.0 sample_case = tf.cast(sample_case, dtype=tf.float32) ``` Feed the sample case into the network and reconstruct the output: ``` hh0_p = tf.nn.sigmoid(tf.matmul(sample_case, W) + hb) hh0_s = tf.round(hh0_p) print("Probability nodes in hidden layer:" ,hh0_p) print("activated nodes in hidden layer:" ,hh0_s) # reconstruct vv1_p = tf.nn.sigmoid(tf.matmul(hh0_s, tf.transpose(W)) + vb) print(vv1_p) #rec_prob = sess.run(vv1_p, feed_dict={ hh0_s: hh0_s_val, W: prv_w, vb: prv_vb}) ``` Here we plot the reconstructed image: ``` img = Image.fromarray(tile_raster_images(X=vv1_p.numpy(), img_shape=(28, 28),tile_shape=(1, 1), tile_spacing=(1, 1))) plt.rcParams['figure.figsize'] = (4.0, 4.0) imgplot = plt.imshow(img) imgplot.set_cmap('gray') ``` <hr> ## Want to learn more? Also, you can use **Watson Studio** to run these notebooks faster with bigger datasets.**Watson Studio** is IBM’s leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, **Watson Studio** enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of **Watson Studio** users today with a free account at [Watson Studio](https://cocl.us/ML0120EN_DSX).This is the end of this lesson. Thank you for reading this notebook, and good luck on your studies. ### Thanks for completing this lesson! 
Notebook created by: <a href = "https://ca.linkedin.com/in/saeedaghabozorgi?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01">Saeed Aghabozorgi</a> Updated to TF 2.X by <a href="https://ca.linkedin.com/in/nilmeier?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01"> Jerome Nilmeier</a><br /> ### References: [https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine](https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01)\ [http://deeplearning.net/tutorial/rbm.html](http://deeplearning.net/tutorial/rbm.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01)\ [http://www.cs.utoronto.ca/\~hinton/absps/netflixICML.pdf](http://www.cs.utoronto.ca/\~hinton/absps/netflixICML.pdf?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01)<br> <http://imonad.com/rbm/restricted-boltzmann-machine/> <hr> Copyright © 2018 [Cognitive Class](https://cocl.us/DX0108EN_CC). 
This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDL0120ENSkillsNetwork20629446-2021-01-01).
github_jupyter
# Burgers Optimization with a Differentiable Physics Gradient To illustrate the process of computing gradients in a _differentiable physics_ (DP) setting, we target the same inverse problem (the reconstruction task) used for the PINN example in {doc}`physicalloss-code`. The choice of DP as a method has some immediate implications: we start with a discretized PDE, and the evolution of the system is now fully determined by the resulting numerical solver. Hence, the only real unknown is the initial state. We will still need to re-compute all the states between the initial and target state many times, just now we won't need an NN for this step. Instead, we can rely on our discretized model. Also, as we choose an initial discretization for the DP approach, the unknown initial state consists of the sampling points of the involved physical fields, and we can simply represent these unknowns as floating point variables. Hence, even for the initial state we do not need to set up an NN. Thus, our Burgers reconstruction problem reduces to a gradient-based optimization without any NN when solving it with DP. Nonetheless, it's a very good starting point to illustrate the process. First, we'll set up our discretized simulation. Here we can employ phiflow, as shown in the overview section on _Burgers forward simulations_. [[run in colab]](https://colab.research.google.com/github/tum-pbs/pbdl-book/blob/main/diffphys-code-burgers.ipynb) ## Initialization phiflow directly gives us a sequence of differentiable operations, provided that we don't use the _numpy_ backend. The important step here is to include `phi.tf.flow` instad of `phi.flow` (for _pytorch_ you could use `phi.torch.flow`). So, as a first step, let's set up some constants, and initialize a `velocity` field with zeros, and our constraint at $t=0.5$ (step 16), now as a `CenteredGrid` in phiflow. Both are using periodic boundary conditions (via `extrapolation.PERIODIC`) and a spatial discretization of $\Delta x = 1/128$. 
``` #!pip install --upgrade --quiet phiflow from phi.tf.flow import * N = 128 DX = 2/N STEPS = 32 DT = 1/STEPS NU = 0.01/(N*np.pi) # allocate velocity grid velocity = CenteredGrid(0, extrapolation.PERIODIC, x=N, bounds=Box[-1:1]) # and a grid with the reference solution REFERENCE_DATA = math.tensor([0.008612174447657694, 0.02584669669548606, 0.043136357266407785, 0.060491074685516746, 0.07793926183951633, 0.0954779141740818, 0.11311894389663882, 0.1308497114054023, 0.14867023658641343, 0.1665634396808965, 0.18452263429574314, 0.20253084411376132, 0.22057828799835133, 0.23865132431365316, 0.25673879161339097, 0.27483167307082423, 0.2929182325574904, 0.3109944766354339, 0.3290477753208284, 0.34707880794585116, 0.36507311960102307, 0.38303584302507954, 0.40094962955534186, 0.4188235294008765, 0.4366357052408043, 0.45439856841363885, 0.4720845505219581, 0.4897081943759776, 0.5072391070000235, 0.5247011051514834, 0.542067187709797, 0.5593576751669057, 0.5765465453632126, 0.5936507311857876, 0.6106452944663003, 0.6275435911624945, 0.6443221318186165, 0.6609900633731869, 0.67752574922899, 0.6939334022562877, 0.7101938106059631, 0.7263049537163667, 0.7422506131457406, 0.7580207366534812, 0.7736033721649875, 0.7889776974379873, 0.8041371279965555, 0.8190465276590387, 0.8337064887158392, 0.8480617965162781, 0.8621229412131242, 0.8758057344502199, 0.8891341984763013, 0.9019806505391214, 0.9143881632159129, 0.9261597966464793, 0.9373647624856912, 0.9476871303793314, 0.9572273019669029, 0.9654367940878237, 0.9724097482283165, 0.9767381835635638, 0.9669484658390122, 0.659083299684951, -0.659083180712816, -0.9669485121167052, -0.9767382069792288, -0.9724097635533602, -0.9654367970450167, -0.9572273263645859, -0.9476871280825523, -0.9373647681120841, -0.9261598056102645, -0.9143881718456056, -0.9019807055316369, -0.8891341634240081, -0.8758057205293912, -0.8621229450911845, -0.8480618138204272, -0.833706571569058, -0.8190466131476127, -0.8041372124868691, -0.7889777195422356, 
-0.7736033858767385, -0.758020740007683, -0.7422507481169578, -0.7263049162371344, -0.7101938950789042, -0.6939334061553678, -0.677525822052029, -0.6609901538934517, -0.6443222327338847, -0.6275436932970322, -0.6106454472814152, -0.5936507836778451, -0.5765466491708988, -0.5593578078967361, -0.5420672759411125, -0.5247011730988912, -0.5072391580614087, -0.4897082914472909, -0.47208460952428394, -0.4543985995006753, -0.4366355580500639, -0.41882350871539187, -0.40094955631843376, -0.38303594105786365, -0.36507302109186685, -0.3470786936847069, -0.3290476440540586, -0.31099441589505206, -0.2929180880304103, -0.27483158663081614, -0.2567388003912687, -0.2386513127155433, -0.22057831776499126, -0.20253089403524566, -0.18452269630486776, -0.1665634500729787, -0.14867027528284874, -0.13084990929476334, -0.1131191325854089, -0.09547794429803691, -0.07793928430794522, -0.06049114408297565, -0.0431364527809777, -0.025846763281087953, -0.00861212501518312] , math.spatial('x')) SOLUTION_T16 = CenteredGrid( REFERENCE_DATA, extrapolation.PERIODIC, x=N, bounds=Box[-1:1]) ``` We can verify that the fields of our simulation are now backed by TensorFlow. ``` type(velocity.values.native()) ``` ## Gradients The `record_gradients` function of phiflow triggers the generation of a gradient tape to compute gradients of a simulation via `math.gradients(loss, values)`. To use it for the Burgers case we need to specify a loss function: we want the solution at $t=0.5$ to match the reference data. Thus we simply compute an $L^2$ difference between step number 16 and our constraint array as `loss`. Afterwards, we evaluate the gradient of the initial velocity state `velocity` with respect to this loss. 
``` velocities = [velocity] with math.record_gradients(velocity.values): for time_step in range(STEPS): v1 = diffuse.explicit(1.0*velocities[-1], NU, DT, substeps=1) v2 = advect.semi_lagrangian(v1, v1, DT) velocities.append(v2) loss = field.l2_loss(velocities[16] - SOLUTION_T16)*2./N # MSE grad = math.gradients(loss, velocity.values) print('Loss: %f' % (loss)) ``` Because we're only constraining time step 16, we could actually omit steps 17 to 31 in this setup. They don't have any degrees of freedom and are not constrained in any way. However, for fairness regarding a comparison with the previous PINN case, we include them. Note that we've done a lot of calculations here: first the 32 steps of our simulation, and then another 16 steps backwards from the loss. They were recorded by the gradient tape, and used to backpropagate the loss to the initial state of the simulation. Not surprisingly, because we're starting from zero, there's also a significant initial error of ca. 0.38 for the 16th simulation step. So what do we get as a gradient here? It has the same dimensions as the velocity, and we can easily visualize it: Starting from the zero state for `velocity` (shown in blue), the first gradient is shown as a green line below. If you compare it with the solution it points in the opposite direction, as expected. The solution is much larger in magnitude, so we omit it here (see the next graph). 
``` import pylab as plt fig = plt.figure().gca() pltx = np.linspace(-1,1,N) # first gradient fig.plot(pltx, grad.numpy('x') , lw=2, color='green', label="Gradient") fig.plot(pltx, velocity.values.numpy('x'), lw=2, color='mediumblue', label="u at t=0") plt.xlabel('x'); plt.ylabel('u'); plt.legend(); # some (optional) other fields to plot: #fig.plot(pltx, (velocities[16]).values.numpy('x') , lw=2, color='cyan', label="u at t=0.5") #fig.plot(pltx, (SOLUTION_T16).values.numpy('x') , lw=2, color='red', label="solution at t=0.5") #fig.plot(pltx, (velocities[16] - SOLUTION_T16).values.numpy('x') , lw=2, color='blue', label="difference at t=0.5") ``` This gives us a "search direction" for each velocity variable. Based on a linear approximation, the gradient tells us how to change each of them to increase the loss function (gradients _always_ point "upwards"). Thus, we can use the gradient to run an optimization and find an initial state `velocity` that minimizes our loss. ## Optimization Equipped with the gradient we can run a gradient descent optimization. Below, we're using a learning rate of `LR=5`, and we're re-evaluating the loss for the updated state to track convergence. In the following code block, we're additionally saving the gradients in a list called `grads`, such that we can visualize them later on. For a regular optimization we could of course discard the gradient after performing an update of the velocity. ``` LR = 5. 
grads=[] for optim_step in range(5): velocities = [velocity] with math.record_gradients(velocity.values): for time_step in range(STEPS): v1 = diffuse.explicit(1.0*velocities[-1], NU, DT) v2 = advect.semi_lagrangian(v1, v1, DT) velocities.append(v2) loss = field.l2_loss(velocities[16] - SOLUTION_T16)*2./N # MSE print('Optimization step %d, loss: %f' % (optim_step,loss)) grads.append( math.gradients(loss, velocity.values) ) velocity = velocity - LR * grads[-1] ``` Now we can check well the 16th state of the simulation actually matches the target after the 5 update steps. This is what the loss measures, after all. The next graph shows the constraints (i.e. the solution we'd like to obtain) in green, and the reconstructed state after the initial state `velocity` (which we updated five times via the gradient by now) was updated 16 times by the solver. ``` fig = plt.figure().gca() # target constraint at t=0.5 fig.plot(pltx, SOLUTION_T16.values.numpy('x'), lw=2, color='forestgreen', label="Reference") # optimized state of our simulation after 16 steps fig.plot(pltx, velocities[16].values.numpy('x'), lw=2, color='mediumblue', label="Simulated velocity") plt.xlabel('x'); plt.ylabel('u'); plt.legend(); plt.title("After 5 Optimization Steps at t=0.5"); ``` This seems to be going in the right direction! It's definitely not perfect, but we've only computed 5 GD update steps so far. The two peaks with a positive velocity on the left side of the shock and the negative peak on the right side are starting to show. This is a good indicator that the backpropagation of gradients through all of our 16 simulated steps is behaving correctly, and that it's driving the solution in the right direction. The graph above only hints at how powerful the setup is: the gradient that we obtain from each of the simulation steps (and each operation within them) can easily be chained together into more complex sequences. 
In the example above, we're backpropagating through all 16 steps of the simulation, and we could easily enlarge this "look-ahead" of the optimization with minor changes to the code. ## More optimization steps Before moving on to more complex physics simulations, or involving NNs, let's finish the optimization task at hand, and run more steps to get a better solution.

```
import time
start = time.time()

for optim_step in range(45):
    velocities = [velocity]
    with math.record_gradients(velocity.values):
        for time_step in range(STEPS):
            v1 = diffuse.explicit(1.0*velocities[-1], NU, DT)
            v2 = advect.semi_lagrangian(v1, v1, DT)
            velocities.append(v2)
        loss = field.l2_loss(velocities[16] - SOLUTION_T16)*2./N # MSE
        if optim_step%5==0:
            print('Optimization step %d, loss: %f' % (optim_step,loss))
        grad = math.gradients(loss, velocity.values)
    velocity = velocity - LR * grad

end = time.time()
print("Runtime {:.2f}s".format(end-start))
```

Thinking back to the PINN version from {doc}`physicalloss-code`, we have a much lower error here after only 50 steps (by ca. an order of magnitude), and the runtime is also lower (roughly by a factor of 1.5 to 2). This behavior stems from the differentiable solver providing gradients directly through the discretized simulation, which gives the optimization a much more accurate signal than the PINN's indirect formulation. Let's plot again how well our solution at $t=0.5$ (blue) matches the constraints (green) now:

```
fig = plt.figure().gca()
fig.plot(pltx, SOLUTION_T16.values.numpy('x'), lw=2, color='forestgreen', label="Reference")
fig.plot(pltx, velocities[16].values.numpy('x'), lw=2, color='mediumblue', label="Simulated velocity")
plt.xlabel('x'); plt.ylabel('u'); plt.legend(); plt.title("After 50 Optimization Steps at t=0.5");
```

Not bad. But how well is the initial state recovered via backpropagation through the 16 simulation steps? This is what we're changing, and because it's only indirectly constrained via the observation later in time there is more room to deviate from a desired or expected solution.
This is shown in the next plot: ``` fig = plt.figure().gca() pltx = np.linspace(-1,1,N) # ground truth state at time=0 , move down INITIAL_GT = np.asarray( [-np.sin(np.pi * x) for x in np.linspace(-1+DX/2,1-DX/2,N)] ) # 1D numpy array fig.plot(pltx, INITIAL_GT.flatten() , lw=2, color='forestgreen', label="Ground truth initial state") # ground truth initial state of sim fig.plot(pltx, velocity.values.numpy('x'), lw=2, color='mediumblue', label="Optimized initial state") # manual plt.xlabel('x'); plt.ylabel('u'); plt.legend(); plt.title("Initial State After 50 Optimization Steps"); ``` Naturally, this is a tougher task: the optimization receives direct feedback what the state at $t=0.5$ should look like, but due to the non-linear model equation, we typically have a large number of solutions that exactly or numerically very closely satisfy the constraints. Hence, our minimizer does not necessarily find the exact state we started from (we can observe some numerical oscillations from the diffusion operator here with the default settings). However, the solution is still quite close in this Burgers scenario. 
Before measuring the overall error of the reconstruction, let's visualize the full evolution of our system over time as this also yields the solution in the form of a numpy array that we can compare to the other versions: ``` import pylab def show_state(a): a=np.expand_dims(a, axis=2) for i in range(4): a = np.concatenate( [a,a] , axis=2) a = np.reshape( a, [a.shape[0],a.shape[1]*a.shape[2]] ) fig, axes = pylab.subplots(1, 1, figsize=(16, 5)) im = axes.imshow(a, origin='upper', cmap='inferno') pylab.colorbar(im) # get numpy versions of all states vels = [ x.values.numpy('x,vector') for x in velocities] # concatenate along vector/features dimension vels = np.concatenate(vels, axis=-1) # save for comparison with other methods import os; os.makedirs("./temp",exist_ok=True) np.savez_compressed("./temp/burgers-diffphys-solution.npz", np.reshape(vels,[N,STEPS+1])) # remove batch & channel dimension show_state(vels) ``` ## Physics-informed vs. differentiable physics reconstruction Now we have both versions, the one with the PINN, and the DP version, so let's compare both reconstructions in more detail. (Note: The following cells expect that the Burgers-forward and PINN notebooks were executed in the same environment beforehand.) Let's first look at the solutions side by side. The code below generates an image with 3 versions, from top to bottom: the "ground truth" (GT) solution as given by the regular forward simulation, in the middle the PINN reconstruction, and at the bottom the differentiable physics version. ``` # note, this requires previous runs of the forward-sim & PINN notebooks in the same environment sol_gt=npfile=np.load("./temp/burgers-groundtruth-solution.npz")["arr_0"] sol_pi=npfile=np.load("./temp/burgers-pinn-solution.npz")["arr_0"] sol_dp=npfile=np.load("./temp/burgers-diffphys-solution.npz")["arr_0"] divider = np.ones([10,33])*-1. 
# we'll sneak in a block of -1s to show a black divider in the image sbs = np.concatenate( [sol_gt, divider, sol_pi, divider, sol_dp], axis=0) print("\nSolutions Ground Truth (top), PINN (middle) , DiffPhys (bottom):") show_state(np.reshape(sbs,[N*3+20,33,1])) ``` It's quite clearly visible here that the PINN solution (in the middle) recovers the overall shape of the solution, hence the temporal constraints are at least partially fulfilled. However, it doesn't manage to capture the amplitudes of the GT solution very well. The reconstruction from the optimization with a differentiable solver (at the bottom) is much closer to the ground truth thanks to an improved flow of gradients over the whole course of the sequence. In addition, it can leverage the grid-based discretization for both forward as well as backward passes, and in this way provide a more accurate signal to the unknown initial state. It is nonetheless visible that the reconstruction lacks certain "sharper" features of the GT version, e.g., visible in the bottom left corner of the solution image. Let's quantify these errors over the whole sequence: ``` err_pi = np.sum( np.abs(sol_pi-sol_gt)) / (STEPS*N) err_dp = np.sum( np.abs(sol_dp-sol_gt)) / (STEPS*N) print("MAE PINN: {:7.5f} \nMAE DP: {:7.5f}".format(err_pi,err_dp)) print("\nError GT to PINN (top) , DiffPhys (bottom):") show_state(np.reshape( np.concatenate([sol_pi-sol_gt, divider, sol_dp-sol_gt],axis=0) ,[N*2+10,33,1])) ``` That's a pretty clear result: the PINN error is almost 4 times higher than the one from the Differentiable Physics (DP) reconstruction. This difference also shows clearly in the jointly visualized image at the bottom: the magnitudes of the errors of the DP reconstruction are much closer to zero, as indicated by the purple color above. A simple direct reconstruction problem like this one is always a good initial test for a DP solver. 
It can be tested independently before moving on to more complex setups, e.g., coupling it with an NN. If the direct optimization does not converge, there's probably still something fundamentally wrong, and there's no point involving an NN. Now we have a first example to show similarities and differences of the two approaches. In the next section, we'll present a discussion of the findings so far, before moving to more complex cases in the following chapter. ## Next steps As with the PINN version, there's a variety of things that can be improved and experimented with using the code above: * You can try to adjust the training parameters to further improve the reconstruction. * As for the PINN case, you can activate a different optimizer, and observe the changing (not necessarily improved) convergence behavior. * Vary the number of steps, or the resolution of the simulation and reconstruction.
github_jupyter
``` # Automatically reload imported modules that are changed outside this notebook %load_ext autoreload %autoreload 2 # More pixels in figures import matplotlib.pyplot as plt %matplotlib inline plt.rcParams["figure.dpi"] = 200 # Init PRNG with fixed seed for reproducibility import numpy as np np_rng = np.random.default_rng(1) import tensorflow as tf tf.random.set_seed(np_rng.integers(0, tf.int64.max)) ``` # Common Voice spoken language identification with a neural network **2020-11-08** This example is a thorough, but simple walk-through on how to do everything from loading mp3-files containing speech to preprocessing and transforming the speech data into something we can feed to a neural network classifier. Deep learning based speech analysis is a vast research topic and there are countless techniques that could possibly be applied to improve the results of this example. This example tries to avoid going into too much detail into these techniques and instead focuses on getting an end-to-end classification pipeline up and running with a small dataset. ## Data This example uses open speech data downloaded from the [Mozilla Common Voice](https://commonvoice.mozilla.org/en/datasets) project. See the readme file for downloading the data. In addition to the space needed for the downloaded data, you will need at least 10 GiB of free disk space for caching (can be disabled). ``` import urllib.parse from IPython.display import display, Markdown languages = """ et mn ta tr """.split() languages = sorted(l.strip() for l in languages) display(Markdown("### Languages")) display(Markdown('\n'.join("* `{}`".format(l) for l in languages))) bcp47_validator_url = 'https://schneegans.de/lv/?tags=' display(Markdown("See [this tool]({}) for a description of the BCP-47 language codes." .format(bcp47_validator_url + urllib.parse.quote('\n'.join(languages))))) ``` ## Loading the metadata We start by preprocessing the Common Voice metadata files. 
Update `datadir` and `workdir` to match your setup. All output will be written to `workdir`. ``` import os workdir = "/data/exp/cv4" datadir = "/mnt/data/speech/common-voice/downloads/2020/cv-corpus" print("work dir:", workdir) print("data source dir:", datadir) os.makedirs(workdir, exist_ok=True) assert os.path.isdir(datadir), datadir + " does not exist" ``` Common Voice metadata is distributed as `tsv` files and all audio samples are mp3-files under `clips`. ``` dirs = sorted((f for f in os.scandir(datadir) if f.is_dir()), key=lambda f: f.name) print(datadir) for d in dirs: if d.name in languages: print(' ', d.name) for f in os.scandir(d): print(' ', f.name) missing_languages = set(languages) - set(d.name for d in dirs) assert missing_languages == set(), "missing languages: {}".format(missing_languages) ``` There's plenty of metadata, but it seems that the train-dev-test split has been predefined so lets use that. [pandas](https://pandas.pydata.org/pandas-docs/stable/index.html) makes it easy to read, filter, and manipulate metadata in tables. Lets try to preprocess all metadata here so we don't have to worry about it later. ``` import pandas as pd from IPython.display import display, Markdown # Lexicographic order of labels as a fixed index target to label mapping target2lang = tuple(sorted(languages)) lang2target = {lang: target for target, lang in enumerate(target2lang)} print("lang2target:", lang2target) print("target2lang:", target2lang) def expand_metadata(row): """ Update dataframe row by generating a unique utterance id, expanding the absolute path to the mp3 file, and adding an integer target for the label. 
""" row.id = "{:s}_{:s}".format( row.path.split(".mp3", 1)[0].split("common_voice_", 1)[1], row.split) row.path = os.path.join(datadir, row.lang, "clips", row.path) row.target = lang2target[row.lang] return row def tsv_to_lang_dataframe(lang, split): """ Given a language and dataset split (train, dev, test), load the Common Voice metadata tsv-file from disk into a pandas.DataFrame. Preprocess all rows by dropping unneeded columns and adding new metadata. """ df = pd.read_csv( os.path.join(datadir, lang, split + ".tsv"), sep='\t', # We only need these columns from the metadata usecols=("client_id", "path", "sentence")) # Add language label as column df.insert(len(df.columns), "lang", lang) # Add split name to every row for easier filtering df.insert(len(df.columns), "split", split) # Add placeholders for integer targets and utterance ids generated row-wise df.insert(len(df.columns), "target", -1) df.insert(len(df.columns), "id", "") # Create new metadata columns df = df.transform(expand_metadata, axis=1) return df split_names = ("train", "dev", "test") # Concatenate metadata for all 4 languages into a single table for each split splits = [pd.concat([tsv_to_lang_dataframe(lang, split) for lang in target2lang]) for split in split_names] # Concatenate split metadata into a single table, indexed by utterance ids meta = (pd.concat(splits) .set_index("id", drop=True, verify_integrity=True) .sort_index()) del splits for split in split_names: display(Markdown("### " + split)) display(meta[meta["split"]==split]) ``` ### Checking that all splits are disjoint by speaker To ensure our neural network will learn what language is being spoken and not who is speaking, we want to test it on data that does not have any voices present in the training data. The `client_id` should correspond to a unique, pseudonymized identifier for every speaker. Lets check all splits are disjoint by speaker id. 
``` def assert_splits_disjoint_by_speaker(meta): split2spk = {split: set(meta[meta["split"]==split].client_id.to_numpy()) for split in split_names} for split, spk in split2spk.items(): print("split {} has {} speakers".format(split, len(spk))) print() print("asserting all are disjoint") assert split2spk["train"] & split2spk["test"] == set(), "train and test, mutual speakers" assert split2spk["train"] & split2spk["dev"] == set(), "train and dev, mutual speakers" assert split2spk["dev"] & split2spk["test"] == set(), "dev and test, mutual speakers" print("ok") assert_splits_disjoint_by_speaker(meta) ``` We can see that none of the speakers are in two or more dataset splits. We also see that the test set has a lot of unique speakers who are not in the training set. This is good because we want to test that our neural network classifier knows how to classify input from unknown speakers. ### Checking that all audio files exist ``` for uttid, row in meta.iterrows(): assert os.path.exists(row["path"]), row["path"] + " does not exist" print("ok") ``` ## Balancing the language distribution Lets see how many samples we have per language. ``` import seaborn as sns sns.set(rc={'figure.figsize': (8, 6)}) ax = sns.countplot( x="split", order=split_names, hue="lang", hue_order=target2lang, data=meta) ax.set_title("Total amount of audio samples") plt.show() ``` We can see that the amount of samples with Mongolian, Tamil, and Turkish speech are quite balanced, but we have significantly larger amounts of Estonian speech. More data is of course always better, but if there is too much of one label compared to the others, our neural network might overfit on this label. But these are only the counts of audio files, how much speech do we have in total per language? We need to read every file to get a reliable answer. See also [SoX](http://sox.sourceforge.net/Main/HomePage) for a good command line tool. 
``` import miniaudio meta["duration"] = np.array([ miniaudio.mp3_get_file_info(path).duration for path in meta.path], np.float32) meta def plot_duration_distribution(data): sns.set(rc={'figure.figsize': (8, 6)}) ax = sns.boxplot( x="split", order=split_names, y="duration", hue="lang", hue_order=target2lang, data=data) ax.set_title("Median audio file duration in seconds") plt.show() ax = sns.barplot( x="split", order=split_names, y="duration", hue="lang", hue_order=target2lang, data=data, ci=None, estimator=np.sum) ax.set_title("Total amount of audio in seconds") plt.show() plot_duration_distribution(meta) ``` The median length of Estonian samples is approx. 2.5 seconds greater compared to Turkish samples, which have the shortest median length. We can also see that the total amount of Estonian speech is much larger compared to other languages in our datasets. Notice also the significant amount of outliers with long durations in the Tamil and Turkish datasets. Lets do simple random oversampling for the training split using this approach: 1. Select the target language according to maximum total amount of speech in seconds (Estonian). 2. Compute differences in total durations between the target language and the three other languages. 3. Compute median signal length by language. 4. Compute sample sizes by dividing the duration deltas with median signal lengths, separately for each language. 5. Draw samples with replacement from the metadata separately for each language. 6. Merge samples with rest of the metadata and verify there are no duplicate ids. 
``` def random_oversampling(meta): groupby_lang = meta[["lang", "duration"]].groupby("lang") total_dur = groupby_lang.sum() target_lang = total_dur.idxmax()[0] print("target lang:", target_lang) print("total durations:") display(total_dur) total_dur_delta = total_dur.loc[target_lang] - total_dur print("total duration delta to target lang:") display(total_dur_delta) median_dur = groupby_lang.median() print("median durations:") display(median_dur) sample_sizes = (total_dur_delta / median_dur).astype(np.int32) print("median duration weighted sample sizes based on total duration differences:") display(sample_sizes) samples = [] for lang in groupby_lang.groups: sample_size = sample_sizes.loc[lang][0] sample = (meta[meta["lang"]==lang] .sample(n=sample_size, replace=True, random_state=np_rng.bit_generator) .reset_index() .transform(update_sample_id, axis=1)) samples.append(sample) return pd.concat(samples).set_index("id", drop=True, verify_integrity=True) def update_sample_id(row): row["id"] = "{}_copy_{}".format(row["id"], row.name) return row # Augment training set metadata meta = pd.concat([random_oversampling(meta[meta["split"]=="train"]), meta]).sort_index() assert not meta.isna().any(axis=None), "NaNs in metadata after augmentation" plot_duration_distribution(meta) assert_splits_disjoint_by_speaker(meta) meta ``` Speech data augmentation is a common research topic. There are [better](https://www.isca-speech.org/archive/interspeech_2015/papers/i15_3586.pdf) ways to augment data than the simple duplication of metadata rows we did here. One approach (which we won't be doing here) which is easy to implement and might work well is to take copies of signals and make them randomly a bit faster or slower. For example, draw randomly speed ratios from `[0.9, 1.1]` and resample the signal by multiplying its sample rate with the random ratio. ## Inspecting the audio Lets take a look at the speech data and listen to a few randomly picked samples from each label. 
We pick 2 random samples for each language from the training set. ``` samples = (meta[meta["split"]=="train"] .groupby("lang") .sample(n=2, random_state=np_rng.bit_generator)) samples ``` Then lets read the mp3-files from disk, plot the signals, and listen to the audio. ``` from IPython.display import display, Audio, HTML import scipy.signal def read_mp3(path, resample_rate=16000): if isinstance(path, bytes): # If path is a tf.string tensor, it will be in bytes path = path.decode("utf-8") f = miniaudio.mp3_read_file_f32(path) # Downsample to target rate, 16 kHz is commonly used for speech data new_len = round(len(f.samples) * float(resample_rate) / f.sample_rate) signal = scipy.signal.resample(f.samples, new_len) # Normalize to [-1, 1] signal /= np.abs(signal).max() return signal, resample_rate def embed_audio(signal, rate): display(Audio(data=signal, rate=rate, embed=True, normalize=False)) def plot_signal(data, figsize=(6, 0.5), **kwargs): ax = sns.lineplot(data=data, lw=0.1, **kwargs) ax.set_axis_off() ax.margins(0) plt.gcf().set_size_inches(*figsize) plt.show() def plot_separator(): display(HTML(data="<hr style='border: 2px solid'>")) for sentence, lang, clip_path in samples[["sentence", "lang", "path"]].to_numpy(): signal, rate = read_mp3(clip_path) plot_signal(signal) print("length: {} sec".format(signal.size / rate)) print("lang:", lang) print("sentence:", sentence) embed_audio(signal, rate) plot_separator() ``` One of the most challenging aspects of the Mozilla Common Voice dataset is that the audio quality varies greatly: different microphones, background noise, user is speaking close to the device or far away etc. It is difficult to ensure that a neural network will learn to classify different languages as opposed to classifying distinct acoustic artefacts from specific microphones. There's a [vast amount of research](https://www.isca-speech.org/archive/Interspeech_2020/) being done on developing techniques for solving these kind of problems. 
However, these are well out of scope for this simple example and we won't be studying them here. ## Spectral representations It is usually not possible (at least not yet in 2020) to detect languages directly from the waveform. Instead, the [fast Fourier transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) (FFT) is applied on small, overlapping windows of the signal to get a 2-dimensional representation of energies in different frequency bands. See [this](https://wiki.aalto.fi/display/ITSP/Spectrogram+and+the+STFT) for further details. However, output from the FFT is usually not usable directly and must be refined. Let's begin by selecting the first signal from our random sample and extracting the power spectrogram. ### Power spectrogram ``` from lidbox.features.audio import spectrograms def plot_spectrogram(S, cmap="viridis", figsize=None, **kwargs): if figsize is None: figsize = S.shape[0]/50, S.shape[1]/50 ax = sns.heatmap(S.T, cbar=False, cmap=cmap, **kwargs) ax.invert_yaxis() ax.set_axis_off() ax.margins(0) plt.gcf().set_size_inches(*figsize) plt.show() sample = samples[["sentence", "lang", "path"]].to_numpy()[0] sentence, lang, clip_path = sample signal, rate = read_mp3(clip_path) plot_signal(signal) powspec = spectrograms([signal], rate)[0] plot_spectrogram(powspec.numpy()) ``` This representation is very sparse, with zeros everywhere except in the lowest frequency bands. The main problem here is that relative differences between energy values are very large, making it difficult to compare large changes in energy. These differences can be reduced by mapping the values onto a logarithmic scale. The [decibel-scale](https://en.wikipedia.org/wiki/Decibel) is a common choice. We will use the maximum value of `powspec` as the reference power ($\text{P}_0$). 
### Decibel-scale spectrogram ``` from lidbox.features.audio import power_to_db dbspec = power_to_db([powspec])[0] plot_spectrogram(dbspec.numpy()) ``` This is an improvement, but the representation is still rather sparse. We also see that most speech information is in the lower bands, with a bit of energy in the higher frequencies. A common approach is to "squeeze together" the y-axis of all frequency bands by using a different scale, such as the [Mel-scale](https://en.wikipedia.org/wiki/Mel_scale). Lets "squeeze" the current 256 frequency bins into 40 Mel-bins. ### Log-scale Mel-spectrogram **Note** that we are scaling different things here. The Mel-scale warps the frequency bins (y-axis), while the logarithm is used to reduce relative differences between individual spectrogram values (pixels). ``` from lidbox.features.audio import linear_to_mel def logmelspectrograms(signals, rate): powspecs = spectrograms(signals, rate) melspecs = linear_to_mel(powspecs, rate, num_mel_bins=40) return tf.math.log(melspecs + 1e-6) logmelspec = logmelspectrograms([signal], rate)[0] plot_spectrogram(logmelspec.numpy()) ``` One common normalization technique is frequency channel standardization, i.e. normalization of rows to zero mean and unit variance. ``` from lidbox.features import cmvn logmelspec_mv = cmvn([logmelspec])[0] plot_spectrogram(logmelspec_mv.numpy()) ``` Or only mean-normalization if you think the variances contain important information. ``` logmelspec_m = cmvn([logmelspec], normalize_variance=False)[0] plot_spectrogram(logmelspec_m.numpy()) ``` ## Cepstral representations Another common representation are the Mel-frequency cepstral coefficients (MFCC), which are obtained by applying the [discrete cosine transform](https://en.wikipedia.org/wiki/Discrete_cosine_transform) on the log-scale Mel-spectrogram. 
### MFCC ``` def plot_cepstra(X, figsize=None): if not figsize: figsize = (X.shape[0]/50, X.shape[1]/20) plot_spectrogram(X, cmap="RdBu_r", figsize=figsize) mfcc = tf.signal.mfccs_from_log_mel_spectrograms([logmelspec])[0] plot_cepstra(mfcc.numpy()) ``` Most of the information is concentrated in the lower coefficients. It is common to drop the 0th coefficient and select a subset starting at 1, e.g. 1 to 20. See [this post](http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/) for more details. ``` mfcc = mfcc[:,1:21] plot_cepstra(mfcc.numpy()) ``` Now we have a very compact representation, but most of the variance is still in the lower coefficients and overshadows the smaller changes in higher coefficients. We can normalize the MFCC matrix row-wise by standardizing each row to zero mean and unit variance. This is commonly called cepstral mean and variance normalization (CMVN). ### MFCC + CMVN ``` mfcc_cmvn = cmvn([mfcc])[0] plot_cepstra(mfcc_cmvn.numpy()) ``` ### Which one is best? Speech feature extraction is a large, active research topic and it is impossible to choose one representation that would work well in all situations. Common choices in state-of-the-art spoken language identification are log-scale Mel-spectrograms and MFCCs, with different normalization approaches. For example, [here](https://github.com/swshon/dialectID_e2e) is an experiment in Arabic dialect identification, where log-scale Mel-spectra (referred to as FBANK) produced slightly better results compared to MFCCs. It is not obvious when to choose which representation, or if we should even use the FFT at all. You can read [this post](https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html) for a more detailed discussion. ## Voice activity detection It is common for speech datasets to contain audio samples with short segments of silence or sounds that are not speech. 
Since these are usually irrelevant for making a language classification decision, we would prefer to discard such segments. This is called voice activity detection (VAD) and it is another large, active research area. [Here](https://wiki.aalto.fi/pages/viewpage.action?pageId=151500905) is a brief overview of VAD. Non-speech segments can be either noise or silence. Separating non-speech noise from speech is non-trivial but possible, for example with [neural networks](https://www.isca-speech.org/archive/Interspeech_2019/pdfs/1354.pdf). Silence, on the other hand, shows up as zeros in our speech representations, since these segments contain lower energy values compared to segments with speech. Such non-speech segments are therefore easy to detect and discard, for example by comparing the energy of the segment to the average energy of the whole sample. If the samples in our example do not contain much background noise, a simple energy-based VAD technique should be enough to drop all silent segments. We'll use the [root mean square](https://en.wikipedia.org/wiki/Root_mean_square) (RMS) energy to detect short silence segments. `lidbox` has a simple energy-based VAD function, which we will use as follows: 1. Divide the signal into non-overlapping 10 ms long windows. 2. Compute RMS of each window. 3. Reduce all window RMS values by averaging to get a single mean RMS value. 4. Set a decision threshold at 0.1 for marking silence windows. In other words, if the window RMS is less than 0.1 of the mean RMS, mark the window as silence. 
``` from lidbox.features.audio import framewise_rms_energy_vad_decisions import matplotlib.patches as patches sentence, lang, clip_path = sample signal, rate = read_mp3(clip_path) window_ms = tf.constant(10, tf.int32) window_frame_length = (window_ms * rate) // 1000 # Get binary VAD decisions for each 10 ms window vad_1 = framewise_rms_energy_vad_decisions( signal=signal, sample_rate=rate, frame_step_ms=window_ms, strength=0.1) # Plot unfiltered signal sns.set(rc={'figure.figsize': (6, 0.5)}) ax = sns.lineplot(data=signal, lw=0.1, legend=None) ax.set_axis_off() ax.margins(0) # Plot shaded area over samples marked as not speech (VAD == 0) for x, is_speech in enumerate(vad_1.numpy()): if not is_speech: rect = patches.Rectangle( (x*window_frame_length, -1), window_frame_length, 2, linewidth=0, color='gray', alpha=0.2) ax.add_patch(rect) plt.show() print("lang:", lang) print("sentence: '{}'".format(sentence)) embed_audio(signal, rate) # Partition the signal into 10 ms windows to match the VAD decisions windows = tf.signal.frame(signal, window_frame_length, window_frame_length) # Filter signal with VAD decision == 1 (remove gray areas) filtered_signal = tf.reshape(windows[vad_1], [-1]) plot_signal(filtered_signal) print("dropped {:d} out of {:d} frames, leaving {:.3f} of the original signal".format( signal.shape[0] - filtered_signal.shape[0], signal.shape[0], filtered_signal.shape[0]/signal.shape[0])) embed_audio(filtered_signal, rate) ``` The filtered signal has less silence, but some of the pauses between words sound too short and unnatural. We would prefer not to remove small pauses that normally occur between words, so lets say all pauses shorter than 300 ms should not be filtered out. Lets also move all VAD code into a function. 
``` def remove_silence(signal, rate): window_ms = tf.constant(10, tf.int32) window_frames = (window_ms * rate) // 1000 # Get binary VAD decisions for each 10 ms window vad_1 = framewise_rms_energy_vad_decisions( signal=signal, sample_rate=rate, frame_step_ms=window_ms, # Do not return VAD = 0 decisions for sequences shorter than 300 ms min_non_speech_ms=300, strength=0.1) # Partition the signal into 10 ms windows to match the VAD decisions windows = tf.signal.frame(signal, window_frames, window_frames) # Filter signal with VAD decision == 1 return tf.reshape(windows[vad_1], [-1]) sentence, lang, clip_path = sample signal, rate = read_mp3(clip_path) filtered_signal = remove_silence(signal, rate) plot_signal(filtered_signal) print("dropped {:d} out of {:d} frames, leaving {:.3f} of the original signal".format( signal.shape[0] - filtered_signal.shape[0], signal.shape[0], filtered_signal.shape[0]/signal.shape[0])) print("lang:", lang) print("sentence: '{}'".format(sentence)) embed_audio(filtered_signal, rate) ``` We dropped some silence segments but left most of the speech intact, perhaps this is enough for our example. Although this VAD approach is simple and works ok for our data, it will not work for speech data with non-speech sounds in the background like music or noise. For such data we might need more powerful VAD filters such as neural networks that have been trained on a speech vs non-speech classification task with large amounts of different noise. But lets not add more complexity to our example. We'll use the RMS based filter for all other signals too. ## Comparison of representations Lets extract these features for all signals in our random sample. 
``` for sentence, lang, clip_path in samples[["sentence", "lang", "path"]].to_numpy(): signal_before_vad, rate = read_mp3(clip_path) signal = remove_silence(signal_before_vad, rate) logmelspec = logmelspectrograms([signal], rate)[0] logmelspec_mvn = cmvn([logmelspec], normalize_variance=False)[0] mfcc = tf.signal.mfccs_from_log_mel_spectrograms([logmelspec])[0] mfcc = mfcc[:,1:21] mfcc_cmvn = cmvn([mfcc])[0] plot_width = logmelspec.shape[0]/50 plot_signal(signal.numpy(), figsize=(plot_width, .6)) print("VAD: {} -> {} sec".format( signal_before_vad.size / rate, signal.numpy().size / rate)) print("lang:", lang) print("sentence:", sentence) embed_audio(signal.numpy(), rate) plot_spectrogram(logmelspec_mvn.numpy(), figsize=(plot_width, 1.2)) plot_cepstra(mfcc_cmvn.numpy(), figsize=(plot_width, .6)) plot_separator() ``` ## Loading the samples to a `tf.data.Dataset` iterator Our dataset is relatively small (2.5 GiB) and we might be able to read all files into signals and keep them in main memory. However, most speech datasets are much larger due to the amount of data needed for training neural network models that would be of any practical use. We need some kind of lazy iteration or streaming solution that views only one part of the dataset at a time. One such solution is to represent the dataset as a [TensorFlow iterator](https://www.tensorflow.org/api_docs/python/tf/data/Dataset), which evaluates its contents only when they are needed, similar to the [MapReduce](https://en.wikipedia.org/wiki/MapReduce) programming model for big data. The downside with lazy iteration or streaming is that we lose the capability of doing random access by row id. However, this shouldn't be a problem since we can always keep the whole metadata table in memory and do random access on its rows whenever needed. 
Another benefit of TensorFlow dataset iterators is that we can map arbitrary [`tf.function`](https://www.tensorflow.org/api_docs/python/tf/function)s over the dataset and TensorFlow will automatically parallelize the computations and place them on different devices, such as the GPU. The core architecture of `lidbox` has been organized around the `tf.data.Dataset` API, leaving all the heavy lifting for TensorFlow to handle. But before we load all our speech data, lets warmup with our small random sample of 8 rows. ``` samples ``` Lets load it into a `tf.data.Dataset`. ``` def metadata_to_dataset_input(meta): # Create a mapping from column names to all values under the column as tensors return { "id": tf.constant(meta.index, tf.string), "path": tf.constant(meta.path, tf.string), "lang": tf.constant(meta.lang, tf.string), "target": tf.constant(meta.target, tf.int32), "split": tf.constant(meta.split, tf.string), } sample_ds = tf.data.Dataset.from_tensor_slices(metadata_to_dataset_input(samples)) sample_ds ``` All elements produced by the `Dataset` iterator are `dict`s of (string, Tensor) pairs, where the string denotes the metadata type. Although the `Dataset` object is primarily for automating large-scale data processing pipelines, it is easy to extract all elements as `numpy`-values: ``` for x in sample_ds.as_numpy_iterator(): display(x) ``` ### Reading audio files Lets load the signals by [mapping](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map) a file reading function for each element over the whole dataset. We'll add a `tf.data.Dataset` function wrapper on top of `read_mp3`, which we defined earlier. TensorFlow will infer the input and output values of the wrapper as tensors from the type signature of dataset elements. We must use `tf.numpy_function` if we want to allow calling the non-TensorFlow function `read_mp3` also from inside the graph environment. 
It might not be as efficient as using TensorFlow ops but reading a file would have a lot of latency anyway so this is not such a big hit for performance. Besides, we can always hide the latency by reading several files in parallel. ``` def read_mp3_wrapper(x): signal, sample_rate = tf.numpy_function( # Function read_mp3, # Argument list [x["path"]], # Return value types [tf.float32, tf.int64]) return dict(x, signal=signal, sample_rate=tf.cast(sample_rate, tf.int32)) for x in sample_ds.map(read_mp3_wrapper).as_numpy_iterator(): print("id: {}".format(x["id"].decode("utf-8"))) print("signal.shape: {}, sample rate: {}".format(x["signal"].shape, x["sample_rate"])) print() ``` ### Removing silence and extracting features Organizing all preprocessing steps as functions that can be mapped over the `Dataset` object allows us to represent complex transformations easily. ``` def remove_silence_wrapper(x): return dict(x, signal=remove_silence(x["signal"], x["sample_rate"])) def batch_extract_features(x): with tf.device("GPU"): signals, rates = x["signal"], x["sample_rate"] logmelspecs = logmelspectrograms(signals, rates[0]) logmelspecs_smn = cmvn(logmelspecs, normalize_variance=False) mfccs = tf.signal.mfccs_from_log_mel_spectrograms(logmelspecs) mfccs = mfccs[...,1:21] mfccs_cmvn = cmvn(mfccs) return dict(x, logmelspec=logmelspecs_smn, mfcc=mfccs_cmvn) features_ds = (sample_ds.map(read_mp3_wrapper) .map(remove_silence_wrapper) .batch(1) .map(batch_extract_features) .unbatch()) for x in features_ds.as_numpy_iterator(): print(x["id"]) for k in ("signal", "logmelspec", "mfcc"): print("{}.shape: {}".format(k, x[k].shape)) print() ``` ### Inspecting dataset contents in TensorBoard `lidbox` has a helper function for dumping element information into [`TensorBoard`](https://www.tensorflow.org/tensorboard) summaries. This converts all 2D features into images, writes signals as audio summaries, and extracts utterance ids. 
``` import lidbox.data.steps as ds_steps cachedir = os.path.join(workdir, "cache") _ = ds_steps.consume_to_tensorboard( # Rename logmelspec as 'input', these will be plotted as images ds=features_ds.map(lambda x: dict(x, input=x["logmelspec"])), summary_dir=os.path.join(cachedir, "tensorboard", "data", "sample"), config={"batch_size": 1, "image_size_multiplier": 4}) ``` Open a terminal and launch TensorBoard to view the summaries written to `$wrkdir/cache/tensorboard/dataset/sample`: ``` tensorboard --logdir /data/exp/cv4/cache/tensorboard ``` Then open the url in a browser and inspect the contents. You can leave the server running, since we'll log the training progress to the same directory. ## Loading all data We'll now begin loading everything from disk and preparing a pipeline from mp3-filepaths to neural network input. We'll use the autotune feature of `tf.data` to allow TensorFlow figure out automatically how much of the pipeline should be split up into parallel calls. ``` import lidbox.data.steps as ds_steps TF_AUTOTUNE = tf.data.experimental.AUTOTUNE def signal_is_not_empty(x): return tf.size(x["signal"]) > 0 def pipeline_from_metadata(data, shuffle=False): if shuffle: # Shuffle metadata to get an even distribution of labels data = data.sample(frac=1, random_state=np_rng.bit_generator) ds = ( # Initialize dataset from metadata tf.data.Dataset.from_tensor_slices(metadata_to_dataset_input(data)) # Read mp3 files from disk in parallel .map(read_mp3_wrapper, num_parallel_calls=TF_AUTOTUNE) # Apply RMS VAD to drop silence from all signals .map(remove_silence_wrapper, num_parallel_calls=TF_AUTOTUNE) # Drop signals that VAD removed completely .filter(signal_is_not_empty) # Extract features in parallel .batch(1) .map(batch_extract_features, num_parallel_calls=TF_AUTOTUNE) .unbatch() ) return ds # Mapping from dataset split names to tf.data.Dataset objects split2ds = { split: pipeline_from_metadata(meta[meta["split"]==split], shuffle=split=="train") for split in 
split_names } ``` ### Testing pipeline performance Note that we only constructed the pipeline with all steps we want to compute. All TensorFlow ops are computed only when elements are requested from the iterator. Lets iterate over the training dataset from first to last element to ensure the pipeline will not be a performance bottleneck during training. ``` _ = ds_steps.consume(split2ds["train"], log_interval=2000) ``` ### Caching pipeline state We can [cache](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#cache) the iterator state as a single binary file at arbitrary stages. This allows us to automatically skip all steps that precede the call to `tf.Dataset.cache`. Lets cache the training dataset and iterate again over all elements to fill the cache. **Note** that you will still be storing all data on the disk (4.6 GiB new data), so this optimization is a space-time tradeoff. ``` os.makedirs(os.path.join(cachedir, "data")) split2ds["train"] = split2ds["train"].cache(os.path.join(cachedir, "data", "train")) _ = ds_steps.consume(split2ds["train"], log_interval=2000) ``` If we iterate over the dataset again, TensorFlow should read all elements from the cache file. ``` _ = ds_steps.consume(split2ds["train"], log_interval=2000) ``` As a side note, if your training environment has fast read-write access to a file system configured for reading and writing very large files, this optimization can be a very significant performance improvement. **Note** also that all usual problems related to cache invalidation apply. When caching extracted features and metadata to disk, be extra careful in your experiments to ensure you are not interpreting results computed on data from some outdated cache. ### Dumping a few batches to TensorBoard Lets extract 100 first elements of every split to TensorBoard. 
``` for split, ds in split2ds.items(): _ = ds_steps.consume_to_tensorboard( ds.map(lambda x: dict(x, input=x["logmelspec"])), os.path.join(cachedir, "tensorboard", "data", split), {"batch_size": 1, "image_size_multiplier": 2, "num_batches": 100}, exist_ok=True) ``` ## Training a supervised, neural network language classifier We have now configured an efficient data pipeline and extracted some data samples to summary files for TensorBoard. It is time to train a classifier on the data. ### Drop metadata from dataset During training, we only need a tuple of model input and targets. We can therefore drop everything else from the dataset elements just before training starts. This is also a good place to decide if we want to train on MFCCs or Mel-spectra. ``` model_input_type = "logmelspec" def as_model_input(x): return x[model_input_type], x["target"] train_ds_demo = list(split2ds["train"] .map(as_model_input) .shuffle(100) .take(6) .as_numpy_iterator()) for input, target in train_ds_demo: print(input.shape, target2lang[target]) if model_input_type == "mfcc": plot_cepstra(input) else: plot_spectrogram(input) plot_separator() ``` ### Asserting all input is valid Since the training dataset is cached, we can quickly iterate over all elements and check that we don't have any NaNs or negative targets. ``` def assert_finite(x, y): tf.debugging.assert_all_finite(x, "non-finite input") tf.debugging.assert_non_negative(y, "negative target") return x, y _ = ds_steps.consume(split2ds["train"].map(as_model_input).map(assert_finite), log_interval=5000) ``` It is also easy to compute stats on the dataset elements. For example finding global minimum and maximum values of the inputs. 
``` x_min = split2ds["train"].map(as_model_input).reduce( tf.float32.max, lambda acc, elem: tf.math.minimum(acc, tf.math.reduce_min(elem[0]))) x_max = split2ds["train"].map(as_model_input).reduce( tf.float32.min, lambda acc, elem: tf.math.maximum(acc, tf.math.reduce_max(elem[0]))) print("input tensor global minimum: {}, maximum: {}".format(x_min.numpy(), x_max.numpy())) ``` ### Selecting a model architecture `lidbox` provides a small set of neural network model architectures out of the box. Many of these architectures have good results in the literature for different datasets. These models have been implemented in Keras, so you could replace the model we are using here with anything you want. The ["x-vector"](http://danielpovey.com/files/2018_odyssey_xvector_lid.pdf) architecture has worked well in speaker and language identification so lets create an untrained Keras x-vector model. One of its core features is learning fixed length vector representations (x-vectors) for input of arbitrary length. These vectors are extracted from the first fully connected layer (`segment1`), without activation. This opens up opportunities for doing all kinds of statistical analysis on these vectors, but that's out of scope for our example. We'll try to regularize the network by adding frequency [channel dropout](https://dl.acm.org/doi/abs/10.1016/j.patrec.2017.09.023) with probability 0.8. In other words, during training we set input rows randomly to zeros with probability 0.8. This might avoid overfitting the network on frequency channels containing noise that is irrelevant for deciding the language. 
``` import lidbox.models.xvector as xvector def create_model(num_freq_bins, num_labels): model = xvector.create([None, num_freq_bins], num_labels, channel_dropout_rate=0.8) model.compile( loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5)) return model model = create_model( num_freq_bins=20 if model_input_type == "mfcc" else 40, num_labels=len(target2lang)) model.summary() ``` ### Channel dropout demo Here's what happens to the input during training. ``` channel_dropout = tf.keras.layers.SpatialDropout1D(model.get_layer("channel_dropout").rate) for input, target in train_ds_demo: print(input.shape, target2lang[target]) input = channel_dropout(tf.expand_dims(input, 0), training=True)[0].numpy() if model_input_type == "mfcc": plot_cepstra(input) else: plot_spectrogram(input) plot_separator() ``` ### Training the classifier The validation set is needed after every epoch, so we might as well cache it. **Note** that this writes 2.5 GiB of additional data to disk the first time the validation set is iterated over, i.e. at the end of epoch 1. Also, we can't use batches since our input is of different lengths (perhaps with [ragged tensors](https://www.tensorflow.org/versions/r2.3/api_docs/python/tf/data/experimental/dense_to_ragged_batch)). 
``` callbacks = [ # Write scalar metrics and network weights to TensorBoard tf.keras.callbacks.TensorBoard( log_dir=os.path.join(cachedir, "tensorboard", model.name), update_freq="epoch", write_images=True, profile_batch=0, ), # Stop training if validation loss has not improved from the global minimum in 10 epochs tf.keras.callbacks.EarlyStopping( monitor='val_loss', patience=10, ), # Write model weights to cache everytime we get a new global minimum loss value tf.keras.callbacks.ModelCheckpoint( os.path.join(cachedir, "model", model.name), monitor='val_loss', save_weights_only=True, save_best_only=True, verbose=1, ), ] train_ds = split2ds["train"].map(as_model_input).shuffle(1000) dev_ds = split2ds["dev"].cache(os.path.join(cachedir, "data", "dev")).map(as_model_input) history = model.fit( train_ds.batch(1), validation_data=dev_ds.batch(1), callbacks=callbacks, verbose=2, epochs=100) ``` ## Evaluating the classifier Lets run all test set samples through our trained model by loading the best weights from the cache. ``` from lidbox.util import predict_with_model test_ds = split2ds["test"].map(lambda x: dict(x, input=x["logmelspec"])).batch(1) _ = model.load_weights(os.path.join(cachedir, "model", model.name)) utt2pred = predict_with_model(model, test_ds) test_meta = meta[meta["split"]=="test"] assert not test_meta.join(utt2pred).isna().any(axis=None), "missing predictions" test_meta = test_meta.join(utt2pred) test_meta ``` ### Average detection cost ($\text{C}_\text{avg}$) The de facto standard metric for evaluating spoken language classifiers might be the *average detection cost* ($\text{C}_\text{avg}$), which has been refined to its current form during past [language recognition competitions](https://tsapps.nist.gov/publication/get_pdf.cfm?pub_id=925272). `lidbox` provides this metric as a `tf.keras.Metric` subclass. Scikit-learn provides other commonly used metrics so there is no need to manually compute those. 
``` from lidbox.util import classification_report from lidbox.visualize import draw_confusion_matrix true_sparse = test_meta.target.to_numpy(np.int32) pred_dense = np.stack(test_meta.prediction) pred_sparse = pred_dense.argmax(axis=1).astype(np.int32) report = classification_report(true_sparse, pred_dense, lang2target) for m in ("avg_detection_cost", "avg_equal_error_rate", "accuracy"): print("{}: {:.3f}".format(m, report[m])) lang_metrics = pd.DataFrame.from_dict({k: v for k, v in report.items() if k in lang2target}) lang_metrics["mean"] = lang_metrics.mean(axis=1) display(lang_metrics.T) fig, ax = draw_confusion_matrix(report["confusion_matrix"], lang2target) ``` ## Conclusions This was an example on deep learning based simple spoken language identification of 4 different languages from the Mozilla Common Voice free speech datasets. We managed to train a model that adequately recognizes languages spoken by the test set speakers. However, there is clearly room for improvement. We did simple random oversampling to balance the language distribution in the training set, but perhaps there are better ways to do this. We also did not tune optimization hyperparameters or try different neural network architectures or layer combinations. It might also be possible to increase robustness by audio feature engineering, such as [random FIR filtering](https://www.isca-speech.org/archive/Interspeech_2018/abstracts/1047.html) to simulate microphone differences.
github_jupyter
##### Copyright 2019 The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 텐서플로 2.0 시작하기: 전문가용 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/alpha/tutorials/quickstart/advanced"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ko/alpha/tutorials/quickstart/advanced.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행 하기</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ko/alpha/tutorials/quickstart/advanced.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a> </td> </table> Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다. 이 번역에 개선할 부분이 있다면 [tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 문서 번역이나 리뷰에 참여하려면 [docs-ko@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로 메일을 보내주시기 바랍니다. 이 문서는 [구글 코랩](https://colab.research.google.com/notebooks/welcome.ipynb)(Colaboratory) 노트북 파일입니다. 파이썬 프로그램을 브라우저에서 직접 실행할 수 있기 때문에 텐서플로를 배우고 사용하기 좋은 도구입니다: 1. 파이썬 런타임(runtime)에 연결하세요: 메뉴 막대의 오른쪽 상단에서 *CONNECT*를 선택하세요. 
2. 노트북의 모든 코드 셀(cell)을 실행하세요: *Runtime* > *Run all*을 선택하세요. 더 많은 예제와 자세한 안내는 [텐서플로 튜토리얼](https://www.tensorflow.org/alpha/tutorials/)을 참고하세요. 먼저 프로그램에 텐서플로 라이브러리를 임포트합니다: ``` from __future__ import absolute_import, division, print_function, unicode_literals !pip install tensorflow-gpu==2.0.0-alpha0 import tensorflow as tf from tensorflow.keras.layers import Dense, Flatten, Conv2D from tensorflow.keras import Model ``` [MNIST 데이터셋](http://yann.lecun.com/exdb/mnist/)을 로드하여 준비합니다. ``` mnist = tf.keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 # 채널 차원을 추가합니다. x_train = x_train[..., tf.newaxis] x_test = x_test[..., tf.newaxis] ``` tf.data를 사용하여 데이터셋을 섞고 배치를 만듭니다: ``` train_ds = tf.data.Dataset.from_tensor_slices( (x_train, y_train)).shuffle(10000).batch(32) test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32) ``` 케라스(Keras)의 [모델 서브클래싱(subclassing) API](https://www.tensorflow.org/guide/keras#model_subclassing)를 사용하여 `tf.keras` 모델을 만듭니다: ``` class MyModel(Model): def __init__(self): super(MyModel, self).__init__() self.conv1 = Conv2D(32, 3, activation='relu') self.flatten = Flatten() self.d1 = Dense(128, activation='relu') self.d2 = Dense(10, activation='softmax') def call(self, x): x = self.conv1(x) x = self.flatten(x) x = self.d1(x) return self.d2(x) model = MyModel() ``` 훈련에 필요한 옵티마이저(optimizer)와 손실 함수를 선택합니다: ``` loss_object = tf.keras.losses.SparseCategoricalCrossentropy() optimizer = tf.keras.optimizers.Adam() ``` 모델의 손실과 성능을 측정할 지표를 선택합니다. 에포크가 진행되는 동안 수집된 측정 지표를 바탕으로 최종 결과를 출력합니다. 
``` train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy') test_loss = tf.keras.metrics.Mean(name='test_loss') test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy') ``` `tf.GradientTape`를 사용하여 모델을 훈련합니다: ``` @tf.function def train_step(images, labels): with tf.GradientTape() as tape: predictions = model(images) loss = loss_object(labels, predictions) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) train_loss(loss) train_accuracy(labels, predictions) ``` 이제 모델을 테스트합니다: ``` @tf.function def test_step(images, labels): predictions = model(images) t_loss = loss_object(labels, predictions) test_loss(t_loss) test_accuracy(labels, predictions) EPOCHS = 5 for epoch in range(EPOCHS): for images, labels in train_ds: train_step(images, labels) for test_images, test_labels in test_ds: test_step(test_images, test_labels) template = '에포크: {}, 손실: {}, 정확도: {}, 테스트 손실: {}, 테스트 정확도: {}' print (template.format(epoch+1, train_loss.result(), train_accuracy.result()*100, test_loss.result(), test_accuracy.result()*100)) ``` 훈련된 이미지 분류기는 이 데이터셋에서 약 98%의 정확도를 달성합니다. 더 자세한 내용은 [TensorFlow 튜토리얼](https://www.tensorflow.org/alpha/tutorials/)을 참고하세요.
github_jupyter
# Mandala: self-managing experiments ## What is Mandala? Mandala enables new, simpler patterns for working with complex and evolving computational experiments. It eliminates low-level code and decisions for how to save, load, query, delete and otherwise organize results. To achieve this, it lets computational code "manage itself" by organizing and addressing its own data storage. ```{admonition} Under construction :class: warning This project is under active development ``` ### Features at a glance - **concise**: code computations in pure Python (w/ control flow, collections, ...) -- results are automatically tracked and queriable - **iterate rapidly**: add/edit parameters/logic and rerun code -- past results are loaded on demand, and only new computations are executed - **pattern-match against Python code**: query across complex, branching projects by reusing computational code itself ### Quick start #### Installation ```console pip install git+https://github.com/amakelov/mandala ``` #### Recommended introductions To build some understanding, check these out: - 2-minute introduction: [intro to self-managing code](2mins) - 10-minute introduction: [manage a small ML project](10mins) #### Minimal working examples If you want to jump right into code, below are a few minimal, somewhat interesting examples to play with and extend: ``` from typing import List from mandala.all import * set_logging_level('warning') # create a storage for results storage = Storage(in_memory=True) # can also be persistent (on disk) @op(storage) # memoization decorator def inc(x) -> int: return x + 1 @op(storage) def mean(x:List[int]) -> float: # you can operate on / return collections of memoized results return sum(x) / len(x) with run(storage): # calls inside `run` block are memoized nums = [inc(i) for i in range(5)] result = mean(nums) # memoization composes through lists without copying data print(f'Mean of 5 nums: {result}') # add logic/parameters directly on top of memoized code without 
re-doing past work with run(storage, lazy=True): nums = [inc(i) for i in range(10)] result = mean(nums) # walk over chains of calls without loading intermediate data # to traverse storage and collect results flexibly with run(storage, lazy=True): nums = [inc(i) for i in range(10)] result = mean(nums) print(f'Reference to mean of 10 nums: {result}') storage.attach(result) # load the value in-place print(f'Loaded mean of 10 nums: {result}') # pattern-match to memoized compositions of calls with query(storage) as q: # this may not make sense unless you read the tutorials i = Query() inc_i = inc(i).named('inc_i') nums = MakeList(containing=inc_i, at_index=0).named('nums') result = mean(nums).named('result') df = q.get_table(inc_i, nums, result) df ``` ## Why Mandala? ### Advantages Compared to other tools for tracking and managing computations, the features that most set Mandala apart are the direct and concise patterns in which complex Python code can interact with its own storage. This manifests in several ways: - **Python code as interface to its own storage**: you just write the code to compute what you want to compute (freely using Python's control flow and collections), and directly add more parameters and logic to it over time. Mandala takes care of the rest: - **the organization of storage mirrors the structure of code**, and Mandala provides you with the tools to make maximum use of this -- retracing memoized code with on-demand data loading, and declarative code-based pattern-matching. - this leads to **simple, intuitive and flexible ways to query and iterate on experiments**, even when their logic gets quite complex -- without any data organization efforts on your part. - it also allows you to **query relationships between any variables in your projects**, even when they are separated by many computational steps -- **without explicitly annotating these relationships**. 
- **refactor code and data will follow**: Mandala makes it easy to apply familiar software refactorings to code *without* losing the relationship to this code's existing results. This gives you high-level tools to manage the complexity of both the code and its data as the project grows. - **organize all results and their relationships**: Mandala manages all the artifacts produced by computations, not just a set of human-readable metrics. It lets you use pure Python idioms to - compute with **data structures with shared substructure** - **index and view data in multiple ways** and on multiple levels of analysis without storage duplication. This gives you much flexibility in manipulating the contents of storage to express your intent. ### Comparisons Mandala takes inspiration from many other programming tools and concepts. Below is an (incomplete but growing) list of comparisons with relevant tools: - [algebraicjulia](https://www.algebraicjulia.org/): [conjunctive](https://www.algebraicjulia.org/blog/post/2020/12/cset-conjunctive-queries/) [queries](https://www.algebraicjulia.org/blog/post/2020/11/sql-as-hypergraph/) are integral to Mandala's declarative interface, and are generalized in several ways to make them practical for complex experiments: - a single table of values is used to enable polymorphism - operations on lists/dicts are integrated with query construction - queries can use the hierarchical structure of computations - constraints can be partitioned (to avoid interaction) while using some shared base (to enable code reuse) - dynamic query generation can use conditionals to enable disjunctive queries, and even loops (though this quickly becomes inefficient) - [koji](https://arxiv.org/abs/1901.01908) and [content-addressable computation](https://research.protocol.ai/publications/ipfs-fan-a-function-addressable-computation-network/delarocha2021a.pdf): Mandala uses causal hashing to - ensure correct, deterministic and idempotent behavior; - avoid hashing 
large (or unhashable) Python objects; - avoid discrepancies between object hashes across library versions Mandala can be thought of as a single-node, Python-only implementation of general-purpose content-addressable computation with two extra features: - hierarchical organization of computation, - declarative queries - [funsies](https://github.com/aspuru-guzik-group/funsies) is a workflow engine for Python scripts that also uses causal hashing. Mandala differs by integrating more closely with Python (by using functions instead of scripts as the units of work), and thus enabling more fine-grained control and expressiveness over what gets computed and how. - [joblib.Memory](https://joblib.readthedocs.io/en/latest/memory.html#memory) implements persistent memoization for Python functions that overcomes some of the issues naive implementations have with large and complex Python objects. Mandala augments `joblib.Memory` in some key ways: - memoized calls can be queried/deleted declaratively - collections and memoized functions calling other memoized functions can reuse storage - you can modify and refactor memoized functions while retaining connection to memoized calls - you can avoid the latency of hashing large/complex objects - [incpy](https://dl.acm.org/doi/abs/10.1145/2001420.2001455?casa_token=ahM2UC4Uk-4AAAAA:9lZXVDS7nYEHzHPJk-UCTOAICGb2astAh2hrL00VB125nF6IGG90OwA-ujbe-cIg2hT4T1MOpbE2) augments the Python interpreter with automatic persistent memoization. Mandala also enables automatic persistent memoization, but it is different from `incpy` in some key ways: - uses decorators to explicitly designate memoized functions (which can be good or bad depending on your goals) - allows for lazy retracing of memoized calls - provides additional features like the ones mentioned in the comparison with `joblib.Memory` ### Philosophy When can we declare data management for computational experiments a solved problem? 
It's unclear how to turn this question into a measurable goal, but there is a somewhat objective *lower bound* on how simple data management can get: > At the end of the day, we have to *at least* write down the (Python) code to express > the computations we want to run, *regardless* of data management concerns. > Can this be *all* the code we have to write, and *still* be able to achieve > the goals of data management? Mandala aims to bring us to this idealized lower bound. It adopts the view that Python itself is flexible and expressive enough to capture our intentions about experiments. There shouldn't be a ton of extra interfaces, concepts and syntax between your thoughts, their expression in code, and its results. By mirroring the structure of computational code in the organization of data, and harmoniously extending Python's tools for capturing intention and managing complexity, we can achieve a more flexible, natural and immediate way to interact with computations. This echoes the design goals of some other tools. For example, [dask](https://dask.org) and [ray](https://ray.io) (both of which Mandala integrates with) aim to let you write Python code the way you are used to, and take care of parallelization for you. ## Limitations This project is under active development, and not ready for production. Its goal so far has been to demonstrate that certain high-level programming patterns are viable by building a sufficiently useful working prototype. Limitations can be summarized as follows: - it is easy to get started, but effective use in complex projects requires some getting used to; - much of the code does what it does in very simple and often inefficient ways; - interfaces and (more importantly) storage formats may change in backward incompatible ways. - bugs likely still exist; That being said, Mandala is already quite usable in many practical situations. 
Below is a detailed outline of current limitations you should be aware of if you consider using this library in your work. ### "Missing" features There are some things you may be used to seeing in projects like this that currently don't exist: - **functions over scripts**: Mandala focuses on functions as the basic building blocks of experiments as opposed to Python scripts. There is no fundamental conceptual distinction between the two, but: - functions provide a better-behaved interface, especially when it comes to typing, refactoring, and hierarchical organization - using functions makes it much easier to use projects such as [ray](https://www.ray.io/) and [dask](https://dask.org/) alongside Mandala - if you don't need to do something extra complicated involving different Python processes or virtual environments, it is easy to wrap a script as a function that takes in some settings and resource descriptions (e.g., paths to input files) and returns other resource descriptions (e.g., paths to output files). However, the burden of refactoring the script's interface manually and organizing its input/output resources would still be on you. So, always use a function where you can. - **no integration with git**: version control data is not automatically included in Mandala's records at this point, thought this would be an easy addition. There are other programming patterns available for working with multiple versions of code. - **no GUI**: for now, the library leans heavily towards using computational code itself as a highly programmable interface to results, and visualization is left to other tools. ### Acquiring best practices Using some features effectively requires deeper understanding: - **declarative queries**: It's possible to create underconstrained pattern-matching queries which return a number of rows that grows multiplicatively with the numbers of rows of memoization tables of functions in the query. 
Such queries may take a very long time or run out of RAM even for moderately-sized projects (`sqlite` will usually complain about this at the start of the query). Certain ways to define and compose memoized functions promote such queries, so a good understanding of this issue may be needed depending on the project. - **deletions**: deleting anything from storage is subject to invariants that prevent the existence of "mysterious" objects (ones without a computational history tracing back to user inputs) from existing. This means that you must understand well how deletion works to avoid deleting more things than you really intend. ### Performance The library has not been optimized much for performance. A few things to keep in mind for now: - When using disk-based persistence, Mandala introduces an overhead of a few 10s of ms for each call to a memoized function, on top of any work to serialize inputs/outputs and run the function. - Storing and loading large collections can be slow (a list of 1000 integers already leads to a visible ~1s delay)
github_jupyter
# notebook for processing fully reduced m3 data "triplets" This is a notebook for processing L0 / L1B / L2 triplets (i.e., the observations that got reduced). ## general notes We process the reduced data in triplets simply to improve the metadata on the L0 and L2 products. We convert L1B first to extract several attributes to fill out their metadata. This data is scratched to disk in [./directories/m3/m3_index.csv'](./directories/m3/m3_index.csv), because it also serves as a useful user-facing index to the archive. A complete version of this index is provided in this repository, but this index was originally created during this conversion process, and will be recreated if you run it again. This index is read into the ```m3_index variable``` below; its path is also soft-coded in several ```m3_conversion``` classes, so make sure you change that or feed them the correct path as an argument if you change this location. This notebook does not apply programmatic rules to iterate over the file structure of the mirrored archive. It uses an index that was partly manually generated: [/src/directories/m3/m3_data_mappings.csv](/src/directories/m3/m3_data_mappings.csv). This was manually manipulated to manage several small idiosyncracies in the PDS3 archive. 35 of the V3 L1B products in the PDS3 archive are duplicated: one copy in the correct month-by-year directory, one copy in some incorrect month-by-year directory. We pick the 'first' one in all cases (see the line ```pds3_label_file = input_directory + group_files[product_type][0]``` below). Each pair's members have identical md5sums, so it *probably* doesn't matter which member of the pair we use. ## performance tips The most likely bottlenecks for this process are I/O throughput and CPU. We recommend both using a high-throughput disk and parallelizing this, either using ```pathos``` (vanilla Python ```multiprocessing``` will probably fail during a pickling step) or simply by running multiple copies of this notebook. 
If you do parallelize this process on a single machine, note that working memory can suddenly catch you off-guard as a constraint. While many of the M3 observational data files are small, some are over 4 GB, and the method presented here requires them to be completely loaded into memory in order to convert them to FITS and strip the prefix tables from the L0 files. When passed ```clean=True```, the ```m3_converter``` observational data writer class constructors aggressively delete data after using it, but this still results in a pretty high -- and spiky -- working memory burden. ``` import datetime as dt import os from types import MappingProxyType from more_itertools import distribute import pandas as pd import sh from m3_bulk import basenamer, make_m3_triplet, \ m3_triplet_bundle_paths, crude_time_log, fix_end_object_tags from m3_conversion import M3L0Converter, M3L1BConverter, M3L2Converter from pvl.decoder import ParseError m3_index = pd.read_csv('./directories/m3/m3_index.csv') # directory of file mappings, grouped into m3 basename clusters file_mappings = pd.read_csv('./directories/m3/m3_data_mappings.csv') file_mappings["basename"] = file_mappings["filepath"].apply(basenamer) basename_groups = list(file_mappings.groupby("basename")) # what kind of files does each pds4 product have? # paths to the locally-written versions are stored in the relevant attributes of # the associated PDSVersionConverter instance. pds4_filetypes = MappingProxyType({ 'l0': ('pds4_label_file', 'clock_file', 'fits_image_file'), 'l1b': ('pds4_label_file', 'loc_file', 'tim_file', 'rdn_file', 'obs_file'), 'l2': ('pds4_label_file', 'sup_file', 'rfl_file') }) # root directories of PDS3 and PDS4 data sets respectively input_directory = '/home/ubuntu/m3_input/' output_directory = '/home/ubuntu/m3_output/' # all the triplets: what we are converting here. 
reduced_groups = [group for group in basename_groups if len(group[1]) >= 3] # the edr_groups = [group for group in basename_groups if len(group[1]) == 1] # lonesome EDR images triplet_product_types = ('l1b', 'l0', 'l2') # initialize our mapping of product types to # product-writer class constructors. # MappingProxyType is just a safety mechanism # to make sure constructors don't get messed with converters = MappingProxyType({ 'l0': M3L0Converter, 'l1b': M3L1BConverter, 'l2': M3L2Converter }) writers = {} # dict to hold instances of the converter classes # initialize iteration, control execution in whatever way # this is a place to split your index up however you like # if you're parallelizing using multiple copies of this # notebook. chunk_ix_of_this_notebook = 0 total_chunks = 40 chunks = distribute(total_chunks, reduced_groups) # eagerly evaluate so we know how long it is, # and what all is in it if we have an error chunk = list(chunks[chunk_ix_of_this_notebook]) log_string = "_" + str(chunk_ix_of_this_notebook) group_enumerator = enumerate(chunk) for ix, group in group_enumerator: print(ix, len(chunk)) print("beginning product conversion") triplet_start_time = dt.datetime.now() group_files = make_m3_triplet(group) # what are the correct output paths (relative to # the root of the pds4 bundle) for these products? 
bundle_paths = m3_triplet_bundle_paths(group) for product_type in triplet_product_types: # read the PDS3 product and perform file conversions pds3_label_file = input_directory + group_files[product_type][0] try: writers[product_type] = converters[product_type]( pds3_label_file, suppress_warnings=True, clean=True ) except ParseError: # fix broken END_OBJECT tags in some of the target-mode files print("fixing broken END_OBJECT tags") temp_label_file = fix_end_object_tags(pds3_label_file) writers[product_type] = converters[product_type]( temp_label_file, suppress_warnings=True, clean=True ) os.remove(temp_label_file) # write PDS4 label and product files # don't actually need to shave the extra / here but... # this would be more safely rewritten with PyFilesystem # (see clem-conversion) output_path = output_directory + bundle_paths[product_type][1:] sh.mkdir("-p", output_path) writers[product_type].write_pds4(output_path, write_product_files=True, clean=True) # occasionally (slow but very useful) spot-check with validate tool # note that this just invokes a one-line script at /usr/bin/validate # that links to the local install of the PDS Validate Tool; this # allows us to avoid throwing java stuff all over our environment if ix % 20 == 1: print("1-mod-20th triplet: running Validate Tool") validate_results = sh.validate("-t", writers[product_type].pds4_label_file) with open("validate_dump.txt", "a") as file: file.write(validate_results.stdout.decode()) print("validated successfully") # log transfer crudely crude_time_log( "m3_data_conversion_log" + log_string + ".csv", writers[product_type], str((dt.datetime.now() - triplet_start_time).total_seconds()) ) print( "done with this triplet; total seconds " + str((dt.datetime.now() - triplet_start_time).total_seconds()) ) ```
github_jupyter
# Speed benchmarks This is just for having a quick reference of how the speed of running the program scales ``` from __future__ import print_function import pprint import subprocess import sys sys.path.append('../') # sys.path.append('/home/heberto/learning/attractor_sequences/benchmarking/') import numpy as np import matplotlib.pyplot as plt import matplotlib import matplotlib.gridspec as gridspec from mpl_toolkits.axes_grid1 import make_axes_locatable import seaborn as sns %matplotlib inline np.set_printoptions(suppress=True, precision=2) sns.set(font_scale=2.0) ``` #### Git machine ``` run_old_version = False if run_old_version: hash_when_file_was_written = '321620ef1b753fe42375bbf535c9ab941b72ae26' hash_at_the_moment = subprocess.check_output(["git", 'rev-parse', 'HEAD']).strip() print('Actual hash', hash_at_the_moment) print('Hash of the commit used to run the simulation', hash_when_file_was_written) subprocess.call(['git', 'checkout', hash_when_file_was_written]) ``` #### Load the libraries ``` from benchmarking.standard_program import run_standard_program, calculate_succes_program, training_program import timeit def wrapper(func, *args, **kwargs): def wrapped(): return func(*args, **kwargs) return wrapped ``` ## Standard program #### Minicolumns ``` hypercolumns = 4 minicolumns_range = np.arange(10, 100, 5) epochs = 1 times_minicolumns = [] for minicolumns in minicolumns_range: function = wrapper(run_standard_program, hypercolumns=hypercolumns, minicolumns=minicolumns, epochs=epochs) time = timeit.timeit(function, number=1) times_minicolumns.append(time) fig = plt.figure(figsize=(16, 12)) ax = fig.add_subplot(111) ax.plot(minicolumns_range, times_minicolumns, '*-', markersize=14) ax.set_xlabel('Minicolumns') ax.set_ylabel('Seconds that the program runed'); ``` #### Hypercolumns ``` hypercolumns_range = np.arange(4, 20, 2) minicolumns = 20 epochs = 1 times_hypercolumns = [] for hypercolumns in hypercolumns_range: function = wrapper(run_standard_program, 
hypercolumns, minicolumns, epochs) time = timeit.timeit(function, number=1) times_hypercolumns.append(time) sns.set(font_scale=2.0) fig = plt.figure(figsize=(16, 12)) ax = fig.add_subplot(111) ax.plot(hypercolumns_range, times_hypercolumns, '*-', markersize=14) ax.set_xlabel('Hypercolumns') ax.set_ylabel('Seconds that the program runed'); ``` #### Epochs ``` hypercolumns = 4 minicolumns = 20 epochs_range = np.arange(1, 10, 1) times_epochs = [] for epochs in epochs_range: function = wrapper(run_standard_program, hypercolumns, minicolumns, epochs) time = timeit.timeit(function, number=1) times_epochs.append(time) sns.set(font_scale=2.0) fig = plt.figure(figsize=(16, 12)) ax = fig.add_subplot(111) ax.plot(epochs_range, times_epochs, '*-', markersize=14) ax.set_xlabel('Epochs') ax.set_ylabel('Seconds that the program runed') ``` #### Everything to compare ``` fig = plt.figure(figsize=(16, 12)) ax1 = fig.add_subplot(131) ax2 = fig.add_subplot(132) ax3 = fig.add_subplot(133) ax1.plot(minicolumns_range, times_minicolumns, '*-', markersize=14) ax2.plot(hypercolumns_range, times_hypercolumns, '*-', markersize=14) ax3.plot(epochs_range, times_epochs, '*-', markersize=14) ax1.set_title('Minicolumn scaling') ax2.set_title('Hypercolumn scaling') ax3.set_title('Epoch scaling') ax1.set_ylabel('Time (s)'); ``` ## Training and recalling times Here we run the standard program first and then we test how long it takes for it to run recalls and test recall success ``` hypercolumns = 4 minicolumns = 10 epochs = 3 manager = run_standard_program(hypercolumns, minicolumns, epochs) ``` #### Recall only ``` T_recall_range = np.arange(3, 20, 1) time_recall = [] for T_recall in T_recall_range: function = wrapper(training_program, manager=manager, T_recall=T_recall) time = timeit.timeit(function, number=1) time_recall.append(time) # Plot fig = plt.figure(figsize=(16, 12)) ax = fig.add_subplot(111) ax.plot(T_recall_range, time_recall, '*-', markersize=14) ax.set_xlabel('T_recall') 
ax.set_ylabel('Seconds that the program took to run') ax.set_title('Normal recall profile') plt.show() ``` #### Success recall ``` T_recall_range = np.arange(3, 20, 1) time_success = [] for T_recall in T_recall_range: function = wrapper(calculate_succes_program, manager=manager, T_recall=T_recall) time = timeit.timeit(function, number=1) time_success.append(time) # Plot fig = plt.figure(figsize=(16, 12)) ax = fig.add_subplot(111) ax.plot(T_recall_range, time_success, '*-', markersize=14) ax.plot(T_recall_range, time_recall, '*-', markersize=14) ax.set_xlabel('T_recall') ax.set_ylabel('Seconds that the program took to run') ax.set_title('Recall Success profiling') plt.show() ```
github_jupyter
<!--- <div style="text-align: center;"> <font size="5"> <b>Data-driven Design and Analyses of Structures and Materials (3dasm)</b> </font> </div> <br> </br> <div style="text-align: center;"> <font size="5"> <b>Lecture 1</b> </font> </div> <center> <img src=docs/tudelft_logo.jpg width=550px> </center> <div style="text-align: center;"> <font size="4"> <b>Miguel A. Bessa | <a href = "mailto: M.A.Bessa@tudelft.nl">M.A.Bessa@tudelft.nl</a> | Associate Professor</b> </font> </div> --> <img src=docs/tudelft_logo.jpg width=50%> ## Data-driven Design and Analyses of Structures and Materials (3dasm) ## Lecture 1 ### Miguel A. Bessa | <a href = "mailto: M.A.Bessa@tudelft.nl">M.A.Bessa@tudelft.nl</a> | Associate Professor ## Introduction **What:** A lecture of the "3dasm" course **Where:** This notebook comes from this [repository](https://github.com/bessagroup/3dasm_course) **Reference for entire course:** Murphy, Kevin P. *Probabilistic machine learning: an introduction*. MIT press, 2022. Available online [here](https://probml.github.io/pml-book/book1.html) **How:** We try to follow Murphy's book closely, but the sequence of Chapters and Sections is different. The intention is to use notebooks as an introduction to the topic and Murphy's book as a resource. * If working offline: Go through this notebook and read the book. * If attending class in person: listen to me (!) but also go through the notebook in your laptop at the same time. Read the book. * If attending lectures remotely: listen to me (!) via Zoom and (ideally) use two screens where you have the notebook open in 1 screen and you see the lectures on the other. Read the book. **Optional reference (the "bible" by the "bishop"... pun intended 😆) :** Bishop, Christopher M. *Pattern recognition and machine learning*. Springer Verlag, 2006. 
**References/resources to create this notebook:** * [Figure (Car stopping distance)](https://korkortonline.se/en/theory/reaction-braking-stopping/) * Snippets of code from this awesome [repo](https://github.com/gerdm/prml) by Gerardo Duran-Martin that replicates many figures in Bishop's book Apologies in advance if I missed some reference used in this notebook. Please contact me if that is the case, and I will gladly include it here. ## **OPTION 1**. Run this notebook **locally in your computer**: 1. Install miniconda3 [here](https://docs.conda.io/en/latest/miniconda.html) 2. Open a command window and create a virtual environment called "3dasm": ``` conda create -n 3dasm python=3 numpy scipy jupyter nb_conda matplotlib pandas scikit-learn rise tensorflow -c conda-forge ``` 3. Install [git](https://github.com/git-guides/install-git), open command window & clone the repository to your computer: ``` git clone https://github.com/bessagroup/3dasm_course ``` 4. Load jupyter notebook by typing in (anaconda) command window (it will open in your internet browser): ``` conda activate 3dasm jupyter notebook ``` 5. Open notebook (3dasm_course/Lectures/Lecture1/3dasm_Lecture1.ipynb) **Short note:** My personal environment also has other packages that help me while teaching. > conda install -n 3dasm -c conda-forge jupyter_contrib_nbextensions hide_code Then in the 3dasm conda environment: > jupyter nbextension install --py hide_code --sys-prefix > > jupyter nbextension enable --py hide_code > > jupyter serverextension enable --py hide_code > > jupyter nbextension enable splitcell/splitcell ## **OPTION 2**. Use **Google's Colab** (no installation required, but times out if idle): 1. go to https://colab.research.google.com 2. login 3. File > Open notebook 4. click on Github (no need to login or authorize anything) 5. paste the git link: https://github.com/bessagroup/3dasm_course 6. 
click search and then click on the notebook (*3dasm_course/Lectures/Lecture1/3dasm_Lecture1.ipynb*) ``` # Basic plotting tools needed in Python. import matplotlib.pyplot as plt # import plotting tools to create figures import numpy as np # import numpy to handle a lot of things! %config InlineBackend.figure_format = "retina" # render higher resolution images in the notebook plt.style.use("seaborn") # style for plotting that comes from seaborn plt.rcParams["figure.figsize"] = (8,4) # rescale figure size appropriately for slides ``` ## Outline for today * Introduction - Taking a probabilistic perspective on machine learning * Basics of univariate statistics - Continuous random variables - Probabilities vs probability densities - Moments of a probability distribution * The mindblowing Bayes' rule - The rule that spawns almost every ML model (even when we don't realize it) **Reading material**: This notebook + Chapter 2 until Section 2.3 ## Get hyped about Artificial Intelligence... ``` from IPython.display import display, YouTubeVideo, HTML YouTubeVideo('RNnZwvklwa8', width=512, height=288) # show that slides are interactive: # rescale video to 768x432 and back to 512x288 ``` **Well...** This class *might* not make you break the world (yet!). Let's focus on the fundamentals: * Probabilistic perspective on machine learning * Supervised learning (especially regression) ## Machine learning (ML) * **ML definition**: A computer program that learns from experience $E$ wrt tasks $T$ such that the performance $P$ at those tasks improves with experience $E$. * We'll treat ML from a **probabilistic perspective**: - Treat all unknown quantities as **random variables** * What are random variables? - Variables endowed with probability distributions! 
## The car stopping distance problem <img src="docs/reaction-braking-stopping.svg" title="Car stopping distance" width="50%" align="right"> <br></br> Car stopping distance ${\color{red}y}$ as a function of its velocity ${\color{green}x}$ before it starts braking: ${\color{red}y} = {\color{blue}z} x + \frac{1}{2\mu g} {\color{green}x}^2 = {\color{blue}z} x + 0.1 {\color{green}x}^2$ - ${\color{blue}z}$ is the driver's reaction time (in seconds) - $\mu$ is the road/tires coefficient of friction (assume $\mu=0.5$) - $g$ is the acceleration of gravity (assume $g=10$ m/s$^2$). ## The car stopping distance problem ### How to obtain this formula? $y = d_r + d_{b}$ where $d_r$ is the reaction distance, and $d_b$ is the braking distance. ### Reaction distance $d_r$ $d_r = z x$ with $z$ being the driver's reaction time, and $x$ being the velocity of the car at the start of braking. ## The car stopping distance problem ### Braking distance $d_b$ Kinetic energy of moving car: $E = \frac{1}{2}m x^2$ &nbsp; &nbsp; &nbsp; where $m$ is the car mass. Work done by braking: $W = \mu m g d_b$ &nbsp; &nbsp; &nbsp; where $\mu$ is the coefficient of friction between the road and the tire, $g$ is the acceleration of gravity, and $d_b$ is the car braking distance. 
The braking distance follows from $E=W$: $d_b = \frac{1}{2\mu g}x^2$ Therefore, if we add the reaction distance $d_r$ to the braking distance $d_b$ we get the stopping distance $y$: $$y = d_r + d_b = z x + \frac{1}{2\mu g} x^2$$ ## The car stopping distance problem <img src="docs/reaction-braking-stopping.svg" title="Car stopping distance" width="25%" align="right"> $y = {\color{blue}z} x + 0.1 x^2$ The driver's reaction time ${\color{blue}z}$ is a **random variable (rv)** * Every driver has its own reaction time $z$ * Assume the distribution associated to $z$ is Gaussian with **mean** $\mu_z=1.5$ seconds and **variance** $\sigma_z^2=0.5^2$ seconds$^2$ $$ z \sim \mathcal{N}(\mu_z=1.5,\sigma_z^2=0.5^2) $$ where $\sim$ means "sampled from", and $\mathcal{N}$ indicates a Gaussian **probability density function (pdf)** ## Univariate Gaussian <a title="probability density function">pdf</a> The Gaussian <a title="probability density function">pdf</a> is defined as: $$ \mathcal{N}(z | \mu_z, \sigma_z^2) = \frac{1}{\sqrt{2\pi\sigma_z^2}}e^{-\frac{1}{2\sigma_z^2}(z - \mu_z)^2} $$ Alternatively, we can write it using the **precision** term $\lambda_z := 1 / \sigma_z^2$ instead of using $\sigma_z^2$: $$ \mathcal{N}(z | \mu_z, \lambda_z^{-1}) = \frac{\lambda_z^{1/2}}{\sqrt{2\pi}}e^{-\frac{\lambda_z}{2}(z - \mu_z)^2} $$ Anyway, recall what this <a title="probability density function">pdf</a> looks like... 
``` def norm_pdf(z, mu_z, sigma_z2): return 1 / np.sqrt(2 * np.pi * sigma_z2) * np.exp(-(z - mu_z)**2 / (2 * sigma_z2)) zrange = np.linspace(-8, 4, 200) # create a list of 200 z points between z=-8 and z=4 fig, ax = plt.subplots() # create a plot ax.plot(zrange, norm_pdf(zrange, 0, 1), label=r"$\mu_z=0; \ \sigma_z^2=1$") # plot norm_pdf(z|0,1) ax.plot(zrange, norm_pdf(zrange, 1.5, 0.5**2), label=r"$\mu_z=1.5; \ \sigma_z^2=0.5^2$") # plot norm_pdf(z|1.5,0.5^2) ax.plot(zrange, norm_pdf(zrange, -1, 2**2), label=r"$\mu_z=-1; \ \sigma_z^2=2^2$") # plot norm_pdf(z|-1,2^2) ax.set_xlabel("z", fontsize=20) # create x-axis label with font size 20 ax.set_ylabel("probability density", fontsize=20) # create y-axis label with font size 20 ax.legend(fontsize=15) # create legend with font size 15 ax.set_title("Three different Gaussian pdfs", fontsize=20); # create title with font size 20 ``` The <span style="color:green">green</span> curve shows the Gaussian <a title="probability density function">pdf</a> of the <a title="random variable">rv</a> $z$ **conditioned** on the mean $\mu_z=1.5$ and variance $\sigma_z^2=0.5^2$ for the car stopping distance problem. ## Univariate Gaussian <a title="probability density function">pdf</a> $$ p(z) = \mathcal{N}(z | \mu_z, \sigma_z^2) = \frac{1}{\sqrt{2\pi\sigma_z^2}}e^{-\frac{1}{2\sigma_z^2}(z - \mu_z)^2} $$ The output of this expression is the **PROBABILITY DENSITY** of $z$ **given** (or conditioned to) a particular $\mu_z$ and $\sigma_z^2$. * **Important**: Probability Density $\neq$ Probability So, what is a probability? ## Probability The probability of an event $A$ is denoted by $\text{Pr}(A)$. * $\text{Pr}(A)$ means the probability with which we believe event A is true * An event $A$ is a binary variable saying whether or not some state of the world holds. Probability is defined such that: $0 \leq \text{Pr}(A) \leq 1$ where $\text{Pr}(A)=1$ if the event will definitely happen and $\text{Pr}(A)=0$ if it definitely will not happen. 
## Joint probability **Joint probability** of two events: $\text{Pr}(A \wedge B)= \text{Pr}(A, B)$ If $A$ and $B$ are **independent**: $\text{Pr}(A, B)= \text{Pr}(A) \text{Pr}(B)$ For example, suppose $z_1$ and $z_2$ are chosen uniformly at random from the set $\mathcal{Z} = \{1, 2, 3, 4\}$. Let $A$ be the event that $z_1 \in \{1, 2\}$ and $B$ be the event that **another** <a title="random variable">rv</a> denoted as $z_2 \in \{3\}$. Then we have: $\text{Pr}(A, B) = \text{Pr}(A) \text{Pr}(B) = \frac{1}{2} \cdot \frac{1}{4}$. ## Probability of a union of two events Probability of event $A$ or $B$ happening is: $\text{Pr}(A \vee B)= \text{Pr}(A) + \text{Pr}(B) - \text{Pr}(A \wedge B)$ If these events are mutually exclusive (they can't happen at the same time): $$ \text{Pr}(A \vee B)= \text{Pr}(A) + \text{Pr}(B) $$ For example, suppose an <a title="random variable">rv</a> denoted as $z_1$ is chosen uniformly at random from the set $\mathcal{Z} = \{1, 2, 3, 4\}$. Let $A$ be the event that $z_1 \in \{1, 2\}$ and $B$ be the event that the **same** <a title="random variable">rv</a> $z_1 \in \{3\}$. Then we have $\text{Pr}(A \vee B) = \frac{2}{4} + \frac{1}{4}$. ## Conditional probability of one event given another We define the **conditional probability** of event $B$ happening given that $A$ has occurred as follows: $$ \text{Pr}(B | A)= \frac{\text{Pr}(A,B)}{\text{Pr}(A)} $$ This is not defined if $\text{Pr}(A) = 0$, since we cannot condition on an impossible event. ## Conditional independence of one event given another We say that event $A$ is conditionally independent of event $B$ if we have $\text{Pr}(A | B)= \text{Pr}(A)$ This implies $\text{Pr}(B|A) = \text{Pr}(B)$. Hence, the joint probability becomes $\text{Pr}(A, B) = \text{Pr}(A) \text{Pr}(B)$ The book uses the notation $A \perp B$ to denote this property. 
## Coming back to our car stopping distance problem <img src="docs/reaction-braking-stopping.svg" title="Car stopping distance" width="25%" align="right"> $y = {\color{blue}z} x + 0.1 x^2$ where $z$ is a **continuous** <a title="random variable">rv</a> such that $z \sim \mathcal{N}(\mu_z=1.5,\sigma_z^2=0.5^2)$. * What is the probability of an event $Z$ defined by a reaction time $z \leq 0.52$ seconds? $$ \text{Pr}(Z)=\text{Pr}(z \leq 0.52)= P(z=0.52) $$ where $P(z)$ denotes the **cumulative distribution function (cdf)**. Note that <a title="cumulative distribution function">cdf</a> is denoted with a capital $P$. Likewise, we can compute the probability of being in any interval as follows: $\text{Pr}(a \leq z \leq b)= P(z=b)-P(z=a)$ * But how do we compute the cdf at a particular value $b$, e.g. $P(z=b)$? ## <a title="Cumulative distribution functions">Cdf's</a> result from <a title="probability density functions">pdf's</a> A <a title="probability density functions">pdf</a> $p(z)$ is defined as the derivative of the <a title="cumulative distribution functions">cdf</a> $P(z)$: $$ p(z)=\frac{d}{d z}P(z) $$ So, given a <a title="probability density function">pdf</a> $p(z)$, we can compute the following probabilities: $$\text{Pr}(z \leq b)=\int_{-\infty}^b p(z) dz = P(b)$$ $$\text{Pr}(z \geq a)=\int_a^{\infty} p(z) dz = 1 - P(a)$$ $$\text{Pr}(a \leq z \leq b)=\int_a^b p(z) dz = P(b) - P(a)$$ **IMPORTANT**: $\int_{-\infty}^{\infty} p(z) dz = 1$ ### Some notes about <a title="probability density functions">pdf's</a> The integration to unity is important! $$\int_{-\infty}^{\infty} p(z) dz = 1$$ **Remember:** the integral of a <a title="probability density function">pdf</a> leads to a probability, and probabilities cannot be larger than 1. 
For example, from this property we can derive the following: $$ \int_{-\infty}^{\infty} p(z) dz = \int_{-\infty}^{a} p(z) dz + \int_{a}^{\infty} p(z) dz $$ $$ \Rightarrow \text{Pr}(z \geq a)= 1 - \text{Pr}(z \leq a) = 1 - \text{P}(a) = 1 - \int_{-\infty}^a p(z) dz $$ In some cases we will work with probability distributions that are **unnormalized**, so this comment is important! * Being unnormalized means that the probability density of the distribution does not integrate to 1. * In this case, we cannot call such function a <a title="probability density function">pdf</a>, even though its output is a probability density. ## <a title="Cumulative distribution functions">Cdf's</a> result from <a title="probability density functions">pdf's</a> Key point? * Given a <a title="probability density function">pdf</a> $p(z)$, we can compute the probability of a continuous <a title="random variable">rv</a> $z$ being in a finite interval as follows: $$ \text{Pr}(a \leq z \leq b)=\int_a^b p(z) dz = P(b) - P(a) $$ As the size of the interval gets smaller, we can write $$ \text{Pr}\left(z - \frac{dz}{2} \leq z \leq z + \frac{dz}{2}\right) \approx p(z) dz $$ Intuitively, this says the probability of $z$ being in a small interval around $z$ is the density at $z$ times the width of the interval. 
``` from scipy.stats import norm # import from scipy.stats the normal distribution zrange = np.linspace(-3, 3, 100) # 100 values for plot fig_std_norm, (ax1, ax2) = plt.subplots(1, 2) # create a plot with 2 subplots side-by-side ax1.plot(zrange, norm.cdf(zrange, 0, 1), label=r"$\mu_z=0; \ \sigma_z=1$") # plot cdf of standard normal ax1.set_xlabel("z", fontsize=20) ax1.set_ylabel("probability", fontsize=20) ax1.legend(fontsize=15) ax1.set_title("Standard Gaussian cdf", fontsize=20) ax2.plot(zrange, norm.pdf(zrange, 0, 1), label=r"$\mu_z=0; \ \sigma_z=1$") # plot pdf of standard normal ax2.set_xlabel("z", fontsize=20) ax2.set_ylabel("probability density", fontsize=20) ax2.legend(fontsize=15) ax2.set_title("Standard Gaussian pdf", fontsize=20) fig_std_norm.set_size_inches(25, 5) # scale figure to be wider (since there are 2 subplots) ``` ## Note about scipy.stats [scipy](https://docs.scipy.org/doc/scipy/index.html) is an open-source software for mathematics, science, and engineering. It's brilliant and widely used for many things! **In particular**, [scipy.stats](https://docs.scipy.org/doc/scipy/reference/stats.html) is a simple module within scipy that has statistical functions and operations that are very useful. This way, we don't need to code all the functions ourselves. That's why we are using it to plot the cdf and pdf of the Gaussian distribution from now on, and we will use it for other things later. * In case you are interested, scipy.stats has a nice [tutorial](https://docs.scipy.org/doc/scipy/tutorial/stats.html) ## Coming back to our car stopping distance problem <img src="docs/reaction-braking-stopping.svg" title="Car stopping distance" width="25%" align="right"> $y = {\color{blue}z} x + 0.1 x^2$ where $z$ is a continuous <a title="random variable">rv</a> such that $p(z)= \mathcal{N}(z | \mu_z=1.5,\sigma_z^2=0.5^2)$. * What is the probability of an event $Z$ defined by a reaction time $z \leq 0.52$ seconds? 
$$ \text{Pr}(Z) = \text{Pr}(z \leq 0.52) = P(z=0.52) = \int_{-\infty}^{0.52} p(z) dz $$ ``` Pr_Z = norm.cdf(0.52, 1.5, 0.5) # using scipy norm.cdf(z=0.52 | mu_z=1.5, sigma_z=0.5) print("The probability of event Z is: Pr(Z) = ",round(Pr_Z,3)) z_value = 0.52 # z = 0.52 seconds zrange = np.linspace(0, 3, 200) # 200 values for plot fig_car_norm, (ax1, ax2) = plt.subplots(1, 2) # create subplot (two figures in 1) ax1.plot(zrange, norm.cdf(zrange, 1.5, 0.5), label=r"$\mu_z=1.5; \ \sigma_z=0.5$") # Figure 1 is cdf ax1.plot(z_value, norm.cdf(z_value, 1.5, 0.5), 'r*',markersize=15, linewidth=2, label=u'$P(z=0.52~|~\mu_z=1.5, \sigma_z^2=0.5^2)$') ax1.set_xlabel("z", fontsize=20) ax1.set_ylabel("probability", fontsize=20) ax1.legend(fontsize=15) ax1.set_title("Gaussian cdf of $z$ for car problem", fontsize=20) ax2.plot(zrange, norm.pdf(zrange, 1.5, 0.5), label=r"$\mu_z=1.5; \ \sigma_z=0.5$") # figure 2 is pdf ax2.plot(z_value, norm.pdf(z_value, 1.5, 0.5), 'r*', markersize=15, linewidth=2, label=u'$p(z=0.52~|~\mu_z=1.5, \sigma_z^2=0.5^2)$') ax2.set_xlabel("z", fontsize=20) ax2.set_ylabel("probability density", fontsize=20) ax2.legend(fontsize=15) ax2.set_title("Gaussian pdf of $z$ for car problem", fontsize=20) fig_car_norm.set_size_inches(25, 5) # scale figure to be wider (since there are 2 subplots) ``` ### Why is the Gaussian distribution so widely used? Several reasons: 1. It has two parameters which are easy to interpret, and which capture some of the most basic properties of a distribution, namely its mean and variance. 2. The central limit theorem (Sec. 2.8.6 of the book) tells us that sums of independent random variables have an approximately Gaussian distribution, making it a good choice for modeling residual errors or “noise”. 3. The Gaussian distribution makes the least number of assumptions (has maximum entropy), subject to the constraint of having a specified mean and variance (Sec. 3.4.4 of the book); this makes it a good default choice in many cases. 4. 
It has a simple mathematical form, which results in easy to implement, but often highly effective, methods. ## Car stopping distance problem <img src="docs/reaction-braking-stopping.svg" title="Car stopping distance" width="25%" align="right"> $y = {\color{blue}z} x + 0.1 x^2$ where $z$ is a continuous <a title="random variable">rv</a> such that $z \sim \mathcal{N}(\mu_z=1.5,\sigma_z^2=0.5^2)$. * What is the **expected** value for the reaction time $z$? This is not a trick question! It's the mean $\mu_z$, of course! * But how do we compute the expected value for any distribution? ## Moments of a distribution ### First moment: Expected value or mean The expected value (mean) of a distribution is the **first moment** of the distribution: $$ \mathbb{E}[z]= \int_{\mathcal{Z}}z p(z) dz $$ where $\mathcal{Z}$ indicates the support of the distribution (the $z$ domain). * Often, $\mathcal{Z}$ is omitted as it is usually between $-\infty$ to $\infty$ * The expected value $\mathbb{E}[z]$ is often denoted by $\mu_z$ As you might expect (pun intended 😆), the expected value is a linear operator: $$ \mathbb{E}[az+b]= a\mathbb{E}[z] + b $$ where $a$ and $b$ are fixed variables (NOT rv's). Additionally, for a set of $n$ rv's, one can show that the expectation of their sum is as follows: $\mathbb{E}\left[\sum_{i=1}^n z_i\right]= \sum_{i=1}^n \mathbb{E}[z_i]$ If they are **independent**, the expectation of their product is given by $\mathbb{E}\left[\prod_{i=1}^n z_i\right]= \prod_{i=1}^n \mathbb{E}[z_i]$ ## Moments of a distribution ### Second moment (and relation to Variance) The 2nd moment of a distribution $p(z)$ is: $$ \mathbb{E}[z^2]= \int_{\mathcal{Z}}z^2 p(z) dz $$ #### Variance can be obtained from the 1st and 2nd moments The variance is a measure of the “spread” of the distribution: $$ \mathbb{V}[z] = \mathbb{E}[(z-\mu_z)^2] = \int (z-\mu_z)^2 p(z) dz = \mathbb{E}[z^2] - \mu_z^2 $$ * It is often denoted by the square of the standard deviation, i.e. 
$\sigma_z^2 = \mathbb{V}[z] = \mathbb{E}[(z-\mu_z)^2]$ #### Elaboration of the variance as a result of the first two moments of a distribution $$ \begin{align} \mathbb{V}[z] & = \mathbb{E}[(z-\mu_z)^2] \\ & = \int (z-\mu_z)^2 p(z) dz \\ & = \int z^2 p(z) dz + \mu_z^2 \int p(z) dz - 2\mu_z \int zp(z) dz \\ & = \mathbb{E}[z^2] - \mu_z^2 \end{align} $$ where $\mu_z = \mathbb{E}[z]$ is the first moment, and $\mathbb{E}[z^2]$ is the second moment. Therefore, we can also write the second moment of a distribution as $$\mathbb{E}[z^2] = \sigma_z^2 + \mu_z^2$$ #### Variance and standard deviation properties The standard deviation is defined as $ \sigma_z = \text{std}[z] = \sqrt{\mathbb{V}[z]}$ The variance of a shifted and scaled version of a random variable is given by $\mathbb{V}[a z + b] = a^2\mathbb{V}[z]$ where $a$ and $b$ are fixed variables (NOT rv's). If we have a set of $n$ independent rv's, the variance of their sum is given by the sum of their variances $$ \mathbb{V}\left[\sum_{i=1}^n z_i\right] = \sum_{i=1}^n \mathbb{V}[z_i] $$ The variance of their product can also be derived, as follows: $$ \begin{align} \mathbb{V}\left[\prod_{i=1}^n z_i\right] & = \mathbb{E}\left[ \left(\prod_i z_i\right)^2 \right] - \left( \mathbb{E}\left[\prod_i z_i \right]\right)^2\\ & = \mathbb{E}\left[ \prod_i z_i^2 \right] - \left( \prod_i\mathbb{E}\left[ z_i \right]\right)^2\\ & = \prod_i \mathbb{E}\left[ z_i^2 \right] - \prod_i\left( \mathbb{E}\left[ z_i \right]\right)^2\\ & = \prod_i \left( \mathbb{V}\left[ z_i \right] +\left( \mathbb{E}\left[ z_i \right]\right)^2 \right)- \prod_i\left( \mathbb{E}\left[ z_i \right]\right)^2\\ & = \prod_i \left( \sigma_{z,\,i}^2 + \mu_{z,\,i}^2 \right)- \prod_i\mu_{z,\,i}^2 \\ \end{align} $$ ## Note about higher-order moments * The $k$-th moment of a distribution $p(z)$ is defined as the expected value of the $k$-th power of $z$, i.e. 
$z^k$: $$ \mathbb{E}[z^k]= \int_{\mathcal{Z}}z^k p(z) dz $$ ## Mode of a distribution The mode of an <a title="random variable">rv</a> $z$ is the value of $z$ for which $p(z)$ is maximum. Formally, this is written as, $$ \mathbf{z}^* = \underset{z}{\mathrm{argmax}}~p(z)$$ If the distribution is multimodal, this may not be unique: * That's why $\mathbf{z}^*$ is in **bold**, to denote that in general it is a vector that is retrieved! * However, if the distribution is unimodal (one maximum), like the univariate Gaussian distribution, then it retrieves a scalar $z^*$ Note that even if there is a unique mode, this point may not be a good summary of the distribution. ## Mean vs mode for a non-symmetric distribution ``` # 1. Create a gamma pdf with parameter a = 2.0 from scipy.stats import gamma # import from scipy.stats the Gamma distribution a = 2.0 # this is the only input parameter needed for this distribution # Define the support of the distribution (its domain) by using the # inverse of the cdf (called ppf) to get the lowest z of the plot that # corresponds to Pr = 0.01 and the highest z of the plot that corresponds # to Pr = 0.99: zrange = np.linspace(gamma.ppf(0.01, a), gamma.ppf(0.99, a), 200) mu_z, var_z = gamma.stats(2.0, moments='mv') # This computes the mean and variance of the pdf fig_gamma_pdf, ax = plt.subplots() # a trick to save the figure for later use ax.plot(zrange, gamma.pdf(zrange, a), label=r"$\Gamma(z|a=2.0)$") ax.set_xlabel("z", fontsize=20) ax.set_ylabel("probability density", fontsize=20) ax.legend(fontsize=15) ax.set_title("Gamma pdf for $a=2.0$", fontsize=20) plt.close(fig_gamma_pdf) # do not plot the figure now. We will show it in a later cell # 2. Plot the expected value (mean) for this pdf ax.plot(mu_z, gamma.pdf(mu_z, a), 'r*', markersize=15, linewidth=2, label=u'$\mu_z = \mathbb{E}[z]$') # 3. 
Calculate the mode and plot it from scipy.optimize import minimize # import minimizer # Finding the maximum of the gamma pdf can be done by minimizing # the negative gamma pdf. So, we create a function that outputs # the negative of the gamma pdf given the parameter a=2.0: def neg_gamma_given_a(z): return -gamma.pdf(z,a) # Use the default optimizer of scipy (L-BFGS) to find the # maximum (by minimizing the negative gamma pdf). Note # that we need to give an initial guess for the value of z, # so we can use, for example, z=mu_z: mode_z = minimize(neg_gamma_given_a,mu_z).x ax.plot(mode_z, np.max(gamma.pdf(mode_z, a)),'g^', markersize=15, linewidth=2,label=u'mode $\mathbf{z}^*=\mathrm{argmax}~p(z)$') ax.legend() # show legend # Code to generate this Gamma distribution hidden during presentation (it's shown as notes) print('The mean is ',mu_z) # print the mean calculated for this gamma pdf print('The mode is approximately ',mode_z) # print the mode fig_gamma_pdf # show figure of this gamma pdf ``` ## The amazing Bayes' rule <font color='red'>Bayesian</font> <font color='blue'>inference</font> definition: * <font color='blue'>Inference</font> means “the act of passing from sample data to generalizations, usually with calculated degrees of certainty”. * <font color='red'>Bayesian</font> is used to refer to inference methods that represent “degrees of certainty” using probability theory, and which leverage Bayes’ rule to update the degree of certainty given data. **Bayes’ rule** is a formula for computing the probability distribution over possible values of an unknown (or hidden) quantity $z$ given some observed data $y$: $$ p(z|y) = \frac{p(y|z) p(z)}{p(y)} $$ Bayes' rule follows automatically from the identity: $p(z|y) p(y) = p(y|z) p(z) = p(y,z) = p(z,y)$ ## The amazing Bayes' rule * I know... You don't find it very amazing (yet!). 
* Wait until you realize that almost all ML methods can be derived from this simple formula $$ p(z|y) = \frac{p(y|z) p(z)}{p(y)} $$ ### See you next class Have fun!
github_jupyter
# Bag-of-words sentiment classification of IMDB movie reviews
# (strip HTML -> letters only -> lowercase -> drop stopwords -> CountVectorizer
#  -> RandomForest), following the Kaggle "Bag of Words Meets Bags of Popcorn"
# tutorial.  Writes a Kaggle-style submission to result.csv.
import pandas as pd

train = pd.read_csv("./datasets/labeledTrainData.tsv", header=0,
                    delimiter='\t', quoting=3)
train.head()
train.shape
train.columns.values
train["review"][0]

# --- Step-by-step cleaning demo on the first review ---
from bs4 import BeautifulSoup

# Name the parser explicitly so the output does not depend on which optional
# parsers (lxml, html5lib) happen to be installed.
example1 = BeautifulSoup(train["review"][0], "html.parser")
example1.get_text()

import re
letters_only = re.sub("[^a-zA-Z]", " ", example1.get_text())  # replace non-letters with spaces
letters_only
lower_case = letters_only.lower()
words = lower_case.split()

# import nltk
# nltk.download()
from nltk.corpus import stopwords
stopwords.words("english")
words = [w for w in words if w not in stopwords.words("english")]
words


def review_to_words(raw_review):
    """Convert one raw HTML review into a single cleaned, space-joined string.

    Steps: strip HTML, keep letters only, lowercase, tokenize on whitespace,
    drop English stopwords.
    """
    review_text = BeautifulSoup(raw_review, "html.parser").get_text()
    letters_only = re.sub("[^a-zA-Z]", " ", review_text)
    words = letters_only.lower().split()
    # A set makes the stopword membership test O(1) instead of O(n).
    stops = set(stopwords.words('english'))
    meaningful_words = [w for w in words if w not in stops]
    return " ".join(meaningful_words)


clean_review = review_to_words(train["review"][0])
clean_review

print("Cleaning and parsing the training set movie reviews...\n")
number = 10000  # how many training reviews to use
num_reviews = train["review"][:number].size
clean_train_reviews = []
for i in range(num_reviews):
    if (i + 1) % 1000 == 0:
        print("Review {} of {}".format(i + 1, num_reviews))
    clean_train_reviews.append(review_to_words(train["review"][i]))

print("creating the bag of words...")
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer(analyzer='word',
                             tokenizer=None,
                             preprocessor=None,
                             stop_words=None,
                             max_features=5000)
train_data_features = vectorizer.fit_transform(clean_train_reviews)
train_data_features = train_data_features.toarray()
train_data_features.shape  # each review is represented by a 5000-dim count vector

vocab = vectorizer.get_feature_names()  # NOTE: renamed get_feature_names_out() in sklearn >= 1.0
vocab  # the 5000 vocabulary terms

import numpy as np
dist = np.sum(train_data_features, axis=0)  # axis=0 sums down each column
for tag, count in zip(vocab, dist):
    print(str(count) + " : " + tag)  # total corpus occurrences of each vocabulary word

from sklearn.model_selection import train_test_split
# Use `number` (not a hard-coded literal) so the label slice always stays
# aligned with the cleaned-review slice above.
X_train, X_test, y_train, y_test = train_test_split(
    train_data_features, train["sentiment"][:number], test_size=0.1)

print("Training the random forest...")
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=50)  # n_estimators=100 scores best
forest = rf_clf.fit(X_train, y_train)

# Evaluate on the held-out 10% split.
from sklearn.metrics import accuracy_score
y_pred = forest.predict(X_test)
accuracy_score(y_pred, y_test)

# --- Score the unlabeled test set and write the submission file ---
test = pd.read_csv("./datasets/testData.tsv", header=0, delimiter="\t", quoting=3)
test.shape
num_reviews = len(test["review"])
clean_test_reviews = []
print("Cleaning and parsing the test set movie reviews...")
for i in range(num_reviews):
    if (i + 1) % 1000 == 0:
        print("Review {} of {}".format(i + 1, num_reviews))
    clean_test_reviews.append(review_to_words(test["review"][i]))

test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
result = rf_clf.predict(test_data_features)

output = pd.DataFrame({
    "id": test["id"],
    "sentiment": result
})
output.to_csv("result.csv", index=False, quoting=3)
github_jupyter
# [Open In Colab] https://colab.research.google.com/github/stephenbeckr/numerical-analysis-class/blob/master/Demos/Ch4_integration.ipynb
#
# # Numerical Integration (quadrature)
# - See also Prof. Brown's integration notebook for CSCI-3656:
#   https://github.com/cu-numcomp/numcomp-class/blob/master/Integration.ipynb
# - Bengt Fornberg's talk "Gregory formulas and improving on the Trapezoidal rule":
#   https://www.colorado.edu/amath/sites/default/files/attached-files/2019_unm_0.pdf

import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import BarycentricInterpolator as interp

# Closed Newton-Cotes weights, from Table 9.2 in Quarteroni, Sacco and Saleri,
# "Numerical Mathematics" (Springer, 2000).  The rule of degree n uses n+1
# equispaced nodes with spacing h, and estimates I ~= h * sum_i w_i f(x_i).
ClosedNewtonCotesWeights = {
    1: [1/2, 1/2],
    2: [1/3, 4/3, 1/3],
    3: [3/8, 9/8, 9/8, 3/8],
    4: [14/45, 64/45, 24/45, 64/45, 14/45],
    5: [95/288, 375/288, 250/288, 250/288, 375/288, 95/288],
    6: [41/140, 216/140, 27/140, 272/140, 27/140, 216/140, 41/140]}
ClosedNewtonCotesNames = {1: "n=1, Trapezoid", 2: "n=2, Simpson's",
                          3: "n=3, Simpson's 3/8", 4: "n=4, Boole's",
                          5: "n=5", 6: "n=6"}

f = lambda x: np.cos(x)
F = lambda x: np.sin(x)   # dF/dx = f
a, b = -1, 2

# Other examples to try
# f = lambda x : x**(3/2)
# F = lambda x : 2/5*x**(5/2)
# a,b = 0,1

# f = lambda x : 1/(1+x**2)  # aka Runge's function
# F = lambda x : np.arctan(x)
# a,b = -5,5

I = F(b) - F(a)   # exact value, via the antiderivative
print("Integral I is {:.3f}".format(I))
x = np.linspace(a, b)
plt.fill_between(x, f(x), alpha=0.5); plt.axvline(color='k'); plt.axhline(color='k');


def demo_rule(n):
    """Apply the closed Newton-Cotes rule of degree n to the module-level
    (f, F, a, b): plot f and its interpolating polynomial, print the exact
    integral, the estimate, and the absolute error.

    Returns (nodes, h, weights) so the caller can inspect the rule.
    """
    print("Using the rule: ", ClosedNewtonCotesNames[n])
    weights = ClosedNewtonCotesWeights[n]
    (nodes, h) = np.linspace(a, b, n + 1, retstep=True)  # retstep also returns the spacing h
    I_estimate = h * np.dot(weights, f(nodes))
    p = interp(nodes, f(nodes))
    x = np.linspace(a, b)
    plt.fill_between(x, f(x), alpha=0.5); plt.axvline(color='k'); plt.axhline(color='k');
    plt.plot(x, p(x), 'r-', label="Interpolating polynomial")
    plt.legend()
    print("True integral: {:.3f}, Estimate: {:.3f}, Abs. Error: {:.5f}".format(
        I, I_estimate, abs(I - I_estimate)))
    return nodes.tolist(), h, weights


# ### Trapezoid (n=1), Simpson's (n=2), Simpson's 3/8 (n=3), Boole's (n=4),
# ### n=5 and n=6 -- previously six copy-pasted cells, now one loop.
for n in range(1, 7):
    demo_rule(n)


# ## Let's try different kinds of functions
def tryAllRules(f, F, a, b):
    """Return the absolute errors of the n = 1..6 closed Newton-Cotes rules
    applied to f on [a, b].  F must be an antiderivative of f (used to get
    the exact answer)."""
    err = []
    for n in range(1, 6 + 1):
        weights = ClosedNewtonCotesWeights[n]
        (nodes, h) = np.linspace(a, b, n + 1, retstep=True)
        I_estimate = h * np.dot(weights, f(nodes))
        I = F(b) - F(a)   # true answer
        err.append(abs(I_estimate - I))
    return np.array(err)


f = lambda x: np.cos(x)
F = lambda x: np.sin(x)   # dF/dx = f
a, b = -1, 2
err1 = tryAllRules(f, F, a, b)

# Other examples to try
f = lambda x: x**(3/2)
F = lambda x: 2/5*x**(5/2)
a, b = 0, 1
err2 = tryAllRules(f, F, a, b)

f = lambda x: x**(11/2)
# BUG FIX: the antiderivative of x**(11/2) is (2/13)*x**(13/2); the notebook
# previously used 2/13*x**(5/13), which made err3 meaningless.
F = lambda x: 2/13*x**(13/2)
a, b = 0, 1
err3 = tryAllRules(f, F, a, b)

# Runge's function
f = lambda x: 1/(1+x**2)
F = lambda x: np.arctan(x)
a, b = -5, 5
err4 = tryAllRules(f, F, a, b)

print("Rows are different n, columns are different functions")
print(np.array2string(np.array([err1, err2, err3, err4]).T, precision=2))

# ### Let's examine Runge's function more closely
#     f(x) = 1/(1+x^2)
# Our error wasn't going down, but the function is C^infinity(R).  Did we make
# a mistake?  No, our formula was correct; the issue is that the f'(xi) term
# (and f''(xi), etc.) are very large.  One way to think of this issue is that
# the function has a **singularity** (though it is on the imaginary axis,
# at +-i).
# (Btw, how do you pronounce Runge?  It's German, and you can listen to native
# speakers say it at https://forvo.com/search/Runge/)
import sympy
from sympy.abc import x   # NOTE: shadows the numeric grid variable `x` above
from sympy import init_printing
from sympy.utilities.lambdify import lambdify
init_printing()
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams.update({'font.size': 20})

f = lambda x: 1/(1+x**2)
F = lambda x: np.arctan(x)
a, b = -5, 5

g = 1/(1+x**2)               # symbolic version
gNumerical = lambdify(x, g)  # avoid sympy plotting

xGrid = np.linspace(a, b, 150)
plt.figure(figsize=(10, 8))
plt.plot(xGrid, gNumerical(xGrid), label='$f(x)$')
# k = 3  # order of derivative
for k in range(1, 6):
    dg = lambdify(x, sympy.diff(g, x, k))
    plt.plot(xGrid, dg(xGrid), label="$f^{(" + str(k) + ")}(x)$")
plt.axvline(color='k'); plt.axhline(color='k');
# plt.legend(prop={'size': 20});
plt.legend()
plt.title("Runge's function");
# sympy.plot(g);  # sympy plots are not so nice
# sympy.plot(sympy.diff(g,x,k));
github_jupyter
# Two-view bundle adjustment with GTSAM: build a factor graph from feature
# matches between two images, seed it with the relative pose recovered from
# the essential matrix, and refine camera poses + landmarks with Dogleg.
import gtsam
import numpy as np
from gtsam.gtsam import (Cal3_S2, DoglegOptimizer,
                         GenericProjectionFactorCal3_S2,
                         NonlinearFactorGraph, Point3, Pose3, Point2,
                         PriorFactorPoint3, PriorFactorPose3, Rot3,
                         SimpleCamera, Values)
from utils import get_matches_and_e, load_image


def symbol(name: str, index: int) -> int:
    """ helper for creating a symbol without explicitly casting 'name' from str to int """
    return gtsam.symbol(ord(name), index)


def get_camera_calibration(fx, fy, s, cx, cy) -> Cal3_S2:
    # Wrap the five pinhole intrinsics (focal lengths fx/fy, skew s,
    # principal point cx/cy) in a GTSAM Cal3_S2 calibration object.
    return Cal3_S2(fx, fy, s, cx, cy)


# Define the camera observation noise model
measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0)  # one pixel in u and v

img1 = load_image('img56.jpg', path='../data/lettuce_home/set6/')
img2 = load_image('img58.jpg', path='../data/lettuce_home/set6/')
# NOTE(review): the code below keys points_1[i] and points_2[i] with the same
# landmark symbol('l', i), i.e. it assumes get_matches_and_e returns aligned
# correspondences -- TODO confirm.
points_1, points_2, e_estimate, r, t = get_matches_and_e(img1, img2)
print(e_estimate)
print(r)
# Rescale the (scale-free) essential-matrix translation so its first
# component equals 0.05; the leading unary '+' is a no-op.
t = +(t/t[0])*0.05
print(t)

# Create a factor graph
graph = NonlinearFactorGraph()
K = get_camera_calibration(644, 644, 0, 213, 387)

# add all the image points to the factor graph
# Camera 1 observations: pose symbol x0, landmark symbols l0..l(n-1).
for (i, point) in enumerate(points_1):
    # wrap the point in a measurement
    # print('adding point for camera1')
    factor = GenericProjectionFactorCal3_S2(
        Point2(point), measurement_noise, symbol('x', 0), symbol('l', i), K)
    graph.push_back(factor)

# Camera 2 observations of the same landmarks, keyed to pose symbol x1.
for (i, point) in enumerate(points_2):
    # print('adding point for camera2')
    factor = GenericProjectionFactorCal3_S2(
        Point2(point), measurement_noise, symbol('x', 1), symbol('l', i), K)
    graph.push_back(factor)

# Add a prior on pose of camera 1.
# 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z
# Camera 1 is anchored at the origin (Rodrigues(0,0,0) is presumably the
# identity rotation -- verify against the GTSAM API).
pose_noise = gtsam.noiseModel_Diagonal.Sigmas(
    np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
factor = PriorFactorPose3(symbol('x', 0),
                          Pose3(Rot3.Rodrigues(0, 0, 0), Point3(0, 0, 0)),
                          pose_noise)
graph.push_back(factor)

# Add a prior on pose of camera 2
# 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z
# Camera 2's prior is the (r, t) recovered from the essential matrix above.
pose_noise = gtsam.noiseModel_Diagonal.Sigmas(
    np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
factor = PriorFactorPose3(symbol('x', 1),
                          Pose3(Rot3(r), Point3(t[0], t[1], t[2])),
                          pose_noise)
graph.push_back(factor)

# Optional prior pinning the first landmark (disabled):
# point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
# factor = PriorFactorPoint3(symbol('l', 0), Point3(1,0,0), point_noise)
# graph.push_back(factor)

graph.print_('Factor Graph:\n')

# Create the data structure to hold the initial estimate to the solution
initial_estimate = Values()
# Camera 1 starts at the identity pose (matching its prior).
r_init = Rot3.Rodrigues(0, 0, 0)
t_init = Point3(0, 0, 0)
transformed_pose = Pose3(r_init, t_init)
initial_estimate.insert(symbol('x', 0), transformed_pose)
# Camera 2 starts at the essential-matrix pose (matching its prior).
r_init = Rot3(r)
t_init = Point3(t[0], t[1], t[2])
transformed_pose = Pose3(r_init, t_init)
initial_estimate.insert(symbol('x', 1), transformed_pose)
# Landmark initial guesses: pixel coordinates scaled by 0.05/640, placed at a
# fixed depth of 0.05 -- a rough guess in front of camera 1.
for j, point in enumerate(points_1):
    initial_estimate.insert(symbol('l', j),
                            Point3(0.05*point[0]/640, 0.05*point[1]/640, 0.05))
initial_estimate.print_('Initial Estimates:\n')

# Optimize the graph and print results
params = gtsam.DoglegParams()
params.setVerbosity('VALUES')
optimizer = DoglegOptimizer(graph, initial_estimate, params)
print('Optimizing:')
result = optimizer.optimize()
result.print_('Final results:\n')
print('initial error = {}'.format(graph.error(initial_estimate)))
print('final error = {}'.format(graph.error(result)))
github_jupyter
# default_exp resimulation

# # Match resimulation
# > Simulating match outcomes based on the xG of individual shots

#hide
try:
    from nbdev.showdoc import *  # notebook-only tooling; optional outside the nbdev dev env
except ImportError:
    pass

#export
import collections.abc
import itertools

import numpy as np

# Poisson-Binomial distribution calculation from
# https://github.com/tsakim/poibin (provided under the MIT License).
# There are plans to package the code (https://github.com/tsakim/poibin/pull/8),
# but for now it is vendored here.


#export
class PoiBin(object):
    """Poisson Binomial distribution for random variables.

    The distribution of the sum of N independent but NOT identically
    distributed Bernoulli trials with success probabilities p_i.  Offers the
    probability mass function (pmf), the cumulative distribution function
    (cdf), and right-sided p-values (pval).  Both pmf and cdf are precomputed
    in the constructor via a discrete-Fourier-transform characteristic-
    function method, so lookups are O(1).
    """

    def __init__(self, probabilities):
        """Initialize the class and precompute the ``pmf`` and ``cdf``.

        :param probabilities: success probabilities p_i in [0, 1] for the N
            independent Bernoulli random variables
        :type probabilities: sequence of floats / numpy.array
        """
        self.success_probabilities = np.array(probabilities)
        self.number_trials = self.success_probabilities.size
        self.check_input_prob()
        # Angular frequency of the DFT over the n+1 possible outcomes.
        self.omega = 2 * np.pi / (self.number_trials + 1)
        self.pmf_list = self.get_pmf_xi()
        self.cdf_list = self.get_cdf(self.pmf_list)

    # --------------------------------------------------------------------------
    # Methods for the Poisson Binomial Distribution
    # --------------------------------------------------------------------------

    def pmf(self, number_successes):
        """Return pmf(k) = Pr(X = k) for k = 0, 1, ..., n.

        :param number_successes: number of successful trials
        :type number_successes: int or list of integers
        """
        self.check_rv_input(number_successes)
        return self.pmf_list[number_successes]

    def cdf(self, number_successes):
        """Return cdf(k) = Pr(X <= k) for k = 0, 1, ..., n.

        :param number_successes: number of successful trials
        :type number_successes: int or list of integers
        """
        self.check_rv_input(number_successes)
        return self.cdf_list[number_successes]

    def pval(self, number_successes):
        """Return right-sided p-values, pval(k) = Pr(X >= k).

        Since cdf(k) = Pr(X <= k),
            pval(k) = 1 - cdf(k - 1) = 1 - cdf(k) + pmf(k),  k = 0, 1, ..., n.

        :param number_successes: number of successful trials
        :type number_successes: int, numpy.array, or list of integers
        """
        self.check_rv_input(number_successes)
        # BUG FIX: the upstream code tested ``collections.Iterable``, an alias
        # removed in Python 3.10, which made every call raise AttributeError.
        if isinstance(number_successes, collections.abc.Iterable):
            pvalues = np.array(number_successes, dtype='float')
            for i, k in enumerate(number_successes):
                pvalues[i] = 1. - self.cdf(k) + self.pmf(k)
            return pvalues
        if number_successes == 0:
            return 1
        return 1 - self.cdf(number_successes - 1)

    # --------------------------------------------------------------------------
    # Methods to obtain pmf and cdf
    # --------------------------------------------------------------------------

    def get_cdf(self, event_probabilities):
        """Return the cumulative sums of the pmf for i = 0, 1, ..., n.

        :param event_probabilities: array of single event probabilities
        :type event_probabilities: numpy.array
        """
        cdf = np.empty(self.number_trials + 1)
        cdf[0] = event_probabilities[0]
        for i in range(1, self.number_trials + 1):
            cdf[i] = cdf[i - 1] + event_probabilities[i]
        return cdf

    def get_pmf_xi(self):
        """Return the pmf values xi(k) = Pr(X = k) via an inverse DFT of the
        characteristic function."""
        chi = np.empty(self.number_trials + 1, dtype=complex)
        chi[0] = 1
        half_number_trials = int(
            self.number_trials / 2 + self.number_trials % 2)
        # Set the first half of the chis directly:
        chi[1:half_number_trials + 1] = self.get_chi(
            np.arange(1, half_number_trials + 1))
        # The second half follows by conjugate symmetry of the DFT of a
        # real-valued pmf:
        chi[half_number_trials + 1:self.number_trials + 1] = np.conjugate(
            chi[1:self.number_trials - half_number_trials + 1][::-1])
        chi /= self.number_trials + 1
        xi = np.fft.fft(chi)
        if self.check_xi_are_real(xi):
            xi = xi.real
        else:
            raise TypeError("pmf / xi values have to be real.")
        # Nudge by machine epsilon so no probability is exactly zero.
        xi += np.finfo(type(xi[0])).eps
        return xi

    def get_chi(self, idx_array):
        """Return the characteristic-function values ``chi`` at the given
        frequency indices.

        :param idx_array: array of indices for which the ``chi`` values
            should be calculated
        :type idx_array: numpy.array
        """
        # get_z:
        exp_value = np.exp(self.omega * idx_array * 1j)
        xy = 1 - self.success_probabilities + \
            self.success_probabilities * exp_value[:, np.newaxis]
        # Sum over the principal values of the arguments of z:
        argz_sum = np.arctan2(xy.imag, xy.real).sum(axis=1)
        # Get the modulus (d value):
        exparg = np.log(np.abs(xy)).sum(axis=1)
        d_value = np.exp(exparg)
        # Recombine modulus and argument:
        chi = d_value * np.exp(argz_sum * 1j)
        return chi

    # --------------------------------------------------------------------------
    # Auxiliary functions
    # --------------------------------------------------------------------------

    def check_rv_input(self, number_successes):
        """Assert that ``number_successes`` is an integer (or a list of
        integers) in [0, number_trials].

        Kept as assertions for upstream compatibility (callers may rely on
        AssertionError).

        :param number_successes: number of successful trials
        :type number_successes: int or list of integers
        """
        try:
            for k in number_successes:
                assert (type(k) == int or type(k) == np.int64), \
                    "Values in input list must be integers"
                assert k >= 0, 'Values in input list cannot be negative.'
                assert k <= self.number_trials, \
                    'Values in input list must be smaller or equal to the ' \
                    'number of input probabilities "n"'
        except TypeError:
            # Scalar input path:
            assert (type(number_successes) == int or
                    type(number_successes) == np.int64), \
                'Input value must be an integer.'
            assert number_successes >= 0, "Input value cannot be negative."
            assert number_successes <= self.number_trials, \
                'Input value cannot be greater than ' + str(self.number_trials)
        return True

    @staticmethod
    def check_xi_are_real(xi_values):
        """Check that all ``xi`` values have (numerically) zero imaginary part.

        :param xi_values: single event probabilities
        :type xi_values: complex
        """
        return np.all(xi_values.imag <= np.finfo(float).eps)

    def check_input_prob(self):
        """Check that all the input probabilities are in the interval [0, 1]."""
        if self.success_probabilities.shape != (self.number_trials,):
            raise ValueError(
                "Input must be an one-dimensional array or a list.")
        if not np.all(self.success_probabilities >= 0):
            raise ValueError("Input probabilities have to be non negative.")
        if not np.all(self.success_probabilities <= 1):
            raise ValueError("Input probabilities have to be smaller than 1.")


#export
def poisson_binomial_pmf(probs, xs):
    """Poisson-binomial pmf for success probabilities ``probs``, evaluated at
    each count in ``xs``."""
    return PoiBin(probs).pmf(xs)


def resimulate_match(shots, up_to=26, min_xg=0.0001, **kwargs):
    """'Resimulate' a match based on xG.

    Takes a list of maps, where each map represents a shot and has
    'is_home' (bool) and 'xg' (float) keys.  Treating every shot as an
    independent Bernoulli trial with success probability xg, returns a list
    of scoreline dicts with the probability of each (home_goals, away_goals)
    outcome up to ``up_to - 1`` goals per side.  Any extra keyword arguments
    are copied into every scoreline dict.
    """
    # Clamp tiny xG values to avoid numerical underflow in the DFT.
    home_xgs = [max(s['xg'], min_xg) for s in shots if s['is_home']]
    away_xgs = [max(s['xg'], min_xg) for s in shots if not s['is_home']]

    # A side cannot score more goals than it had shots.
    home_scores = list(range(min(len(home_xgs) + 1, up_to)))
    away_scores = list(range(min(len(away_xgs) + 1, up_to)))

    home_probs = dict(zip(home_scores,
                          poisson_binomial_pmf(home_xgs, home_scores)))
    away_probs = dict(zip(away_scores,
                          poisson_binomial_pmf(away_xgs, away_scores)))

    scores = []
    for h, a in itertools.product(range(up_to), repeat=2):
        home_prob = home_probs.get(h, 0)
        away_prob = away_probs.get(a, 0)
        scores.append({
            'home_goals': h,
            'away_goals': a,
            'home_probability': home_prob,
            'away_probability': away_prob,
            'probability': home_prob*away_prob,
            **kwargs
        })

    # Keep everything up to 4-4; filter out P == 0 results above that
    return [
        s for s in scores
        if s['probability'] > 0
        or (s['home_goals'] < 5 and s['away_goals'] < 5)
    ]


def extract_prob(probs, home_goals, away_goals):
    """Return the probability of the exact (home_goals, away_goals) scoreline
    from a ``resimulate_match`` result, or 0 if that scoreline is absent."""
    filtered = [p for p in probs
                if p['home_goals'] == home_goals
                and p['away_goals'] == away_goals]
    if len(filtered) == 0:
        return 0
    return filtered[0]['probability']


# --- Sanity checks -----------------------------------------------------------
# A single home shot with xG 0.1 gives a 1-0 probability of 0.1.
probs = resimulate_match([
    {'is_home': True, 'xg': 0.1}
])
assert np.isclose(extract_prob(probs, 1, 0), 0.1)

shots = [
    {"is_home": False, "xg": 0.030929630622267723},
    {"is_home": False, "xg": 0.021505167707800865},
    {"is_home": False, "xg": 0.013733051717281342},
    {"is_home": False, "xg": 0.06314441561698914},
]
probs = resimulate_match(shots)
# All four away shots scoring: P(0-4) is the product of the xGs.
# (np.product was deprecated and removed in NumPy 2.0; np.prod is canonical.)
assert np.isclose(
    extract_prob(probs, 0, 4),
    np.prod([s['xg'] for s in shots])
)
github_jupyter
# Лабораторная работа 9. ООП. ``` import numpy as np import matplotlib.pyplot as plt ``` # 1. Создание классов и объектов В языке программирования Python классы создаются с помощью инструкции `class`, за которой следует произвольное имя класса, после которого ставится двоеточие; далее с новой строки и с отступом реализуется тело класса: ``` class A: # class <имя класса>: pass # <тело класса> ``` Создание экземпляра класса: ``` a = A() # имя_переменной = ИмяКласса() print(a, 'объект класса', type(a)) ``` # 2. Класс как модуль (библиотека) Класс можно представить подобно модулю (библиотеки): - в нем могут быть свои переменные со значениями и функции - у класса есть собственное пространство имен, доступ к которым возможен через имя класса: ``` class CLASS: const = 5 # атрибут класса def adder(v): # функция-метод return v + CLASS.const CLASS.const CLASS.adder(4) ``` # 3. Класс как создатель объектов ``` Object = CLASS() Object.const Object.adder(100) ``` Дело в том, что классы и объекты не просто модули. Класс создает объекты, которые в определенном смысле являются его наследниками (копиями). Это значит, что если у объекта нет собственного поля `const`, то интерпретатор ищет его уровнем выше, то есть в классе. Таким образом, если мы присваиваем объекту поле с таким же именем как в классе, то оно перекрывает, т. е. переопределяет, поле класса: ``` Object.const Object.const = 10 Object.const CLASS.const ``` Видно, что `Object.const` и `CLASS.const` – это разные переменные. `Object.const` находится в пространстве имен объекта `Object`. `CLASS.const` – в пространстве класса `CLASS`. Если не задавать поле `const` объекту `Object`, то интерпретатор поднимется выше по дереву наследования и придет в класс, где и найдет это поле. Методы также наследуются объектами класса. В данном случае у объекта `Object` нет своего собственного метода `adder`, поэтому он ищется в классе `CLASS`. Однако от класса может быть порождено множество объектов. 
И методы предназначаются для обработки объектов. Таким образом, когда вызывается метод, в него надо передать конкретный объект, который он будет обрабатывать. Выражение Object.adder(100) выполняется интерпретатором следующим образом: - Ищу атрибут `adder()` у объекта `Object`. Не нахожу. - Тогда иду искать в класс `CLASS`, так как он создал объект `Object`. - Здесь нахожу искомый метод. Передаю ему объект, к которому этот метод надо применить, и аргумент, указанный в скобках. Другими словами, выражение `Object.adder(100)` преобразуется в выражение `CLASS.adder(Object, 100)`. Таким образом, интерпретатор попытался передать в метод `adder()` класса `CLASS` два параметра – объект `Object` и число `100`. Но мы запрограммировали метод `adder()` так, что он принимает только один параметр. Однако: ``` Object.adder() ``` Получается странная ситуация. Ведь `adder()` вызывается не только через класс, но и через порожденные от него объекты. Однако в последнем случае всегда будет возникать ошибка. Может понадобиться метод с параметрами, но которому не надо передавать экземпляр данного класса. Для таких ситуаций предназначены статические методы. Такие методы могут вызываться через объекты данного класса, но сам объект в качестве аргумента в них не передается. В Python острой необходимости в статических методах нет, так как код может находиться за пределами класса, и программа не начинает выполняться из класса. Если нам нужна просто какая-нибудь функция, мы можем определить ее в основной ветке. Однако в Python тоже можно реализовать статические методы с помощью декоратора `@staticmethod`: ``` class CLASS: const = 5 # атрибут класса @staticmethod def adder(v): # функция-метод return v + CLASS.const Object = CLASS() Object.adder(5) ``` Статические методы в Python – это, по сути, обычные функции, помещенные в класс для удобства и находящиеся в пространстве имен этого класса. Это может быть какой-то вспомогательный код. 
Вообще, если в теле метода не используется ссылка на конкретный объект (чаще всего обозначаемый как `self`), имеет смысл сделать метод статическим. # 4. Изменение полей объекта В Python объекту можно не только переопределять поля и методы, унаследованные от класса, также можно добавить новые, которых нет в классе: ``` Object1 = CLASS() Object2 = CLASS() Object2.str = 'abcd' Object2.str Object1.str CLASS.str ``` Однако в программировании так делать не принято, потому что тогда объекты одного класса будут отличаться между собой по набору атрибутов. Это затруднит автоматизацию их обработки, внесет в программу хаос. Поэтому принято присваивать полям, а также получать их значения, путем вызова методов (сеттеров (`set` – установить) и геттеров (`get` – получить)): ``` class CLASS: def setName(self, n): self.name = n def getName(self): try: return self.name except: return "No name" first = CLASS() second = CLASS() first.setName("Bob") first.getName() print(second.getName()) ``` # 5. Специальные методы # 5.1. Конструктор класса (метод `__init__()`) В объектно-ориентированном программировании конструктором класса называют метод, который автоматически вызывается при создании объектов. Его также можно назвать конструктором объектов класса. Имя такого метода обычно регламентируется синтаксисом конкретного языка программирования. В Python роль конструктора играет метод `__init__()`. В Python наличие пар знаков подчеркивания спереди и сзади в имени метода говорит о том, что он принадлежит к группе методов перегрузки операторов. Если подобные методы определены в классе, то объекты могут участвовать в таких операциях, как сложение, вычитание, вызываться в качестве функций и др. При этом методы перегрузки операторов не надо вызывать по имени. Вызовом для них является сам факт участия объекта в определенной операции. В случае конструктора класса – это операция создания объекта. 
Так как объект создается в момент вызова класса по имени, то в этот момент вызывается метод `__init__()`, если он определен в классе. Необходимость конструкторов связана с тем, что нередко объекты должны иметь собственные свойства сразу. Пусть имеется класс `Person`, объекты которого обязательно должны иметь имя и фамилию: ``` class Person: def setName(self, n, s): self.name = n self.surname = s p1 = Person() p1.setName("Bill", "Ross") ``` Или: ``` class Person: def __init__(self, n, s): self.name = n self.surname = s ``` В свою очередь, конструктор класса не позволит создать объект без обязательных полей: ``` p2 = Person() p2 = Person("Sam", "Baker") print(p2.name, p2.surname) ``` Однако бывает, что надо допустить создание объекта, даже если никакие данные в конструктор не передаются. В таком случае параметрам конструктора класса задаются значения по умолчанию: ``` class Rectangle: def __init__(self, w = 0.5, h = 1): self.width = w self.height = h def square(self): return self.width * self.height rec1 = Rectangle(5, 2) rec2 = Rectangle() rec3 = Rectangle(3) rec4 = Rectangle(h = 4) print(rec1.square()) print(rec2.square()) print(rec3.square()) print(rec4.square()) ``` # 5.2. Конструктор и деструктор Помимо конструктора объектов, в языках программирования есть обратный ему метод – деструктор. Он вызывается для уничтожения объекта. В языке программирования Python объект уничтожается, когда исчезают все связанные с ним переменные или им присваивается другое значение, в результате чего связь со старым объектом теряется. Удалить переменную можно с помощью команды языка `del`. В классах Python функцию деструктора выполняет метод `__del__()`. 
``` class Student: def __init__(self, name, surname, position=1): self.name = name self.surname = surname self.position = position def display(self): return self.name, self.surname, self.position def __del__(self): print ("Goodbye %s %s" %(self.name, self.surname)) p1 = Student('big', 'dude', 3) p2 = Student('small', 'croon', 4) p3 = Student('neutral', 'guy', 5) print (p1.display()) print (p2.display()) print (p3.display()) del p2 print(p2.display()) ``` # 5.3. Специальные методы В Python есть ряд зарезервированных имен методов создаваемого класса – специальные (или стандартные) методы. Более подробную информацию о них вы можете найти в соответствующей [документации по Python](https://docs.python.org/3/reference/datamodel.html). Например: `__bool__()` Возвращает True или False. `__call__()` Позволяет использовать объект как функцию, т.е. его можно вызвать. `__len__()` Чаще всего реализуется в коллекциях и сходными с ними по логике работы типами, которые позволяют хранить наборы данных. Для списка (`list`) `__len__()` возвращает количество элементов в списке, для строки – количество символов в строке. Вызывается функцией `len()`, встроенной в язык Python. # Метод `__setattr__()` В Python атрибуты объекту можно назначать за пределами класса: ``` class A: def __init__(self, v): self.field1 = v a = A(10) a.field2 = 20 print(a.field1, a.field2) ``` Если такое поведение нежелательно, его можно запретить с помощью метода перегрузки оператора присваивания атрибуту `__setattr__()`: ``` class A: def __init__(self, v): self.field1 = v def __setattr__(self, attr, value): if attr == 'field1': self.__dict__[attr] = value else: raise AttributeError('Произошло обращение к несуществующему атрибуту!') a = A(15) a.field1 a.field2 = 30 a.field2 a.__dict__ ``` Метод `__setattr__()`, если он присутствует в классе, вызывается всегда, когда какому-либо атрибуту выполняется присваивание. Обратите внимание, что присвоение несуществующему атрибуту также обозначает его добавление к объекту. 
Когда создается объект `a`, в конструктор передается число `15`. Здесь для объекта заводится атрибут `field1`. Факт попытки присвоения ему значения тут же отправляет интерпретатор в метод `__setattr__()`, где проверяется, соответствует ли имя атрибута строке `'field1'`. Если так, то атрибут и соответствующее ему значение добавляются в словарь атрибутов объекта. Нельзя в `__setattr__()` написать просто `self.field1 = value`, так как это приведет к новому рекурсивному вызову метода `__setattr__()`. Поэтому поле назначается через словарь `__dict__`, который есть у всех объектов, и в котором хранятся их атрибуты со значениями. Если параметр `attr` не соответствует допустимым полям, то искусственно возбуждается исключение `AttributeError`. Мы это видим, когда в основной ветке пытаемся обзавестись полем `field2`. # Пример 1. Числа Фибоначчи Последовательность чисел Фибоначчи задаётся рекуррентным выражением: $$ F_n = \begin{cases} 0, n = 0, \\ 1, n = 1, \\ F_{n-1}+F_{n-2}, n > 1. \end{cases} $$ Что даёт следующую последовательность: {0, 1, 1, 2, 3, 5, 8, 13, 21, 34, …}. Один из способов решения, который может показаться логичным и эффективным, — решение с помощью рекурсии: ``` def Fibonacci_Recursion(n): if n == 0: return 0 if n == 1: return 1 return Fibonacci_Recursion (n-1) + Fibonacci_Recursion (n-2) ``` Используя такую функцию, мы будем решать задачу «с конца» — будем шаг за шагом уменьшать n, пока не дойдем до известных значений. Но, как мы видели ранее, эта реализация многократно повторяет решение одних и тех же задач. Это связано с тем, что одни и те же промежуточные данные вычисляются по несколько раз, а число операций нарастает с той же скоростью, с какой растут числа Фибоначчи — экспоненциально. Один из выходов из данной ситуации — сохранение уже найденных промежуточных результатов с целью их повторного использования (кеширование). Причём кеш должен храниться во внешней области памяти. 
``` def Fibonacci_Recursion_cache(n, cache): if n == 0: return 0 if n == 1: return 1 if cache[n] > 0: return cache[n] cache[n] = Fibonacci_Recursion_cache (n-1, cache) + Fibonacci_Recursion_cache (n-2, cache) return cache[n] ``` Приведенное решение достаточно эффективно (за исключением накладных расходов на вызов функций). Но можно поступить ещё проще: ``` def Fibonacci(n): fib = [0]*max(2,n) fib[0] = 1 fib[1] = 1 for i in range(2, n): fib[i] = fib[i - 1] + fib[i - 2] return fib[n-1] ``` Такое решение можно назвать решением «с начала» — мы первым делом заполняем известные значения, затем находим первое неизвестное значение, потом следующее и т.д., пока не дойдем до нужного. Так и работает динамическое программирование: сначала решили все подзадачи (нашли все `F[i]` для `i < n`), затем, зная решения подзадач, нашли решение исходной задачи. # Упражнение 1 Создайте класс для вычисления чисел Фибоначчи. Каждое число Фибоначчи является объектом этого класса, которое имеет атрибуты: значение и номер. Используйте функции для инициализации (вычисления) чисел Фибоначчи как сторонние по отношению к этому классу. ``` class Fiber: n = 1 def __init__(self, n): self.n = n def calculate(self): return Fibonacci(self.n) k = Fiber(int(input('Введите необходимое число: '))) print(k.calculate()) ``` # Упражнение 2 Поместите функции для вычисления чисел Фибоначчи внутрь созданного класса как статические функции. ``` class Fiber2: # метод без @staticmethod, но принимает только число и не требует объекта def calculate(n): fib = [0]*max(2,n) fib[0] = 1 fib[1] = 1 for i in range(2, n): fib[i] = fib[i - 1] + fib[i - 2] return fib[n-1] # пример использования: print(Fiber2.calculate(int(input('Введите необходимое число: ')))) ``` # Упражнение 3 Перегрузите операции сложения, вычитания, умножения и деления для созданного класса как операции с номерами чисел Фибоначчи. 
``` class FiberSuper: def __init__(self, n): self.setNumber(n) def setNumber(self, n): self.n = n self.fib = Fiber2.calculate(n) def getNumber(self): return self.n def getFibonacci(self): return self.fib def __add__(self1, self2): return FiberSuper(self1.n + self2.n) def __mul__(self1, self2): return FiberSuper(self1.n * self2.n) def __sub__(self1, self2): return FiberSuper(abs(self1.n - self2.n)) def __truediv__(self1, self2): return FiberSuper(self1.n // self2.n) k1 = FiberSuper(16) k2 = FiberSuper(8) print('k1: ', k1.getNumber(), ' - ', k1.getFibonacci()) print('k2: ', k2.getNumber(), ' - ', k2.getFibonacci()) print('k1 + k2: ', (k1 + k2).getNumber(), ' - ', (k1 + k2).getFibonacci()) print('k1 * k2: ', (k1 * k2).getNumber(), ' - ', (k1 * k2).getFibonacci()) print('k1 - k2: ', (k1 - k2).getNumber(), ' - ', (k1 - k2).getFibonacci()) print('k1 / k2: ', (k1 / k2).getNumber(), ' - ', (k1 / k2).getFibonacci()) ``` # Домашнее задание (базовое): # Задание 1. Создать класс с двумя переменными. Добавить функцию вывода на экран и функцию изменения этих переменных. Добавить функцию, которая находит сумму значений этих переменных, и функцию, которая находит наибольшее значение из этих двух переменных. ``` class Couple: def __init__(self, x, y): self.x = x self.y = y def setFirst(self, x): self.x = x def getFirst(self): return self.x def setSecond(self, y): self.y = y def getSecond(self): return self.y def out(self): print('First: ', self.x) print('Second: ', self.y) def getSum(self): return self.x + self.y def getMax(self): return max(self.x, self.y) Beta = Couple(12, 8) Beta.out() print() print('Sum: ', Beta.getSum()) print('Max: ', Beta.getMax()) ``` # Задание 2. Составить описание класса многочленов от одной переменной, задаваемых степенью многочлена и массивом коэффициентов. 
Предусмотреть методы для вычисления значения многочлена для заданного аргумента, операции сложения, вычитания и умножения многочленов с получением нового объекта-многочлена, вывод на экран описания многочлена. ``` class Polynom: ''' Полином исключительно положительных степеней (это нужно для интерактивного задания) Можно было сделать и лучше, как всегда) ''' def __init__(self, polynom = None): if polynom is not None: self.__dict__.update(polynom) return power = int(input('Введите степень многочлена: ')) for each in range(power, -1, -1): try: self.__dict__.update({str('power' + str(each)): float(input(f'Введите коэффициент при одночлене со степенью {each}: '))}) except: self.__dict__.update({str('power' + str(each)): 0}) def count(self, x): value = 0 for each in self.__dict__.keys(): value += self.__dict__[each] * (x ** int(each[5:])) return value def form(self): form = '' keys = list(self.__dict__.keys()) keys.sort() keys.reverse() for each in keys: if self.__dict__[each] == 0: continue if form != '': form += ' + ' form += '(' + str(self.__dict__[each]) + ')' + ('*x**(' + each[5:] + ')') * int(bool(int(each[5:]))) return form def __add__(self1, self2): coefficients = {} for obj in [self1, self2]: for key in obj.__dict__.keys(): if key not in coefficients.keys(): coefficients[key] = obj.__dict__[key] else: coefficients[key] += obj.__dict__[key] return Polynom(coefficients) def __sub__(self1, self2): coefficients = self1.__dict__.copy() for key in self2.__dict__.keys(): if key not in coefficients.keys(): coefficients[key] = 0 - (self2.__dict__[key]) else: coefficients[key] -= self2.__dict__[key] return Polynom(coefficients) def __mul__(self1, self2): coefficients = {} for key1 in self1.__dict__.keys(): for key2 in self2.__dict__.keys(): new_key = 'power' + str(int(key1[5:]) * int(key2[5])) if new_key not in coefficients.keys(): coefficients[new_key] = self1.__dict__[key1] * self2.__dict__[key2] else: coefficients[new_key] += self1.__dict__[key1] * 
self2.__dict__[key2] return Polynom(coefficients) parabole = Polynom() print('Значение функции:', parabole.count(float(input('Введите значение аргумента: ')))) polynom1 = Polynom() print(polynom1.form()) polynom2 = Polynom() print(polynom2.form()) print((polynom1 + polynom2).form()) print('Значение суммы функций в точке равно:', (polynom1 + polynom2).count(float(input('Введите значение аргумента: ')))) print('Значение разности функций в точке равно:', (polynom2 - polynom1).count(float(input('Введите значение аргумента: ')))) print('Форма произведения функций представляется в виде y =', (polynom1 * polynom2).form()) print((polynom1 * polynom2).count(12)) ``` # Задание 3. Составить описание класса для вектора, заданного координатами его концов в трехмерном пространстве. Обеспечить операции сложения и вычитания векторов с получением нового вектора (суммы или разности), вычисления скалярного произведения двух векторов, длины вектора, косинуса угла между векторами. ``` class Vector: def __init__(self, dot1, dot2): self.begin = dot1 self.end = dot2 self.entity = [ self.end[0] - self.begin[0], self.end[1] - self.begin[1], self.end[2] - self.begin[2] ] self.length = ( (self.entity[0]) ** 2 + (self.entity[1]) ** 2 + (self.entity[2]) ** 2 ) ** 0.5 def __add__(self1, self2): return Vector([self1.begin[0], self1.begin[1], self1.begin[2]], [(self1.end[0] + self2.entity[0]), (self1.end[1] + self2.entity[1]), (self1.end[2] + self2.entity[2])]) def __sub__(self1, self2): return Vector([self1.begin[0], self1.begin[1], self1.begin[2]], [(self1.end[0] - self2.entity[0]), (self1.end[1] - self2.entity[1]), (self1.end[2] - self2.entity[2])]) def __mul__(self1, self2): return self1.entity[0] * self2.entity[0] + self1.entity[1] * self2.entity[1] + self1.entity[2] * self2.entity[2] def getLength(self): return self.length def getCos(self1, self2): return self1 * self2 / (self1.getLength() * self2.getLength()) def about(self): print('Вектор №%i:' % id(self)) print('\tКоординаты вектора:', 
self.entity) print('\tКоординаты начальной точки:', self.begin) print('\tКоординаты конечной точки:', self.end) print('\tДлина вектора:', self.length) vectors = [] for i in range(2): print('Задаём %i-й вектор.' % i) x1, y1, z1 = map(float, input('Введите координаты первой точки через пробел: ').split()) x2, y2, z2 = map(float, input('Введите координаты второй точки через пробел: ').split()) vectors.append(Vector([x1, y1, z1], [x2, y2, z2])) v1 = vectors[0] v1.about() v2 = vectors[1] v2.about() print('Сложим векторы.') (v1 + v2).about() print('Вычтем векторы друг из друга.') (v1 - v2).about() (v2 - v1).about() print('Длины векторов совпадают.' * int((v1 - v2).getLength() == (v2 - v1).getLength())) print('Найдём скалярное произведение векторов.') print('v1 * v2 =', v1 * v2) print('Найдём косинус угла (в радианах) между векторами.') print('cos(v1, v2) =', Vector.getCos(v1, v2)) ``` # Задание 4. Поезда. Создайте структуру с именем `train`, содержащую поля: - название пунктов отправления и назначения; - время отправления и прибытия. Перегрузить операцию сложения: два поезда можно сложить, если пункт назначения первого совпадает с пунктом отправления второго, и время прибытия первого раньше, чем отправление второго. 
``` from time import mktime, gmtime, strptime, strftime mktime(gmtime()) class Train: def __init__(self, times = None, stations = None, united = False): if times is None and stations is None: self.buyTicket() return self.departure_time = times[0] self.arrival_time = times[1] self.departure_station = stations[0] self.arrival_station = stations[1] self.road_time = self.arrival_time - self.departure_time self.united = united def buyTicket(self): self.departure_station = input('Вы покупаете билет на поезд.\n\tУкажите станцию отправления: ') self.departure_time = mktime(strptime(input('\tКогда отправляется поезд?\n\t\tВведите дату (число.месяц.год): '), '%d.%m.%Y')) self.departure_time += mktime(strptime(input('\t\tВведите время (часы:минуты): '), '%H:%M')) self.arrival_station = input('\tУкажите станцию прибытия: ') self.arrival_time = mktime(strptime(input('\tКогда прибывает поезд?\n\t\tВведите дату (ЧЧ.ММ.ГГГГ): '), '%d.%m.%Y')) self.arrival_time += mktime(strptime(input('\t\tВведите время (ЧЧ:ММ): '), '%H:%M')) self.united = False print('Спасибо за покупку! Ваш билет - под номером %i.' 
% id(self)) def about(self): print('Поезд %s - %s%s' % (self.departure_station, self.arrival_station, ' (ОБЪЕДИНЁННЫЙ)' * int(self.united))) print('\tВремя отправления: %s' % strftime('%a, %d %b %Y %H:%M', gmtime(self.departure_time))) print('\tВремя прибытия: %s' % strftime('%a, %d %b %Y %H:%M', gmtime(self.arrival_time))) print('\tБилет на поезд: %i' % id(self)) print('\tВремени в пути: %i часов %i минут' % ((self.arrival_time - self.departure_time) // 3600, (self.arrival_time - self.departure_time) % 3600 // 60)) def __add__(self1, self2): if self1.arrival_station == self2.departure_station and self1.arrival_time < self2.departure_time: return Train(times = [self1.departure_time, self2.arrival_time], stations = [self1.departure_station, self2.arrival_station], united = True) MSK_SPB = Train([mktime(strptime('26.12.2019 18:30', '%d.%m.%Y %H:%M')), mktime(strptime('27.12.2019 5:39', '%d.%m.%Y %H:%M'))], ['Москва', 'Санкт-Петербург'], False) SPB_HSK = Train([mktime(strptime('27.12.2019 12:00', '%d.%m.%Y %H:%M')), mktime(strptime('01.01.2020 15:26', '%d.%m.%Y %H:%M'))], ['Санкт-Петербург', 'Хельсинки'], False) MSK_SPB = Train() SPB_HSK = Train() MSK_SPB.about() SPB_HSK.about() (MSK_SPB + SPB_HSK).about() ``` # Домашнее задание (дополнительное): # Библиотека. Описать класс «библиотека». Предусмотреть возможность работы с произвольным числом книг, поиска книги по какому-либо признаку (например, по автору или по году издания), добавления книг в библиотеку, удаления книг из нее, сортировки книг по разным полям. 
``` class Book: def __init__(self, title = None, authors = None, link = None, description = None, language = None, yearOfPublishing = None, publishingHouse = None, ISBN = None, volume = None, cost = None, ageLimit = None): self.title = title self.authors = authors self.link = link # здесь располагается ссылка на книгу в интернете try: self.mainAuthor = self.authors.pop(0) except: self.mainAuthor = None self.description = description self.language = language self.yearOfPublishing = yearOfPublishing self.publishingHouse = publishingHouse self.ISBN = ISBN self.volume = volume self.cost = cost self.ageLimit = ageLimit def split_str(string, length): for i in range(0, len(string), length): yield string[i : i + length].strip() def new(): print('Вы написали книгу? Поздравляем! Давайте заполним информацию о ней и опубликуем!') try: self = Book() self.title = input('\tУкажите название книги: ') self.mainAuthor = input('\tУкажите ваше ФИО - или инициалы: ') self.authors = list(map(str, input('\tБыли ли у вашей книги соавторы? ' + 'Укажите их через запятую - или оставьте поле ввода пустым: ').split(', '))) self.description = input('\tВведите описание своей книги: ') self.language = input('\tВведите язык, на котором вы написали книгу: ') self.ageLimit = int(input('\tВведите возраст, с которого вашу книгу можно читать: ')) self.volume = int(input('\tВведите объём печатного текста в страницах формата А5: ')) self.link = input('\tНаконец, если ваша книга опубликована, укажите на неё ссылку - или оставьте поле ввода пустым: ') if self.link == '': self.link = None if input('\tКстати, не хотите её опубликовать?) Введите "Да", чтобы перейти к публикации: ') == 'Да': self.publish() print('\tИнформация о книге успешно заполнена.') except: print('Оу... 
К сожалению, информация о книге была введена неправильно, и создание электронной версии ' + 'не может быть продолжено.') self = None finally: return self def publish(self): print() print('### Статья "Публикация книг", автор - Титов Климентий.') print('"""') print('Опубликовать свою книгу позволяет платформа Самиздата от Литрес: https://selfpub.ru/. ' + 'Выполняйте следующую последовательность действий:') print('1. Зарегистрируйтесь на сайте') print('2. Сохраните текст работы в документе DOCX или книге FB2') print('3. Укажите всю необходимую информацию о книге') print('4. Выберите способ распространения книги. Например, чтобы иметь возможность распространять печатную версию, ' + 'выберите Базовый или Безлимитный способ') print('5. Создайте эстетичную обложку') print('6. И, наконец, отправьте книгу на модерацию.') print('После успешной модерации ваша книга будет автоматически опубликована. Не забудьте заполнить данные о книге ' + 'здесь: вам нужно будет задать необходимые значения при помощи методов setISBN(ISBN), setYearOfPublishing' + '(yearOfPublishing), setPublishingHouse(publishingHouse), setCost(cost) и setLink(link). 
' + 'И, конечно же, наслаждайтесь результатом!') print('"""') print() def setISBN(self, ISBN): self.ISBN = ISBN def setYearOfPublishing(self, yearOfPublishing): self.yearOfPublishing = yearOfPublishing def setPublishingHouse(self, publishingHouse): self.publishingHouse = publishingHouse def setCost(self, cost): self.cost = cost def setLink(self, link): self.link = link def about(self): print(f'Книга "{self.title}"') print(f'\tАвтор - {self.mainAuthor}') if self.authors != []: print('\tСоавторы:') for author in self.authors: print(f'\t\t{author}') if self.description: print('\tОписание:') print('\t\t"""\n\t\t' + '\n\t\t'.join(Book.split_str(self.description, 80)) + '\n\t\t"""') if self.language: print(f'\tЯзык: {self.language}') if self.yearOfPublishing: print(f'\tГод публикации - {self.yearOfPublishing}') if self.publishingHouse: print(f'\tИздательство: {self.publishingHouse}') if self.ISBN: print(f'\tISBN: {self.ISBN}') if self.volume: print(f'\tОбъём книги: {self.volume} стр.') if self.cost: print(f'\tСтоимость книги: {self.cost} руб.') if self.ageLimit: print(f'\tВозрастное ограничение: {self.ageLimit}+') if self.link: print(f'\tСсылка на книгу: {self.link}') def properties(): return ['mainAuthor', 'authors', 'description', 'language', 'yearOfPublushing', 'publishingHouse', 'ISBN', 'volume', 'cost', 'ageLimit', 'link'] _1984 = Book(title = '1984', authors = ['Джордж Оруэлл'], link = 'https://www.litres.ru/dzhordzh-oruell/1984/', description = 'Своеобразный антипод второй великой антиутопии XX века – «О дивный новый мир» ' + 'Олдоса Хаксли. Что, в сущности, страшнее: доведенное до абсурда «общество потребления» ' + '– или доведенное до абсолюта «общество идеи»? 
По Оруэллу, нет и не может быть ничего ужаснее ' + 'тотальной несвободы…', language = 'Русский', yearOfPublishing = 2014, publishingHouse = 'Издательство АСТ', ISBN = '978-5-17-080115-2', volume = 320, cost = 119, ageLimit = 16) _1984.about() Satan = Book.new() Property = Book.new() Seveina = Book(title = 'Севейна', authors = ['Титов Климентий', 'Снежская Виктория'], yearOfPublishing = 2019) TheOldManandtheSea = Book(title = 'The Old Man and the Sea', authors = ['Эрнест Хемингуэй']) TheGreatGatsby = Book(title = 'The Great Gatsby', authors = ['Фрэнсис Фиджеральд Скотт']) class Library: storage = {} # формат данных {ID: Book} readers = {} # формат данных {ФИО: взятые книги [ID1, ID2,..]} def __init__(self, name = None, address = None, owner = None, workers = None, contacts = None): self.name = name self.address = address self.owner = owner self.workers = workers self.contacts = contacts def printWorkers(self): print('Сотрудники:') for workerIndex in range(len(self.workers)): name = self.workers[workerIndex] print(f'\tID {workerIndex}\tФИО {name}') def printBooks(self, sortingKey = 'order'): print('Книги:') if sortingKey == 'order':Именно for bookIndex in self.storage.keys(): title = self.storage[bookIndex].title print(f'\tID {bookIndex}\tНазвание "{title}"') else: try: books = list(self.storage.items()) books.sort(key=lambda i: i[1][eval(sortingKey)]) for book in books: prop = eval(f'book[1].{sortingKey}') ID = book[0] print(f'\tID {ID}\tСвойство "{sortingKey}": {prop}') except: print('Не удалось вывести отсортированный список книг.') def printReaders(self): print('Читатели:') for reader in self.readers.keys(): books = self.readers[reader] print(f'\tФИО {reader}\tКниги: {books}') def isInProcess(self, ID): for reader in self.readers.keys(): if ID in self.readers[reader]: return True return False def shell(self): print(f'Оболочка библиотеки "{self.name}":') print(f'\tРабота с организацией') print(f'\t000. Добавить сотрудника') print(f'\t001. 
Удалить сотрудника') print(f'\tРабота с книгами') print(f'\t100. Добавить книгу') print(f'\t101. Удалить книгу') print(f'\t102. Вывести список книг') print(f'\t103. Принудительно вернуть книгу') print(f'\t104. Отредактировать свойства книги') print(f'\t105. Поиск по библиотеке') print(f'\t106. Просмотр свойств книги') print(f'\tРабота с читателями') print(f'\t200. Добавить нового читателя') print(f'\t201. Удалить читателя (если список задолженностей пуст)') print(f'\t202. Взять книгу') print(f'\t203. Вернуть книгу') print(f'\tВнештатные ситуации') print(f'\t300. Книга утеряна') print(f'\t301. Написана новая книга') print(f'\t302. Ликвидировать предприятие') print(f'\t-1. Выйти из оболочки') while True: action = input('Введите номер действия: ') if action == '000': # добавить сотрудника self.workers.append(input('Введите ФИО нового сотрудника: ')) print('Сотрудник успешно добавлен.') elif action == '001': # удалить сотрудника self.printWorkers() ID = input('Введите ID работника, которого хотите уволить - или оставьте поле ввода пустым: ') if ID == '': continue try: ID = int(ID) del self.workers[ID] except: print('Попытка увольнения не удалась. 
Может, ваш сотрудник восстал против вас?..') elif action == '100': # добавить книгу corners = list(map(str, input('Перечислите названия объектов Book через точку с запятой, если они заданы - ' + 'или оставьте поле ввода пустым: ').split('; '))) self.append(corners) elif action == '101': # удалить книгу self.printBooks() try: ID = int(input('Введите id книги: ')) self.remove(ID) except: print('Удаление книги не удалось.') elif action == '102': # вывести список книг self.printBooks() elif action == '103': # принудительный возврат книги self.printBooks() try: ID = int(input('Введите id книги: ')) self.back(ID) except: print('Возврат книги не удался.') elif action == '104': # отредактировать свойства книги self.printBooks() try: ID = int(input('Введите id книги: ')) print('Достуные свойства редактирования:', Book.properties()) key = input('Введите свойство книги, которое вы хотите отредактировать (будьте внимательны при написании свойства): ') value = input('Введите значение (строки - в кавычках, числа - без, списки поддерживаются): ') book = self.storage[ID] exec(f'book.{key} = {value}') except: print('Возврат книги не удался.') elif action == '105': # поиск по библиотеке print('Достуные свойства поиска:', Book.properties()) key = input('Введите свойство книги, по которому вы хотите найти книги (будьте внимательны при написании свойства): ') value = input('Введите значение (строки - в кавычках, числа - без, списки поддерживаются): ') try: for bookIndex in self.storage.keys(): if eval(f'self.storage[bookIndex].{key}') == eval(value): title = self.storage[bookIndex].title print(f'\tID {bookIndex}\tНазвание {title}') except: print('Поиск не удался.') elif action == '106': # просмотр свойств книги self.printBooks() try: ID = int(input('Введите id книги: ')) self.storage[ID].about() except: print('Просмотр свойств не удался.') elif action == '200': # добавить читателя name = input('Введите ФИО: ') if name not in self.readers.keys(): self.readers[name] = [] print('Читатель 
успешно добавлен.') else: print('Такой читатель уже существует.') elif action == '201': # удалить читателя self.printReaders() name = input('Введите ФИО: ') if name not in self.readers.keys(): print('Такого читателя не существует.') continue elif self.readers[name] != []: print('Читатель не вернул все книги.') continue else: del self.readers[name] print('Удаление прошло успешно.') elif action == '202': # взять книгу self.printBooks() try: ID = int(input('Введите id книги: ')) if not self.isInProcess(ID): self.printReaders() name = input('Введите ФИО: ') self.readers[name].append(ID) print('Книга взята.') else: print('Книга сейчас у читателя, её нельзя взять.') except: print('Взять такую книгу нельзя.') elif action == '203': # вернуть книгу self.printReaders() try: name = input('Введите ФИО: ') books = self.readers[name] for book in books: title = self.storage[book].title print(f'\tID {book}\tНазвание "{title}"') ID = int(input('Введите id книги: ')) self.readers[name].remove(ID) except: print('Книгу вернуть не удалось.') elif action == '300': # книга утеряна self.printBooks() try: ID = int(input('Введите id книги: ')) self.bookWasLost(ID) except: print('Пропажа не была зарегистрирована.') elif action == '301': # написана новая книга book = Book.new() if book is not None: self.append([book]) print('Книга добавлена в библиотеку.') elif action == '302': # ликвидировать предприятие really = (input('Вы уверены? Введите "Да" - или оставьте поле ввода пустым: ') == 'Да') if really: self.__del__() elif action == '-1': return def new(): print('Вы решили создать свою собственную библиотеку?! Да здравствует либертарианский рынок!') self = Library() self.owner = input('\tПрежде всего, укажите ФИО человека, который будет владельцем библиотеки: ') self.workers = list(map(str, input('\tВы уже наняли работников? Если да, перечислите их через запятую ' + '- или оставьте поле ввода пустым: ').split(', '))) self.name = input('\tКак вы назовёте своё предприятие? 
') self.address = input('\tУкажите юридический адрес: ') self.contacts = list(map(str, input('\tУкажите контакты организации (номер телефона, эл. почту, ссылки) через пробел: ').split())) print('Поздравляем, вы не иначе как создали свою библиотеку! Можете подавать документы на регистрацию предприятия в ФНС России!') return self def append(self, books): if books == []: self.setupCard() for book in books: try: ID = self.getNewID() exec(f'self.storage[ID] = {book}') except: print(f'Объекта {book} не существует.') def setupCard(self): print('Заполнение информации о новой книге библиотеки.') try: book = Book() book.title = input('\tНазвание книги: ') book.mainAuthor = input('\tАвтор: ') book.authors = list(map(str, input('\tСоавторы (через запятую): ').split(', '))) book.description = input('\tАннотация: ') book.language = input('\tЯзык текста: ') book.ageLimit = int(input('\tМинимальный возраст читателя: ')) book.volume = int(input('\tОбъём печатного текста в страницах формата А5: ')) book.ISBN = input('\tISBN: ') book.yearOfPublishing = int(input('\tГод публикации: ')) book.publishingHouse = input('\tИздательство: ') book.cost = int(input('\tСтоимость: ')) book.link = input('\tСсылка на книгу в Интернете: ') ID = self.getNewID() self.storage[ID] = book print('Книга была успешно добавлена в библиотеку.') except: print('Заполнение было прервано из-за некорректных данных. 
Будьте внимательны - попробуйте ещё раз.') def getNewID(self): if len(list(self.storage.keys())) != 0: return max(list(storage.keys())) + 1 else: return 1 def bookWasLost(self, ID): print('Стоимость книги должны возместить.') def remove(self, ID): if input('Введите "Да", чтобы подтвердить удаление книги: ') == 'Да': if ID in self.storage.keys(): del self.storage[ID] for readerIndex in self.readers.keys(): if ID in self.readers[readerIndex]: self.readers[readerIndex].remove(ID) print('Удаление совершено успешно.') def back(self, ID): for readerIndex in self.readers.keys(): if ID in self.readers[readerIndex]: self.readers[readerIndex].remove(ID) print('Возврат совершён успешно.') break AGATA = Library.new() Beta = Library(name = 'Beta', address = '', owner = 'Mark CDA', workers = [], contacts = []) AGATA.shell() Seveina = Book('Севейна') ``` # Обобщённое число. Создайте класс, обобщающий понятие комплексных, двойных и дуальных чисел. Такие числа объединены одной формой записи: $$ c = a + ib,$$ где `c` – обобщённое число (комплексное, двойное или дуальное), `a` и `b` – вещественные числа, i – некоммутирующий символ. Именно из-за наличия символа `i` число `c` не просто сумма `a` и `b`. Такие числа можно представлять как вектор на плоскости `(a,b)`. А символ `i` обладает следующим свойством: - для комплексных чисел $$ i^2 = -1 $$ - для двойных чисел $$ i^2 = 1 $$ - для дуальных чисел $$ i^2 = 0 $$ Перегрузить для них базовые операции: сложения, вычитания, умножения и деления. Например, операция умножения для таких чисел имеет вид: $$ (a_1+b_1i)\cdot (a_2+b_2i)=a_1a_2+b_1a_2i+a_1b_2i+b_1b_2i^{2}=(a_1a_2+b_1b_2i^{2})+(b_1a_2+a_1b_2)i. $$ Статус: `задание не решено`.
github_jupyter
# Qcodes example with InstrumentGroup driver This notebooks explains how to use the `InstrumentGroup` driver. ## About The goal of the `InstrumentGroup` driver is to combine several instruments as submodules into one instrument. Typically, this is meant to be used with the `DelegateInstrument` driver. An example usage of this is to create an abstraction for devices on a chip. ## Usage The way it's used is mainly by specifying an entry in the station YAML. For instance, to create a Chip that has one or more Devices on it that point to different source parameters. The example below shows three devices, each of which is initialised in one of the supported ways. Device1 has only DelegateParameters, while device2 and device3 have both DelegateParameters and channels added. Device3 adds its channels using a custom channel wrapper class. ``` %%writefile example.yaml instruments: dac: type: qcodes.tests.instrument_mocks.MockDAC init: num_channels: 3 lockin1: type: qcodes.tests.instrument_mocks.MockLockin lockin2: type: qcodes.tests.instrument_mocks.MockLockin MockChip_123: type: qcodes.instrument.delegate.InstrumentGroup init: submodules_type: qcodes.instrument.delegate.DelegateInstrument submodules: device1: parameters: gate: - dac.ch01.voltage source: - lockin1.frequency - lockin1.amplitude - lockin1.phase - lockin1.time_constant drain: - lockin1.X - lockin1.Y device2: parameters: readout: - lockin1.phase channels: gate_1: dac.ch01 device3: parameters: readout: - lockin1.phase channels: type: qcodes.tests.instrument_mocks.MockCustomChannel gate_1: channel: dac.ch02 current_valid_range: [-0.5, 0] gate_2: channel: dac.ch03 current_valid_range: [-1, 0] set_initial_values_on_load: true initial_values: device1: gate.step: 5e-4 gate.inter_delay: 12.5e-4 device2: gate_1.voltage.post_delay: 0.01 device3: gate_2.voltage.post_delay: 0.03 import qcodes as qc station = qc.Station(config_file="example.yaml") lockin1 = station.load_lockin1() lockin2 = station.load_lockin2() dac = 
station.load_dac() chip = station.load_MockChip_123(station=station) chip.device1.gate() dac.ch01.voltage() chip.device1.gate(1.0) chip.device1.gate() dac.ch01.voltage() chip.device1.source() chip.device1.drain() ``` Device with channels/gates: ``` chip.device2.gate_1 ``` Setting voltages to a channel/gate of device2: ``` print(chip.device2.gate_1.voltage()) chip.device2.gate_1.voltage(-0.74) print(chip.device2.gate_1.voltage()) ``` Check initial values of device3, from which only gate_2.voltage.post_delay was set. ``` chip.device3.gate_1.voltage.post_delay chip.device3.gate_2.voltage.post_delay ```
github_jupyter
``` from __future__ import print_function import sisl import numpy as np import matplotlib.pyplot as plt from functools import partial %matplotlib inline ``` TBtrans is capable of calculating transport in $N\ge 1$ electrode systems. In this example we will explore a 4-terminal graphene GNR cross-bar (one zGNR, the other aGNR) system. ``` graphene = sisl.geom.graphene(orthogonal=True) R = [0.1, 1.43] hop = [0., -2.7] ``` Create the two electrodes in $x$ and $y$ directions. We will force the systems to be nano-ribbons, i.e. only periodic along the ribbon. In `sisl` there are two ways of accomplishing this. 1. Explicitly set number of auxiliary supercells 2. Add vacuum beyond the orbital interaction ranges The below code uses the first method. Please see if you can change the creation of `elec_x` by adding vacuum. **HINT**: Look at the documentation for the `sisl.Geometry` and search for vacuum. To know the orbital distance look up `maxR` in the geometry class as well. ``` elec_y = graphene.tile(3, axis=0) elec_y.set_nsc([1, 3, 1]) elec_y.write('elec_y.xyz') elec_x = graphene.tile(5, axis=1) elec_x.set_nsc([3, 1, 1]) elec_x.write('elec_x.xyz') ``` Subsequently we create the electronic structure. ``` H_y = sisl.Hamiltonian(elec_y) H_y.construct((R, hop)) H_y.write('ELEC_Y.nc') H_x = sisl.Hamiltonian(elec_x) H_x.construct((R, hop)) H_x.write('ELEC_X.nc') ``` Now we have created the electronic structure for the electrodes. All that is needed is the electronic structure of the device region, i.e. the crossing nano-ribbons. ``` dev_y = elec_y.tile(30, axis=1) dev_y = dev_y.translate( -dev_y.center(what='xyz') ) dev_x = elec_x.tile(18, axis=0) dev_x = dev_x.translate( -dev_x.center(what='xyz') ) ``` Remove any atoms that are *duplicated*, i.e. when we overlay these two geometries some atoms are the same. 
``` device = dev_y.add(dev_x) device.set_nsc([1,1,1]) duplicates = [] for ia in dev_y: idx = device.close(ia, 0.1) if len(idx) > 1: duplicates.append(idx[1]) device = device.remove(duplicates) ``` Can you explain why `set_nsc([1, 1, 1])` is called? And if so, is it necessary to do this step? --- Ensure the lattice vectors are big enough for plotting. Try and convince your-self that the lattice vectors are unimportant for tbtrans in this example. *HINT*: what is the periodicity? ``` device = device.add_vacuum(70, 0).add_vacuum(20, 1) device = device.translate( device.center(what='cell') - device.center(what='xyz') ) device.write('device.xyz') ``` Since this system has 4 electrodes we need to tell tbtrans where the 4 electrodes are in the device. The following lines prints out the fdf-lines that are appropriate for each of the electrodes (`RUN.fdf` is already filled correctly): ``` print('elec-Y-1: semi-inf -A2: {}'.format(1)) print('elec-Y-2: semi-inf +A2: end {}'.format(len(dev_y))) print('elec-X-1: semi-inf -A1: {}'.format(len(dev_y) + 1)) print('elec-X-2: semi-inf +A1: end {}'.format(-1)) H = sisl.Hamiltonian(device) H.construct([R, hop]) H.write('DEVICE.nc') ``` # Exercises In this example we have more than 1 transmission paths. Before you run the below code which plots all relevant transmissions ($T_{ij}$ for $j>i$), consider if there are any symmetries, and if so, determine how many different transmission spectra you should expect? Please plot the geometry using your favourite geometry viewer (`molden`, `Jmol`, ...). The answer is not so trivial. 
``` tbt = sisl.get_sile('siesta.TBT.nc') ``` Make easy function calls for plotting energy resolved quantites: ``` E = tbt.E Eplot = partial(plt.plot, E) # Make a shorthand version for the function (simplifies the below line) T = tbt.transmission t12, t13, t14, t23, t24, t34 = T(0, 1), T(0, 2), T(0, 3), T(1, 2), T(1, 3), T(2, 3) Eplot(t12, label=r'$T_{12}$'); Eplot(t13, label=r'$T_{13}$'); Eplot(t14, label=r'$T_{14}$'); Eplot(t23, label=r'$T_{23}$'); Eplot(t24, label=r'$T_{24}$'); Eplot(t34, label=r'$T_{34}$'); plt.ylabel('Transmission'); plt.xlabel('Energy [eV]'); plt.ylim([0, None]); plt.legend(); ``` - In `RUN.fdf` we have added the flag `TBT.T.All` which tells tbtrans to calculate *all* transmissions, i.e. between all $i\to j$ for all $i,j \in \{1,2,3,4\}$. This flag is by default `False`, why? - Create 3 plots each with $T_{1j}$ and $T_{j1}$ for all $j\neq1$. ``` # Insert plot of T12 and T21 # Insert plot of T13 and T31 # Insert plot of T14 and T41 ``` - Considering symmetries, try to figure out which transmissions ($T_{ij}$) are unique? - Plot the bulk DOS for the 2 differing electrodes. - Plot the spectral DOS injected by all 4 electrodes. ``` # Helper routines, this makes BDOS(...) == tbt.BDOS(..., norm='atom') BDOS = partial(tbt.BDOS, norm='atom') ADOS = partial(tbt.ADOS, norm='atom') ``` Bulk density of states: ``` Eplot(..., label=r'$BDOS_1$'); Eplot(..., label=r'$BDOS_2$'); plt.ylabel('DOS [1/eV/N]'); plt.xlabel('Energy [eV]'); plt.ylim([0, None]); plt.legend(); ``` Spectral density of states for all electrodes: - As a final exercise you can explore the details of the density of states for single atoms. Take for instance atom 205 (204 in Python index) which is in *both* GNR at the crossing. Feel free to play around with different atoms, subset of atoms (pass a `list`) etc. ``` Eplot(..., label=r'$ADOS_1$'); ... 
plt.ylabel('DOS [1/eV/N]'); plt.xlabel('Energy [eV]'); plt.ylim([0, None]); plt.legend(); ``` - For 2D structures one can easily plot the DOS per atom via a scatter plot in `matplotlib`, here is the skeleton code for that, you should select an energy point and figure out how to extract the atom resolved DOS (you will need to look-up the documentation for the `ADOS` method to figure out which flag to use. ``` Eidx = tbt.Eindex(...) ADOS = [tbt.ADOS(i, ....) for i in range(4)] f, axs = plt.subplots(2, 2, figsize=(10, 10)) a_xy = tbt.geometry.xyz[tbt.a_dev, :2] for i in range(4): A = ADOS[i] A *= 100 / A.max() # normalize to maximum 100 (simply for plotting) axs[i // 2][i % 2].scatter(a_xy[:, 0], a_xy[:, 1], A, c="bgrk"[i], alpha=.5); plt.xlabel('x [Ang]'); plt.ylabel('y [Ang]'); plt.axis('equal'); ```
github_jupyter
``` #pip install xlwt openpyxl xlsxwriter xlrd import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns ``` # Loading in Calibration datasets ``` #CO2 only df_Eguchi_CO2= pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Eguchi_CO2', index_col=0) df_Allison_CO2= pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Allison_CO2', index_col=0) df_Dixon_CO2= pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Dixon_CO2', index_col=0) df_MagmaSat_CO2= pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='MagmaSat_CO2', index_col=0) df_Shishkina_CO2=pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Shishkina_CO2', index_col=0) #H2O Only df_Iacono_H2O= pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Iacono_H2O', index_col=0) df_Shishkina_H2O=pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Shishkina_H2O', index_col=0) df_MagmaSat_H2O= pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='MagmSat_H2OExt', index_col=0) df_Dixon_H2O=pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Dixon_H2O', index_col=0) df_Moore_H2O=pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Moore_H2O', index_col=0) #Mixed CO2-H2O df_Iacono_CO2H2O= pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='Iacono_H2O-CO2', index_col=0) df_MagmaSat_CO2H2O= pd.read_excel('Solubility_Datasets_V1.xlsx', sheet_name='MagmaSat_CO2H2O', index_col=0) ``` # Subdividing up the Allison dataset by the different systems ``` #San Francisco Volcanic Field df_Allison_CO2_SFVF=df_Allison_CO2.loc[df_Allison_CO2['Location']=='SFVF'] #Sunset Crater df_Allison_CO2_SunsetCrater=df_Allison_CO2.loc[df_Allison_CO2['Location']=='SunsetCrater'] #Erebus df_Allison_CO2_Erebus=df_Allison_CO2.loc[df_Allison_CO2['Location']=='Erebus'] #Vesuvius df_Allison_CO2_Vesuvius=df_Allison_CO2.loc[df_Allison_CO2['Location']=='Vesuvius'] #Etna df_Allison_CO2_Etna=df_Allison_CO2.loc[df_Allison_CO2['Location']=='Etna'] 
#Stromboli df_Allison_CO2_Stromboli=df_Allison_CO2.loc[df_Allison_CO2['Location']=='Stromboli'] ``` # Calculating min and max P and T for each model ``` # Calculating limits - Magmasat read off their graph minDixonP_H2O=df_Dixon_H2O["P (bars)"].min() maxDixonP_H2O=df_Dixon_H2O["P (bars)"].max() minDixonT_H2O=1200 maxDixonT_H2O=1200 minDixonP_CO2=df_Dixon_CO2["P (bars)"].min() maxDixonP_CO2=df_Dixon_CO2["P (bars)"].max() minDixonT_CO2=1200 maxDixonT_CO2=1200 minDixonP_CO2H2O=df_Dixon_CO2["P (bars)"].min() maxDixonP_CO2H2O=df_Dixon_CO2["P (bars)"].max() minDixonT_CO2H2O=1200 maxDixonT_CO2H2O=1200 minMooreP_H2O=df_Moore_H2O["P (bars)"].min() maxMooreP_H2O=df_Moore_H2O["P (bars)"].max() maxMooreP_H2O_Pub=3000 minMooreT_H2O=df_Moore_H2O["T (C)"].min() maxMooreT_H2O=df_Moore_H2O["T (C)"].max() minIaconoP_H2O=df_Iacono_H2O["P (bar)"].min() maxIaconoP_H2O=df_Iacono_H2O["P (bar)"].max() minIaconoT_H2O=df_Iacono_H2O["T (K)"].min()-273.15 maxIaconoT_H2O=df_Iacono_H2O["T (K)"].max()-273.15 minIaconoP_CO2H2O=df_Iacono_CO2H2O["P (bar)"].min() maxIaconoP_CO2H2O=df_Iacono_CO2H2O["P (bar)"].max() minIaconoT_CO2H2O=df_Iacono_CO2H2O["T (K)"].min()-273.15 maxIaconoT_CO2H2O=df_Iacono_CO2H2O["T (K)"].max()-273.15 minEguchiP_CO2=10000*df_Eguchi_CO2["P(GPa)"].min() maxEguchiP_CO2=10000*df_Eguchi_CO2["P(GPa)"].max() minEguchiT_CO2=df_Eguchi_CO2["T(°C)"].min() maxEguchiT_CO2=df_Eguchi_CO2["T(°C)"].max() minAllisonP_CO2=df_Allison_CO2["Pressure (bars)"].min() maxAllisonP_CO2=df_Allison_CO2["Pressure (bars)"].max() minAllisonP_CO2_SFVF=df_Allison_CO2_SFVF["Pressure (bars)"].min() maxAllisonP_CO2_SFVF=df_Allison_CO2_SFVF["Pressure (bars)"].max() minAllisonP_CO2_SunsetCrater=df_Allison_CO2_SunsetCrater["Pressure (bars)"].min() maxAllisonP_CO2_SunsetCrater=df_Allison_CO2_SunsetCrater["Pressure (bars)"].max() minAllisonP_CO2_Erebus=df_Allison_CO2_Erebus["Pressure (bars)"].min() maxAllisonP_CO2_Erebus=df_Allison_CO2_Erebus["Pressure (bars)"].max() 
minAllisonP_CO2_Vesuvius=df_Allison_CO2_Vesuvius["Pressure (bars)"].min() maxAllisonP_CO2_Vesuvius=df_Allison_CO2_Vesuvius["Pressure (bars)"].max() minAllisonP_CO2_Etna=df_Allison_CO2_Etna["Pressure (bars)"].min() maxAllisonP_CO2_Etna=df_Allison_CO2_Etna["Pressure (bars)"].max() minAllisonP_CO2_Stromboli=df_Allison_CO2_Stromboli["Pressure (bars)"].min() maxAllisonP_CO2_Stromboli=df_Allison_CO2_Stromboli["Pressure (bars)"].max() minAllisonT_CO2=1200 maxAllisonT_CO2=1200 minShishkinaP_H2O=10*df_Shishkina_H2O["P (MPa)"].min() maxShishkinaP_H2O=10*df_Shishkina_H2O["P (MPa)"].max() minShishkinaT_H2O=df_Shishkina_H2O["T (°C)"].min() maxShishkinaT_H2O=df_Shishkina_H2O["T (°C)"].max() minShishkinaP_CO2=10*df_Shishkina_CO2["P (MPa)"].min() maxShishkinaP_CO2=10*df_Shishkina_CO2["P (MPa)"].max() minShishkinaT_CO2=df_Shishkina_CO2["T (°C)"].min() maxShishkinaT_CO2=df_Shishkina_CO2["T (°C)"].max() # Measured off Magmasat graph minMagmasatP_CO2=10*0 maxMagmasatP_CO2=10*3000 minMagmasatT_CO2=1139 maxMagmasatT_CO2=1730 minMagmasatP_H2O=10*0 maxMagmasatP_H2O=10*2000 minMagmasatT_H2O=550 maxMagmasatT_H2O=1418 ``` # Table of calibration limits ``` columns=['Publication', 'Species', 'Min P (bars)', 'Max P (bars)', 'Min T (C)', 'Max T (C)', 'notes'] df_PT2=pd.DataFrame([['Dixon 1997', 'H2O', minDixonP_H2O, maxDixonP_H2O, minDixonT_H2O, maxDixonT_H2O, '-'], ['Dixon 1997', 'CO2', minDixonP_CO2, maxDixonP_CO2, minDixonT_CO2, maxDixonT_CO2, '-'], ['Moore et al. 1998 (cal datasat)', 'H2O', minMooreP_H2O, maxMooreP_H2O, minMooreT_H2O, maxMooreT_H2O, '2 samples in dataset with >3kbar P'], ['Moore et al. 
1998 (author range)', 'H2O', 0, 3000, 700, 1200, 'Paper says reliable up to 3kbar'], ['Iacono-Marziano et al., 2012', 'H2O', minIaconoP_H2O, maxIaconoP_H2O, minIaconoT_H2O, maxIaconoT_H2O, '-'], ['Iacono-Marziano et al., 2012', 'CO2-H2O', minIaconoP_CO2H2O, maxIaconoP_CO2H2O, minIaconoT_CO2H2O, maxIaconoT_CO2H2O, '-'], ['Shishkina et al., 2014', 'H2O', minShishkinaP_H2O, maxShishkinaP_H2O, minShishkinaT_H2O, maxShishkinaT_H2O, '-'], ['Shishkina et al., 2014', 'CO2', minShishkinaP_CO2, maxShishkinaP_CO2, minShishkinaT_CO2, maxShishkinaT_CO2, '-'], ['Ghiorso and Gualda., 2015 (MagmaSat)', 'H2O', minMagmasatP_H2O, maxMagmasatP_H2O, minMagmasatT_H2O, maxMagmasatT_H2O, '-'], ['Ghiorso and Gualda., 2015 (MagmaSat)', 'CO2', minMagmasatP_CO2, maxMagmasatP_CO2, minMagmasatT_CO2, maxMagmasatT_CO2, '-'], ['Eguchi and Dasgupta, 2018', 'CO2', minEguchiP_CO2, maxEguchiP_CO2, minEguchiT_CO2, maxEguchiT_CO2, '-'], ['Allison et al. 2019 (All Data)', 'CO2', minAllisonP_CO2, maxAllisonP_CO2, minAllisonT_CO2, maxAllisonT_CO2, '-'], ['Allison et al. 2019 (SFVF)', 'CO2', minAllisonP_CO2_SFVF, maxAllisonP_CO2_SFVF, minAllisonT_CO2, maxAllisonT_CO2, '-'], ['Allison et al. 2019 (Sunset Crater)', 'CO2', minAllisonP_CO2_SunsetCrater, maxAllisonP_CO2_SunsetCrater, minAllisonT_CO2, maxAllisonT_CO2, '-'], ['Allison et al. 2019 (Erebus)', 'CO2', minAllisonP_CO2_Erebus, maxAllisonP_CO2_Erebus, minAllisonT_CO2, maxAllisonT_CO2, '-'], ['Allison et al. 2019 (Vesuvius)', 'CO2', minAllisonP_CO2_Vesuvius, maxAllisonP_CO2_Vesuvius, minAllisonT_CO2, maxAllisonT_CO2, '-'], ['Allison et al. 2019 (Etna)', 'CO2', minAllisonP_CO2_Etna, maxAllisonP_CO2_Etna, minAllisonT_CO2, maxAllisonT_CO2, '-'], ['Allison et al. 
2019 (Stromboli)', 'CO2', minAllisonP_CO2_Stromboli, maxAllisonP_CO2_Stromboli, minAllisonT_CO2, maxAllisonT_CO2, '-'], ], columns=columns).set_index('Publication') #save to excel file for easy import into manuscript with pd.ExcelWriter("Table_of_Calibration_Limits.xlsx") as writer: df_PT2.to_excel(writer, 'Table') df_PT2 ``` # Things to include for Dixon CO2 model Caution: 1. This CO2 model is only valid where C dissolves as carbonate ions, and is not applicable for intermediate-silicic compositions where C is also present as molecular CO2. 2. T is assumed to be constant at 1200C in the equations of Dixon. There is some temperature dependence in the implementation of this model through the fugacity and <font color='red'>{insert other terms where this is true}. </font> 3. The compositional dependence of CO2 in the Dixon, 1997 model is incorporated empirically through the parameter Pi. Dixon (1997) provides an equation for Pi in terms of oxide fractions at 1200C, 1kbar. However, they also show that in the North arch Volcanic field, there is a strong correlation between Pi and SiO2, allowing a simplification of the compositional dependence in terms of SiO2. This was implemented in VolatileCalc, and is used in this model. Note: [Part A](#pA)<br>Equation 1 will only be valid if your samples have similar major element systematics to the calibration dataset of Dixon, 1997. We provide a plot in notebook X to assess this. Crucially, if the full Pi term in your sample suite does not follow the same trajectory with SiO2 as the North Arch dataset, this simplification will lead to inaccurate results. [Part B](#pA)<br> Equation 1 is only valid for 40-49 wt% SiO2. In VolatileCalc, you cannot enter a SiO2 value>49 wt%. In the literature, for samples with >49 wt% SiO2, the SiO2 content has been set to 49wt% to allow the Dixon model to be used (e.g., Tucker et al., 2019; Coombs et al. 2005).
Newman and Lowenstern suggest that calculating the result with SiO2=49wt% would generally be valid for basalts with ~52 wt% SiO2. In our code, samples with SiO2>49 are calculated assuming SiO2=49. Here's how this is implemented in our code: if sample['SiO2'] > 48.9: return 3.817e-7 else: return 8.7e-6 - 1.7e-7*sample['SiO2'] [Part C](#pA)<br> It is unclear whether the Pi dependence, and by extension, equation 1 are valid at pressures >1000 bars. Lesne et al. (2011) suggest it may hold to 2000 bars, in VolatileCalc, the limit is placed at 5000 bars. 2) The correlation between Pi and SiO2 was only parameterized between 40-49 wt% SiO2. In our code, if SiO2 of the sample>49 wt%, Extrapolation beyond this Specific errors to spit out if you do exceed our parameters 1. Your SiO2>49 wt%, please see caution statement 2. This pressure is above the limit of 2000 bars that Lesne et al. 2011 suggest the Dixon compositional dependence may be valid to. 3. This pressure is above the upper limit of 5,000 bars as suggested by Volatile Calc (Newman and Lowenstern, 2001)" 4. This pressure is above the maximum experimentally calibrated pressure reported by Dixon (1997). Extrapolation should be stable to 20,000 bars." 5. This pressure is above the maximum extrapolated pressure reported by Dixon et al. (1995)" 6. In the Dixon model, T is assumed to be constant at 1200C. <font color='red'>Simon, not sure the best way to explain that although it is out of range, Temp isn't super sensitive for basalts </font> # Things to include for the Moore model Caution: 1. This is an empirical model, so care should be taken extrapolating the results beyond the calibration range of the dataset. Specific errors 1. PSat>3000 : Your P is >3000 bars, authors warn against extrapolating beyond this point due to limitations of calibration dataset, as well as the fact high P may be problematic due to critical behavior 2.
Temp>1200, Temp< 700 - You are outside the calibration range for temperature defined by the authors - caution is needed interpreting these results. # Things to include for Iacono-Marziano model Caution 1. This semi-empirical model has limited composition range. In particular, the authors warn that the effect of MgO, FeO and Na2O on solubility is poorly constrained due to limited variation in their dataset. In particular, they emphasize that they only have a single pressure for Na-rich melts, so high Na2O melts are not well calibrated at various pressures 2. The temperature range is limited to 1200-1300 C, with combined H2O-CO2 between 1100-1400. 3. This model ignores the effect of ferric/ferrous iron, although Papale has shown this has a big effect. Specific errors - 1. Your temperature is out of range for X 2. Your pressure is out of range of X. 3. Your MgO is out of range of their dataset - the authors specifically warn the effect of MgO on solubility is poorly constrained due to limited variability in the calibration dataset. <font color='red'>Simon, not sure if we want errors like only N samples in database have MgO contents equal to yours, as there are a few at higher ones </font> 4. Your FeO is out of range - the authors specifically warn the effect of FeO on solubility is poorly constrained due to limited variability in the calibration dataset. <font color='red'>Simon, not sure if we want errors like only N samples in database have FeO contents equal to yours, as there are a few at higher ones </font> 5. Your Na2O is out of range - the authors specifically warn that high Na2O melts are not handled well, as the database only contains these at 1 pressure.
<font color='red'>Simon, not sure if we want errors like only N samples in database have Na2O contents equal to yours, as there are a few at higher ones </font> ``` # As few outliers, might be better to say only 1 composition in database has high enough FeO or something plt.hist(df_Iacono_CO2H2O["MgO"], bins = [0,1,2,3,4,5,6,7,8,9,10,11]) plt.xlabel("MgO, wt%") plt.ylabel("Number of samples") plt.title("histogram MgO Iacono") plt.show() plt.hist(df_Iacono_CO2H2O["FeOT"], bins = [0,1,2,3,4,5,6,7,8,9,10,11]) plt.xlabel("FeO*, wt%") plt.ylabel("Number of samples") plt.title("histogram FeOT Iacono") plt.show() plt.hist(df_Iacono_CO2H2O["Na2O"], bins = [0,1,2,3,4,5,6,7,8,9,10,11]) plt.xlabel("Na$_2$O, wt%") plt.ylabel("Number of samples") plt.title("histogram Na2O Iacono") plt.show() ``` # Things to include for Shishkina model Caution: 1. This is an emperical model, so care should be taken extrapolating the results beyond the calibration range of the dataset. 2. This CO2 model is only valid where C dissolves as carbonate ions, and is not applicable for intermediate-silicic compositions where C is also present as molecular CO2. 3. This model only provides H2O and CO2 models separatly. They are combined in this study in <font color='red'>Simon what is best way to explain how its done here </font> 4. Note than H2O eq can be used for mafic-int melts, at relatively oxidised conditions (NNO+1 to NNO+4). Pure CO2 fluid experiments are more reduced, Fe2+/T>0.8 where reported. Warn against use of CO2 for highly alkali compositions.<font color='red'>Simon, not sure whether we want to implement warnings for this, or just state it in the info/caution section </font> Specific errors 1. Your pressure is out of range 2. Your temp is out of range 3. Your SiO2 is > 54 wt%, which is the limit of the calibration range of the CO2 dataset, the authors specifically warn that the model isn't applicable for intermediate-silicic compositions where C is also present as molecular CO2. 
The H2O model extends up to 69 wt%. 4. Your K2O+Na2O is outside the range for the CO2 model of X <font color='red'>Simon this is based on caution they say to take for alkali compositions </font> ``` # Simon - do we want to code in things like this? df_Shishkina_CO2["K2O+Na2O"]=df_Shishkina_CO2["K2O"]+df_Shishkina_CO2["Na2O"] df_Shishkina_H2O["K2O+Na2O"]=df_Shishkina_H2O["K2O"]+df_Shishkina_H2O["Na2O"] columns=['Model', 'Oxide', 'Min', 'Max'] Shish_Lim=pd.DataFrame([['Shishkina-H2O', 'SiO2', df_Shishkina_H2O["SiO2"].min(), df_Shishkina_H2O["SiO2"].max()], ['Shishkina-CO2', 'SiO2', df_Shishkina_CO2["SiO2"].min(), df_Shishkina_CO2["SiO2"].max()], ['Shishkina-CO2', 'K2O+Na2O', df_Shishkina_CO2["K2O+Na2O"].min(), df_Shishkina_CO2["K2O+Na2O"].max()], ['Shishkina-H2O', 'K2O+Na2O', df_Shishkina_H2O["K2O+Na2O"].min(), df_Shishkina_H2O["K2O+Na2O"].max()], #['Shishkina-H2O', 'SiO2', df_Shishkina_H2O["SiO2"].min(), df_Shishkina_H2O["SiO2"].max()], #['Shishkina-CO2', 'SiO2', df_Shishkina_CO2["SiO2"].min()],df_Shishkina_CO2["SiO2"].min() ], columns=columns) Shish_Lim ``` # Magmasat Specific errors 1. Your Temp is <800C. Authors warn that below this, the calibration dataset only have H2O experiments, and very few, so concern with extrapolation to lower temp. # Things to include for Eguchi and Dasgupta, 2018 Caution 1. This model is for CO2 only, and was only calibrated on H2O-poor compositions (~0.2-1 wt%). The authors suggest that for hydrous melts, a mixed CO2-H2O fluid saturation model must be used. The authors show comparisons in their paper; the difference between magmasat and this model is <30% for up to 4.5 wt% H2O, concluding that this model does a reasaonble job of predicting CO2 solubility up to 2-4 wt% <font color='red'>This is suprising to me, given the differences I found with Allison... Would it work/not work to combine this model with a different H2O model as allison do?S </font> Specific errors 1. your H2O>1 wt%. 
The model is only calibrated on water-poor compositions. The authors suggest the model works reasonably well up to H2O contents of 2-3 wt%. 2. Your P is >50000 bars, the authors warn the model works poorly at higher pressures, possibly due to a structural change in silicate melt at pressures above 5 GPa 3. Your P is <503 bars, which is the minimum pressure in the calibration dataset. # Things to include for Allison model Caution: 1. For the Allison model, please select which of their 6 systems, SFVF, Sunset crater, Erebus, Vesuvius, Etna and Stromboli is most suitable for your system using the element diagrams in the "calibration" notebook. <font color='red'>Simon should we try clustering in SiO2-Na2O-K2O space </font> 2. Note that the pressure calibration range for SFVF, Sunset Crater, Erebus and Vesuvius only incorporates pressures from ~4000-6000 bars. For Etna the authors added data from Iacono-Marziano et al. 2012 and Lesne et al. 2011, extending the calibration dataset down to ~485 bars, while for Stromboli, they include data from Lesne extending it down to ~269 bars. 3. Temperature is assumed to be fixed at 1200C in the Allison model. 4. Although this model is technically a CO2-only model, in all their calculations they combine their CO2 model with the H2O model of Lesne et al. 2011. You should implement a water model as well to get reliable answers Specific errors- 1. Your P>7000 bars, the spreadsheet provided by Allison et al. (2019) would not have given an answer. 2. Your P<50 bars, the spreadsheet provided by Allison et al. (2019) would not have given an answer. 3. <font color='red'>Include pressure range for each model? and build errors out of that </font> 4. You have not chosen a water model. Your results may be unreliable.
github_jupyter
# Обратные связи в контуре управления Для рассмотренных в предыдущих лекциях регуляторов требуется оценивать состояние объекта управления. Для построения таких оценок необходимо реализовать обратные связи в контуре управления. На практике для этого используются специальные устройства: датчики. # Случайные величины Случайная величина -- это переменная, значениее которой определяется в результате эксперимента, подверженного влиянию случайных факторов. Случайные величины характеризуются функция плотности вероятности \begin{equation} p(a \leq \xi \leq b) = \int_{a}^{b} p(\xi) \,d\xi \end{equation} которая определяет вероятность попадания значения $\xi$ в интервал $[a \quad b]$. Математическим ожиданием случайной величины называется \begin{equation} \mathbb{E}[\xi] = \int_{-\infty}^{\infty} \xi \cdot p(\xi) \,d\xi \end{equation} Дисперсия случайной величины \begin{equation} \mathbb{D}[\xi] = \mathbb{E}[\left(\xi - \mathbb{E}[\xi]\right)^2] \end{equation} Ковариация двух случайных величин \begin{equation} \Sigma[\xi_1, \xi_2] = \mathbb{E}[(\xi_1 - \mathbb{E}[\xi_1]) (\xi_2 - \mathbb{E}[\xi_2])] \end{equation} ``` # [ПРИМЕР 1] Измерения случайной величины import numpy as np xi = np.random.random() print(xi) # [ПРИМЕР 2] Распределение случайной величины import numpy as np import matplotlib.pyplot as plt np.random.seed(200) N = 999 xi = np.random.random(N) # plot xi fig1 = plt.figure(figsize=(10,5)) ax1 = fig1.add_subplot(1,1,1) ax1.set_title("Random variable xi") ax1.plot(range(N), xi, color = 'b') ax1.set_ylabel(r'xi') ax1.set_xlabel(r'n') ax1.grid(True) # [ПРИМЕР 3] Нормальное распределение import numpy as np import scipy.stats as st import matplotlib.pyplot as plt import numpy as np x = np.random.normal(3, 1, 100000) _, bins, _ = plt.hist(x, 50, density = True, alpha = 0.5) mu, sigma = st.norm.fit(x) best_fit_line = st.norm.pdf(bins, mu, sigma) plt.plot(bins, best_fit_line) # [ПРИМЕР 4] Математическое ожидание и дисперсия import numpy as np import matplotlib.pyplot as 
plt #np.random.seed(200) N = 999 xi = np.random.normal(0, 1, N) e = np.mean(xi) print("Expected value: ", (e)) d = np.mean((xi - e)**2) print("Variance: ", (d)) # plot xi fig1 = plt.figure(figsize=(10,5)) ax1 = fig1.add_subplot(1,1,1) ax1.set_title("Random variable xi") ax1.plot(range(N), xi, color = 'b') ax1.set_ylabel(r'xi') ax1.set_xlabel(r'n') ax1.set_ylim([-4 * d, 4 * d]) ax1.grid(True) mean = [0, 0] covariance_mat = [[1., -0.5], [-0.5, 1.]] x, y = np.random.multivariate_normal(mean, covariance_mat, 10000000).T plt.figure(figsize = (3, 3 )) plt.hist2d(x, y, bins=(1000, 1000), cmap = plt.cm.jet) plt.subplots_adjust(bottom = 0, top = 1, left = 0, right = 1) plt.xlim(-5, 5) plt.ylim(-5, 5) plt.show() ``` # Фильтр Калмана (Линейные системы) Модель системы (процесса): $$x_k = A_k \cdot x_{k-1} + B_k \cdot u_k + w_k,$$ где $w_k$~$N(0, Q_k)$ - нормально распределённый случайный процесс, характеризующийся нулевым математическим ожиданием и матрицей ковариации $Q_k$. Модель наблюдений (измерений): $$y_k = H_k \cdot x_k + v_k,$$ где $v_k$ - нормально распределённый случайный процесс, характеризующийся нулевым математическим ожиданием и матрицей ковариации $R_k$. Требуется получить оценку вектора состояния системы $\hat{x}_k$, зная аналогичную оценку на предыдущем шаге ($\hat{x}_{k-1}$), выход системы (вектор измерений) $y_k$ и вектор управляющих параметров $u_k$. 
## Прогноз Прогноз (экстраполяция) вектора состояния с помощью модели процесса: $$\overline{x}_k = A_k \cdot \hat{x}_{k-1} + B_k \cdot u_k ,$$ Прогноз матрицы ковариации ошибок: $$\overline{P}_k = A_k\cdot \hat{P}_{k-1}\cdot A_k^T + Q_k$$ ## Коррекция Вычисление коэффициента усиления Калмана: $$K_k = \overline{P}_k \cdot H_{k}^T \cdot \left(H_k\cdot \overline{P}_k\cdot H_k^T + R_k\right)^{-1}$$ Оценка матрицы ковариации ошибки: $$\hat{P}_k = \left(I - K_k\cdot H_k\right)\cdot \overline{P}_k$$ Оценка вектора состояния: $$\hat{x}_k = \overline{x}_k + K_k\cdot\left(y_k - H_k\cdot\overline{x}_k\right)$$ # Пример Рассмотрим задачу о движении точки по прямой под действием случайных (постоянных внутри каждого такта управления) ускорений. Вектор состояния системы включает в себя координату $x$ и скорость $v$, то есть $x_k = [x \quad v]^T$. Уравнения движения: $$x_k = A_k \cdot x_{k-1} + G_k \cdot a_k,$$ где $$A_k = \begin{pmatrix} 1 & \Delta t \\ 0 & 1 \end{pmatrix}, \quad G_k = \begin{pmatrix} 0.5 \Delta t^2 \\ \Delta t \end{pmatrix}. $$ Матрица ковариации шума процесса: $$Q = G\cdot G^T \cdot \sigma_a^2,$$ где $\sigma_a$ характеризует случайное распределение ускорений. На каждом такте процесса измеряется координата точки. Таким образом, матрица наблюдения $$H = \begin{pmatrix} 1 & 0 \end{pmatrix},$$ а модель наблюдений $$y_k = H \cdot x_k + v_k,$$ где $v_k$ - нормально распределённый шум измерений ($\sigma_m$). 
Матрица ковариации шума измерений: $R = \left[ \sigma_m^2 \right].$ ``` import numpy as np import matplotlib.pyplot as plt from random import normalvariate class RealWorld: def __init__(self, sigma_acc, sigma_meas, dt): self.time = 0.0 self.time_step = dt self.position = 0.0 self.sigma_acc = sigma_acc self.velocity = 0.1 self.measurement = None # шум измерений self.sigma_meas = sigma_meas def measure(self): if self.measurement == None: self.measurement = self.position + normalvariate(0, self.sigma_meas) return self.measurement def step(self): self.time += self.time_step acceleration = normalvariate(0, self.sigma_acc) self.position += self.velocity * self.time_step + 0.5 * acceleration * self.time_step**2 self.velocity += acceleration * self.time_step self.measurement = None ``` # Пример Рассмотрим задачу о движении точки по прямой под действием случайных (постоянных внутри каждого такта управления) ускорений. Вектор состояния системы включает в себя координату $x$ и скорость $v$, то есть $x_k = [x \quad v]^T$. Уравнения движения: $$x_k = A_k \cdot x_{k-1} + G_k \cdot a_k,$$ где $$A_k = \begin{pmatrix} 1 & \Delta t \\ 0 & 1 \end{pmatrix}, \quad G_k = \begin{pmatrix} 0.5 \Delta t^2 \\ \Delta t \end{pmatrix}. $$ Матрица ковариации шума процесса: $$Q = G\cdot G^T \cdot \sigma_a^2,$$ где $\sigma_a$ характеризует случайное распределение ускорений. На каждом такте процесса измеряется координата точки. Таким образом, матрица наблюдения $$H = \begin{pmatrix} 1 & 0 \end{pmatrix},$$ а модель наблюдений $$y_k = H \cdot x_k + v_k,$$ где $v_k$ - нормально распределённый шум измерений ($\sigma_m$). Матрица ковариации шума измерений: $R = \left[ \sigma_m^2 \right].$ ``` sigma_a = 0.01 # нормально распределённые ускорения # дисперсия шума измерений sigma_measurement = 10. 
world = RealWorld(sigma_a, sigma_measurement, 0.5)

# state-transition (evolution) matrix
A = np.array([[1., world.time_step],[0., 1.]])
G = np.array([0.5 * world.time_step**2, world.time_step])
# process noise covariance
Q = np.outer(G, G) * sigma_a**2
# error covariance matrix (initial uncertainty)
position_uncertainty = 1.
velocity_uncertainty = 1.
P = np.array([[position_uncertainty, 0.],[0., velocity_uncertainty]])
# observation model: only the position is measured
H = np.array([1., 0.])
# measurement noise variance
R = np.array([sigma_measurement**2])

episode_len = 1000
data = np.zeros((6, episode_len))
for i in range(episode_len):
    world.step()
    measurement = world.measure()
    if i == 0:
        # first measurement: initialise position, assume zero velocity
        x_est = np.array([measurement, 0.])
    elif i == 1:
        # second measurement: finite-difference velocity estimate
        x_est = np.array([measurement, ( measurement - data[4, i-1] ) / world.time_step])
    else:
        # from i >= 2 onwards apply the Kalman filter model
        ##################################################################
        # prediction
        vel_est = data[5, i-1]
        pos_est = data[4, i-1] + vel_est * world.time_step
        x_pred = np.array([pos_est, vel_est])
        # predicted error covariance
        P_pred = A.dot(P).dot(A.T) + Q
        ##################################################################
        # correction
        K = P_pred.dot(H.T) / (H.dot(P_pred).dot(H.T) + R)
        # BUG FIX: K and H are 1-D arrays, so K.dot(H) is a *scalar* and
        # np.eye(2) - K.dot(H) subtracted that scalar from every element of I.
        # The covariance update requires the outer product: P = (I - K·Hᵀ)·P_pred.
        P = (np.eye(2) - np.outer(K, H)).dot(P_pred)
        x_est = x_pred + K.dot(measurement - H.dot(x_pred))
    data[:, i] = np.array([world.time, world.position, world.velocity, measurement, x_est[0], x_est[1]])

# plot
fig1 = plt.figure(figsize=(16,8))
ax1 = fig1.add_subplot(1,2,1)
ax2 = fig1.add_subplot(1,2,2)
# r
ax1.set_title("position")
ax1.plot(data[0, :], data[3, :], 'k', label = 'pos_mes')
ax1.plot(data[0, :], data[1, :], 'r', label = 'pos_world')
#ax1.plot(data[0, :], data[4, :]-data[1, :], 'g', label = 'pos_est')
ax1.set_ylabel(r'r')
ax1.set_xlabel(r't, [s]')
ax1.grid(True)
ax1.legend()
# v
ax2.set_title("velocity")
ax2.plot(data[0, :], data[2, :], 'r', label = 'v')
#ax2.plot(data[0, :], data[5, :], 'g', label = 'v_est')
ax2.set_ylabel(r'v')
ax2.set_xlabel(r't, [s]') ax2.grid(True) ax2.legend() fig2 = plt.figure(figsize=(16,8)) ax3 = fig2.add_subplot(1,2,1) ax4 = fig2.add_subplot(1,2,2) # r ax3.set_title("position") ax3.plot(data[0, :], data[1, :], 'r', label = 'pos_world') ax3.plot(data[0, :], data[4, :], 'g', label = 'pos_est') ax3.set_ylabel(r'r') ax3.set_xlabel(r't, [s]') ax3.grid(True) ax3.legend() # v ax4.set_title("velocity") ax4.plot(data[0, :], data[2, :], 'r', label = 'v') ax4.plot(data[0, :], data[5, :], 'g', label = 'v_est') ax4.set_ylabel(r'v') ax4.set_xlabel(r't, [s]') ax4.grid(True) ax4.legend() ``` # Модель измерений Рассмотрим движение системы \begin{equation} \dot{x} = f(x). \end{equation} Вектор измерений $z$ зависит от состояния системы, а также содержит случайную компоненту \begin{equation} z(x) = h(x) + \xi. \end{equation} Функция $h(x)$ связывает состояние системы с измерением датчика. Например, если датчик GNSS-приемника в одномерной задаче движения тележки по рельсам смещен от центра тележки $x$ на расстояние $r$, можно записать \begin{equation} h(x) = x + r. \end{equation} # Расширенный фильтр Калмана (Extended Kalman Filter) Расширенный фильтр Калмана подразумевает, как правило, нелинейную модель системы (процесса): \begin{equation} \dot{x} = f(x) + w(t). \end{equation} Шум $w$ имеет нормальное распределение, нулевое математическое ожидание и матрицу ковариации $Q$. Модель наблюдений также может описываться нелинейным уравнением \begin{equation} y = h(x) + v(t), \end{equation} где $v$ имеет нормальное распределение, нулевое математическое ожидание и матрицу ковариации $R$. Однако, как правило, считается, что измерения обрабатываются фильтром периодически с частотой такта управления, поэтому модель записывают в виде соотношений между вектором состояния на момент получения измерений $x_k = x(t_k)$ и набором измерений $y_k = y(t_k)$: \begin{equation} y_k = h(x_k) + v_k. \end{equation} Алгоритм снова выполняется в два этапа - прогноз и коррекция. 
## Прогноз Прогноз (экстраполяция) вектора состояния с помощью нелинейной модели процесса: $$\overline{x}_k = \hat{x}_{k-1} + \int_{t_{k-1}}^{t_k} f(x)dt,$$ Прогноз матрицы ковариации ошибок: \begin{equation} \overline{P}_k = \Phi_k \cdot \hat{P}_{k-1} \cdot \Phi_k^T + Q, \end{equation} где \begin{equation} \Phi_k = I + F\cdot \Delta t = I +\frac{\partial f(x)}{\partial x}\cdot(t_k -t_{k-1}) \end{equation} ## Коррекция Здесь отличие от линейного алгоритма в необходимости линеаризовать модель наблюдений, чтобы получить матрицу $H$: \begin{equation} H_k = \frac{\partial h(x)}{\partial x}. \end{equation} Вычисление коэффициента усиления Калмана: $$K_k = \overline{P}_k \cdot H_{k}^T \cdot \left(H_k\cdot \overline{P}_k\cdot H_k^T + R_k\right)^{-1}$$ Оценка матрицы ковариации ошибки: $$\hat{P}_k = \left(I - K_k\cdot H_k\right)\cdot \overline{P}_k$$ Оценка вектора состояния: $$\hat{x}_k = \overline{x}_k + K_k\cdot\left(y_k - h(\overline{x}_k)\right)$$ # Пример: Вращение твёрдого тела с неподвижным центром масс Ориентация твердого тела описывается кватернионом $q$, задающим положение связанной с телом системы координат относительно некоторой неподвижной системы координат. Говорят, что кватернион $q^{\mathrm{BI}}$ задает ориентацию некоторого базиса (B) относительно некоторого другого базиса (I), если представление любого вектора $\mathbf{v}$ в этих системах координат определяется соотношением: $$\mathbf{v}^{\mathrm{B}} = q^{\mathrm{BI}}\circ\mathbf{v}^{\mathrm{I}}\circ \tilde{q}^{\mathrm{BI}}$$ Кинематические уравнения твердого тела записываются как: \begin{equation}\label{eq:quat}\tag{1} \dot{q} = \frac{1}{2}q\circ \boldsymbol{\omega}, \end{equation} где $q$ - кватернион ориентации тела, $\boldsymbol{\omega}$ - угловая скорость тела в проекциях на связанные с телом оси. 
Модель движения твёрдого тела с неподвижной точкой дополняется динамическими уравнениями Эйлера \begin{equation}\label{eq:euler}\tag{2} \mathbf{J}\cdot \dot{\boldsymbol{\omega}} + \boldsymbol{\omega} \times \mathbf{J}\cdot \boldsymbol{\omega} = \mathbf{T}, \end{equation} где $\mathbf{J}$ - тензор инерции тела, $\mathbf{T}$ - главный момент сил, действующих на тело. Таким образом, вектор состояния состоит из 4х компонент кватерниона ориентации и 3х компонент вектора угловой скорости. А модель процесса состоит из уравнений \eqref{eq:quat} и \eqref{eq:euler}.
github_jupyter
``` import spotipy from spotipy.oauth2 import SpotifyOAuth import pandas as pd import time scope = 'user-top-read user-library-read' sp = spotipy.Spotify(client_credentials_manager=SpotifyOAuth(scope=scope)) sp.user_playlists(sp.current_user()['id']) results = sp.current_user_top_artists(time_range='short_term', limit=50) all_genres = [genre for r in results['items'] for genre in r['genres'] ] all_genres from collections import Counter top_genres = Counter(all_genres) top_genres = {key : value for key, value in sorted(top_genres.items(), key=lambda k: k[1], reverse=True)} top_genres results top_genres_and_artists = [[r['name'], r['id'], r['genres']] if len(r['genres']) > 0 else [r['name'], r['id'], ['unknown genre']] for r in results['items']] top_genres_and_artists artists = [] for artist_name, artist_id, genres in top_genres_and_artists: if 'indie soul' in genres: artists.append([artist_name, artist_id]) artists def get_top_genres(): results = sp.current_user_top_artists(time_range='short_term', limit=50) all_genres = [genre for r in results['items'] for genre in r['genres']] top_genres = Counter(all_genres) top_genres = {key : value for key, value in sorted(top_genres.items(), key=lambda k: k[1], reverse=True)} return top_genres def get_top_artists(top_genres): # TO-DO: Let user select from top genres top_genre = list(top_genres.keys())[0] # Get the only one top genre for now print("Selected genre: %s" % (top_genre)) artists = [] for artist_name, artist_id, genres in top_genres_and_artists: if top_genre in genres: artists.append([artist_name, artist_id]) print("Selected artists belonging to this genre: ", artists) return artists def get_discography(artist_id, min_track_duration=30000): tracks = [] album_ids = [album['id'] for album in sp.artist_albums(artist_id)['items'] if album['album_type'] != 'compilation' ] for album_id in album_ids: # track_ids = [track['id'] for track in sp.album_tracks(album_id)['items'] if track['duration_ms'] > min_track_duration] for 
track in sp.album_tracks(album_id)['items']: # There are unexpected results while retrieving the discography of an artist # Only get the albums that the artist owns flag = False for artist in track['artists']: if artist['id'] == artist_id: flag = True break if flag and track['duration_ms'] > min_track_duration: tracks.append(track['id']) if len(tracks) == 100: break return tracks def get_all_features(artists): t = time.time() df = pd.DataFrame() for artist_name, artist_id in artists: try: tracks = get_discography(artist_id) except: time.sleep(2) tracks = get_discography(artist_id) while(len(tracks) > 0): if len(df) == 0: df = pd.DataFrame(sp.audio_features(tracks=tracks[:100])) df['artist_name'] = artist_name df['artist_id'] = artist_id # Could not add track names in here # API does not return audio features of all tracks # There might be a restriction on different markets # df['track_name'] = track_names[:100] else: df_feats = pd.DataFrame(sp.audio_features(tracks=tracks[:100])) df_feats['artist_name'] = artist_name df_feats['artist_id'] = artist_id # df_feats['track_name'] = track_names[:100] df = df.append(df_feats) tracks = tracks[100:] print(time.time() - t) return df top_genres = get_top_genres() artists = get_top_artists(top_genres) df = get_all_features(artists) df.tempo.median() df.tempo.mean() (df.tempo.min() + df.tempo.max()) / 2 df.sample(800) def return_playlist(**kwargs): """ danceability='default', energy='default', speechiness='default', acousticness='default', instrumentalness='default', liveness='default', valence='default', tempo='default' """ # Select tracks based on the provided ranges top_genres = get_top_genres() artists = get_top_artists(top_genres) df = get_all_features(artists) print(len(df)) # Sort dataframe based on provided features # Randomly return tracks based on sorted # TO-DO: Select tracks based on user market for feature, value in kwargs.items(): avg = (df[feature].min() + df[feature].max()) / 2 if value == 'high': # 
df.sort_values(feature, ascending=False, inplace=True) df = df[df[feature] > avg] elif value == 'low': # df.sort_values(feature, ascending=True, inplace=True) df = df[df[feature] < avg] print(len(df)) # df = df.head(len(df)//3) try: return df.sample(25) except: return df playlist = return_playlist(danceability='high', instrumentalness='low', valence="low", tempo="low", energy="low") playlist def get_playlist_tracks(playlist): track_uris = playlist['uri'].to_list() artist_names = playlist['artist_name'].to_list() track_names = [track['name'] for track in sp.tracks(track_uris)['tracks']] tracks = ["{} by {}".format(track, artist) for track, artist in zip(track_names, artist_names)] return tracks, track_uris _, ids = get_playlist_tracks(playlist) ids string = "" for i in ids: string += i string += " " string string[:-1].split(" ") ```
github_jupyter
# Latitude, Longitude for any pixel in a GeoTiff File How to generate the latitude and longitude for a pixel at any given position in a GeoTiff file. ``` from osgeo import ogr, osr, gdal # opening the geotiff file ds = gdal.Open('G:\BTP\Satellite\Data\Test2\LE07_L1GT_147040_20050506_20170116_01_T2\LE07_L1GT_147040_20050506_20170116_01_T2_B1.TIF') col, row, band = ds.RasterXSize, ds.RasterYSize, ds.RasterCount print(col, row, band) xoff, a, b, yoff, d, e = ds.GetGeoTransform() print(xoff, a, b, yoff, d, e) # details about the params: GDAL affine transform parameters # xoff,yoff = left corner # a,e = weight,height of pixels # b,d = rotation of the image (zero if image is north up) def pixel2coord(x, y): """Returns global coordinates from coordinates x,y of the pixel""" xp = a * x + b * y + xoff yp = d * x + e * y + yoff return(xp, yp) x,y = pixel2coord(col/2,row/2) print (x, y) ``` #### These global coordinates are in a *projected coordinated system*, which is a representation of the spheroidal earth's surface, but flattened and distorted onto a plane. #### To convert these into latitude and longitude, we need to convert these coordinates into *geographic coordinate system*. 
``` # get the existing coordinate system old_cs= osr.SpatialReference() old_cs.ImportFromWkt(ds.GetProjectionRef()) # create the new coordinate system wgs84_wkt = """ GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.01745329251994328, AUTHORITY["EPSG","9122"]], AUTHORITY["EPSG","4326"]]""" new_cs = osr.SpatialReference() new_cs.ImportFromWkt(wgs84_wkt) # create a transform object to convert between coordinate systems transform = osr.CoordinateTransformation(old_cs,new_cs) # converting into geographic coordinate system lonx, latx, z = transform.TransformPoint(x,y) print (latx, lonx, z) # rb = ds.GetRasterBand(1) px,py = col/2,row/2 # the pixel location pix = ds.ReadAsArray(px,py,1,1) print pix[0][0] # pixel value ``` # Reverse Geocoding Converting a lat/long to a physical address or location. We want the name of the DISTRICT. ## -------------------------------------------------------------------------------- ### API 1: Not so accurate ## -------------------------------------------------------------------------------- ``` coordinates = (latx,lonx) results = rg.search(coordinates) print results print type(results) print type(results[0]) results[0] k = 4 # If we want k*k pixels in total from the image for i in range(0,col,col/k): for j in range(0,row,row/k): # fetching the lat and lon coordinates x,y = pixel2coord(i,j) lonx, latx, z = transform.TransformPoint(x,y) # fetching the name of district coordinates = (latx,lonx) results = rg.search(coordinates) # The pixel value for that location px,py = i,j pix = ds.ReadAsArray(px,py,1,1) pix = pix[0][0] # printing s = "The pixel value for the location Lat: {0:5.1f}, Long: {1:5.1f} ({2:15}) is {3:7}".format(latx,lonx,results[0]["name"],pix) print (s) ``` ## -------------------------------------------------------------------------------- ### API 2 ## 
-------------------------------------------------------------------------------- ``` g = geocoder.google([latx,lonx], method='reverse') print type(g) print g print g.city print g.state print g.state_long print g.country print g.country_long print g.address ``` ###### The above wrapper for Google API is not good enough for us. Its not providing us with the district. ##### Lets try another python library available for the Google Geo API ``` results = Geocoder.reverse_geocode(latx, lonx) print results.city print results.country print results.street_address print results.administrative_area_level_1 print results.administrative_area_level_2 ## THIS GIVES THE DISTRICT !! <---------------- print results.administrative_area_level_3 ``` ##### This is what we need, we are getting the district name for given lat,lon coordinates ``` ## Converting the unicode string to ascii string v = results.country print type(v) v = v.encode("ascii") print type(v) print v ``` ##### Now lets check for an image from Rajasthan ``` k = 4 # If we want k*k pixels in total from the image for i in range(0,col,col/k): for j in range(0,row,row/k): # fetching the lat and lon coordinates x,y = pixel2coord(i,j) lonx, latx, z = transform.TransformPoint(x,y) # fetching the name of district results = Geocoder.reverse_geocode(latx, lonx) # The pixel value for that location px,py = i,j pix = ds.ReadAsArray(px,py,1,1) pix = pix[0][0] # printing if results.country.encode('ascii') == 'India': s = "Lat: {0:5.1f}, Long: {1:5.1f}, District: {2:12}, Pixel Val: {3:7}".format(latx,lonx,results.administrative_area_level_2,pix) print (s) ``` # Bing Maps REST API ``` import requests # To make the REST API Call import json (latx,lonx) url = "http://dev.virtualearth.net/REST/v1/Locations/" point = str(latx)+","+str(lonx) key = "Aktjg1X8bLQ_KhLQbVueYMhXDEMo7OaTweIkBvFojInYE4tVxoTp1bGKWbtU_OPJ" response = requests.get(url+point+"?key="+key) print(response.status_code) data = response.json() print(type(data)) data s = 
data["resourceSets"][0]["resources"][0]["address"]["adminDistrict2"] s = s.encode("ascii") s url = "http://dev.virtualearth.net/REST/v1/Locations/" key = "Aktjg1X8bLQ_KhLQbVueYMhXDEMo7OaTweIkBvFojInYE4tVxoTp1bGKWbtU_OPJ" ``` ## Bing API Test #### For 100 pixel locations ``` k = 10 # If we want k*k pixels in total from the image for i in range(0,col,col/k): for j in range(0,row,row/k): ############### fetching the lat and lon coordinates ####################################### x,y = pixel2coord(i,j) lonx, latx, z = transform.TransformPoint(x,y) ############### fetching the name of district ############################################## point = str(latx)+","+str(lonx) response = requests.get(url+point+"?key="+key) data = response.json() s = data["resourceSets"][0]["resources"][0]["address"] if s["countryRegion"].encode("ascii") != "India": print ("Outside Indian Territory") continue district = s["adminDistrict2"].encode("ascii") ############### The pixel value for that location ########################################## px,py = i,j pix = ds.ReadAsArray(px,py,1,1) pix = pix[0][0] # printing s = "Lat: {0:5.1f}, Long: {1:5.1f}, District: {2:12}, Pixel Val: {3:7}".format(latx,lonx,district,pix) print (s) ``` # We have another player in the ground! 
Can Reverse Geocode by using the python libraries `shapely` and `fiona` with a shapefile for all the district boundaries of India ``` import fiona from shapely.geometry import Point, shape # Change this for Win7 base = "/Users/macbook/Documents/BTP/Satellite/Data/Maps/Districts/Census_2011" fc = fiona.open(base+"/2011_Dist.shp") def reverse_geocode(pt): for feature in fc: if shape(feature['geometry']).contains(pt): return feature['properties']['DISTRICT'] return "NRI" k = 10 # If we want k*k pixels in total from the image for i in range(0,col,col/k): for j in range(0,row,row/k): ############### fetching the lat and lon coordinates ####################################### x,y = pixel2coord(i,j) lonx, latx, z = transform.TransformPoint(x,y) ############### fetching the name of district ############################################## point = Point(lonx,latx) district = reverse_geocode(point) if district=="NRI": print ("Outside Indian Territory") continue ############### The pixel value for that location ########################################## px,py = i,j pix = ds.ReadAsArray(px,py,1,1) pix = pix[0][0] # printing s = "Lat: {0:5.1f}, Long: {1:5.1f}, District: {2:12}, Pixel Val: {3:7}".format(latx,lonx,district,pix) print (s) ``` # Now we can proceed to GenFeatures Notebook
github_jupyter
``` !pip install unidecode googletrans !pip install squarify import re import time import tweepy import folium import squarify import warnings import collections import numpy as np import pandas as pd from PIL import Image from folium import plugins from datetime import datetime from textblob import TextBlob import matplotlib.pyplot as plt from unidecode import unidecode from googletrans import Translator from geopy.geocoders import Nominatim from wordcloud import WordCloud, STOPWORDS # Adicione suas credenciais para a API do Twitter CONSUMER_KEY = YOUR_CONSUMER_KEY CONSUMER_SECRET = YOUR_CONSUMER_SECRET ACCESS_TOKEN = YOUR_ACCESS_TOKEN ACCESS_TOKEN_SECRET = YOUR_ACCESS_TOKEN_SECRET ``` # Implementação da classe para obter os tweets ``` class TweetAnalyzer(): def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret): ''' Conectar com o tweepy ''' auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) self.conToken = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, retry_count=5, retry_delay=10) def __clean_tweet(self, tweets_text): ''' Tweet cleansing. ''' clean_text = re.sub(r'RT+', '', tweets_text) clean_text = re.sub(r'@\S+', '', clean_text) clean_text = re.sub(r'http\S+', '', clean_text) clean_text = clean_text.replace("\n", " ") return clean_text def search_by_keyword(self, keyword, count=10, result_type='mixed', lang='en', tweet_mode='extended'): ''' Search for the twitters thar has commented the keyword subject. ''' tweets_iter = tweepy.Cursor(self.conToken.search, q=keyword, tweet_mode=tweet_mode, rpp=count, result_type=result_type, since=datetime(2020,7,31,0,0,0).date(), lang=lang, include_entities=True).items(count) return tweets_iter def prepare_tweets_list(self, tweets_iter): ''' Transforming the data to DataFrame. 
''' tweets_data_list = [] for tweet in tweets_iter: if not 'retweeted_status' in dir(tweet): tweet_text = self.__clean_tweet(tweet.full_text) tweets_data = { 'len' : len(tweet_text), 'ID' : tweet.id, 'User' : tweet.user.screen_name, 'UserName' : tweet.user.name, 'UserLocation' : tweet.user.location, 'TweetText' : tweet_text, 'Language' : tweet.user.lang, 'Date' : tweet.created_at, 'Source': tweet.source, 'Likes' : tweet.favorite_count, 'Retweets' : tweet.retweet_count, 'Coordinates' : tweet.coordinates, 'Place' : tweet.place } tweets_data_list.append(tweets_data) return tweets_data_list def sentiment_polarity(self, tweets_text_list): tweets_sentiments_list = [] for tweet in tweets_text_list: polarity = TextBlob(tweet).sentiment.polarity if polarity > 0: tweets_sentiments_list.append('Positive') elif polarity < 0: tweets_sentiments_list.append('Negative') else: tweets_sentiments_list.append('Neutral') return tweets_sentiments_list analyzer = TweetAnalyzer(consumer_key = CONSUMER_KEY, consumer_secret = CONSUMER_SECRET, access_token = ACCESS_TOKEN, access_token_secret=ACCESS_TOKEN_SECRET) keyword = ("'Black is King' OR 'black is king' OR 'Beyonce' OR 'beyonce' OR #blackisking OR '#BlackIsKing' OR 'black is king beyonce'") count = 5000 tweets_iter = analyzer.search_by_keyword(keyword, count) tweets_list = analyzer.prepare_tweets_list(tweets_iter) tweets_df = pd.DataFrame(tweets_list) ``` # Análises ## Qual o tweet mais curtido e retweetado? ``` likes_max = np.max(tweets_df['Likes']) likes = tweets_df[tweets_df.Likes == likes_max].index[0] print(f"O tweet com mais curtidas é: {tweets_df['TweetText'][likes]}") print(f"Numero de curtidas: {likes_max}") retweet_max = np.max(tweets_df['Retweets']) retweet = tweets_df[tweets_df.Retweets == retweet_max].index[0] print(f"O tweet com mais retweets é: {tweets_df['TweetText'][retweet]}") print(f"Numero de curtidas: {retweet_max}") ``` ## Qual a porcentagem dos sentimentos captado? 
``` tweets_df['Sentiment'] = analyzer.sentiment_polarity(tweets_df['TweetText']) sentiment_percentage = tweets_df.groupby('Sentiment')['ID'].count().apply(lambda x : 100 * x / count) sentiment_percentage.plot(kind='bar') plt.show() plt.savefig('sentiments_tweets.png', bbox_inches='tight', pad_inches=0.5) ``` ## Quais as palavras mais atribuídas? ``` words = ' '.join(tweets_df['TweetText']) words_clean = " ".join([word for word in words.split()]) warnings.simplefilter('ignore') mask = np.array(Image.open('crown.png')) wc = WordCloud(stopwords=STOPWORDS, mask=mask, max_words=1000, max_font_size=100, min_font_size=10, random_state=42, background_color='white', mode="RGB", width=mask.shape[1], height=mask.shape[0], normalize_plurals=True).generate(words_clean) plt.imshow(wc, interpolation="bilinear") plt.axis("off") plt.savefig('black_is_king_cloud.png', dpi=300) plt.show() ``` ## Quais são as fonte de tweets mais utilizados? ``` # Calcular quantidade de source source_list = tweets_df['Source'].tolist() occurrences = collections.Counter(source_list) source_df = pd.DataFrame({'Total':list(occurrences.values())}, index=occurrences.keys()) sources_sorted = source_df.sort_values('Total', ascending=True) # Plotar gráfico plt.style.use('ggplot') plt.rcParams['axes.edgecolor']='#333F4B' plt.rcParams['axes.linewidth']=0.8 plt.rcParams['xtick.color']='#333F4B' plt.rcParams['ytick.color']='#333F4B' my_range=list(range(1,len(sources_sorted.index)+1)) ax = sources_sorted.Total.plot(kind='barh',color='#1f77b4', alpha=0.8, linewidth=5, figsize=(15,15)) ax.get_xaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x)))) plt.savefig('source_tweets.png', bbox_inches='tight', pad_inches=0.5) # Distribuição das 5 primeiras fontes mais utilizadas squarify.plot(sizes=sources_sorted['Total'][:5], label=sources_sorted.index, alpha=.5) plt.axis('off') plt.show() ``` ## De quais regiões vieram os tweets ``` geolocator = Nominatim(user_agent="TweeterSentiments") latitude 
= [] longitude = [] for user_location in tweets_df['UserLocation']: try: location = geolocator.geocode(user_location) latitude.append(location.latitude) longitude.append(location.longitude) except: continue coordenadas = np.column_stack((latitude, longitude)) mapa = folium.Map(zoom_start=3.) mapa.add_child(plugins.HeatMap(coordenadas)) mapa.save('Mapa_calor_tweets.html') mapa ``` ## Análise temporal dos tweets ``` data = tweets_df data['Date'] = pd.to_datetime(data['Date']).apply(lambda x: x.date()) tlen = pd.Series(data['Date'].value_counts(), index=data['Date']) tlen.plot(figsize=(16,4), color='b') plt.savefig('timeline_tweets.png', bbox_inches='tight', pad_inches=0.5) ```
github_jupyter
# Document Classification & Clustering - Lecture What could we do with the document-term-matrices (dtm[s]) created in the previous notebook? We could visualize them or train an algorithm to do some specific task. We have covered both classification and clustering before, so we won't focus on the particulars of algorithms. Instead we'll focus on the unique problems of dealing with text input for these models. ## Contents * [Part 1](#p1): Vectorize a whole Corpus * [Part 2](#p2): Tune the vectorizer * [Part 3](#p3): Apply Vectorizer to Classification problem * [Part 4](#p4): Introduce topic modeling on text data **Business Case**: Your managers at Smartphone Inc. have asked to develop a system to bucket text messages into two categories: **spam** and **not spam (ham)**. The system will be implemented on your companies products to help users identify suspicious texts. # Spam Filter - Count Vectorization Method ``` import pandas as pd import numpy as np pd.set_option('display.max_colwidth', 200) ``` **Import the data and take a look at it** ``` def load(): url = "https://raw.githubusercontent.com/sokjc/BayesNotBaes/master/sms.tsv" df = pd.read_csv(url, sep='\t', header=None, names=['label', 'msg']) df = df.rename(columns={"msg":"text"}) # encode target df['label_num'] = df['label'].map({'ham': 0, 'spam': 1}) return df pd.set_option('display.max_colwidth', 200) df = load() df.tail() ``` Notice that this text isn't as coherent as the job listings. We'll proceed like normal though. What is the ratio of Spam to Ham messages? 
``` df['label'].value_counts() df['label'].value_counts(normalize=True) ``` **Model Validation - Train Test Split** (Cross Validation would be better here) ``` from sklearn.model_selection import train_test_split X = df['text'] y = df['label_num'] X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.2, random_state=812) print(X_train.shape, X_test.shape, y_train.shape, y_test.shape, sep='\n') ``` **Count Vectorizer** Today we're just going to let Scikit-Learn do our text cleaning and preprocessing for us. Lets run our vectorizer on our text messages and take a peek at the tokenization of the vocabulary ``` from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(max_features=None, ngram_range=(1,1), stop_words='english') vectorizer.fit(X_train) print(vectorizer.get_feature_names()[300:325]) ``` Now we'll complete the vectorization with `.transform()` ``` train_word_counts = vectorizer.transform(X_train) # not necessary to save to a dataframe, but helpful for previewing X_train_vectorized = pd.DataFrame(train_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_train_vectorized.shape) X_train_vectorized.head() ``` We also need to vectorize our `X_test` data, but **we need to use the same vocabulary as the training dataset**, so we'll just call `.transform()` on `X_test` to get our `X_test_vectorized` ``` test_word_counts = vectorizer.transform(X_test) X_test_vectorized = pd.DataFrame(test_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_test_vectorized.shape) X_test_vectorized.head() ``` Lets run some classification models and see what kind of accuracy we can get! 
# Model Selection ``` from sklearn.metrics import accuracy_score def assess_model(model, X_train, X_test, y_train, y_test, vect_type='Count'): model.fit(X_train, y_train) train_predictions = model.predict(X_train) test_predictions = model.predict(X_test) result = {} result['model'] = str(model).split('(')[0] result['acc_train'] = accuracy_score(y_train, train_predictions) result['acc_test'] = accuracy_score(y_test, test_predictions) result['vect_type'] = vect_type print(result) return result from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB # Multinomial Naive Bayes from sklearn.ensemble import RandomForestClassifier models = [LogisticRegression(random_state=42, solver='lbfgs'), MultinomialNB(), RandomForestClassifier()] results = [] for model in models: result = assess_model( model, X_train_vectorized, X_test_vectorized, y_train, y_test) results.append(result) pd.DataFrame.from_records(results) ``` # Spam Filter - TF-IDF Vectorization Method ``` from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer( max_features=None, ngram_range=(1,1), stop_words='english') # fit to train vectorizer.fit(X_train) print(vectorizer) # apply to train train_word_counts = vectorizer.transform(X_train) X_train_vectorized = pd.DataFrame(train_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_train_vectorized.shape) X_train_vectorized.head() # apply to test test_word_counts = vectorizer.transform(X_test) X_test_vectorized = pd.DataFrame(test_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_test_vectorized.shape) X_test_vectorized.head() models = [LogisticRegression(random_state=42, solver='lbfgs'), MultinomialNB(), RandomForestClassifier()] for model in models: result = assess_model( model, X_train_vectorized, X_test_vectorized, y_train, y_test, vect_type='Tfidf') results.append(result) pd.DataFrame.from_records(results) ``` # Sentiment Analysis The objective of 
**sentiment analysis** is to take a text phrase and determine if its sentiment is: Postive, Neutral, or Negative. Suppose that you wanted to use NLP to classify reviews for your company's products as either positive, neutral, or negative. Maybe you don't trust the star ratings left by the users and you want an additional measure of sentiment from each review - maybe you would use this as a feature generation technique for additional modeling, or to identify disgruntled customers and reach out to them to improve your customer service, etc. Sentiment Analysis has also been used heavily in stock market price estimation by trying to track the sentiment of the tweets of individuals after breaking news comes out about a company. Does every word in each review contribute to its overall sentiment? Not really. Stop words for example don't really tell us much about the overall sentiment of the text, so just like we did before, we will discard them. ### NLTK Movie Review Sentiment Analysis `pip install -U nltk` ``` import random import nltk def load_movie_reviews(): from nltk.corpus import movie_reviews nltk.download('movie_reviews') nltk.download('stopwords') print("Total reviews:", len(movie_reviews.fileids())) print("Positive reviews:", len(movie_reviews.fileids('pos'))) print("Negative reviews:", len(movie_reviews.fileids('neg'))) # Get Reviews and randomize reviews = [(list(movie_reviews.words(fileid)), category) for category in movie_reviews.categories() for fileid in movie_reviews.fileids(category)] random.shuffle(reviews) documents = [] sentiments = [] for review in reviews: # Add sentiment to list if review[1] == "pos": sentiments.append(1) else: sentiments.append(0) # Add text to list review_text = " ".join(review[0]) documents.append(review_text) df = pd.DataFrame({"text": documents, "sentiment": sentiments}) return df df = load_movie_reviews() df.head() ``` ### Train Test Split ``` X = df['text'] y = df['sentiment'] X_train, X_test, y_train, y_test = \ 
train_test_split(X, y, test_size=0.2, random_state=42) ``` # Sentiment Analysis - CountVectorizer ## Generate vocabulary from train dataset ``` from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(max_features=None, ngram_range=(1,1), stop_words='english') vectorizer.fit(X_train) train_word_counts = vectorizer.transform(X_train) X_train_vectorized = pd.DataFrame(train_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_train_vectorized.shape) X_train_vectorized.head() test_word_counts = vectorizer.transform(X_test) X_test_vectorized = pd.DataFrame(test_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_test_vectorized.shape) X_test_vectorized.head() ``` ### Model Selection ``` models = [LogisticRegression(random_state=42, solver='lbfgs'), MultinomialNB(), RandomForestClassifier()] results = [] for model in models: result = assess_model( model, X_train_vectorized, X_test_vectorized, y_train, y_test, vect_type='Count') results.append(result) pd.DataFrame.from_records(results) ``` # Sentiment Analysis - tfidfVectorizer ``` from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer(max_features=2000, ngram_range=(1,2), min_df = 5, max_df = .80, stop_words='english') vectorizer.fit(X_train) train_word_counts = vectorizer.transform(X_train) X_train_vectorized = pd.DataFrame(train_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_train_vectorized.shape) X_train_vectorized.head() test_word_counts = vectorizer.transform(X_test) X_test_vectorized = pd.DataFrame(test_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_test_vectorized.shape) X_test_vectorized.head() ``` ### Model Selection ``` for model in models: result = assess_model( model, X_train_vectorized, X_test_vectorized, y_train, y_test, vect_type='tfidf') results.append(result) pd.DataFrame.from_records(results) ``` # Using NLTK to clean the data ### Importing the data 
fresh to avoid variable collisions ``` df = load_movie_reviews() ``` ### Cleaning function to apply to each document ``` from nltk.corpus import stopwords import string # turn a doc into clean tokens def clean_doc(doc): # split into tokens by white space tokens = doc.split() # remove punctuation from each token table = str.maketrans('', '', string.punctuation) tokens = [w.translate(table) for w in tokens] # remove remaining tokens that are not alphabetic tokens = [word for word in tokens if word.isalpha()] # filter out stop words stop_words = set(stopwords.words('english')) tokens = [w for w in tokens if not w in stop_words] # filter out short tokens tokens = [word for word in tokens if len(word) > 1] return tokens df_nltk = pd.DataFrame() df_nltk['text'] = df.text.apply(clean_doc) df_nltk['sentiment'] = df.sentiment df_nltk.head() ``` ### Reformat reviews for sklearn ``` documents = [] for review in df_nltk.text: review = " ".join(review) documents.append(review) sentiment = list(df_nltk.sentiment) new_df = pd.DataFrame({'text': documents, 'sentiment': sentiment}) new_df.head() ``` ### Train Test Split ``` X = new_df.text y = new_df.sentiment X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.2, random_state=42) ``` ### Vectorize the reviews ``` from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer(max_features=None, ngram_range=(1,1), stop_words='english') vectorizer.fit(X_train) train_word_counts = vectorizer.transform(X_train) X_train_vectorized = pd.DataFrame(train_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_train_vectorized.shape) X_train_vectorized.head() test_word_counts = vectorizer.transform(X_test) X_test_vectorized = pd.DataFrame(test_word_counts.toarray(), columns=vectorizer.get_feature_names()) print(X_test_vectorized.shape) X_test_vectorized.head() ``` ### Model Selection ``` models = [LogisticRegression(random_state=42, solver='lbfgs'), MultinomialNB(), 
RandomForestClassifier()] results = [] for model in models: result = assess_model( model, X_train_vectorized, X_test_vectorized, y_train, y_test, vect_type='Tfidf') results.append(result) pd.DataFrame.from_records(results) # import xgboost as xgb from xgboost.sklearn import XGBClassifier clf = XGBClassifier( #hyper params n_jobs = -1, ) clf.fit(X_train_vectorized, y_train, eval_metric = 'auc') ```
github_jupyter
``` a = 'ok' b = 'test' print(a+b) print(a*2) name = 'Bob' print(f'Hello, {name}') greeting = 'Hello, {}' with_name = greeting.format(name) print(with_name) size = input('Enter the size of your house: ') integer = int(size) floating = float(size) print(integer, floating) square_meters = integer / 10.8 print(f'{integer} square feet is {square_meters} square meters.') print(f'{integer} square feet is {square_meters:.2f} square meters.') user_age = input('Enter your age: ') years = int(user_age) months = years * 12 days = months * 30 hours = days * 24 minutes = hours * 60 seconds = minutes * 60 print(f'Your age, {years}, is equal to {months} months or {seconds} seconds.') friends = {'Bob', 'Anne', 'Rolf'} abroad = {'Bob', 'Rolf'} local_friends = friends.difference(abroad) print(local_friends) local_friends_opposite = abroad.difference(friends) print(local_friends_opposite) other_friends = {'Maria', 'Jose'} all_friends = friends.union(other_friends) print(all_friends) abroad.add('Lara') print(abroad) friends_study_science = {'Ellen', 'Renato', 'Bob', 'Rolf'} abroad_study_science = abroad.intersection(friends_study_science) print(abroad_study_science) colors = {'blue', 'red', 'white', 'black'} user_color = input('Enter a color that you think is in the game: ').lower() if user_color in colors: print('You are right!') else: print("Sorry, you're wrong") friends = ['Suzy', 'Ellie', 'Sarah', 'Anna', 'Sayuri'] friends_starts_s = [] friends_starts_s_list_comprehension = [] for friend in friends: if friend.startswith('S'): friends_starts_s.append(friend) print(friends_starts_s) # using list comprehension friends_starts_s_list_comprehension = [friend for friend in friends if friend.startswith('S')] print(friends_starts_s_list_comprehension) student_attendance = {'Rolf': 96, 'Bob': 80, 'Anne': 100} for student, attendance in student_attendance.items(): print(f'{student} has {attendance}% of attendance') attendance_values = student_attendance.values() print(sum(attendance_values) 
/ len(attendance_values)) person = ('Jose', 30, 'artist') name, _, profession = person print(name, profession) friends = ['Ella', 'Ellie'] def add_friend(): friend_name = input('Enter your friend name: ') f = friends + [friend_name] print(f) add_friend() def say_hello(name, surname='Doe'): print(f'Hello, {name} {surname}.') say_hello(surname='Filly', name='Phil') say_hello('Filly', 'Phil') say_hello('Filly', surname='Phil') say_hello('Phil') def add(x, y): return x + y # transform into Lambda add = lambda x, y: x + y print(add(5, 7)) # you can also call it right away, like an IIFE print((lambda x, y: x + y)(5, 7)) # Another Example def double(x): return x * 2 sequence = [1, 3, 5, 7] doubled = [double(x) for x in sequence] doubled_inline = [(lambda x: x * 2)(y) for y in sequence] print(doubled) print(doubled_inline) # same thing - you can use map, it will go through each number in the sequence and apply double on it, it will then return a list with it # NOTE: it is a little bit slower than list comprehension doubled_same = list(map(double, sequence)) print(doubled_same) def multiply(*args): print(args) total = 1 for arg in args: total = total * arg return total multiply(1,3,5) def add(x,y): return x + y nums = [3, 5] print(add(*nums)) # it will destructure the nums when calling add, so 3 will be x and 5 will be y # Another way nums = {'x': 15, 'y': 25} print(add(x=nums['x'], y=nums['y'])) # instead of doing like that, we can use `**` print(add(**nums)) # Going back to the mulpiply example and using with another function def apply(*args, operator): if operator == '*': return multiply(*args) # we need to add the `*` to destructure, otherwise we will send a tuple and the multiply function will create a tuple with the tuple elif operator == '+': return sum(args) else: return 'No valid operator provided to apply()' print(apply(1, 3, 6, 9, operator='*')) # we need to use the keyword argument for operator, otherwise the `*args` from the function will get everything as the 
args and the operator will be missing. def named(**kwargs): print(kwargs) named(name='Bob', age=25) # Another option def named1(name, age): print(name, age) details = {'name': 'Bob', 'age': 25} named1(**details) named(**details) def print_nicely(**kwargs): named(**kwargs) for arg, value in kwargs.items(): print(f'{arg}: {value}') print_nicely(name='Bob', age=25) def both(*args, **kwargs): print(args) print(kwargs) both(1, 3, 5, name='Bob', age=25) # create the Student class class Student: # all objects has the self ('this'), but they can have other properties, like name or grades def __init__(self, name, grades): self.name = name self.grades = grades def average(self): return sum(self.grades) / len(self.grades) # create a new student student1 = Student('Matt', (90, 90, 80, 75, 80)) student2 = Student('Rob', (40, 50, 60, 75, 60)) print(student1.name) print(student2.grades) print(Student.average(student1)) #same as below print(student1.average()) class Person: def __init__(self, name, age): self.name = name self.age = age bob = Person('Bob', 35) print(bob) class Person_modified: def __init__(self, name, age): self.name = name self.age = age # what to print when we print the string representation of the instance def __str__(self): return f'I am {self.name}, and I have {self.age} years.' # this method goal is to be unambiguous and it should return a string that allows us to recreate the object very easily def __repr__(self): return f"<Person('{self.name}', {self.age})>" bob_modified = Person_modified('Bob', 35) print(bob_modified) # I am Bob, and I have 35. 
# in order to print the __repr__ method, you can call it or comment the __str__ and just print the instance: print(bob_modified.__repr__()) class ClassTest: def instance_method(self): print(f'Called instance_method of {self}') @classmethod def class_method(cls): print(f'Called class_method of {cls}') @staticmethod def static_method(): print('Called static_method') test = ClassTest() # instance method because it is called on the instance - it will receive 'self', which is the instance and you can use it in the return test.instance_method() # class method because it is called on the class - it will receive 'cls', which is the class and you can use it in the return => Very used as factory ClassTest.class_method() # static method is called without 'passing' the object/instance to it, it is really just a function that you pasted inside the class, it doesn't have any info of the class or the instance ClassTest.static_method() # Another Example class Book: TYPES = ('hardcover', 'paperback') def __init__(self, name, book_type, weight): self.name = name self.book_type = book_type self.weight = weight def __repr__(self): return f'<Book {self.name}, {self.book_type}, weighing {self.weight}g>' # factory => create a new instance within the class using the class ==> since cls is the class, you can use Book or cls, but it is best practices to use cls, also because of inheritance @classmethod def hardcover(cls, name, page_weight): return cls(name, Book.TYPES[0], page_weight + 100) @classmethod def paperback(cls, name, page_weight): return cls(name, Book.TYPES[1], page_weight + 100) book = Book.hardcover('Harry Potter', 1500) light = Book.hardcover('Python', 600) print(book) print(light) class Device: def __init__(self, name, connected_by): self.name = name self.connected_by = connected_by self.connected = True def __str__(self): # the '!r' calls the repr method on self.name, so it adds the quotes automatically return f'Device {self.name!r} ({self.connected_by})' def 
disconnect(self): self.connected = False print('Disconnected.') # create a Printer class who inherits from Device, so you have access to all the methods from the Device class and can also add new methods specific to the Printer class class Printer(Device): def __init__(self, name, connected_by, capacity): # get the parent class with super() and then call the __init__ method of it passing the variables => this way you don't have to copy everything again super().__init__(name, connected_by) self.capacity = capacity self.remaining_pages = capacity def __str__(self): return f'{super().__str__()} ({self.remaining_pages} pages remaining.)' def print(self, pages): if not self.connected: print('Your printer is not connected!') return print(f'Printing {pages} pages') self.remaining_pages -= pages headphones = Device('Headphones', 'Bluetooth') print(headphones) printer = Printer('Printer', 'USB', 500) printer.print(20) print(printer) printer.disconnect() printer.print(30) class Bookshelf: def __init__(self, quantity): self.quantity = quantity def __str__(self): # python ternary operator: 'true' if 'condition' else 'false' end = 's.' if self.quantity > 1 else '.' return f'Bookshelf with {self.quantity} book{end}' shelf = Bookshelf(300) # with inheritance ==> not the best way, you are saying that books are also bookshelves, which is not technically true. Also, you are completely overriding the __str__ method from Bookshelf and you are not using the Bookshelf anywhere. class Book_inheritance(Bookshelf): def __init__(self, name, quantity): super().__init__(quantity) self.name = name def __str__(self): return f'Book {self.name}' book = Book_inheritance('Harry Potter', 120) print(book) # with composition ==> better to use in this case, since with this you mean: a bookshelf has many books. But a book is not a bookshelf. 
class Bookshelf_composition: def __init__(self, *books): self.books = books def __str__(self): # python ternary operator: 'true' if 'condition' else 'false' end = 's.' if len(self.books) > 1 else '.' return f'Bookshelf with {len(self.books)} book{end}' class Book_composition: def __init__(self, name): self.name = name def __str__(self): return f'Book {self.name}' book = Book_composition('Harry Potter') book1 = Book_composition('Harry Potter II') shelf1 = Bookshelf_composition(book, book1) print(shelf1) from typing import List def list_avg(sequence: List) -> float: return sum(sequence) / len(sequence) # list_avg(123) list_avg([1,2,3]) class TooManyPagesReadError(ValueError): pass class Book: def __init__(self, name: str, page_count: int): self.name = name self.page_count = page_count self.pages_read = 0 def __repr__(self): return ( f'<Book {self.name}, read{self.pages_read} pages out of {self.page_count}>' ) def read(self, pages: int): if self.pages_read + pages > self.page_count: raise TooManyPagesReadError(f'You tried to read {self.pages_read + pages} pages, but this book only has {self.page_count} pages.') self.pages_read += pages print(f'You have now read {self.pages_read} pages out of {self.page_count}.') python101 = Book('Python 101', 50) python101.read(35) python101.read(10) python101.read(30) user = {'username': 'jose', 'access_level': 'guest'} # unprotected route def get_admin_password(): return '1234' # create decorator to protect the route def make_secure(func): def secure_function(): if user['access_level'] == 'admin': return func() else: return f'No admin permissions for {user["username"]}' return secure_function get_admin_password = make_secure(get_admin_password) print(get_admin_password()) # With The '@' syntax def make_secure1(func): def secure_function(): if user['access_level'] == 'admin': return func() else: return f'No admin permissions for {user["username"]}' return secure_function # just add the '@' and the decorator function name to secure 
this route and then call it @make_secure1 def get_admin_password1(): return '1234' print(get_admin_password1()) # it will return the name as 'secure_function' and any documentation from get_admin_password1 would be lost and replaced with the secure_function print(get_admin_password1.__name__) # in order to fix this, we need to import functools and add the decorator before the secure_function import functools def make_secure2(func): # decorator @functools.wraps(func) #it will protect the name and documentation of the 'func', in this case, the get_admin_password def secure_function(): # function that will replace the other one if user['access_level'] == 'admin': return func() else: return f'No admin permissions for {user["username"]}' return secure_function @make_secure2 def get_admin_password2(): return '1234' # returns get_admin_password2 print(get_admin_password2.__name__) from typing import List, Optional class Student: # this is BAD def __init__(self, name: str, grades: List[int] = []): self.name = name self.grades = grades def take_exam(self, result: int): self.grades.append(result) bob = Student('Bob') matt = Student('Matt') bob.take_exam(90) print(bob.grades) # [90] print(matt.grades) # [90] class Student1: # this is BAD def __init__(self, name: str, grades: Optional[List[int]] = None): self.name = name self.grades = grades or [] def take_exam(self, result: int): self.grades.append(result) bob1 = Student1('Bob') matt1 = Student1('Matt') bob1.take_exam(90) print(bob1.grades) # [90] print(matt1.grades) # [] ```
github_jupyter
# Kestrel+Model ### A [Bangkit 2021](https://grow.google/intl/id_id/bangkit/) Capstone Project Kestrel is a TensorFlow powered American Sign Language translator Android app that will make it easier for anyone to seamlessly communicate with people who have vision or hearing impairments. The Kestrel model builds on the state of the art MobileNetV2 model that is optimized for speed and latency on smartphones to accurately recognize and interpret sign language from the phone’s camera and display the translation through a beautiful, convenient and easily accessible Android app. # American Sign Language Fingerspelling alphabets from the [National Institute on Deafness and Other Communication Disorders (NIDCD)](https://www.nidcd.nih.gov/health/american-sign-language-fingerspelling-alphabets-image) <table> <tr><td> <img src="https://www.nidcd.nih.gov/sites/default/files/Content%20Images/NIDCD-ASL-hands-2019_large.jpg" alt="Fashion MNIST sprite" width="600"> </td></tr> <tr><td align="center"> <b>Figure 1.</b> <a href="https://www.nidcd.nih.gov/health/american-sign-language-fingerspelling-alphabets-image">ASL Fingerspelling Alphabets</a> <br/>&nbsp; </td></tr> </table> ``` from google.colab import drive drive.mount('/content/drive') ``` # Initial setup ``` try: %tensorflow_version 2.x except: pass import numpy as np import matplotlib.pylab as plt import tensorflow as tf import tensorflow_hub as hub import PIL import PIL.Image from os import listdir import pathlib from tqdm import tqdm from tensorflow.keras.preprocessing import image_dataset_from_directory print("\u2022 Using TensorFlow Version:", tf.__version__) print("\u2022 Using TensorFlow Hub Version: ", hub.__version__) print('\u2022 GPU Device Found.' if tf.config.list_physical_devices('GPU') else '\u2022 GPU Device Not Found. 
Running on CPU') ``` # Data preprocessing ### (Optional) Unzip file on Google Drive ``` import zipfile import pathlib zip_dir = pathlib.Path('/content/drive/Shareddrives/Kestrel/A - Copy.zip') unzip_dir = pathlib.Path('/content/drive/Shareddrives/Kestrel/A_Unzipped') with zipfile.ZipFile(zip_dir, 'r') as zip_ref: zip_ref.extractall(unzip_dir) ``` ### Loading images from directory ``` data_dir = pathlib.Path('/Dev/A') ``` ### (Optional) Counting the number of images in the dataset ``` image_count = len(list(data_dir.glob('*/color*.png'))) print(image_count) ``` ### (Optional) Displaying one of the "a" letter sign language image: ``` two = list(data_dir.glob('*/color*.png')) PIL.Image.open(str(two[0])) ``` # Create the dataset Loading the images off disk using [image_dataset_from_directory](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory). Define some parameters for the loader: ``` BATCH_SIZE = 30 IMG_SIZE = (160, 160) ``` ### Coursera method using ImageDataGenerator ``` from tensorflow.keras.preprocessing.image import ImageDataGenerator train_generator = ImageDataGenerator( rescale = 1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest', validation_split=0.2) validation_generator = ImageDataGenerator( rescale = 1./255, validation_split=0.2) train_dataset = train_generator.flow_from_directory(data_dir, batch_size = BATCH_SIZE, class_mode = 'categorical', subset='training', target_size = IMG_SIZE, shuffle=True, ) validation_dataset = validation_generator.flow_from_directory(data_dir, batch_size = BATCH_SIZE, class_mode = 'categorical', subset='validation', target_size = IMG_SIZE, shuffle=True, ) ``` Splitting images for training and validation ### (Optional) Visualize the data Show the first 9 images and labels from the training set: ``` #@title Showing 9 images plt.figure(figsize=(10, 10)) for images, labels in 
train_dataset.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) plt.title(class_names[labels[i]]) plt.axis("off") for image_batch, labels_batch in train_dataset: print(image_batch.shape) print(labels_batch.shape) break ``` ### (Deprecated) Create a test set To create a Test Set, determine how many batches of data are available in the validation set using ```tf.data.experimental.cardinality```, then move 20% of them to a test set. ``` validation_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(validation_batches // 5) validation_dataset = validation_dataset.skip(validation_batches // 5) print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset)) ``` ### Configure the dataset for performance Use buffered prefetching to load images from disk without having I/O become blocking. To learn more about this method see the [data performance](https://www.tensorflow.org/guide/data_performance) guide. ``` AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.cache().prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.cache().prefetch(buffer_size=AUTOTUNE) # test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) ``` # Create the model ### Create the base model from the pre-trained convnets You will create the base model from the **MobileNet V2** model developed at Google. This is pre-trained on the ImageNet dataset, a large dataset consisting of 1.4M images and 1000 classes. ImageNet is a research training dataset with a wide variety of categories like `jackfruit` and `syringe`. This base of knowledge will help us classify cats and dogs from our specific dataset. First, you need to pick which layer of MobileNet V2 you will use for feature extraction. 
The very last classification layer (on "top", as most diagrams of machine learning models go from bottom to top) is not very useful. Instead, you will follow the common practice to depend on the very last layer before the flatten operation. This layer is called the "bottleneck layer". The bottleneck layer features retain more generality as compared to the final/top layer. First, instantiate a MobileNet V2 model pre-loaded with weights trained on ImageNet. By specifying the **include_top=False** argument, you load a network that doesn't include the classification layers at the top, which is ideal for feature extraction. ``` # Create the base model from the pre-trained model MobileNet V2 IMG_SHAPE = IMG_SIZE + (3,) base_model = tf.keras.applications.MobileNetV2(input_shape=(160, 160, 3), include_top=False, weights='imagenet') ``` This feature extractor converts each `224 x 224` image into a `7x7x1280` block of features. Let's see what it does to an example batch of images: ``` image_batch, label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) ``` ### Freeze the convolutional base In this step, you will freeze the convolutional base created from the previous step and to use as a feature extractor. Additionally, you add a classifier on top of it and train the top-level classifier. It is important to freeze the convolutional base before you compile and train the model. Freezing (by setting layer.trainable = False) prevents the weights in a given layer from being updated during training. MobileNet V2 has many layers, so setting the entire model's `trainable` flag to False will freeze all of them. 
def get_uncompiled_model():
    """Build the classifier head on top of the frozen MobileNetV2 base."""
    model = Model(base_model.input, x)
    return model


def get_compiled_model():
    """Return a fresh model compiled with RMSprop / categorical cross-entropy."""
    model = get_uncompiled_model()
    model.compile(
        optimizer="rmsprop",
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model


def make_or_restore_model():
    """Return a compiled model, restoring weights from the newest checkpoint.

    Uses ``tf.train.latest_checkpoint``, which understands TensorFlow's
    multi-file checkpoint format (.index/.data shards).  The original
    picked an arbitrary raw file by ctime and then loaded from the
    hard-coded ``checkpoint_path`` regardless of what it had found.
    """
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    if latest_checkpoint:
        print("Restoring from", latest_checkpoint)
        model = get_uncompiled_model()
        model.load_weights(latest_checkpoint)
        model.compile(
            optimizer="rmsprop",
            loss="categorical_crossentropy",
            metrics=["accuracy"],
        )
        return model
    print("Creating a new model")
    return get_compiled_model()
# export_dir = 'saved_model/2' # # YOUR CODE HERE # tf.saved_model.save(model, export_dir) ``` ### Plotting the accuracy and loss ``` import matplotlib.pyplot as plt acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend(loc=0) plt.figure() plt.plot(epochs, loss, 'r', label='Training Loss') plt.plot(epochs, val_loss, 'b', label='Validation Loss') plt.title('Training and validation loss') plt.legend() plt.show() ``` # Exporting to TFLite You will now save the model to TFLite. We should note, that you will probably see some warning messages when running the code below. These warnings have to do with software updates and should not cause any errors or prevent your code from running. ``` # EXERCISE: Use the tf.saved_model API to save your model in the SavedModel format. 
export_dir = 'saved_model/10_50Dropout0.5V2' # YOUR CODE HERE tf.saved_model.save(model, export_dir) # # Select mode of optimization # mode = "Speed" # if mode == 'Storage': # optimization = tf.lite.Optimize.OPTIMIZE_FOR_SIZE # elif mode == 'Speed': # optimization = tf.lite.Optimize.OPTIMIZE_FOR_LATENCY # else: # optimization = tf.lite.Optimize.DEFAULT # EXERCISE: Use the TFLiteConverter SavedModel API to initialize the converter import tensorflow as tf converter = tf.lite.TFLiteConverter.from_saved_model(export_dir) # YOUR CODE HERE # Set the optimzations converter.optimizations = [tf.lite.Optimize.DEFAULT]# YOUR CODE HERE # Invoke the converter to finally generate the TFLite model tflite_model = converter.convert()# YOUR CODE HERE tflite_model_file = pathlib.Path('saved_model/10_50Dropout0.5V2/model.tflite') tflite_model_file.write_bytes(tflite_model) # path_to_pb = "C:/saved_model/saved_model.pb" # def load_pb(path_to_pb): # with tf.gfile.GFile(path_to_pb, "rb") as f: # graph_def = tf.GraphDef() # graph_def.ParseFromString(f.read()) # with tf.Graph().as_default() as graph: # tf.import_graph_def(graph_def, name='') # return graph # print(graph) ``` # Test the model with TFLite interpreter ``` # Load TFLite model and allocate tensors. 
interpreter = tf.lite.Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] # Gather results for the randomly sampled test images predictions = [] test_labels = [] test_images = [] test_batches = data_dir.map(format_example).batch(1) for img, label in test_batches.take(50): interpreter.set_tensor(input_index, img) interpreter.invoke() predictions.append(interpreter.get_tensor(output_index)) test_labels.append(label[0]) test_images.append(np.array(img)) # Utilities functions for plotting def plot_image(i, predictions_array, true_label, img): predictions_array, true_label, img = predictions_array[i], true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) img = np.squeeze(img) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label.numpy(): color = 'green' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color) def plot_value_array(i, predictions_array, true_label): predictions_array, true_label = predictions_array[i], true_label[i] plt.grid(False) plt.xticks(list(range(10))) plt.yticks([]) thisplot = plt.bar(range(10), predictions_array[0], color="#777777") plt.ylim([0, 1]) predicted_label = np.argmax(predictions_array[0]) thisplot[predicted_label].set_color('red') thisplot[true_label].set_color('blue') # Visualize the outputs # Select index of image to display. Minimum index value is 1 and max index value is 50. index = 5 plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(index, predictions, test_labels, test_images) plt.subplot(1,2,2) plot_value_array(index, predictions, test_labels) plt.show() ```
github_jupyter
# PyTorch: Tabular Classify Binary ![mines](../images/mines.png) ``` import torch import torch.nn as nn from torch import optim import torchmetrics from sklearn.preprocessing import LabelBinarizer, StandardScaler import aiqc from aiqc import datum ``` --- ## Example Data Reference [Example Datasets](example_datasets.ipynb) for more information. ``` df = datum.to_pandas('sonar.csv') df.head() ``` --- ## a) High-Level API Reference [High-Level API Docs](api_high_level.ipynb) for more information including how to work with non-tabular data. ``` splitset = aiqc.Pipeline.Tabular.make( df_or_path = df , dtype = None , feature_cols_excluded = 'object' , feature_interpolaters = None , feature_window = None , feature_encoders = dict( sklearn_preprocess = StandardScaler() , dtypes = ['float64'] ) , feature_reshape_indices = None , label_column = 'object' , label_interpolater = None , label_encoder = dict(sklearn_preprocess = LabelBinarizer(sparse_output=False)) , size_test = 0.12 , size_validation = 0.22 , fold_count = None , bin_count = None ) def fn_build(features_shape, label_shape, **hp): model = nn.Sequential( nn.Linear(features_shape[0], 12), nn.BatchNorm1d(12,12), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(12, label_shape[0]), nn.Sigmoid() ) return model def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp): ## --- Prepare mini batches for analysis --- batched_features, batched_labels = aiqc.torch_batcher( samples_train['features'], samples_train['labels'], batch_size=5, enforce_sameSize=False, allow_1Sample=False ) ## --- Metrics --- acc = torchmetrics.Accuracy() # Mirrors `keras.model.History.history` object. history = { 'loss':list(), 'accuracy': list(), 'val_loss':list(), 'val_accuracy':list() } ## --- Training loop --- epochs = hp['epoch_count'] for epoch in range(epochs): ## --- Batch training --- for i, batch in enumerate(batched_features): # Make raw (unlabeled) predictions. 
batch_probability = model(batched_features[i]) batch_loss = loser(batch_probability, batched_labels[i]) # Backpropagation. optimizer.zero_grad() batch_loss.backward() optimizer.step() ## --- Epoch metrics --- # Overall performance on training data. train_probability = model(samples_train['features']) train_loss = loser(train_probability, samples_train['labels']) train_acc = acc(train_probability, samples_train['labels'].to(torch.short)) history['loss'].append(float(train_loss)) history['accuracy'].append(float(train_acc)) # Performance on evaluation data. eval_probability = model(samples_evaluate['features']) eval_loss = loser(eval_probability, samples_evaluate['labels']) eval_acc = acc(eval_probability, samples_evaluate['labels'].to(torch.short)) history['val_loss'].append(float(eval_loss)) history['val_accuracy'].append(float(eval_acc)) return model, history ``` Optional, will be automatically selected based on `analysis_type` if left as `None`. ``` def fn_optimize(model, **hp): optimizer = optim.Adamax( model.parameters() , lr=hp['learning_rate'] ) return optimizer hyperparameters = { "learning_rate": [0.01, 0.005] , "epoch_count": [50] } queue = aiqc.Experiment.make( library = "pytorch" , analysis_type = "classification_binary" , fn_build = fn_build , fn_train = fn_train , splitset_id = splitset.id , repeat_count = 2 , hide_test = False , hyperparameters = hyperparameters , fn_lose = None #optional/ automated , fn_optimize = fn_optimize #optional/ automated , fn_predict = None #optional/ automated , foldset_id = None ) queue.run_jobs() ``` For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation. --- ## b) Low-Level API Reference [Low-Level API Docs](api_low_level.ipynb) for more information including how to work with non-tabular data and defining optimizers. 
``` dataset = aiqc.Dataset.Tabular.from_pandas(df) label_column = 'object' label = dataset.make_label(columns=[label_column]) labelcoder = label.make_labelcoder( sklearn_preprocess = LabelBinarizer(sparse_output=False) ) feature = dataset.make_feature(exclude_columns=[label_column]) encoderset = feature.make_encoderset() featurecoder_0 = encoderset.make_featurecoder( sklearn_preprocess = StandardScaler() , dtypes = ['float64'] ) splitset = aiqc.Splitset.make( feature_ids = [feature.id] , label_id = label.id , size_test = 0.22 , size_validation = 0.12 ) def fn_build(features_shape, label_shape, **hp): model = nn.Sequential( nn.Linear(features_shape[0], 12), nn.BatchNorm1d(12,12), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(12, label_shape[0]), nn.Sigmoid() ) return model def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp): ## --- Prepare mini batches for analysis --- batched_features, batched_labels = aiqc.torch_batcher( samples_train['features'], samples_train['labels'], batch_size=5, enforce_sameSize=False, allow_1Sample=False ) ## --- Metrics --- acc = torchmetrics.Accuracy() # Mirrors `keras.model.History.history` object. history = { 'loss':list(), 'accuracy': list(), 'val_loss':list(), 'val_accuracy':list() } ## --- Training loop --- epochs = hp['epoch_count'] for epoch in range(epochs): ## --- Batch training --- for i, batch in enumerate(batched_features): # Make raw (unlabeled) predictions. batch_probability = model(batched_features[i]) batch_loss = loser(batch_probability, batched_labels[i]) # Backpropagation. optimizer.zero_grad() batch_loss.backward() optimizer.step() ## --- Epoch metrics --- # Overall performance on training data. 
train_probability = model(samples_train['features']) train_loss = loser(train_probability, samples_train['labels']) train_acc = acc(train_probability, samples_train['labels'].to(torch.short)) history['loss'].append(float(train_loss)) history['accuracy'].append(float(train_acc)) # Performance on evaluation data. eval_probability = model(samples_evaluate['features']) eval_loss = loser(eval_probability, samples_evaluate['labels']) eval_acc = acc(eval_probability, samples_evaluate['labels'].to(torch.short)) history['val_loss'].append(float(eval_loss)) history['val_accuracy'].append(float(eval_acc)) return model, history ``` Optional, will be automatically selected based on `analysis_type` if left as `None`. ``` def fn_optimize(model, **hp): optimizer = optim.Adamax( model.parameters() , lr=hp['learning_rate'] ) return optimizer hyperparameters = { "learning_rate": [0.01, 0.005] , "epoch_count": [50] } algorithm = aiqc.Algorithm.make( library = "pytorch" , analysis_type = "classification_binary" , fn_build = fn_build , fn_train = fn_train , fn_optimize = fn_optimize ) hyperparamset = algorithm.make_hyperparamset( hyperparameters = hyperparameters ) queue = algorithm.make_queue( splitset_id = splitset.id , hyperparamset_id = hyperparamset.id , repeat_count = 1 ) queue.run_jobs() ``` For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation.
github_jupyter
<div class="contentcontainer med left" style="margin-left: -50px;"> <dl class="dl-horizontal"> <dt>Title</dt> <dd> Scatter Element</dd> <dt>Dependencies</dt> <dd>Matplotlib</dd> <dt>Backends</dt> <dd><a href='./Scatter.ipynb'>Matplotlib</a></dd> <dd><a href='../bokeh/Scatter.ipynb'>Bokeh</a></dd> <dd><a href='../plotly/Scatter.ipynb'>Plotly</a></dd> </dl> </div> ``` import numpy as np import holoviews as hv from holoviews import dim hv.extension('matplotlib') ``` The ``Scatter`` element visualizes as markers placed in a space of one independent variable, traditionally denoted as *x*, against a dependent variable, traditionally denoted as *y*. In HoloViews, the name ``'x'`` is the default dimension name used in the key dimensions (``kdims``) and ``'y'`` is the default dimension name used in the value dimensions (``vdims``). We can see this from the default axis labels when visualizing a simple ``Scatter`` element: ``` np.random.seed(42) coords = [(i, np.random.random()) for i in range(20)] scatter = hv.Scatter(coords) scatter.opts(color='k', marker='s', s=50) ``` Here the random *y* values are considered to be the 'data' whereas the *x* positions express where those data values were measured (compare this to the different way that [``Points``](./Points.ipynb) elements are defined). In this sense, ``Scatter`` is equivalent to a [``Curve``](./Curve.ipynb) without any lines connecting the samples, and you can use slicing to view the *y* values corresponding to a chosen *x* range: ``` scatter[0:12] + scatter[12:20] ``` A ``Scatter`` element must always have at least one value dimension (to give it a *y* location), but additional value dimensions are also supported. 
Here is an example with two additional quantities for each point, declared as the ``vdims`` ``'z'`` and ``'size'`` visualized as the color and size of the dots, respectively: ``` np.random.seed(10) data = np.random.rand(100,4) scatter = hv.Scatter(data, vdims=['y', 'z', 'size']) scatter = scatter.opts(color='z', s=dim('size')*100) scatter + scatter[0.3:0.7, 0.3:0.7].hist() ``` In the right subplot, the ``hist`` method is used to show the distribution of samples along our first value dimension, (*y*). The marker shape specified above can be any supported by [matplotlib](http://matplotlib.org/api/markers_api.html), e.g. ``s``, ``d``, or ``o``; the other options select the color and size of the marker. **Note**: Although the ``Scatter`` element is superficially similar to the [``Points``](./Points.ipynb) element (they can generate plots that look identical), the two element types are semantically quite different: Unlike ``Scatter``, ``Points`` are used to visualize data where the *y* variable is *independent*. This semantic difference also explains why the histogram generated by the ``hist`` call above visualizes the distribution of a different dimension than it does for [``Points``](./Points.ipynb) (because here *y*, not *z*, is the first ``vdim``). This difference means that ``Scatter`` elements most naturally overlay with other elements that express dependent relationships between the x and y axes in two-dimensional space, such as the ``Chart`` types like [``Curve``](./Curve.ipynb). Conversely, ``Points`` elements either capture (x,y) spatial locations or they express a dependent relationship between an (x,y) location and some other dimension (expressed as point size, color, etc.), and thus they most naturally overlay with [``Raster``](./Raster.ipynb) types like [``Image``](./Image.ipynb). For full documentation and the available style and plot options, use ``hv.help(hv.Scatter).``
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import pandas as pd import sys sys.path.append('../../pyutils') import metrics import utils ``` # Introduction In unsupervised learing, one has a set of $N$ observations $x_i \in \mathbb{R}^p$, having joint density $P(X)$. The goal is to infer properties of this density. At very low dimension ($p \leq 3$), several methods can directly estimate $P(X)$ for any $X$. But these methods fail in high-dimensions. It can be used to: - Identify low-dimensional manifolds with high data density. - Cluster analysis finds multiple convex regions that contains modes of $P(X)$ - Mixture modeling try to estimate $P(X)$ with a mixture of density functions. - Association rules: construct simple rules that describe regions of high density. In unsupervised learning, there is no measure of success, it's difficult to prove the conclusions of the model. # Association Rules The general goal of association rules is to find values $v_1, \text{...}, v_L$ such that the probability density $P(X=v_l)$ is relatively large. This problem is also called mode finding or bump hunting. For problems with a large number of values, the number of observations such that $X=v_l$ is usually too small to be reliable. One solution is to seek regions of the $X$-space. Let $s_{j}$ a subset of values taken by feature v$X_j$. The goal is to find $s_{1},\text{...},s_p$ such that the folowing value is large: $$P \left( \bigcap_{j=1}^p (X_j \in s_j) \right)$$ ## Market Basket Analysis This problem is usually not feasible for $p$ and $N$ large. Market Basket Analysis is a special case where all predictions are binary: $X_j \in \{ 0, 1 \}$. The goal is to find a subset of integers $\mathcal{K} \subset \{ 1, \text{...}, p \}$ such that the following value is large: $$P \left( \bigcap_{k \in \mathcal{K}} (X_k = 1) \right) = \prod_{k \in \mathcal{J}} P(X_k = 1)$$ $\mathcal{K}$ is called an item set. This value is called the support or prevalente $T(\mathcal{K})$. 
It can be estimated from the dataset:

$$T(\mathcal{K}) = \frac{1}{N} \sum_{i=1}^N \prod_{k \in \mathcal{K}} x_{ik}$$

The goal of the algorithm is to find, given a lower bound $t$ for the support, all item sets with support greater than $t$:

$$\{ \mathcal{K}_l | T(\mathcal{K}_l) > t \}$$

There are $2^J$ possible item sets; fortunately, there are algorithms that allow finding these item sets without examining all the possibilities.

## The Apriori Algorithm

This algorithm can handle very large $N$ and $p$ as long as the number of itemsets with support greater than $t$ is small enough. It uses the following property:

$$\mathcal{L} \subseteq \mathcal{K} \implies T(\mathcal{L}) \geq T(\mathcal{K})$$

It works by doing only a few passes through the training set. The first pass over the data computes the support of all single-item sets, and discards all with support lower than $t$. The following passes combine the remaining itemsets with the ones remaining after the first pass, and discard all with support lower than $t$. The process stops when all itemsets are discarded.

Each obtained itemset $\mathcal{K}$ is then split into two subsets such that $A \cup B = \mathcal{K}$, written $A \implies B$.

The support of the rule is written $T(A \implies B) \approx P(A \cap B)$; it is the same as $T(\mathcal{K})$. The support is the proportion of observations having $A \cap B$.

The confidence is the proportion of observations having $B$ among all those having $A$. It is written $C(A \implies B)$:

$$C(A \implies B) = \frac{T(A \implies B)}{T(A)} \approx P(B|A)$$

The lift measures how much more likely $B$ is given $A$, relative to its base rate. It is written $L(A \implies B)$.
$$L(A \implies B) = \frac{C(A \implies B)}{T(B)} \approx \frac{P(B|A)}{P(B)}$$ ## Example Given a dataset of 7500 transactions from a french retail store, find associative rules from it Dataset: [Link](https://drive.google.com/file/d/1y5DYn0dGoSbC22xowBq2d4po6h1JxcTQ/view) ``` import os from apyori import apriori from google_drive_downloader import GoogleDriveDownloader as gdd FILE_ID ='1y5DYn0dGoSbC22xowBq2d4po6h1JxcTQ' FILE_PATH = '/tmp/store_data.csv' if not os.path.isfile(FILE_PATH): gdd.download_file_from_google_drive(file_id=FILE_ID, dest_path=FILE_PATH) data = pd.read_csv(FILE_PATH, header=None) data.head() records = [] for i in range(data.shape[0]): records.append([str(data.values[i,j]) for j in range(data.shape[1]) if str(data.values[i,j]) != 'nan']) rules = apriori(records, min_support=0.0235, min_confidence=0.1, min_lift=1.5, min_length=2) res = list(rules) print(len(res)) for item in res: s = '' stats = item.ordered_statistics[0] for x in stats.items_base: s += str(x) + '; ' s += '=> ' for x in stats.items_add: s += str(x) + '; ' s += ('S = {:.4f}, C = {:.4f}, L = {:.4f}'.format(item.support, stats.confidence, stats.lift)) print(s) class AprioriRule: def __init__(self, a, b, support, confidence, lift): self.a = a self.b = b self.support = support self.confidence = confidence self.lift = lift class Apriori: def __init__(self, min_support, min_confidence, min_lift, min_length): self.min_support = min_support self.min_confidence = min_confidence self.min_lift = min_lift self.min_length = min_length def fit(self, data): #1) build dict of words self.lwords = [] self.dwords = dict() for entry in data: for w in entry: if not w in self.dwords: self.dwords[w] = len(self.lwords) self.lwords.append(w) #2) build data matrix self.X = np.zeros((len(data), len(self.lwords))) for i in range(len(data)): for w in data[i]: self.X[i, self.dwords[w]] = 1 #3) first pass through dataset rules = [] res = [] for j in range(self.X.shape[1]): items = [j] s = 
self.get_support(items) if s >= self.min_support: res.append(items) rules.append(items) res1 = list(res) # 4) other passes through dataset until no itemset found while len(res) > 0: res_next = [] for items in res: for other in res1: if other[0] > items[-1]: items_ext = items + other s = self.get_support(items_ext) if s >= self.min_support: res_next.append(items_ext) rules.append(items_ext) res = res_next # 5) remove lists too short rules = [x for x in rules if len(x) >= self.min_length] # 6) divide rules into A => B rules rules_ex = [] for r in rules: rules_ex += self.split_rule(r) rules = rules_ex # 7) compute all rules stats rules = [self.build_rule(r) for r in rules] # 8) filter rules rules = [r for r in rules if r.confidence > self.min_confidence and r.lift > self.min_lift] self.rules = rules def get_support(self, items): n = 0 for x in self.X: val = 1 for it in items: if x[it] == 0: val = 0 break n += val return n / len(self.X) def split_rule(self, r): res = [] for i in range(len(r) - 1): p1 = r[:i+1] p2 = r[i+1:] res.append((p1, p2)) return res def build_rule(self, r): sab = self.get_support(r[0] + r[1]) sa = self.get_support(r[0]) sb = self.get_support(r[1]) support = sab confidence = support / sa lift = confidence / sb wa = [self.lwords[x] for x in r[0]] wb = [self.lwords[x] for x in r[1]] return AprioriRule(wa, wb, support, confidence, lift) mod = Apriori(min_support=0.0235, min_confidence=0.1, min_lift=1.5, min_length=2) mod.fit(records) print(len(mod.rules)) for item in mod.rules: s = '' for x in item.a: s += str(x) + '; ' s += '=> ' for x in item.b: s += str(x) + '; ' s += ('S = {:.4f}, C = {:.4f}, L = {:.4f}'.format(item.support, item.confidence, item.lift)) print(s) ``` ## Unsupervised as Supersived learning We are trying to estimate the probability density $g(x)$. We onyl have access to a reference probability density $g_0(x)$. It could be for example the uniform density over the range of the variables. 
We can easily sample $N_0$ observations from $g_0(x)$. We also have the dataset $x_1,\text{...},x_N$, an i.i.d. random sample drawn from $g(x)$. Let's pool this two datasets together and assign mass $w = \frac{N_0}{N+N_0}$ to those drawn from $g(x)$, and $w_0 = \frac{N}{N+N_0}$ to those drawn from $g_0(x)$. We get a mixture density $\frac{g(x) + g_0(x)}{2}$. If we assign $Y=1$ to sample draw from $g(x)$ and $Y=0$ to those draw from $g_0(x)$, we get: $$\mu(x) = E(Y|x) = \frac{g(x)}{g(x) + g_0(x)}$$ $\mu_x$ can be estimated by supervised learning by combining the $N$ samples from $g(x)$ with $Y=1$, and the $N_0$ samples from $g_0(x)$ with $Y=0$. Then, we can get an estimate for $g(x)$: $$\hat{g}(x) = g_0(x) \frac{\hat{\mu}(x)}{1 - \hat{\mu}(x)}$$ The accuracy of $\hat{g}(x)$ greatly depends on the choice of $g_0(x)$. ## Generalized Association rules The goal is to find a subset of integers $\mathcal{J} \subset \{ 1, 2, \text{...}, p \}$ and the corresponding value subjects $s_j$ so that the following value is large: $$P \left( \bigcap_{j \in \mathcal{J}} (X_j \in s_j) \right)$$ This can be estimated by: $$\frac{1}{N} \sum_{i=1}^N I \left( \bigcap_{j \in \mathcal{J}} (x_{ij} \in s_j) \right)$$ This favors the discovery of itemsets whose marginal constituents $(X_j \in s_j)$ are frequent, that is the following value is large: $$\frac{1}{N} \sum_{i=1}^N I(x_{ij} \in s_j)$$ A good reference distribution is the product of the marginal distributions: $$g_0(x) = \prod_{j=1}^J g_j(x_j)$$ A sample from $g_0(x)$ is easily generated from the original dataset by appliying different random permutation to the data values of each of the variables. After drawing samples from $g_0(x)$, we get a training dataset for supervised learning, with $Y \in \{ 0, 1 \}$. The goal is to use this data to find regrions: $$R = \bigcap{j \in \mathcal{J}} (X_j \in s_j)$$ for which $\mu(x) = E(Y|x)$ is large. 
One might also require that the support os these regions is big enough: $$T(R) = \int_{x \in R} g(x)dx$$ Decision trees are such a model, each leaf $t$ represent a region $R$: $$\bar{y}_t = \text{ave}(y_i|x_i \in t)$$ The actual data support is given by: $$T(R) = \bar{y}_t \frac{N_t}{N + N_0}$$ with $R_t$ the number of observations in the leaf $t$. ``` from sklearn.tree import DecisionTreeRegressor class GeneralizedAssosRules: def __init__(self, t): self.t = t def fit(self, X): N, p = X.shape N0 = 2*N X0 = X[np.random.choice(N, size=N0, replace=True)] for j in range(p): X0[:, j] = X0[np.random.permutation(N0), j] Xe = np.vstack((X, X0)) ye = np.vstack((np.ones((N, 1)), np.zeros((N0, 1)))) self.tree = DecisionTreeRegressor() self.tree.fit(Xe, ye) print(Xe.shape) print(ye.shape) X = np.random.randn(124, 7) mod = GeneralizedAssosRules(0.2) mod.fit(X) ``` # Cluster Analysis It consists of grouping a collection of obects into subsets (called clusters) such that those within each cluster are more closely related to one another than objects from other clusters. To form clusters, an important notion is the degree of similarity or dissimmalirity between two objects. It can be a distance, like the euclidian distance. It is used for example by K-Means clustering which use a top-down procedure to build clusters. Other approches are mostly bottom-up. ## Proximity Matrices $Let D \in \mathbb{R}^{N*N}$ the matrix of dissimilarities, with $N$ the number of objects. $D_{ij}$ represents the proximity between object $i$ and object $j$. Usually it's symmetric matrix with nonnegative entries, and zeroes on the diagonal. We usually have $x_ij$ with $N$ observations and $p$ features. We need to compute the dissimilarity between 2 observations in order to build $D$. One solution is to use a dissimilarity $d_j(x_{ij}, x_{i'j})$ for each feature: $$D(x_i, x_{i'}) = \sum_{j=1}^p d_j(x_{ij}, x_{i'j})$$ For quantitative variables, we define an error: $d(x_i, x_{i'}) = l(|x_i - x_{i'}|)$. 
Usually it's the squared error loss, or the absolute error. Or in cal also be based on correlation: $$p(x_i, x_{i'}) = \frac{\sum_j (x_{ij} - \bar{x}_i)(x_{i'j} - \bar{x}_{i'})}{\sqrt{\sum_j (x_{ij} - \bar{x}_i)^2 \sum_j (x_{i'j} - \bar{x}_{i'})^2}}$$ with $\bar{x}_i = \sum_{j} x_{ij}/p$. For ordinal variables, we usually replace their $M$ original values by: $$\frac{i - 1/2}{M}$$ with $i$ the original order of the variable. Then they are treated as quantitative variables. For categorical variables, the dissemilarity must be defined explicitly, by using a $M*M$ matrix for examples. They are several ways to combine all $d_j(x_{ij}, x_{i'j})$. It can be with a weighted average: $$D(x_i, x_{i'}) = \sum_{j=1}^p w_j d_j(x_{ij}, x_{i'j})$$ $$\text{with } \sum_{j=1}^p w_j = 1$$ Setting $w_j = 1/j$ does not give all attribute equal influence. To get equal influence, you should set $w_j = 1/\bar{d}_j$ with: $$\bar{d}_j = \frac{1}{N^2} \sum_{i=1}^N \sum_{i'=1}^N d_j(x_{ij}, x_{i'j})$$ This seems a reasonable idea, but may be counterproductive. To cluser data, you may not want all attributes to contribute equally. ## Clustering Algorithms The goal of clustering os to partition data into groups so that the dissimilarities between those assigned to the same cluster are smaller than those in different clusters. They fall into three types: - combinatorial algorithms - mixture modelling - mode seeking ## Combinatorial algorithms These algorithms assign each observation to a cluster without any probability model. Each observation $x_i$ is assigned to a cluster $k \in \{1, \text{...}, K \}$. These assignments can be characterized by an encoder: $k = C(i)$. The models looks for $C^*(i)$ that achieves a particular goal. It is adjusted to minimize a loss function that charactherize the clustering goal. One possible loss is the within-cluster point scatter. 
It make observations in the same cluster as close as possible: $$W(C) = \frac{1}{2} \sum_{k=1}^K \sum_{C(i)=k} \sum_{C(i')=k} d(x_i, x_{i'})$$ Another loss is the between-cluster point scatter. It makes observations in different cluster as far as possible: $$B(C) = \frac{1}{2} \sum_{k=1}^K \sum_{C(i)=k} \sum_{C(i')\neq k} d(x_i, x_{i'})$$ Minimize $W(C)$ is equivalent to maximize $B(C)$. Th total point scatter $T$ is a constant given the data, independant of cluster assignment. $$T = W(C) + B(C)$$ Minimize this loss function by testing all assignments is intractable. For only $N=19$ and $K=4$, they are around $10^{10}$ possible assignments. Algorithms are often based on iterative greedy descent. It starts with initial assignments, that are changed in each step, in a way to reduce the loss function. The algorithm terminates when there is no possible improvment. But the result is a local optima, which may be highly suboptimal compared to the global optimum. ## K-Means K-Means is a combinatorial algorithm that uses the squared Euclidian distance: $$d(x_i, x_{i'}) = ||x_i - x_{i'}||^2$$ We are minimizing the within-cluster distance: $$W(C) = \sum_{k=1}^K N_k \sum_{C(i) = k} ||x_i - \bar{x}_k||^2$$ with $\bar{x}_k$ the mean of all observations in cluster $k$, and $N_k$ the number of observations in cluster $K$ We are trying to solve: $$C^* = \min_C \sum_{k=1}^K N_k \sum_{C(i) = k} ||x_i - \bar{x}_k||^2$$ The K-Means algorithm is reaally simple: 1. Initialize the $R$ clusters randomly (from training set) 2. Repeat until convergence: - Assign each training point to the closest centroid - The center of each cluster becomes the mean of all its assigned points Each step reduce the loss function, but it converges only to a local mininum. One should start the algorithm with many different random inititialization, and choose the one with the lowest loss. 
``` from sklearn.datasets import load_iris from sklearn.decomposition import PCA class KMeansClustering: def __init__(self, R, nstarts = 100): self.R = R self.nstarts = nstarts def fit(self, X): best_loss = float('inf') best_means = None for _ in range(self.nstarts): self.train(X) loss = self.get_loss(X) if loss < best_loss: best_loss = loss best_means = self.means self.means = best_means def train(self, X): N, p = X.shape self.means = X[np.random.choice(N, self.R)] while True: old_means = self.means.copy() #assign each point to the closest cluster ctrs = [list() for _ in range(self.R)] for x in X: ctrs[self.get_closest_ctr_idx(x)].append(x) # compute the new center position of every cluster for i in range(self.R): if len(ctrs[i]) != 0: self.means[i] = np.mean(np.vstack(ctrs[i]), axis=0) if np.linalg.norm(old_means - self.means) < 1e-6: break def get_loss(self, X): #assign each point to the closest cluster ctrs = [list() for _ in range(self.R)] for x in X: ctrs[self.get_closest_ctr_idx(x)].append(x) #compute distance between each point and the cluster center loss = 0 for k in range(self.R): for x in ctrs[k]: loss += len(ctrs[k])*(x-self.means[k]) @ (x-self.means[k]) return loss def get_closest_ctr_idx(self, x): min_idx = None min_dist = float('inf') for i in range(self.R): dist = (x - self.means[i]) @ (x - self.means[i]) if dist < min_dist: min_idx = i min_dist = dist return min_idx def predict(self, X): y = np.empty(len(X)).astype(np.int) for i in range(len(X)): y[i] = self.get_closest_ctr_idx(X[i]) return y X, y = load_iris().data, load_iris().target X = X[np.random.permutation(len(X))] pca = PCA(n_components=2) X = pca.fit_transform(X) X = X - np.mean(X, axis=0) X = X / np.std(X, axis=0) print(X.shape) print(y.shape) mod = KMeansClustering(3) mod.fit(X) colors = [ ['red', 'blue', 'green'][x] for x in mod.predict(X)] print('loss:', mod.get_loss(X)) plt.scatter(X[:,0], X[:,1], c=colors) plt.show() ``` ## Gaussian Mixtures as Soft K-Means K-Means is closely related 
to estimating a Gaussian mixture with the EM algorithm.

- The E-step assigns a weight to each data point based on its relative density under each mixture component (closeness)
- The M-step recomputes the component densities based on the current weights (mean / covariance)

If every Gaussian has covariance matrix $\sigma^2 I$, the relative density under each mixture component is a monotone function of the Euclidean distance between the data point and the mixture center. Hence EM acts as a soft K-Means, making probabilistic (rather than deterministic) assignments of points to cluster centers. As $\sigma^2 \to 0$, the probabilities become $0$ and $1$, and the two methods coincide.

## Vector Quantization

Vector Quantization is a compression technique in image / signal processing, using K-Means. The procedure is:

- Break the image into small blocks; for example, breaking a $1024*1024$ image into $2*2$ blocks yields $512*512$ vectors in $\mathbb{R}^4$
- A K-Means is run on the blocks. As $K$ decreases, both the image quality and the compressed size decrease. Each block is approximated by its closest centroid.
- We only need to store the $K$ centroid vectors, plus the index of the closest centroid for each block.
- To reconstruct the image, each block becomes its closest centroid, and the blocks are converted back to an image

This works because in typical images many blocks look alike, so a single centroid can represent all of them. We can go further by applying a hierarchical K-Means, or by using a variable coding length.
``` from PIL import Image from sklearn.cluster import KMeans class VectorQuantization: def __init__(self, K, bsize = 4): self.K = K self.bsize = 2 def img2block(self, X): s = X.shape[0] res = np.empty((s//self.bsize, s//self.bsize, self.bsize*self.bsize)).astype(np.int) for i in range(res.shape[0]): for j in range(res.shape[1]): res[i, j] = np.array([ X[2*i,2*j],X[2*i+1,2*j],X[2*i,2*j+1],X[2*i+1,2*j+1] ]) return res.reshape(-1, self.bsize*self.bsize) def block2img(self, b): s2 = int(np.sqrt(b.shape[0])) b = b.reshape(s2, s2, self.bsize*self.bsize) X = np.empty((s2*self.bsize, s2*self.bsize)).astype(np.int) for i in range(s2): for j in range(s2): X[2*i,2*j] = b[i,j,0] X[2*i+1,2*j] = b[i,j,1] X[2*i,2*j+1] = b[i,j,2] X[2*i+1,2*j+1] = b[i,j,3] return X def encode(self, img): b = self.img2block(img) clf = KMeans(n_clusters=self.K, n_init=1) clf.fit(b) code = clf.labels_ centers = clf.cluster_centers_ return code, centers def decode(self, code, centers): b = np.empty((len(code), self.bsize*self.bsize)).astype(np.int) for i in range(len(b)): b[i] = centers[code[i]] return self.block2img(b) IMG_URL = 'https://i.ytimg.com/vi/J4Q86j9HOao/hqdefault.jpg' IMG_PATH = '/tmp/img.jpg' utils.dl_file(IMG_URL, IMG_PATH) X = Image.open(IMG_PATH) X = X.resize((256,256), Image.ANTIALIAS) X = X.convert('L') X = np.asarray(X.getdata(),dtype=np.int).reshape((X.size[1],X.size[0])) vq200 = VectorQuantization(K=200) code, centers = vq200.encode(X) X2 = vq200.decode(code, centers) vq4 = VectorQuantization(K=4) code, centers = vq4.encode(X) X3 = vq4.decode(code, centers) print(metrics.tdist(X, X2)) print(metrics.tdist(X, X3)) plt.imshow(X, cmap='gray') plt.show() plt.imshow(X2, cmap='gray') plt.show() plt.imshow(X3, cmap='gray') plt.show() ``` ## K-medoids K-Means is appropriate when the dissimilairty measure $D(x_i, x_{i'})$ is the euclidian distance. These requires all variables to be of quantitative type, it the procedure lacks robustness on outliers. 
The algorithm can be generalized to any $D(x_i, x_{i'})$ We don't need the inputs $x$, only the distances. It's far more expensive to compute than K-Means. K-medoids algorithm: 1. Start with a particular intialization $C(i)$ 2. Repeat until the cluster assignments $C(i)$ doesn't change: - For each cluster $k$, find the cluster center $m_k$: $$m_k = \arg \min_{ \{i:C(i)=k \} } \sum_{C(i')=k} D(x_i, x_{i'})$$ - Minitmize the total error by assigning each observation to the closest cluster: $$C(i) = \arg \min_k D(x_i, m_k)$$ ``` from sklearn.datasets import load_iris from sklearn.decomposition import PCA class KMedoidsClustering: def __init__(self, K): self.K = K def fit(self, X): N, p = X.shape self.centers = [None] * self.K # build distance matrix D = np.empty((N, N)) for i in range(N): for j in range(N): D[i,j] = (X[i] - X[j]) @ (X[i] - X[j]) X = None #X is useless, we only need D # initialization #assign each point ro a random cluster ctrs = [list() for _ in range(self.K)] for i in range(N): ctrs[np.random.randint(0, self.K)].append(i) while True: #estimate cluster centers for k in range(self.K): best_i = None best_dist = float('inf') ck = ctrs[k] for i in ck: dist = 0 for i2 in ck: dist += D[i, i2] if dist < best_dist: best_dist = dist best_i = i self.centers[k] = best_i ## old_ctrs = ctrs ctrs = [list() for _ in range(self.K)] # assign each point to the closest cluster center for i in range(N): best_k = None best_dist = float('inf') for k in range(self.K): dist = D[i, self.centers[k]] if dist < best_dist: best_dist = dist best_k = k ctrs[best_k].append(i) #stop only if the assigments didn't changed changed = False for k in range(self.K): if ctrs[k] != old_ctrs[k]: changed = True break if not changed: break #build labels vectors self.labels = np.empty(N).astype(np.int) for k in range(self.K): for i in ctrs[k]: self.labels[i] = k X, y = load_iris().data, load_iris().target X = X[np.random.permutation(len(X))] pca = PCA(n_components=2) X = pca.fit_transform(X) X = 
class KMeansClustering:
    """K-means clustering with K-means++ initialization and restarts.

    fit() runs `nstarts` independent trainings and keeps the centers with
    the lowest within-cluster loss.
    """

    def __init__(self, R, nstarts=100):
        # R: number of clusters; nstarts: number of random restarts.
        self.R = R
        self.nstarts = nstarts

    def fit(self, X):
        """Train several times on X (shape (N, p)); keep the best run."""
        best_loss = float('inf')
        best_means = None
        for _ in range(self.nstarts):
            self.train(X)
            loss = self.get_loss(X)
            if loss < best_loss:
                best_loss = loss
                best_means = self.means
        self.means = best_means

    def train(self, X):
        """One full K-means run: K-means++ init then Lloyd iterations."""
        N, p = X.shape

        # --- K-means++ initialization ---------------------------------
        self.means = np.empty((self.R, p))
        self.means[0] = X[np.random.choice(N)]
        for k in range(1, self.R):
            # Squared distance of every point to its closest chosen center.
            d = np.empty(N)
            for i in range(N):
                d[i] = min((X[i] - self.means[c]) @ (X[i] - self.means[c])
                           for c in range(k))
            total = np.sum(d)
            if total > 0:
                self.means[k] = X[np.random.choice(N, p=d / total)]
            else:
                # Every point coincides with a chosen center: sample
                # uniformly instead of dividing by zero.
                self.means[k] = X[np.random.choice(N)]

        # --- Lloyd iterations ------------------------------------------
        while True:
            old_means = self.means.copy()

            # Assign each point to its closest center.
            ctrs = [[] for _ in range(self.R)]
            for x in X:
                ctrs[self.get_closest_ctr_idx(x)].append(x)

            # Move each center to the mean of its points; empty clusters
            # keep their previous position.
            for k in range(self.R):
                if ctrs[k]:
                    self.means[k] = np.mean(np.vstack(ctrs[k]), axis=0)

            if np.linalg.norm(old_means - self.means) < 1e-6:
                break

    def get_loss(self, X):
        """Within-cluster dissimilarity W(C) of the current centers."""
        ctrs = [[] for _ in range(self.R)]
        for x in X:
            ctrs[self.get_closest_ctr_idx(x)].append(x)
        loss = 0
        for k in range(self.R):
            for x in ctrs[k]:
                # Weighted by cluster size: N_k * sum of squared
                # distances to the center (the ESL form of W(C)).
                loss += len(ctrs[k]) * (x - self.means[k]) @ (x - self.means[k])
        return loss

    def get_closest_ctr_idx(self, x):
        """Index of the center closest (squared Euclidean) to x."""
        min_idx = None
        min_dist = float('inf')
        for k in range(self.R):
            dist = (x - self.means[k]) @ (x - self.means[k])
            if dist < min_dist:
                min_idx = k
                min_dist = dist
        return min_idx

    def predict(self, X):
        """Cluster index of each row of X."""
        # np.int was removed in NumPy 1.24; use the builtin int as dtype.
        y = np.empty(len(X), dtype=int)
        for i in range(len(X)):
            y[i] = self.get_closest_ctr_idx(X[i])
        return y
``` losses = [] for k in range(1, 10): mod = KMeansClustering(k) mod.fit(X) losses.append(mod.get_loss(X)) plt.plot(np.arange(1, 10), losses) plt.show() best_k = 2 #by looking at plot mod = KMeansClustering(best_k) mod.fit(X) colors = [ ['red', 'blue', 'green'][x] for x in mod.predict(X)] plt.scatter(X[:,0], X[:,1], c=colors) plt.show() ``` ## Hierarchical Clustering Hierarchical clustering uses only a measure of dissimilarity between 2 groups of observations. They produce hierarchical representations in wich the clusters at each level are created by merging clusters at the next lower level. At the lowest level there is $N$ clusters of size $1$, and at the highest $1$ cluster of size $N$. There exist two-strategies: - Aglomerative (bottom-up): Start at the bottom and and recursively merge a pair of clusters into one - Divisive (top-down): Start at the top and recursively split one cluster into two. Each level represents a different grouping of the data. It's up to the user to decide which level represents a natural clustering. Most methods posseses a monotonicity property: The dissimilarity between clusters is monotone increasing with the level. The model can be plotted as a binary tree, where the height of each node is proportional to the value of the intergroup dissimilarity between it's two children. This is called a dendogram. The results are valid only if the data really posseses a hierarchical structure. ## Agglomerative Clustering It starts with every observation in a different cluster. At each step, the closest 2 clusters are merged. After $N-1$ steps, the algorithm stops with only one cluster left. A measure of dissimilary between 2 groups, $d(G,H)$ is needed. 
They are several possibilities: - Single Linkage is the least dissimilar of all pairs: $$d_\text{SL}(G,H) = \min_{i \in G, i' \in H} d_{ii'}$$ - Complete Linkage is the most dissimilar of all pairs: $$d_\text{CL}(G,H) = \max_{i \in G, i' \in H} d_{ii'}$$ - Group Average is the mean dissimilairty between all pairs: $$d_\text{GA}(G,H) = \frac{1}{N_G N_H} \sum_{i \in G} \sum_{i' \in H} d_{ii'}$$ If the data is compact (small dissimilarities between clusters, clusters well separated from each others), all methods produce similar results. Single Linkage only requires a single pair of two groups to be small to combine them. It has a tendency to combine at relatively low thresholds, observations linked by a series of close intermediates. This phenomem, called chaining, is a defect of the method. Complete Linkage is the other extreme, two groups are similar if all their obrservations are close. It tends to produce compact clusters, however it may produce clusters with observations much closer to members of other clusters than to member of their own cluster, breaking the closoness property. Groupe average is a compromise between the two. ## Divisive Clustering It begins with the whole dataset into one cluster, then recursively divide one existing cluster in two. Ater $N-1$ steps, are $N$ clusters of size $1$. This approach is less used than agglomerative clustering. Place all observations in a single cluster $G$. Chooses the observations whose average dissimilairy from all other observations is the largest. It is the first member of a new cluster H. At each step, the observation in $G$ whose average dissimilarity from those in H, minus the remaining observations in G, is transfered to H. It continues until the largest value became negative. The original cluster is then split in two, G and H. At each step a new cluster is chosen an split in two. The cluster chosen can be the one with the largest diameter, or the largest average dissimilarity between it's members. 
# Self-Organizing Maps Self-organization of a massive document collection - Kohonen, T., Kaski, S., Lagus, K., Saloj ̈arvi, J., Paatero,A. and Saarela,A. (2000) - [PDF](http://lig-membres.imag.fr/bisson/cours/M2INFO-AIW-ML/papers/Kohonen00.pdf) This method can be viewed as a constrained version of K-Means, where the prototype are encouraged to lie in a or two dimensional manifold in the feature space. We consider a SOM as a rectangular grid of $q_1*q_2=K$ prototypes $m_j \in \mathbb{R}^p$. Once the model is fit, the observations can be mapped into the rectangular grid. Algorithm: For each observation $x$_i: - Find the cluster $m_j$ closest to $x_i$ - Find all clusters $m_k$ such that the distance in the grid between $l_j$ and $l_k$ is lower than $r$. - Move all $m_k$ closer to $x_i$: $$m_k \leftarrow m_k + \alpha (x_i - m_k)$$ Thousands of iterations are made over the dataset. At each iteration, $\alpha$ and $r$ are decreased. The update both move the prototypes closer to the data, but also maintain a smooth 2D spatial relationship between prototypes. 
def d2_dist(a, b):
    """Squared Euclidean distance between two 2-D grid coordinates."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return dx * dx + dy * dy


class SOM:
    """Self-Organizing Map on a Q x Q rectangular grid of prototypes.

    The learning rate alpha decays linearly from 1 to 0 and the grid
    radius r from 10 to 1 over `niters` passes through the data.
    """

    def __init__(self, Q, niters=1000):
        self.Q = Q
        self.niters = niters
        self.alpha_beg = 1
        self.alpha_end = 0
        self.dalpha = (self.alpha_end - self.alpha_beg) / self.niters
        self.r_beg = 10
        self.r_end = 1
        self.dr = (self.r_end - self.r_beg) / self.niters

    def get_closest_centroid(self, x):
        """Grid coordinates (i, j) of the prototype closest to x."""
        best_pos = None
        best_d = float('inf')
        for row in range(self.Q):
            for col in range(self.Q):
                diff = self.clusters[row, col] - x
                d = diff @ diff
                if d < best_d:
                    best_d = d
                    best_pos = (row, col)
        return best_pos

    def fit(self, X):
        """Fit the prototype grid to the rows of X (shape (N, p))."""
        N, p = X.shape
        self.clusters = np.random.randn(self.Q, self.Q, p)
        alpha = self.alpha_beg
        r = self.r_beg
        for it in range(self.niters):
            for x in X:
                i0, j0 = self.get_closest_centroid(x)
                # Pull every prototype within grid radius r towards x.
                for i in range(self.Q):
                    for j in range(self.Q):
                        if d2_dist((i, j), (i0, j0)) < r:
                            self.clusters[i, j] += alpha * (x - self.clusters[i, j])
            if it % 50 == 0:
                print('iteration:', it)
            alpha += self.dalpha
            r += self.dr

    def reconstruct(self, X):
        """Replace each row of X by its closest prototype."""
        out = np.empty(X.shape)
        for idx in range(len(X)):
            out[idx] = self.clusters[self.get_closest_centroid(X[idx])]
        return out
class MyPCA:
    """Principal component analysis via SVD of the centered data.

    Projects onto the first q right singular vectors; inverse_transform
    maps scores back to the original space (lossless when q == p).
    """

    def __init__(self, q):
        self.q = q  # number of components to keep

    def fit(self, X):
        """Learn the data mean and the top-q principal directions."""
        mean = np.mean(X, axis=0, keepdims=True)
        centered = X - mean
        _, _, VT = np.linalg.svd(centered)
        self.Xc = mean
        self.Vq = VT[:self.q].T  # (p, q) matrix of loadings

    def transform(self, X):
        """Project X onto the principal directions."""
        return (X - self.Xc) @ self.Vq

    def inverse_transform(self, Xq):
        """Reconstruct points in the original space from scores Xq."""
        return (Xq @ self.Vq.T) + self.Xc
## Principal Curves and Surfaces Principal curve generelize the principal component line. It provides a smooth one-dimensional curved approximation of a set of data points. A principal surface is more general, provides a curved manifold approximation of dimension 2 or more. ## Spectral Clustering Traditional clustering methods use spherical or elliptical metrics, and won't work well if the clusters are non-convex. Spectral clustering is a generalization designed for these situations. Let's define a matrix of similarities $S \in \mathbb{R}^{N*N}$, with $s_{ii'} \geq 0$ the similarity between $x_i$ and $x_{i'}$. Let $G = <V, E>$ an undirected similarity graph with vertices $v_i$ for each observation, and edges weighted by $s_{ii'}$ only if reaches a specific threshold, otherwhise there is no edge. Clustering is now a graph problem, we wish to partition the graph such that edges between different groups have low weight, and within a group have high weight. Let $d_{ii'}$ the euclidian distance between $x_i$ and $x_{ii'}$. One similarity mesure is the radial-kernel gram matrix: $s_{ii'} = \exp (-d^2_{ii'}/c)$, with $c > 0$ a scale parameter. One way to define a similarity graph is the mutual K-nearest neighbor graph. Define $\mathcal{N}_k$ the symmetric set of nearby pair of points. A pair $(i,i')$ if $x_i$ is among the K-nearest neighbors of $x_{i'}$, or vice versa. We connect all pairs in $\mathcal{N}_k$ with weight $w_{ii'} = s_{ii'}$, otherwhise the weight is 0. Another way is to include all edges to get a fully connected graph, with weights $w_{ii'}=s_{ii'}$. The matrix of edge weights $W \in \mathbb{R}^{N*N}$ is called the adjency matrix. The degree of vertex $i$ is $g_i = \sum_{i'} w_{ii'}$. Let $G \in \mathbb{R}^{N*N}$ a diagonal matrix with diagonal elements $g_i$. The graph Laplacian is defined by $L = G - W$. Spectral clustering find the $m$ eigenvectors corresponding to the $m$ smallest eigenvalues of $L$. It gives us the matrix $Z \in \mathbb{R}^{N*m}$. 
class SpectralClustering:
    """Spectral clustering on a fully connected radial-kernel graph.

    Builds the similarity matrix S_ij = exp(-||x_i - x_j||^2 / c), forms
    the unnormalized graph Laplacian L = G - W, embeds the points using
    the eigenvectors of the m smallest non-trivial eigenvalues, then runs
    K-means in that embedding.
    """

    def __init__(self, K, c, m):
        self.K = K  # number of clusters
        self.c = c  # radial-kernel scale parameter
        self.m = m  # embedding dimension

    def fit(self, X):
        N, p = X.shape

        # Fully connected adjacency: every pair weighted by the kernel.
        W = np.array([
            [np.exp(-(X[i] - X[j]) @ (X[i] - X[j]) / self.c)
             for i in range(N)]
            for j in range(N)
        ])

        degrees = np.diag(np.sum(W, axis=0))
        laplacian = degrees - W

        # eigh returns eigenvalues in ascending order; skip the trivial
        # constant eigenvector (eigenvalue ~0) and keep the next m.
        _, eigvecs = np.linalg.eigh(laplacian)
        Z = eigvecs[:, 1:self.m + 1]

        km = KMeansClustering(self.K)
        km.fit(Z)
        self.Z = Z
        self.km = km
        self.labels = km.predict(Z)
def center_kernel(K):
    """Double-center a kernel matrix: (I - 1_N) K (I - 1_N)."""
    n = len(K)
    centerer = np.eye(n) - np.full((n, n), 1.0 / n)
    return centerer @ K @ centerer


def kernel_linear():
    """Return the plain dot-product kernel."""
    def k(a, b):
        return a @ b
    return k


def kernel_rbf(gamma):
    """Return the radial-basis kernel exp(-gamma * ||x - y||^2)."""
    def k(x, y):
        return np.exp(-gamma * (x - y) @ (x - y))
    return k


class MyKernelPCA:
    """Kernel PCA: eigendecomposition of the centered kernel matrix.

    After fit_transform, `lambdas` holds the top-q eigenvalues and
    `alphas` the corresponding eigenvectors; the projected data is
    alphas * sqrt(lambdas).
    """

    def __init__(self, q, kernel):
        self.q = q            # number of components to keep
        self.kernel = kernel  # callable kernel(x, y) -> float

    def fit_transform(self, X):
        """Fit on X (shape (N, p)) and return the (N, q) projection."""
        N, p = X.shape

        # Pairwise kernel evaluations, then double-centering.
        gram = np.empty((N, N))
        for i in range(N):
            for j in range(N):
                gram[i, j] = self.kernel(X[i], X[j])
        gram = center_kernel(gram)

        # eigh is ascending; flip so the largest eigenvalues come first.
        w, V = np.linalg.eigh(gram)
        w, V = np.flip(w), np.flip(V, axis=1)

        self.X = X
        self.lambdas = w[:self.q]
        self.alphas = V[:, :self.q]
        return self.alphas * np.sqrt(self.lambdas)
(2006) - [PDF](https://web.stanford.edu/~hastie/Papers/spc_jcgs.pdf) Principal components can be interpret by examining the $v_j$, called loadings. The interpretiation may be easier if they are parse. Methods are usually based on a kind of Lasso (L1) penalty. One approach is to solve the following problem: $$\max_v v^T(X^TX)v$$ $$\text{s.t. } \sum_{j=1}^p |v_j| \leq t, \space v^Tv=1$$ Another strategy use the reconstruction error with some penalty. For a signe component, the criterion is: $$\min_{\theta, v} \sum_{i=1}^N ||x_i - \theta v^T x_i||_2^2 + \lambda ||v||_2^2 + \lambda_1 ||v||_1$$ $$\text{s.t. } ||\theta||_2 = 1$$ If $\lambda=\lambda_1=0$, then $v=\theta$ is the largest principal component direction. The second penalty on $v$ encourages sparseness of the loadings. For $K$ components, the problem became: $$\min_{\theta, v} \sum_{i=1}^N ||x_i - \Theta V^T x_i||_2^2 + \lambda \sum_{k=1}^K ||v_k||_2^2 + \sum_{k=1}^K \lambda_{1k} ||v||_1$$ $$\text{s.t. } \Theta^T\Theta= I$$ The criterion is not jointly convex in $\Theta$ and $V$, but is convex in each parameter with the other fixed. Minimization over $V$ is equivalent to $K$ elastic net problems. Minimization over $\Theta$ is solved by SVD. Alternation the 2 steps converges to the solution. # Non-Negative Matrix Factorization Learning the parts of objects by non-negative matrix factorization - Lee, D. and Seung, H. (1999) - [PDF](http://www.columbia.edu/~jwp2128/Teaching/E4903/papers/nmf_nature.pdf) Algorithms for non-negative matrix factorization - Lee, D. and Seung, H. (2001)- [PDF](https://papers.nips.cc/paper/1861-algorithms-for-non-negative-matrix-factorization.pdf) Non-negative matrix factorization is an alternative approach to PCA, where data and components are assumed to be non-negative. The data matrix $X$ is approximated by: $$X \approx WH$$ with $X \in \mathbb{R}^{N*p}$, $W \in \mathbb{R}^{N*r}$, $H \in \mathbb{R}^{r*p}$, and $r \leq \max(N,p)$. We also assume that $x_{ij}, w_{ik}, h_{kj} \geq 0$. 
class MyNMF:
    """Non-negative matrix factorization X ~ W @ H (Lee & Seung).

    Maximizes the Poisson log-likelihood with the classic multiplicative
    update rules, alternating 1000 passes over W then H.
    """

    def __init__(self, r):
        self.r = r  # inner rank of the factorization

    def fit(self, X):
        """Return non-negative factors (W, H) with X ~ W @ H."""
        N, p = X.shape
        # Non-negative random initialization.
        W = np.abs(np.random.randn(N, self.r))
        H = np.abs(np.random.randn(self.r, p))
        for _ in range(1000):
            self._update_w(X, W, H)
            self._update_h(X, W, H)
        return W, H

    @staticmethod
    def _update_w(X, W, H):
        """One multiplicative update of W, in place, with H fixed."""
        WH = W @ H
        for i in range(W.shape[0]):
            for k in range(W.shape[1]):
                W[i, k] *= np.sum(H[k] * X[i] / WH[i]) / np.sum(H[k])

    @staticmethod
    def _update_h(X, W, H):
        """One multiplicative update of H, in place, with W fixed."""
        WH = W @ H
        for k in range(H.shape[0]):
            for j in range(H.shape[1]):
                H[k, j] *= np.sum(W[:, k] * X[:, j] / WH[:, j]) / np.sum(W[:, k])
The archetypes themselves are convex combinations of the observations:
Typically the $S_l$ and $\sigma_j$ are modeled as Gaussians, and the model is fit by maximum likelihood.
(2003) - [PDF](https://papers.nips.cc/paper/2155-independent-components-analysis-through-product-density-estimation.pdf) We observe a random vector $X \in \mathbb{R}^p$, assumed to arise from a linear mixing of a latent source random vector $S \in \mathbb{R}^P$: $$X = AS$$. The components $S_j$ are assumed to be independently distributed. We assume $E(S) = 0$, $Cov(S) = I$, $Cov(X) = I$, and $A$ ortohogonal. Because the $S_j$ are independant, the joint density of $S$ is given by: $$f_S(s) = \prod_{j=1}^p f_j(s_j)$$. And since $A$ is orthogonal, the joint density of $X$ is: $$f_X(x) = \prod_{j=1}^p f_j(a_j^Tx)$$. the model $f_X$ is fit using semi-parametric maximum likelihood. Each $f_j$ is represented by an exponentially tilted Gaussian density: $$f_j(s_j) = \phi (s_j) \exp (g_j(s_j))$$ Whith $\phi$ the standard Gaussian and $j_g$ a cubic smoothing pline restricted such that $f_j$ integrates to $1$ ### Fitting the Model We got the data $x_1, \text{...}, x_N$. We first center and whiten it. Then we fit the model using penalized maximum log-likelihood: $$\min_{A, \{ g_j \}_1^p} \sum_{j=1}^p \left[ \frac{1}{N} \sum_{i=1}^N (\log \phi(a_j^Tx_i) + g_j(a_j^Tx_i)) - \lambda_j \int g_j''^2(t)dt \right]$$ $$\text{s.t. } a_j^Ta_k = \delta_{jk} \space \forall j,k$$ $$\text{s.t. } \int \phi(s) \exp(g_j(s)) ds = 1 \space \forall j$$ ProDen ICA algorithm: - Initialize A from a random gaussian, then orgonalize it - Repeat until convergence: - Given fixed $A$, optimize seperately each for each $g_j$ using the penalized density estimation algorithm. - Given fixed $g_j$, optimize for A using one step of the fixed point algorithm. ### Penalized density estimation When $p=1$, the problem simplifies to: $$\min_g \frac{1}{N} \sum_{i=1}^N (\log \phi(s_i) + g(s_i)) - \lambda \int g''^2(t)dt$$ $$\text{s.t. 
} \int \phi(s) \exp(g(s)) ds = 1$$ The constraint can be integraded with the modified criterion: $$\min_g \frac{1}{N} \sum_{i=1}^N (\log \phi(s_i) + g(s_i)) - \int \phi(s) \exp(g(s)) ds - \lambda \int g''^2(t)dt$$ We approximate the integral using a grid of $L$ values $s_l^*$ separated by $\Delta$, covering the observed values $s_i$: $$y_l^* = \frac{\# s_i \in (s_l^* - \Delta/2, s_l^* + \Delta/2)}{N}$$ The final criterion is: $$\min_g \sum_{l=1}^L \left[ y_l^*(\log \phi(s_l^*) + g(s_l^*)) - \Delta \phi(s_l^*) \exp(g(s_l^*)) \right] - \lambda \int g''^2(t)dt$$ This is a generalized additive model, that can be fit using a newton algorithm, turned into an iteratively reweighted penalized least square regression problem. This is done using a weighted cubic smoothing spline. ### Fixed-point method The penalty term does not depend on $A$, and because all colums of $A$ are othogonal, the Gaussian component $\log \phi(a_j^Tx_i)$ does not depend of A either. What remains to be optimized is: $$C(A) = \frac{1}{N} \sum_{i=1}^N \sum_{j=1}^p g_j(a_j^Tx_i)$$ # Multidimensional Scaling Multidimensional Scaling tries to learn a lower-dimensional manifold like PCA. It only works with distances $d_{ij}$, distance between obervation $i$ and $j$. The goal is to find a lower-dimensional representation of the data that preserves the distance as well as possible. Krukaskal-Shephard scaling (least squares) minimizes the following stress function: $$S_M(Z) = \sum_{i \neq i'} (d_{ii'} - ||z_i - z_{i'}||)^2$$ The criterion is minimized using gradient descent. Another criterion is the Sammon mapping: $$S_{Sm}(Z) = \sum_{i \neq i'} \frac{(d_{ii'} - ||z_i - z_{i'}||)^2}{d_{ii'}}$$ In classical scaling, we use similarities $s_{ii'}$. One example is the center inner product $s_{ii'} = \langle x_i - \bar{x}, x_{i'} - \bar{x} \rangle$. 
The criterion is: $$S_C(Z) = \sum_{i,i'} (s_{ii'} - \langle z_i - \bar{z}, z_{i'} - \bar{z} \rangle)^2$$ If the similarities are the center inner product, this is equivalent to PCA. Another approach is nonmetric scaling, this minimizes the following criterion: $$S_{NM}(Z) = \sum_{i \neq i'} \frac{(||z_i - z_{i'}|| - \theta(d_{ii'}))^2}{\sum_{i \neq i'} ||z_i - z_{i'}||^2}$$ with $\theta$ an arbitrary increasing function. We fit the model by iteratively optimizing for $Z$ with gradient descent and $\theta$ with isotonic regression until convergence. Isotonic regression is a regression technique trying to minimize the squared error, but the approximator is any form of monotone function. # Nonlinear Dimension Reduction Several methods exist to find a low-dimensional nonlinear manifold of the data ## Isometric feature mapping A global geometric framework for nonlinear dimensionality reduction - Tenenbaum, J. B., de Silva, V. and Langford, J. C. (2000) - [PDF](https://web.mit.edu/cocosci/Papers/sci_reprint.pdf) We build a graph of the dataset, We find the neighbors of each of the points, and build edges with its neighbors. We approximate the geodesic distance between 2 points by the shortest path between these 2 points on the graph. Classical scaling is applied to the graph distances. ## Local linear embedding Nonlinear dimensionality reduction by locally linear embedding - Roweis, S. T. and Saul, L. K. (2000) - [PDF](http://www.robots.ox.ac.uk/~az/lectures/ml/lle.pdf) The point are approximated locally, and a lower dimensional representation is built from these approximations. 1. For each data point $x_i$m we find its K-nearest neighbors $\mathcal{N}(i)$ 2. We approximate each point by an affine mixture of its neighbors: $$\min_{W_{ik}} ||x_i - \sum_{k \in \mathcal{N}(i)} w_{ik}x_k||^2$$ over weights $w_{ik}$ satysfying $\sum_k w_{ik}=1$. 3. 
We find points $y_i$ in a lower-dimensional space that minimizes: $$\sum_{i=1}^N ||y_i - \sum_{k=1}^N w_{ik} y_k||^2$$ ## Local Multidimension Scaling Local multidimensional scaling for nonlineardimension reduction, graph drawing and proximity analysis - Chen, L. and Buja, A. (2008) - [PDF](https://pdfs.semanticscholar.org/183f/fb91f924ae7b938e4bfd1f5b2c3f8ef3b35c.pdf) Let $\mathcal{N}$ the set of nearby pairs, suchat that $(i,i') \in \mathcal{N}$ if $i$ is among the K-nearest neighbors of $i'$ or vice-versa. The goal if to find the point representations $z_i$ that minimize the stress function: $$S_L(Z) = \sum_{(i,i') \in \mathcal{N}} (d_{ii'} - ||z_i - z_{i'}||)^2 - \tau \sum_{(i,i') \notin \mathcal{N}} ||z_i - z_{i'}||$$ with tuning parameters $\tau$ and $K$. The first term tries to preserve local structure in the data, while the second encourage representations of points that are non-neighbors to be farther appart. The model is trained with gradient descent # The Google PageRank Algorithm The pagerank citation ranking: bringing order to the web - Page, L., Brin, S., Motwani, R. and Winograd, T. (1998) - [PDF](http://ilpubs.stanford.edu:8090/422/1/1999-66.pdf) We have $N$ webpages, and want to rank them in term of importance. A webpage is important if many webpages point to it. It also takes into account the importance of the linkin pages and the number of outgoing pages they have. Let $L$ a binary matrix, $L_{ij} = 1$ if page $j$ points to page $i$, $0$ otherwhise. Let $c_j = \sum_{i=1}^N L_{ij}$ the number of pages pointed to by page $j$. Then the google PageRanks $p_i$ are defined recursively as: $$p_i = (1 - d) + d \sum_{j=1}^N \frac{L_{ij}}{c_j} p_j$$ with $d$ a positive constant that ensures that each page get a PageRank of at least $1-d$. We can write it in matrix notiation: $$p = (1 - d)e + d LD_c^{-1}p$$ with $e$ a vector of $N$ ones and $D_c = \text{diag}(c)$. 
def page_rank(L, d=0.85, tol=1e-12):
    """Compute Google PageRank scores by the power method.

    Parameters
    ----------
    L : (N, N) binary matrix, L[i, j] = 1 if page j links to page i.
    d : damping factor; every page gets a rank of at least 1 - d.
    tol : squared-distance convergence threshold between iterates.

    Returns
    -------
    (N,) array of PageRanks, normalized so their mean is 1.
    """
    N = L.shape[0]
    c = np.sum(L, axis=0)  # number of outgoing links of each page
    # Dangling pages (no outgoing links) have c == 0; their column of L
    # is all zeros, so replacing the degree by 1 avoids a division by
    # zero without changing the transition matrix A.
    safe_c = np.where(c == 0, 1, c)
    A = (1 - d) / N + d * L * (1 / safe_c).reshape(1, N)

    pk = np.random.rand(N)
    its = 0
    while True:
        its += 1
        pk1 = A @ pk
        pk1 /= np.mean(pk1)  # renormalize so that e^T p = N
        if (pk - pk1) @ (pk - pk1) < tol:
            break
        pk = pk1
    # Residual check: p should be (close to) a fixed point of A.
    print(metrics.tdist(pk1, A @ pk1))
    print('Niters:', its)
    return pk1
github_jupyter
# Wie Sie dieses Notebook nutzen: - Führen Sie diesen Code Zelle für Zelle aus. - Um die Variableninhalte zu beobachten, nutzen Sie in Jupyter-Classic den "Variable Inspektor". Falls Sie dieses Notebook in Jupyter-Lab verwenden, nutzen Sie hierfür den eingebauten Debugger. - Wenn Sie "Code Tutor" zur Visualisierung des Ablaufes einzelner Zellen nutzen möchten, führen Sie einmalig die nachfolgende Zelle aus. Anschliessend schreiben Sie %%tutor in die erste Zeile jeder Zelle, die Sie visualisieren möchten. - Die Dokumentation von range(), len() und allen anderen eingebauten Funktionen finden Sie hier: https://docs.python.org/3/library/functions.html ``` # Für Code Tutor Visualisierungen from metakernel import register_ipython_magics register_ipython_magics() ``` ## Funktionen - Funktionen definiert man mit __def__ - Die korrekte Einrückung des Anweisungsblocks ist zu beachten. - Funktionen haben optional __Parameter__ und einen __Rückgabewert__. Letzterer wird mit "return" zurückgegeben. - Funktionen haben eine __Dokumentation__, die im Docstring hinterlegt ist. - Funktionen haben __Testfälle__, die automatisch ausgeführt werden können und die Funktion dokumentieren und die Verwendung demonstrieren. - Funktionen können Tesfälle im Docstring haben, aber auch auf viele andere Arten getestet werden, etwa mittels __assert__-Statements oder fortgeschritten mit [unittest](https://docs.python.org/3/library/unittest.html#module-unittest). ### Definition ```python def name_der_funktion(parameter1, parameter2): """ Hier steht in einem Satz, was diese Funktion macht. Tests: >>> print(name_der_funktion("Rot", "Grün")) Gelb >>> print(name_der_funktion("Rot", "Blau")) Cyan Hier können weitere relevante Hinweise zur Nutzung gegeben werden. """ berechung 1 berechung 2 berechung ... ergebnis = berechung n return ergebnis ``` ### Anwendung Funktionen lassen sich sehr gut wiederverwenden, etwa in Schleifen. 
Dazu muss man nicht verstehen, wie die Funktion intern funktioniert, sondern nur ihr Ergebnis kennen.
>>> print(second_best([20, 30, 70])) 30 >>> print(second_best([0, 0, 0])) 0 """ pass import doctest doctest.testmod() # automatically validate the embedded tests of all functions ```
github_jupyter
# Hinge Loss In this project you will be implementing linear classifiers beginning with the Perceptron algorithm. You will begin by writing your loss function, a hinge-loss function. For this function you are given the parameters of your model θ and θ0 Additionally, you are given a feature matrix in which the rows are feature vectors and the columns are individual features, and a vector of labels representing the actual sentiment of the corresponding feature vector. 1. First, implement the basic hinge loss calculation on a single data-point. Instead of the entire feature matrix, you are given one row, representing the feature vector of a single data sample, and its label of +1 or -1 representing the ground truth sentiment of the data sample def hinge_loss_single(feature_vector, label, theta, theta_0): feature_vector - A numpy array describing the given data point. label - A real valued number, the correct classification of the data point. theta - A numpy array describing the linear classifier. theta_0 - A real valued number representing the offset parameter. Returns: A real number representing the hinge loss associated with the given data point and parameters. ``` import numpy as np feature_vector= np.array([1, 2]) label= 1 theta= np.array([-1, 1]) theta_0= -0.2 def hinge_loss_single(feature_vector, label, theta, theta_0): if (label* np.dot(feature_vector, theta) + theta_0) >=1: loss= 0 else: loss= 1 - (label* (np.dot(theta, feature_vector) + theta_0)) return loss ``` # The Complete Hinge Loss Now it's time to implement the complete hinge loss for a full set of data. Your input will be a full feature matrix this time, and you will have a vector of corresponding labels. The kth row of the feature matrix corresponds to the kth element of the labels vector. This function should return the appropriate loss of the classifier on the given dataset. 
``` def hinge_loss_full(feature_matrix, labels, theta, theta_0): total_loss=[] for i, x in enumerate(feature_matrix): if (labels[i]*(np.dot(theta, feature_matrix[i]) + theta_0)) >= 1: loss= 0 else: loss= 1 - (labels[i]*(np.dot(theta, feature_matrix[i])+ theta_0)) total_loss.append(loss) return sum(total_loss)/len(feature_matrix) feature_matrix = np.array([[1, 2], [1, -1]]) label, theta, theta_0 = np.array([1, 1]), np.array([-1, 1]), -0.2 hinge_loss_full(feature_matrix, label, theta, theta_0) ```
github_jupyter
(Feedforward)= # Chapter 8 -- Feedforward Let's take a look at how feedforward is processed in a three layers neural net. <img src="images/feedForward.PNG" width="500"> Figure 8.1 From the figure 8.1 above, we know that the two input values for the first and the second neuron in the hidden layer are $$ h_1^{(1)} = w_{11}^{(1)}*x_1 + w_{21}^{(1)}*x_2 + w_{31}^{(1)}*x_3+ w_{41}^{(1)}*1 $$ (eq8_1) $$ h_2^{(1)} = w_{12}^{(2)}*x_1 + w_{22}^{(2)}*x_2 + w_{32}^{(2)}*x_3+ w_{42}^{(2)}*1 $$ (eq8_2) where the $w^{(n)}_{4m}$ term is the bias term in the form of weight. To simplify the two equations above, we can use matrix $$ H^{(1)} = [h_1^{(1)} \;\; h_2^{(1)}] = [x_1 \;\; x_2 \;\; x_3 \;\; 1] \begin{bmatrix} w^{(1)}_{11} & w^{(1)}_{12} \\ w^{(1)}_{21} & w^{(1)}_{22} \\ w^{(1)}_{31} & w^{(1)}_{32} \\ w^{(1)}_{41} & w^{(1)}_{4 2} \end{bmatrix} $$ (eq8_3) Similarly, the two outputs from the input layer can be the inputs for the hidden layer $$ \sigma(H^{(1)}) = [\sigma(h_1^{(1)}) \;\; \sigma( h_2^{(1)})] $$ (eq8_4) This in turns can be the input values for the next layer (output layer) $$ h^{(2)} = w^{(2)}_{11}* \sigma(h^{(1)}_1)+w^{(2)}_{21} *\sigma(h^{(1)}_2)+w^{(2)}_{31}*1 $$ (eq8_5) Again, we can simplify this equation by using matrix $$ H^{(2)} = [\sigma(h_1^{(1)}) \;\;\sigma(h_2^{(1)}) \; \; 1] \begin{bmatrix} w^{(2)}_{11} \\ w^{(2)}_{21} \\ w^{(2)}_{31} \end{bmatrix} $$ (eq8_6) Then we send this value $h^{(2)}$ into the sigma function in the final output layer to obtain the prediction $$ \hat{y} = \sigma(h^{(2)}) $$ (eq8_7) To put all the equation of three layers together, we can have $$ \hat{y} = \sigma(\sigma([x_1 \;\; x_2 \;\; x_3 \;\; 1] \begin{bmatrix} w^{(1)}_{11} & w^{(1)}_{12} \\ w^{(1)}_{21} & w^{(1)}_{22} \\ w^{(1)}_{31} & w^{(1)}_{32} \\ w^{(1)}_{41} & w^{(1)}_{42} \end{bmatrix}) \begin{bmatrix} w^{(2)}_{11} \\ w^{(2)}_{21} \\ w^{(2)}_{31} \end{bmatrix}) $$ (eq8_8) Or we can simplify it to be $$ \hat{y} = \sigma(\sigma(xW^{(1)})W^{(2)}) $$ (eq8_9) This is the 
feedforward process: based on the known weights $W$ and input $x$ to calculate the prediction $\hat{y}$. Finally, it's easy to write code computing the output from a Network instance. We begin by defining the sigmoid function: ```
def sigmoid(z):
    """Elementwise logistic sigmoid 1 / (1 + exp(-z)).

    Works on scalars and, via NumPy broadcasting, on vectors/arrays.
    """
    return 1.0/(1.0+np.exp(-z))
``` Note that when the input z is a vector or Numpy array, Numpy automatically applies the function sigmoid elementwise, that is, in vectorized form. We then add a feedforward method to the Network class, which, given an input a for the network, returns the corresponding output: ```
def feedforward(self, a):
    """Return the network's output for input ``a``.

    ``a`` is propagated through every layer in turn: each iteration applies
    the layer's weight matrix and bias, then the sigmoid, and the result
    becomes the input to the next layer.  The final activation is returned.
    """
    for b, w in zip(self.biases, self.weights):
        a = sigmoid(np.dot(w, a)+b)
    return a
```
github_jupyter
In this lab, we will optimize the weather simulation application written in Fortran (if you prefer to use C++, click [this link](../../C/jupyter_notebook/profiling-c.ipynb)). Let's execute the cell below to display information about the GPUs running on the server by running the pgaccelinfo command, which ships with the PGI compiler that we will be using. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell. ``` !pgaccelinfo ``` ## Exercise 2 ### Learning objectives Learn how to identify and parallelise the computationally expensive routines in your application using OpenACC compute constructs (A compute construct is a parallel, kernels, or serial construct.). In this exercise you will: - Implement OpenACC parallelism using parallel directives to parallelise the serial application - Learn how to compile your parallel application with PGI compiler - Benchmark and compare the parallel version of the application with the serial version - Learn how to interpret PGI compiler feedback to ensure the applied optimization were successful From the top menu, click on *File*, and *Open* `miniWeather_openacc.f90` and `Makefile` from the current directory at `Fortran/source_code/lab2` directory and inspect the code before running below cells.We have already added OpenACC compute directives (`!$acc parallel loop`) around the expensive routines (loops) in the code. Once done, compile the code with `make`. View the PGI compiler feedback (enabled by adding `-Minfo=accel` flag) and investigate the compiler feedback for the OpenACC code. The compiler feedback provides useful information about applied optimizations. ``` !cd ../source_code/lab2 && make clean && make ``` Let's inspect part of the compiler feedback and see what it's telling us. 
<img src="images/ffeedback1-0.png"> - Using `-ta=tesla:managed`, instruct the compiler to build for an NVIDIA Tesla GPU using "CUDA Managed Memory" - Using `-Minfo` command-line option, we will see all output from the compiler. In this example, we use `-Minfo=accel` to only see the output corresponding to the accelerator (in this case an NVIDIA GPU). - The first line of the output, `compute_tendencies_x`, tells us which function the following information is in reference to. - The line starting with 247 and 252, shows we created a parallel OpenACC loop. This loop is made up of gangs (a grid of blocks in CUDA language) and vector parallelism (threads in CUDA language) with the vector size being 128 per gang. - The line starting with 249 and 252, `Loop is parallelizable` of the output tells us that on these lines in the source code, the compiler found loops to accelerate. - The rest of the information concerns data movement. Compiler detected possible need to move data and handled it for us. We will get into this later in this lab. It is very important to inspect the feedback to make sure the compiler is doing what you have asked of it. Now, **Run** the application for small values of `nx_glob`,`nz_glob`, and `sim_time`: **40, 20, 1000**. ``` !cd ../source_code/lab2 && ./miniWeather ``` **Profile** it with Nsight Systems command line `nsys`. ``` !cd ../source_code/lab2 && nsys profile -t nvtx,openacc --stats=true --force-overwrite true -o miniWeather_3 ./miniWeather ``` You can see that the changes made actually slowed down the code and it runs slower compared to the non-accelerated CPU only version. Let's checkout the profiler's report. [Download the profiler output](../source_code/lab2/miniWeather_3.qdrep) and open it via the GUI. From the "timeline view" on the top pane, double click on the "CUDA" from the function table on the left and expand it. Zoom in on the timeline and you can see a pattern similar to the screenshot below. 
The blue boxes are the compute kernels and each of these groupings of kernels is surrounded by purple and teal boxes (annotated with red color) representing data movements. **Screenshots represents profiler report for the values of 400,200,1500.** <img src="images/nsys_slow.png" width="80%" height="80%"> Let's hover your mouse over kernels (blue boxes) one by one from each row and checkout the provided information. <img src="images/occu-1.png" width="60%" height="60%"> **Note**: In the next two exercises, we start optimizing the application by improving the occupancy and reducing data movements. ## Post-Lab Summary If you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below. ``` %%bash cd .. rm -f openacc_profiler_files.zip zip -r openacc_profiler_files.zip * ``` **After** executing the above zip command, you should be able to download the zip file [here](../openacc_profiler_files.zip). ----- # <p style="text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em"> <a href=../../profiling_start.ipynb>HOME</a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<span style="float:center"> <a href=profiling-fortran-lab3.ipynb>NEXT</a></span> </p> ----- # Links and Resources [OpenACC API Guide](https://www.openacc.org/sites/default/files/inline-files/OpenACC%20API%202.6%20Reference%20Guide.pdf) [NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/) [CUDA Toolkit Download](https://developer.nvidia.com/cuda-downloads) **NOTE**: To be able to see the Nsight System profiler output, please download Nsight System latest version from [here](https://developer.nvidia.com/nsight-systems). 
Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community. --- ## Licensing This material is released by NVIDIA Corporation under the Creative Commons Attribution 4.0 International (CC BY 4.0).
github_jupyter
``` import matplotlib import matplotlib.pyplot as plt import os import random import io import imageio import glob import scipy.misc import numpy as np from six import BytesIO from PIL import Image, ImageDraw, ImageFont from IPython.display import display, Javascript from IPython.display import Image as IPyImage import tensorflow as tf from object_detection.utils import label_map_util from object_detection.utils import config_util from object_detection.utils import visualization_utils as viz_utils from object_detection.builders import model_builder %matplotlib inline ``` ### Function to run inference on a single image ``` def run_inference_single_image(model,image): image = np.asarray(image) input_tensor = tf.convert_to_tensor(image) input_tensor = input_tensor[tf.newaxis,...] model_fn = model.signatures["serving_default"] output = model_fn(input_tensor) num_detections = int(output.pop("num_detections")) output = {key:value[0, :num_detections].numpy() for key,value in output.items()} output['num_detections'] = num_detections output['detection_classes']=output['detection_classes'].astype(np.int64) return output LABEL_PATH = '/home/thirumalaikumar/Intern Projects/TrafficControl/content/sub_surf/gate_label_map.pbtxt' ci = label_map_util.create_category_index_from_labelmap(LABEL_PATH,use_display_name=True) def show_inference(model,frame): image_np = np.array(frame) output = run_inference_single_image(model,image_np) classes = np.squeeze(output['detection_classes'])#class to which the object belongs to boxes = np.squeeze(output['detection_boxes'])#box coords scores = np.squeeze(output['detection_scores'])#prob score of the model #condition for Detecting only the gate indices = np.argwhere(classes==2) boxes = np.squeeze(boxes[indices]) classes = np.squeeze(classes[indices]) scores = np.squeeze(scores[indices]) viz_utils.visualize_boxes_and_labels_on_image_array( image_np, boxes, classes, scores, ci, use_normalized_coordinates=True, max_boxes_to_draw=100, 
min_score_thresh=.8, agnostic_mode=False, ) return image_np model = tf.saved_model.load("/home/thirumalaikumar/Intern Projects/TrafficControl/content/sub_surf/saved_model") import cv2 def post_process_bb(model,img,threshold=0.5): img = cv2.imread(img) output = run_inference_single_image(model,img) assert len(output['detection_boxes']) == len(output['detection_scores']) max_score_index = np.squeeze(np.argwhere(output['detection_scores']>=threshold)) detection_score = output['detection_scores'][max_score_index] box_coords = output['detection_boxes'][max_score_index] detecction_class = output['detection_classes'][max_score_index] return img,detection_score,detecction_class,box_coords def midpoint(): img,score,classes,coords = post_process_bb(model,"/home/thirumalaikumar/hackathon/images_1005-20201030T064700Z-001/images_1005/1_458/download (37).jpg") im_width = img.shape[0] im_height = img.shape[1] try: coords = coords.reshape(1,coords.shape[0]) except ValueError as v: print("Your Object detector has detected more than 1 BB") print(coords.shape) for i in range(len(coords)): x1,y1,x2,y2 = coords[i] (left, right, top, bottom) = (y1 * im_width, y2 * im_width, x1 * im_height, x2 * im_height) p1 = (int(left), int(top)) p2 = (int(right), int(bottom)) #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) _ = cv2.rectangle(img, p1, p2, (255,0,0), 15) x_center = int((left+right)/2) y_center = int(bottom) x2_center = int((left+right)/2) y2_center = int(top) center1 = (x_center, y_center) center2 = (x2_center,y2_center) res = tuple(map(lambda i, j: i + j, center1, center2)) res = tuple(map(lambda i: i / 2, res)) res = tuple(map(lambda i: int(i) , res)) img1 = cv2.circle(img,res, 15, (0, 255, 0), -1) #cv2.putText(img1,"Gate",p1, cv2.FONT_HERSHEY_SIMPLEX,1, (255, 255, 255), 2, cv2.LINE_AA) return img1 plot.imshow(midpoint()) vid = cv2.VideoCapture(0) while(True): # Capture the video frame # by frame ret, frame = vid.read() imagen = show_inference(model,frame) # Display the resulting frame 
cv2.imshow('frame', cv2.resize(imagen,(800,600))) # the 'q' button is set as the # quitting button you may use any # desired button of your choice if cv2.waitKey(1) & 0xFF == ord('q'): break # After the loop release the cap object vid.release() # Destroy all the windows cv2.destroyAllWindows() ```
github_jupyter
``` %matplotlib nbagg import os os.environ["PYOPENCL_COMPILER_OUTPUT"]="1" import numpy import fabio import pyopencl from pyopencl import array as cla from matplotlib.pyplot import subplots ctx = pyopencl.create_some_context(interactive=True) queue = pyopencl.CommandQueue(ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE) ctx image = fabio.open("/users/kieffer/workspace-400/tmp/pyFAI/test/testimages/Pilatus6M.cbf").data mask = (image<0).astype("int8") fig, ax = subplots() ax.imshow(image.clip(0,100)) %load_ext pyopencl.ipython_ext %%cl_kernel //read withou caching float inline read_simple(global int *img, int height, int width, int row, int col){ //This kernel reads the value and returns it without active caching float value = NAN; // Read if ((col>=0) && (col<width) && (row>=0) && (row<height)){ int read_pos = col + row*width; value = (float)img[read_pos]; if (value<0){ value = NAN; } } return value; } void inline read_and_store(global int *img, int height, int width, int row, int col, int half_wind_height, int half_wind_width, local float* storage){ //This kernel reads the value and stores in the local storage int line_size, write_pos, idx_line; float value = NAN; // Read if ((col>=0) && (col<width) && (row>0) && (row<height)){ int read_pos = col + row*width; value = (float)img[read_pos]; if (value<0){ value = NAN; } } // Save locally if ((col>=-half_wind_width) && (col<=width+half_wind_width) && (row>-half_wind_height) && (row<=height+half_wind_height)){ line_size = get_local_size(0) + 2 * half_wind_width; idx_line = (half_wind_height+row)%(2*half_wind_height+1); write_pos = line_size*idx_line + half_wind_width + col - get_group_id(0)*get_local_size(0); storage[write_pos] = value; } //return value } //Store a complete line void inline store_line(global int *img, int height, int width, int row, int half_wind_height, int half_wind_width, local float* storage){ read_and_store(img, height, width, row, get_global_id(0), half_wind_height, 
half_wind_width, storage); if (get_local_id(0)<half_wind_width){ // read_and_store_left read_and_store(img, height, width, row, get_group_id(0)*get_local_size(0)-half_wind_width+get_local_id(0), half_wind_height, half_wind_width, storage); //read_and_store_right read_and_store(img, height, width, row, (get_group_id(0)+1)*get_local_size(0)+get_local_id(0), half_wind_height, half_wind_width, storage); } } float read_back( int height, int width, int row, int col, int half_wind_height, int half_wind_width, local float* storage){ float value=NAN; int write_pos, line_size, idx_line; if ((col>=-half_wind_width) && (col<=width+half_wind_width) && (row>-half_wind_height) && (row<=height+half_wind_height)){ line_size = get_local_size(0) + 2 * half_wind_width; idx_line = (half_wind_height+row)%(2*half_wind_height+1); write_pos = line_size*idx_line + half_wind_width + col - get_group_id(0)*get_local_size(0); value = storage[write_pos]; } return value; } // workgroup size of kernel: 32 to 128, cache_read needs to be (wg+2*half_wind_width)*(2*half_wind_height+1)*sizeof(float) kernel void spot_finder(global int *img, int height, int width, int half_wind_height, int half_wind_width, float threshold, float radius, global int *cnt_high, //output global int *high, //output int high_size, local float *cache_read, local int *local_high, int local_size){ //decaration of variables int col, row, cnt, i, j, where; float value, sum, std, centroid_r, centroid_c, dist, mean; col = get_global_id(0); local int local_cnt_high[1]; local_cnt_high[0] = 0; for (i=0; i<local_size; i+=get_local_size(0)){ local_high[i+get_local_id(0)] = 0; } row=0; //pre-load data for the first line for (i=-half_wind_height; i<half_wind_height; i++){ store_line(img, height, width, row+i, half_wind_height, half_wind_width, cache_read); } barrier(CLK_LOCAL_MEM_FENCE); //loop within a column for (row=0;row<height; row++){ //read data store_line(img, height, width, row+half_wind_height, half_wind_height, half_wind_width, 
cache_read); barrier(CLK_LOCAL_MEM_FENCE); //calculate mean sum = 0.0f; centroid_r = 0.0f; centroid_c = 0.0f; cnt = 0; for (i=-half_wind_height; i<=half_wind_height; i++){ for (j=-half_wind_width; j<=half_wind_width; j++){ value = read_back(height, width, row+i, col+j, half_wind_height, half_wind_width, cache_read); if (isfinite(value)){ sum += value; centroid_r += value*i; centroid_c += value*j; cnt += 1; } } } if (cnt){ mean = sum/cnt; dist = sum*radius; if ((fabs(centroid_r)<dist) && (fabs(centroid_c)<dist)){ // calculate std sum = 0.0; for (i=-half_wind_height; i<=half_wind_height; i++){ for (j=-half_wind_width; j<=half_wind_width; j++){ value = read_back(height, width, row+i, col+j, half_wind_height, half_wind_width, cache_read); if (isfinite(value)){ sum += pown(mean-value,2); } } } std = sqrt(sum/cnt); value = read_back(height, width, row, col, half_wind_height, half_wind_width, cache_read); if ((value-mean)>threshold*std){ where = atomic_inc(local_cnt_high); if (where<local_size){ local_high[where] = col+width*row; } } // if intense signal } // if properly centered } // if patch not empty barrier(CLK_LOCAL_MEM_FENCE); } //for row //Store the results in global memory barrier(CLK_LOCAL_MEM_FENCE); if (get_local_id(0) == 0) { cnt = local_cnt_high[0]; if ((cnt>0) && (cnt<local_size)) { where = atomic_add(cnt_high, cnt); if (where+cnt>high_size){ cnt = high_size-where; //store what we can } for (i=0; i<cnt; i++){ high[where+i] = local_high[i]; } } }//store results } //kernel // workgroup size of kernel: without cacheing read kernel void simple_spot_finder(global int *img, int height, int width, int half_wind_height, int half_wind_width, float threshold, float radius, global int *cnt_high, //output global int *high, //output int high_size, local int *local_high, int local_size){ //decaration of variables int col, row, cnt, i, j, where, tid, blocksize; float value, sum, std, centroid_r, centroid_c, dist, mean, M2, delta, delta2, target_value; col = 
get_global_id(0); row = get_global_id(1); //Initialization of output array in shared local int local_cnt_high[2]; blocksize = get_local_size(0) * get_local_size(1); tid = get_local_id(0) + get_local_id(1) * get_local_size(0); if (tid < 2){ local_cnt_high[tid] = 0; } for (i=0; i<local_size; i+=blocksize){ if ((i+tid)<local_size) local_high[i+tid] = 0; } barrier(CLK_LOCAL_MEM_FENCE); //Calculate mean + std + centroids mean = 0.0f; M2 = 0.0f; centroid_r = 0.0f; centroid_c = 0.0f; cnt = 0; for (i=-half_wind_height; i<=half_wind_height; i++){ for (j=-half_wind_width; j<=half_wind_width; j++){ value = read_simple(img, height, width, row+i, col+j); if (isfinite(value)){ centroid_r += value*i; centroid_c += value*j; cnt += 1; delta = value - mean; mean += delta / cnt; delta2 = value - mean; M2 += delta * delta2; } } } if (cnt){ dist = mean*radius*cnt; std = sqrt(M2 / cnt); target_value = read_simple(img, height, width, row, col); if (((target_value-mean)>threshold*std) && (fabs(centroid_r)<dist) && (fabs(centroid_c)<dist)){ where = atomic_inc(local_cnt_high); if (where<local_size){ local_high[where] = col+width*row; } } // if intense signal properly centered } // if patch not empty //Store the results in global memory barrier(CLK_LOCAL_MEM_FENCE); if (tid==0) { cnt = local_cnt_high[0]; if ((cnt>0) && (cnt<local_size)) { where = atomic_add(cnt_high, cnt); if (where+cnt>high_size){ cnt = high_size-where; //store what we can } local_cnt_high[0] = cnt; local_cnt_high[1] = where; } } barrier(CLK_LOCAL_MEM_FENCE); //copy the data from local to global memory for (i=0; i<local_cnt_high[0]; i+=blocksize){ high[local_cnt_high[1]+i+tid] = local_high[i+tid]; }//store results } //kernel def peak_count(img, window=3, threshold=3.0, radius=1.0, workgroup=32, array_size=10000): img_d = cla.to_device(queue, image) high_d = cla.zeros(queue, (array_size,), dtype=numpy.int32) high_cnt_d = cla.zeros(queue, (1,), dtype=numpy.int32) read_cache = 
pyopencl.LocalMemory(4*(workgroup+2*window)*(2*window+1)) write_cache = pyopencl.LocalMemory(4096) height, width = img.shape size = (width+workgroup-1)&~(workgroup-1) ev = spot_finder(queue, (size,), (workgroup,), img_d.data, numpy.int32(height), numpy.int32(width), numpy.int32(window), numpy.int32(window), numpy.float32( threshold), numpy.float32( radius), high_cnt_d.data, high_d.data, numpy.int32(array_size), read_cache, write_cache, numpy.int32(1024)) size = high_cnt_d.get()[0] print("found %i peaks in %.3fms"%(size, (ev.profile.end-ev.profile.start)*1e-6)) return high_d.get()[:size] %time raw = peak_count(image, window=5, threshold=6) x=raw%image.shape[-1] y=raw//image.shape[-1] ax.plot(x,y,".w") def simple_peak_count(img, window=3, threshold=3.0, radius=1.0, workgroup=32, array_size=10000): img_d = cla.to_device(queue, image) high_d = cla.zeros(queue, (array_size,), dtype=numpy.int32) high_cnt_d = cla.zeros(queue, (1,), dtype=numpy.int32) #read_cache = pyopencl.LocalMemory(4*(workgroup+2*window)*(2*window+1)) write_cache = pyopencl.LocalMemory(4096) height, width = img.shape size_w = (width+workgroup-1)&~(workgroup-1) size_h = (height+workgroup-1)&~(workgroup-1) ev = simple_spot_finder(queue, (size_w,size_h), (workgroup, workgroup), img_d.data, numpy.int32(height), numpy.int32(width), numpy.int32(window), numpy.int32(window), numpy.float32( threshold), numpy.float32( radius), high_cnt_d.data, high_d.data, numpy.int32(array_size), #read_cache, write_cache, numpy.int32(1024)) size = high_cnt_d.get()[0] print("found %i peaks in %.3fms"%(size, (ev.profile.end-ev.profile.start)*1e-6)) return high_d.get()[:size] %time raw = simple_peak_count(image, window=5, threshold=6) x=raw%image.shape[-1] y=raw//image.shape[-1] ax.plot(x,y,".y") # Work on scan from math import log2 n = 32 ary = numpy.ones(n) ary ary1 = numpy.copy(ary) ary2 = numpy.empty_like(ary) for i in range(int(log2(n))): start = 1<<i print(i,start) for j in range(start): ary2[j] = ary1[j] for j in 
range(start, n): ary2[j] = ary1[j] + ary1[j-start] ary1, ary2 = ary2, ary1 print(ary1) ary-numpy.ones(n).cumsum() (32+6)*7*4*2*4 ```
github_jupyter
``` # default_exp downloaders #export import requests import pathspec import time from pathlib import Path, PurePosixPath from tightai.lookup import Lookup from tightai.conf import CLI_ENDPOINT #hide test = False if test: CLI_ENDPOINT = "http://cli.desalsa.io:8000" #export class DownloadVersion(Lookup): path = "." dest_path = "." project_id = "" version = "" api = CLI_ENDPOINT def __init__(self, path=".", project_id=None, version=None, *args, **kwargs): api = None if "api" in kwargs: api = kwargs.pop("api") super().__init__(*args, **kwargs) if api != None: self.api = api assert project_id != None if "v" in f"{version}": version = version.replace("v", "") try: version = int(version) except: raise Exception("Version must be a number or in the format v1, v2, v3, and so on.") self.path = Path(path).resolve() self.version = version self.project_id = project_id self.endpoint = f"{self.api}/projects/{project_id}/versions/{version}/download/" def save_from_url(self, dest, url, force=True): dest_path = Path(dest) if not force: if dest_path.exists(): print(f"{dest_path} already exists") return None dest_path_parent = dest_path.resolve().parent dest_path_parent.mkdir(parents=True, exist_ok=True) # NOTE the stream=True parameter below with requests.get(url, stream=True) as r: r.raise_for_status() with open(dest_path, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): # If you have chunk encoded response uncomment if # and set chunk_size parameter to None. 
#if chunk: f.write(chunk) return dest def download(self, overwrite=False): r = self.http_get(self.endpoint) self.handle_invalid_lookup(r, expected_status_code=200) files = r.json() for fdict in files: fname = fdict['fname'] furl = fdict['url'] dest = PurePosixPath(self.path / fname) print("Downloading", fname, "to", dest) self.save_from_url(dest, furl, force=overwrite) return #hide # path_str = "/Users/jmitch/tight/my-tight-apps/dl-tests" # path_str = Path(path_str) # assert path.exists() == True # dl = DownloadVersion(path=path_str, project_id='news-categories', version=1) # dl.download(overwrite=True) ```
github_jupyter
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/Spark%20v2.7.6%20Notebooks/21.Gender_Classifier.ipynb) # 21. Gender Classifier **Gender Classifier** detects the gender of the patient in the clinical document. It can classify the documents into `Female`, `Male` and `Unknown`. -'**Classifierdl_gender_sbert**' (works with licensed `sbiobert_base_cased_mli`) It has been trained on more than four thousands clinical documents (radiology reports, pathology reports, clinical visits etc.) which were annotated internally. ## Colab Setup ``` import json from google.colab import files license_keys = files.upload() with open(list(license_keys.keys())[0]) as f: license_keys = json.load(f) %%capture for k,v in license_keys.items(): %set_env $k=$v !wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jsl_colab_setup.sh !bash jsl_colab_setup.sh -p 2.4.4 import json import os from pyspark.ml import Pipeline,PipelineModel from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl import sparknlp params = {"spark.driver.memory":"16G", "spark.kryoserializer.buffer.max":"2000M", "spark.driver.maxResultSize":"2000M"} spark = sparknlp_jsl.start(license_keys['SECRET'],params=params) print (sparknlp.version()) print (sparknlp_jsl.version()) spark # if you want to start the session with custom params as in start function above def start(secret): builder = SparkSession.builder \ .appName("Spark NLP Licensed") \ .master("local[*]") \ .config("spark.driver.memory", "16G") \ .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \ .config("spark.kryoserializer.buffer.max", "2000M") \ .config("spark.jars.packages", 
"com.johnsnowlabs.nlp:spark-nlp_2.11:"+version) \ .config("spark.jars", "https://pypi.johnsnowlabs.com/"+secret+"/spark-nlp-jsl-"+jsl_version+".jar") return builder.getOrCreate() #spark = start(secret) ``` # Gender Classifier Pipeline with **sbert** ``` document = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sbert_embedder = BertSentenceEmbeddings().pretrained("sbiobert_base_cased_mli", 'en', 'clinical/models')\ .setInputCols(["document"])\ .setOutputCol("sentence_embeddings")\ .setMaxSentenceLength(512) gender_classifier = ClassifierDLModel.pretrained( 'classifierdl_gender_sbert', 'en', 'clinical/models') \ .setInputCols(["document", "sentence_embeddings"]) \ .setOutputCol("class") gender_pred_pipeline_sbert = Pipeline(stages = [ document, sbert_embedder, gender_classifier ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model_sbert = gender_pred_pipeline_sbert.fit(empty_data) text ="""social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home.family history: shows a family history of breast cancer.""" gender_pipeline_sbert = LightPipeline(model_sbert) result = gender_pipeline_sbert.annotate(text) result['class'][0] ``` ### Sample Clinical Notes ``` text1 = '''social history: shows that does not smoke cigarettes or drink alcohol,lives in a nursing home. family history: shows a family history of breast cancer.''' result = gender_pipeline_sbert.annotate(text1) result['class'][0] text2 = '''The patient is a 48- year-old, with severe mitral stenosis diagnosed by echocardiography, moderate aortic insufficiency and moderate to severe pulmonary hypertension who is being evaluated as a part of a preoperative workup for mitral and possible aortic valve repair or replacement.''' result = gender_pipeline_sbert.annotate(text2) result['class'][0] text3 = '''HISTORY: The patient is a 57-year-old XX, who I initially saw in the office on 12/27/07, as a referral from the Tomball Breast Center. 
On 12/21/07, the patient underwent image-guided needle core biopsy of a 1.5 cm lesion at the 7 o'clock position of the left breast (inferomedial). The biopsy returned showing infiltrating ductal carcinoma high histologic grade. The patient stated that xx had recently felt and her physician had felt a palpable mass in that area prior to her breast imaging.''' result = gender_pipeline_sbert.annotate(text3) result['class'][0] text4 = '''The patient states that xx has been overweight for approximately 35 years and has tried multiple weight loss modalities in the past including Weight Watchers, NutriSystem, Jenny Craig, TOPS, cabbage diet, grape fruit diet, Slim-Fast, Richard Simmons, as well as over-the-counter measures without any long-term sustainable weight loss. At the time of presentation to the practice, xx is 5 feet 6 inches tall with a weight of 285.4 pounds and a body mass index of 46. xx has obesity-related comorbidities, which includes hypertension and hypercholesterolemia.''' result = gender_pipeline_sbert.annotate(text4) result['class'][0] text5 = '''Prostate gland showing moderately differentiated infiltrating adenocarcinoma, Gleason 3 + 2 extending to the apex involving both lobes of the prostate, mainly right.''' result = gender_pipeline_sbert.annotate(text5) result['class'][0] text6 = '''SKIN: The patient has significant subcutaneous emphysema of the upper chest and anterior neck area although he states that the subcutaneous emphysema has improved significantly since yesterday.''' result = gender_pipeline_sbert.annotate(text6) result['class'][0] text7 = '''INDICATION: The patient is a 42-year-old XX who is five days out from transanal excision of a benign anterior base lesion. xx presents today with diarrhea and bleeding. Digital exam reveals bright red blood on the finger. xx is for exam under anesthesia and control of hemorrhage at this time. 
''' result = gender_pipeline_sbert.annotate(text7) result['class'][0] text8 = '''INDICATION: ___ year old patient with complicated medical history of paraplegia and chronic indwelling foley, recurrent MDR UTIs, hx Gallbladder fossa abscess,type 2 DM, HTN, CAD, DVT s/p left AKA complicated complicated by respiratory failure requiring tracheostomy and PEG placement, right ischium osteomyelitis due to chronic pressure ulcers with acute shortness of breath...''' result = gender_pipeline_sbert.annotate(text8) result['class'][0] ```
github_jupyter
## Load Library And Data ``` # importing the library import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # to know the ecoding type import chardet with open('E:\\Recommendation System\\book.csv', 'rb') as rawdata: result = chardet.detect(rawdata.read(100000)) result ``` - The encoding standard used in the input file is ISO-8859-1 - Hence, to minimize the error while loading the input data, we are passing this encoding standard ``` # load the dataset 1 books_data = pd.read_csv('E:\\1_ExcelR_data\\0_assignmentsData\\10_Recommendation System\\book.csv', encoding='ISO-8859-1') books_data ``` ## Data Cleaning And EDA ``` # drop unnecessary column books_data.drop(['Unnamed: 0'], axis = 1, inplace=True) books_data.head() books_data.sort_values(by=['User.ID']) # data dimenssion books_data.shape # data description books_data.describe().T # dataframes types books_data.dtypes # informartion of the data books_data.info() ``` - No null values - two features are numeric - one feature is categorical ``` books_data.describe()['Book.Rating'] ``` - max rating = 10 - min rating = 1 - average rating = 7.5 ``` # find the minimum and maximum ratings print('Minimum rating is:', (books_data['Book.Rating'].min())) print('Maximum rating is:', (books_data['Book.Rating'].max())) ``` - Most of the books are getting max ratings as 8 - Minimum ratings as 1 are very few books ``` # Unique Users and ratings print("Total data \n") print("Total no of ratings :",books_data.shape[0]) print("Total No of Users :", len(np.unique(books_data['User.ID']))) print("Total No of products :", len(np.unique(books_data['Book.Rating']))) # find out the average rating for each and every books Average_ratings = pd.DataFrame(books_data.groupby('Book.Title')['Book.Rating'].mean()) Average_ratings.head(3) ``` - Average ratings received by readers as, 1) 8.0 - Jason, Madison &amp 2) 6.0 - Other Stories;Merril;1985;McClelland &amp 3) 4.0 - Repairing PC Drives &amp ## Visualize The 
Data ``` # Check the distribution of the rating plt.figure(figsize=(10, 5)) sns.countplot("Book.Rating", data = books_data) plt.title('Rating distrubutions', fontsize = 20) plt.xlabel("Book ratings", fontsize = 15) plt.ylabel("Total counts", fontsize = 15) plt.show() ``` ## Building The Recommender ``` # make pivot table book_users = books_data.pivot_table( index='User.ID', columns = books_data['Book.Title'], values='Book.Rating') book_users # find correlation between "10 Commandments Of Dating" and other books book_read = book_users["10 Commandments Of Dating"] similarity_with_other_books = book_users.corrwith(book_read) similarity_with_other_books = similarity_with_other_books.sort_values(ascending=False) similarity_with_other_books.head(10) # imputer NaN with 0 book_users.fillna(0, inplace = True) book_users # collecting unique user id book_users.index = books_data['User.ID'].unique() book_users.head() ``` ## Computation with Cosine Distance ``` # calculating Cosine Similarities between Users from sklearn.metrics import pairwise_distances from scipy.spatial.distance import cosine, correlation # Cosine similarities values (using distance matrics) user_sim = 1 - pairwise_distances(book_users.values, metric = 'cosine') user_sim # store the result (Cosine Similarities values) in a dataframe user_similarity_df = pd.DataFrame(user_sim) user_similarity_df # set the index and columns to userId user_similarity_df.index = books_data['User.ID'].unique() user_similarity_df.columns = books_data['User.ID'].unique() books_data user_similarity_df.iloc[0:7, 0:7] np.fill_diagonal(user_sim, 0) user_similarity_df.iloc[0:7, 0:7] ``` ## Most Similarity ``` # Most similar readers user_similarity_df.idxmax(axis = 1)[0:20] # find out book read by two users 276780 and 276726 books_data[(books_data['User.ID'] == 276780) | (books_data['User.ID'] == 276726)] # user 276780 books user_276780 = books_data[books_data['User.ID'] == 276780] user_276780 # user 276726 books user_276726 = 
books_data[books_data['User.ID'] == 276726] user_276726 ``` ## Recommendations ``` # merging two user book data into single one pd.merge(user_276780, user_276726, on = 'Book.Title', how = 'outer') ``` - User __276780__ read two books titled __'Wild Animus'__ and __'Airframe'__, which are rated as __7.0__ - User __276726__ read only one book titled __'Classical Mythology'__, which is rated as __5.0__ - So based on ratings given by readers, _the book 'Classical Mythology' is recommended to User 276780 and the books 'Wild Animus' and 'Airframe' are recommended to User 276726_
github_jupyter
## Eng+Wales well-mixed example model This is the inference notebook with increased inference window. There are various model variants as encoded by `expt_params_local` and `model_local`, which are shared by the notebooks in a given directory. Outputs of this notebook: (same as `inf` notebook with added `tWin` label in filename) NOTE carefully : `Im` compartment is cumulative deaths, this is called `D` elsewhere ### Start notebook (the following line is for efficient parallel processing) ``` %env OMP_NUM_THREADS=1 %matplotlib inline import numpy as np from matplotlib import pyplot as plt import pyross import time import pandas as pd import matplotlib.image as mpimg import pickle import os import pprint import scipy.stats # comment these before commit #print(pyross.__file__) #print(os.getcwd()) from ew_fns import * import expt_params_local import model_local ``` ### switches etc ``` verboseMod=False ## print ancillary info about the model? (would usually be False, for brevity) ## Calculate things, or load from files ? doInf = False ## do inference, or load it ? doHes = True ## Hessian may take a few minutes !! does this get removed? what to do? ## time unit is one week daysPerWeek = 7.0 ## these are params that might be varied in different expts exptParams = expt_params_local.getLocalParams() ## over-ride params for inference window exptParams['timeLast'] = 11 exptParams['forecastTime'] = 11-exptParams['timeLast'] exptParams['pikFileRoot'] += '-tWin11' pprint.pprint(exptParams) ## this is used for filename handling throughout pikFileRoot = exptParams['pikFileRoot'] ``` ### convenient settings ``` np.set_printoptions(precision=3) pltAuto = True plt.rcParams.update({'figure.autolayout': pltAuto}) plt.rcParams.update({'font.size': 14}) ``` ## LOAD MODEL ``` loadModel = model_local.loadModel(exptParams,daysPerWeek,verboseMod) ## should use a dictionary but... 
[ numCohorts, fi, N, Ni, model_spec, estimator, contactBasis, interventionFn, modParams, priorsAll, initPriorsLinMode, obsDeath, fltrDeath, simTime, deathCumulativeDat ] = loadModel ``` ### Inspect most likely trajectory for model with prior mean params ``` x0_lin = estimator.get_mean_inits(initPriorsLinMode, obsDeath[0], fltrDeath) guessTraj = estimator.integrate( x0_lin, exptParams['timeZero'], simTime, simTime+1) ## plots yesPlot = model_spec['classes'].copy() yesPlot.remove('S') plt.yscale('log') for lab in yesPlot : indClass = model_spec['classes'].index(lab) totClass = np.sum(guessTraj[:,indClass*numCohorts:(indClass+1)*numCohorts],axis=1) plt.plot( N * totClass,'-',lw=3,label=lab) plt.plot(N*np.sum(obsDeath,axis=1),'X',label='data') plt.legend(fontsize=14,bbox_to_anchor=(1, 1.0)) plt.xlabel('time in weeks') plt.ylabel('class population') plt.show() ; plt.close() indClass = model_spec['classes'].index('Im') plt.yscale('log') for coh in range(numCohorts): plt.plot( N*guessTraj[:,coh+indClass*numCohorts],label='m{c:d}'.format(c=coh) ) plt.xlabel('time in weeks') plt.ylabel('cumul deaths by age cohort') plt.legend(fontsize=8,bbox_to_anchor=(1, 1.0)) plt.show() ; plt.close() ``` ## INFERENCE parameter count * 32 for age-dependent Ai and Af (or beta and Af) * 2 (step-like) or 3 (NPI-with-easing) for lockdown time and width (+easing param) * 1 for projection of initial condition along mode * 5 for initial condition in oldest cohort * 5 for the gammas * 1 for beta in late stage total: 46 (step-like) or 47 (with-easing) The following computation with CMA-ES takes some minutes depending on compute power, it should use multiple CPUs efficiently, if available. 
The result will vary (slightly) according to the random seed, can be controlled by passing `cma_random_seed` to `latent_infer` ``` def runInf() : infResult = estimator.latent_infer(obsDeath, fltrDeath, simTime, priorsAll, initPriorsLinMode, generator=contactBasis, intervention_fun=interventionFn, tangent=False, verbose=True, enable_global=True, enable_local =True, **exptParams['infOptions'], ) return infResult if doInf: ## do the computation elapsedInf = time.time() infResult = runInf() elapsedInf = time.time() - elapsedInf print('** elapsed time',elapsedInf/60.0,'mins') # save the answer opFile = pikFileRoot + "-inf.pik" print('opf',opFile) with open(opFile, 'wb') as f: pickle.dump([infResult,elapsedInf],f) else: ## load a saved computation print(' Load data') # here we load the data # (this may be the file that we just saved, it is deliberately outside the if: else:) ipFile = pikFileRoot + "-inf.pik" print('ipf',ipFile) with open(ipFile, 'rb') as f: [infResult,elapsedInf] = pickle.load(f) ``` #### unpack results ``` epiParamsMAP = infResult['params_dict'] conParamsMAP = infResult['control_params_dict'] x0_MAP = infResult['x0'] CM_MAP = contactBasis.intervention_custom_temporal( interventionFn, **conParamsMAP) logPinf = -estimator.minus_logp_red(epiParamsMAP, x0_MAP, obsDeath, fltrDeath, simTime, CM_MAP, tangent=False) print('** measuredLikelihood',logPinf) print('** logPosterior ',infResult['log_posterior']) print('** logLikelihood',infResult['log_likelihood']) ``` #### MAP dominant trajectory ``` estimator.set_params(epiParamsMAP) estimator.set_contact_matrix(CM_MAP) trajMAP = estimator.integrate( x0_MAP, exptParams['timeZero'], simTime, simTime+1) yesPlot = model_spec['classes'].copy() yesPlot.remove('S') plt.yscale('log') for lab in yesPlot : indClass = model_spec['classes'].index(lab) totClass = np.sum(trajMAP[:,indClass*numCohorts:(indClass+1)*numCohorts],axis=1) plt.plot( N * totClass,'-',lw=3,label=lab) plt.plot(N*np.sum(obsDeath,axis=1),'X',label='data') 
plt.xlabel('time in weeks') plt.ylabel('class population') plt.legend(fontsize=14,bbox_to_anchor=(1, 1.0)) plt.show() ; plt.close() fig,axs = plt.subplots(1,2,figsize=(10,4.5)) cohRanges = [ [x,x+4] for x in range(0,75,5) ] #print(cohRanges) cohLabs = ["{l:d}-{u:d}".format(l=low,u=up) for [low,up] in cohRanges ] cohLabs.append("75+") ax = axs[0] ax.set_title('MAP (average dynamics)') mSize = 3 minY = 0.12 maxY = 1.0 indClass = model_spec['classes'].index('Im') ax.set_yscale('log') ax.set_ylabel('cumulative M (by cohort)') ax.set_xlabel('time/weeks') for coh in reversed(list(range(numCohorts))) : ax.plot( N*trajMAP[:,coh+indClass*numCohorts],'o-',label=cohLabs[coh],ms=mSize ) maxY = np.maximum( maxY, np.max(N*trajMAP[:,coh+indClass*numCohorts])) #ax.legend(fontsize=8,bbox_to_anchor=(1, 1.0)) maxY *= 1.6 ax.set_ylim(bottom=minY,top=maxY) #plt.show() ; plt.close() ax = axs[1] ax.set_title('data') ax.set_xlabel('time/weeks') indClass = model_spec['classes'].index('Im') ax.set_yscale('log') for coh in reversed(list(range(numCohorts))) : ax.plot( N*obsDeath[:,coh],'o-',label=cohLabs[coh],ms=mSize ) ## keep the same as other panel ax.set_ylim(bottom=minY,top=maxY) ax.legend(fontsize=10,bbox_to_anchor=(1, 1.0)) #plt.show() ; plt.close() #plt.savefig('ageMAPandData.png') plt.show(fig) ``` #### sanity check : plot the prior and inf value for one or two params ``` (likFun,priFun,dim) = pyross.evidence.latent_get_parameters(estimator, obsDeath, fltrDeath, simTime, priorsAll, initPriorsLinMode, generator=contactBasis, intervention_fun=interventionFn, tangent=False, ) def showInfPrior(xLab) : fig = plt.figure(figsize=(4,4)) dimFlat = np.size(infResult['flat_params']) ## magic to work out the index of this param in flat_params jj = infResult['param_keys'].index(xLab) xInd = infResult['param_guess_range'][jj] ## get the range xVals = np.linspace( *priorsAll[xLab]['bounds'], 100 ) #print(infResult['flat_params'][xInd]) pVals = [] checkVals = [] for xx in xVals : flatP = np.zeros( 
dimFlat ) flatP[xInd] = xx pdfAll = np.exp( priFun.logpdf(flatP) ) pVals.append( pdfAll[xInd] ) #checkVals.append( scipy.stats.norm.pdf(xx,loc=0.2,scale=0.1) ) plt.plot(xVals,pVals,'-',label='prior') infVal = infResult['flat_params'][xInd] infPdf = np.exp( priFun.logpdf(infResult['flat_params']) )[xInd] plt.plot([infVal],[infPdf],'ro',label='inf') plt.xlabel(xLab) upperLim = 1.05*np.max(pVals) plt.ylim(0,upperLim) #plt.plot(xVals,checkVals) plt.legend() plt.show(fig) ; plt.close() #print('**params\n',infResult['flat_params']) #print('**logPrior\n',priFun.logpdf(infResult['flat_params'])) showInfPrior('gammaE') ``` ## Hessian matrix of log-posterior (this can take a few minutes, it does not make use of multiple cores) ``` if doHes: ## this eps amounts to a perturbation of approx 1% on each param ## (1/4) power of machine epsilon is standard for second deriv xx = infResult['flat_params'] eps = 100 * xx*( np.spacing(xx)/xx )**(0.25) #print('**params\n',infResult['flat_params']) #print('** rel eps\n',eps/infResult['flat_params']) CM_MAP = contactBasis.intervention_custom_temporal( interventionFn, **conParamsMAP) estimator.set_params(epiParamsMAP) estimator.set_contact_matrix(CM_MAP) start = time.time() hessian = estimator.latent_hessian(obs=obsDeath, fltr=fltrDeath, Tf=simTime, generator=contactBasis, infer_result=infResult, intervention_fun=interventionFn, eps=eps, tangent=False, fd_method="central", inter_steps=0) end = time.time() print('time',(end-start)/60,'mins') opFile = pikFileRoot + "-hess.npy" print('opf',opFile) with open(opFile, 'wb') as f: np.save(f,hessian) else : print('Load hessian') # reload in all cases (even if we just saved it) ipFile = pikFileRoot + "-hess.npy" try: print('ipf',ipFile) with open(ipFile, 'rb') as f: hessian = np.load(f) except (OSError, IOError) : print('... 
error loading hessian') hessian = None #print(hessian) print("** param vals") print(infResult['flat_params'],'\n') if np.all(hessian) != None : print("** naive uncertainty v1 : reciprocal sqrt diagonal elements (x2)") print( 2/np.sqrt(np.diagonal(hessian)) ,'\n') print("** naive uncertainty v2 : sqrt diagonal elements of inverse (x2)") print( 2*np.sqrt(np.diagonal(np.linalg.inv(hessian))) ,'\n') ```
github_jupyter
``` #Use this command to run it on floydhub: floyd run --gpu --env tensorflow-1.4 --data emilwallner/datasets/imagetocode/2:data --data emilwallner/datasets/html_models/1:weights --mode jupyter from os import listdir from numpy import array from keras.preprocessing.text import Tokenizer, one_hot from keras.preprocessing.sequence import pad_sequences from keras.models import Model from keras.utils import to_categorical from keras.layers import Embedding, TimeDistributed, RepeatVector, LSTM, concatenate , Input, Reshape, Dense, Flatten from keras.preprocessing.image import array_to_img, img_to_array, load_img from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input import numpy as np # Load the images and preprocess them for inception-resnet images = [] all_filenames = listdir('resources/images/') all_filenames.sort() for filename in all_filenames: images.append(img_to_array(load_img('resources/images/'+filename, target_size=(299, 299)))) images = np.array(images, dtype=float) images = preprocess_input(images) # Run the images through inception-resnet and extract the features without the classification layer IR2 = InceptionResNetV2(weights=None, include_top=False, pooling='avg') IR2.load_weights('/data/models/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5') features = IR2.predict(images) # We will cap each input sequence to 100 tokens max_caption_len = 100 # Initialize the function that will create our vocabulary tokenizer = Tokenizer(filters='', split=" ", lower=False) # Read a document and return a string def load_doc(filename): file = open(filename, 'r') text = file.read() file.close() return text # Load all the HTML files X = [] all_filenames = listdir('resources/html/') all_filenames.sort() for filename in all_filenames: X.append(load_doc('resources/html/'+filename)) # Create the vocabulary from the html files tokenizer.fit_on_texts(X) # Add +1 to leave space for empty words vocab_size = len(tokenizer.word_index) + 
1 # Translate each word in text file to the matching vocabulary index sequences = tokenizer.texts_to_sequences(X) # The longest HTML file max_length = max(len(s) for s in sequences) # Intialize our final input to the model X, y, image_data = list(), list(), list() for img_no, seq in enumerate(sequences): for i in range(1, len(seq)): # Add the entire sequence to the input and only keep the next word for the output in_seq, out_seq = seq[:i], seq[i] # If the sentence is shorter than max_length, fill it up with empty words in_seq = pad_sequences([in_seq], maxlen=max_length)[0] # Map the output to one-hot encoding out_seq = to_categorical([out_seq], num_classes=vocab_size)[0] # Add and image corresponding to the HTML file image_data.append(features[img_no]) # Cut the input sentence to 100 tokens, and add it to the input data X.append(in_seq[-100:]) y.append(out_seq) X, y, image_data = np.array(X), np.array(y), np.array(image_data) # Create the encoder image_features = Input(shape=(1536,)) image_flat = Dense(128, activation='relu')(image_features) ir2_out = RepeatVector(max_caption_len)(image_flat) # Create the decoder language_input = Input(shape=(max_caption_len,)) language_model = Embedding(vocab_size, 200, input_length=max_caption_len)(language_input) language_model = LSTM(256, return_sequences=True)(language_model) language_model = LSTM(256, return_sequences=True)(language_model) language_model = TimeDistributed(Dense(128, activation='relu'))(language_model) # Create the decoder decoder = concatenate([ir2_out, language_model]) decoder = LSTM(512, return_sequences=True)(decoder) decoder = LSTM(512, return_sequences=False)(decoder) decoder_output = Dense(vocab_size, activation='softmax')(decoder) # Compile the model model = Model(inputs=[image_features, language_input], outputs=decoder_output) #model.compile(loss='categorical_crossentropy', optimizer='rmsprop') model.load_weights("/weights/org-weights-epoch-0900---loss-0.0000.hdf5") # Train the neural network 
#model.fit([image_data, X], y, batch_size=64, shuffle=False, epochs=2) # map an integer to a word def word_for_id(integer, tokenizer): for word, index in tokenizer.word_index.items(): if index == integer: return word return None # generate a description for an image def generate_desc(model, tokenizer, photo, max_length): # seed the generation process in_text = 'START' # iterate over the whole length of the sequence for i in range(900): # integer encode input sequence sequence = tokenizer.texts_to_sequences([in_text])[0][-100:] # pad input sequence = pad_sequences([sequence], maxlen=max_length) # predict next word yhat = model.predict([photo,sequence], verbose=0) # convert probability to integer yhat = np.argmax(yhat) # map integer to word word = word_for_id(yhat, tokenizer) # stop if we cannot map the word if word is None: break # append as input for generating the next word in_text += ' ' + word # Print the prediction print(' ' + word, end='') # stop if we predict the end of the sequence if word == 'END': break return # Load and image, preprocess it for IR2, extract features and generate the HTML test_image = img_to_array(load_img('resources/images/86.jpg', target_size=(299, 299))) test_image = np.array(test_image, dtype=float) test_image = preprocess_input(test_image) test_features = IR2.predict(np.array([test_image])) generate_desc(model, tokenizer, np.array(test_features), 100) ```
github_jupyter
# import required library ``` # Import numpy, pandas for data manipulation import numpy as np import pandas as pd # Import matplotlib, seaborn for visualization import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') # Import the data weather_data = pd.read_csv('weather.csv') weather_data.head() rain_df = weather_data[['Date','Rainfall']] rain_df.head() rain_df.shape rain_df.info() ``` **Using 50 values** ``` rain_df = rain_df.loc[:49] rain_df.head() rain_df.shape # Convert the time column into datetime rain_df['Date'] = pd.to_datetime(rain_df['Date']) rain_df['Date'].head() rain_df.info() # fill the empty row rain_df = rain_df.fillna(rain_df['Rainfall'].mean()) rain_df.head() ``` ### Dataset Explanation ``` rain_df.describe() # Output the maximum and minimum rain date print(rain_df.loc[rain_df["Rainfall"] == rain_df["Rainfall"].max()]) print(rain_df.loc[rain_df["Rainfall"] == rain_df["Rainfall"].min()]) # Reset the index rain_df.set_index("Date", inplace=True) ``` ### Data Visualization ``` # Plot the daily temperature change plt.figure(figsize=(16,10), dpi=100) plt.plot(rain_df.index, rain_df.Rainfall, color='tab:red') plt.gca().set(title="Daily Rain", xlabel='Date', ylabel="rain value") plt.show() # Apply the Moving Average function by a subset of size 10 days. rain_df_mean = rain_df.Rainfall.rolling(window=10).mean() rain_df_mean.plot(figsize=(16,10)) plt.show() from statsmodels.tsa.seasonal import seasonal_decompose # Additive Decomposition result_add = seasonal_decompose(rain_df.Rainfall, model='additive', extrapolate_trend=0) # Plot plt.rcParams.update({'figure.figsize': (10,10)}) result_add.plot().suptitle('Additive Decomposition', fontsize=22) plt.show() ``` ### Baseline Model ``` # Shift the current rain to the next day. 
predicted_df = rain_df["Rainfall"].to_frame().shift(1).rename(columns = {"Rainfall": "rain_pred" }) actual_df = rain_df["Rainfall"].to_frame().rename(columns = {"Rainfall": "rain_actual" }) # Concatenate the actual and predicted rain one_step_df = pd.concat([actual_df,predicted_df],axis=1) # Select from the second row, because there is no prediction for today due to shifting. one_step_df = one_step_df[1:] one_step_df.head(10) ``` > Here you can see that we have two columns: one is our **actual rain** column and the other is our **predicted rain** column, which we will use in the next model. We can validate how well our model does by looking at the Root Mean Squared Error (RMSE) between the predicted and actual rain. ``` from sklearn.metrics import mean_squared_error as MSE from math import sqrt # Calculate the RMSE rain_pred_err = MSE(one_step_df.rain_actual, one_step_df.rain_pred, squared=False) print("The RMSE is",rain_pred_err) ``` > Our RMSE value is 4.002, which is around 4 — pretty good for this model. ## Using SARIMA model ### Parameter Selection #### Grid Search We are going to apply one of the most commonly used methods for time-series forecasting, known as SARIMA, which stands for Seasonal Autoregressive Integrated Moving Average. SARIMA models are denoted with the notation SARIMA(p,d,q)(P,D,Q,s). These parameters account for seasonality, trend, and noise in the data. We will use a “grid search” to iteratively explore different combinations of parameters. For each combination of parameters, we fit a new seasonal SARIMA model with the SARIMAX() function from the statsmodels module and assess its overall quality.
``` import itertools # Define the p, d and q parameters to take any value between 0 and 2 p = d = q = range(0, 2) # Generate all different combinations of p, q and q triplets pdq = list(itertools.product(p, d, q)) # Generate all different combinations of seasonal p, q and q triplets seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))] print('Examples of parameter combinations for Seasonal ARIMA...') print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1])) print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2])) print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3])) print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4])) for param in pdq: for param_seasonal in seasonal_pdq: try: mod = sm.tsa.statespace.SARIMAX(one_step_df.rain_actual, order=param, seasonal_order=param_seasonal, enforce_stationarity=False, enforce_invertibility=False) results = mod.fit() print('SARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic)) except: continue ``` ### Fitting the Model ``` import warnings warnings.filterwarnings("ignore") # specify to ignore warning messages # Import the statsmodels library for using SARIMAX model import statsmodels.api as sm # Fit the SARIMAX model using optimal parameters mod = sm.tsa.statespace.SARIMAX(one_step_df.rain_actual, order=(1,1,1), seasonal_order=(1,1,1,12), enforce_stationarity=False, enforce_invertibility=False) results = mod.fit() results.summary() ``` **Predictions** ``` pred = results.predict(start=0,end=49)[1:] pred pred = results.get_prediction(start=0,end = 49, dynamic=False) pred_ci = pred.conf_int() pred_ci.head() print(pred) ax = one_step_df.rain_actual.plot(label='observed',figsize=(16,10)) ax.set_xlabel('Date') ax.set_ylabel('value') plt.ylim([0,2.0]) plt.legend() plt.show() ``` ### Forecast Diagnostic It is also useful to quantify the accuracy of our forecasts. 
We will use the MSE (Mean Squared Error), in which for each predicted value, we compute its distance to the true value and square the result ``` y_forecasted = pred.predicted_mean[:49] y_truth = one_step_df.rain_actual print(y_forecasted.shape) print(y_truth.shape) # Compute the mean square error mse = MSE(y_truth, y_forecasted, squared=True) print('The Mean Squared Error of our forecasts is {}'.format(round(mse, 2))) ``` Amazziingggg! Our forecast model forecasts the rain with only an error of 25.85. In the weather forecast field, the prediction error of 2.19 degrees seems promising and sufficient, as there are many other factors that contribute to the change in rain, including but not limited to the wind speed, the air pressure, etc. ### Validating the Dynamic Forecast In this case, we only use information from the time series up to a certain point, and after that, forecasts are generated using values from previous forecasted time points. ``` pred_dynamic = results.get_prediction(start=0,end = 49, dynamic=True, full_results=True) pred_dynamic_ci = pred_dynamic.conf_int() pred_dynamic_ci.head() ``` Once again, we plot the real and forecasted values of the average daily rain to assess how well we did: ``` ax = one_step_df.rain_actual.plot(label='observed', figsize=(15, 11)) pred_dynamic.predicted_mean.plot(label='Dynamic Forecast', ax=ax) ax.fill_between(pred_dynamic_ci.index, pred_dynamic_ci.iloc[:, 0], pred_dynamic_ci.iloc[:, 1], color='k', alpha=.25) ax.set_xlabel('Date') ax.set_ylabel('Temperature (in Celsius)') plt.ylim([0,2.0]) plt.legend() plt.show() ``` > In this case, the model seems to predict the rain inaccurately, with major fluctuations between the true value and the predicted value. 
### Forecast Diagnostic ``` # Extract the predicted and true values of our time series y_forecasted = pred_dynamic.predicted_mean[:49] y_truth = one_step_df.rain_actual # Compute the mean square error mse = sqrt(MSE(y_truth, y_forecasted).mean()) print('The Root Mean Squared Error of our forecasts is {}'.format(round(mse, 2))) ``` The **predicted** values obtained from the dynamic forecasts yield an MSE of 3.68. This is significantly higher than the one-step ahead, which is to be expected given that we are relying on less historical data from the time series. # Conclusion I described how to implement a seasonal SARIMA model in Python. I made extensive use of the pandas and statsmodels libraries and showed how to run model diagnostics, as well as how to produce forecasts of the Rain. Recall that in the assumption I made in the section 2.2 Baseline Model, I could even reinforce our assumption and continue our belief that the rainfall today depends on the rainfall yesterday, the rainfall yesterday depends on the day before yesterday, and so on. It is the best so far to use the history up to the point that we would like to make **predictions** on. Especially it holds for weather forecasting, where the rainfall today does not change much from yesterday, and the transition to another season signaling through the rainfall should gradually occur, unless there is any disastrous factors such as storm, drought, etc.
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D2_LinearSystems/student/W2D2_Tutorial3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Tutorial 3: Combining determinism and stochasticity **Week 2, Day 2: Linear Systems** **By Neuromatch Academy** **Content Creators**: Bing Wen Brunton, Alice Schwarze, Biraj Pandey **Content Reviewers**: Norma Kuhn, John Butler, Matthew Krause, Ella Batty, Richard Gao, Michael Waskom **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> --- # Tutorial Objectives *Estimated timing of tutorial: 45 minutes* Time-dependent processes rule the world. Now that we've spent some time familiarizing ourselves with the behavior of such systems when their trajectories are (1) entirely predictable and deterministic, or (2) governed by random processes, it's time to consider that neither is sufficient to describe neuroscience. Instead, we are often faced with processes for which we know some dynamics, but there is some random aspect as well. We call these **dynamical systems with stochasticity**. This tutorial will build on our knowledge and gain some intuition for how deterministic and stochastic processes can both be a part of a dynamical system by: * Simulating random walks * Investigating the mean and variance of a Ornstein-Uhlenbeck (OU) process * Quantifying the OU process's behavior at equilibrium. 
``` # @title Tutorial slides # @markdown These are the slides for the videos in all tutorials today from IPython.display import IFrame IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/snv4m/?direct%26mode=render%26action=download%26mode=render", width=854, height=480) ``` --- # Setup ``` # Imports import numpy as np import matplotlib.pyplot as plt # @title Figure Settings import ipywidgets as widgets # interactive display %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") # @title Plotting Functions # drift-diffusion model # returns t, x def plot_random_walk_sims(sims, nsims=10): """Helper for exercise 3A""" fig = plt.figure() plt.plot(sim[:nsims, :].T) plt.xlabel('time') plt.ylabel('position x') plt.show() def plot_mean_var_by_timestep(mu, var): """Helper function for exercise 3A.2""" fig, (ah1, ah2) = plt.subplots(2) # plot mean of distribution as a function of time ah1.plot(mu) ah1.set(ylabel='mean') ah1.set_ylim([-5, 5]) # plot variance of distribution as a function of time ah2.plot(var) ah2.set(xlabel='time') ah2.set(ylabel='variance') plt.show() def plot_ddm(t, x, xinfty, lam, x0): fig = plt.figure() plt.plot(t, xinfty * (1 - lam**t) + x0 * lam**t, 'r') plt.plot(t, x, 'k.') # simulated data pts plt.xlabel('t') plt.ylabel('x') plt.legend({'deterministic solution', 'simulation'}) plt.show() def var_comparison_plot(empirical, analytical): fig = plt.figure() plt.plot(empirical, analytical, '.', markersize=15) plt.xlabel('empirical equilibrium variance') plt.ylabel('analytic equilibrium variance') plt.plot(np.arange(8), np.arange(8), 'k', label='45 deg line') plt.legend() plt.grid(True) plt.show() def plot_dynamics(x, t, lam, xinfty=0): """ Plot the dynamics """ fig = plt.figure() plt.title('$\lambda=%0.1f$' % lam, fontsize=16) x0 = x[0] plt.plot(t, xinfty + (x0 - xinfty) * lam**t, 'r', label='analytic solution') plt.plot(t, x, 'k.', 
label='simulation') # simulated data pts plt.ylim(0, x0+1) plt.xlabel('t') plt.ylabel('x') plt.legend() plt.show() ``` --- # Section 1: Random Walks ``` # @title Video 1: E. coli and Random Walks from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1LC4y1h7gD", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="VHwTBCQJjfw", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` To begin, let's first take a gander at how life sometimes wanders around aimlessly. One of the simplest and best-studied living systems that has some interesting behaviors is the _E. coli_ bacterium, which is capable of navigating odor gradients on a substrate to seek a food source. Larger life (including flies, dogs, and blindfolded humans) sometimes use the same strategies to guide their decisions. Here, we will consider what the _E. coli_ does in the absence of food odors. What's the best strategy when one does not know where to head? Why, flail around randomly, of course! The **random walk** is exactly that --- at every time step, use a random process like flipping a coin to change one's heading accordingly. Note that this process is closely related to _Brownian motion_, so you may sometimes hear that terminology used as well. Let's start with a **one-dimensional random walk**. A bacterium starts at $x=0$. 
At every time step, it flips a coin (a very small, microscopic coin of protein mintage), then heads left ($\Delta x = -1$) or right ($\Delta x = +1$) with equal probability.
Args: T (integer) : Duration of simulation in time steps N (integer) : Number of random walks mu (float) : mean of step distribution sigma (float) : standard deviation of step distribution Returns: (numpy array) : NxT array in which each row corresponds to trajectory ''' ############################################################################### ## TODO: Code the simulated random steps to take ## Hints: you can generate all the random steps in one go in an N x T matrix raise NotImplementedError('Complete random_walk_simulator_function') ############################################################################### # generate all the random steps for all steps in all simulations in one go # produces a N x T array steps = np.random.normal(..., ..., size=(..., ...)) # compute the cumulative sum of all the steps over the time axis sim = np.cumsum(steps, axis=1) return sim np.random.seed(2020) # set random seed # simulate 1000 random walks for 10000 time steps sim = random_walk_simulator(1000, 10000, mu=0, sigma=1) # take a peek at the first 10 simulations plot_random_walk_sims(sim, nsims=10) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_4265c9d0.py) *Example output:* <img alt='Solution hint' align='left' width=1120.0 height=832.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_4265c9d0_0.png> We see that the trajectories all look a little different from each other. But there are some general observations one can make: at the beginning almost all trajectories are very close to $x=0$, which is where our bacterium started. As time progresses, some trajectories move further and further away from the starting point. However, a lot of trajectories stay close to the starting point of $x=0$. 
Now let's take a look in the next cell at the distribution of bacteria positions at different points in time, analyzing all the trajectories we just generated above. ``` # @markdown Execute to visualize distribution of bateria positions fig = plt.figure() # look at the distribution of positions at different times for i, t in enumerate([1000,2500,10000]): # get mean and standard deviation of distribution at time t mu = sim[:, t-1].mean() sig2 = sim[:, t-1].std() # make a plot label mytitle = '$t=${time:d} ($\mu=${mu:.2f}, $\sigma=${var:.2f})' # plot histogram plt.hist(sim[:,t-1], color=['blue','orange','black'][i], #make sure the histograms have the same bins! bins=np.arange(-300,300,20), # make histograms a little see-through alpha=0.6, # draw second histogram behind the first one zorder=3-i, label=mytitle.format(time=t, mu=mu, var=sig2)) plt.xlabel('position x') # plot range plt.xlim([-500, 250]) # add legend plt.legend(loc=2) # add title plt.title(r'Distribution of trajectory positions at time $t$') ``` At the beginning of the simulation, the distribution of positions is sharply peaked about $0$. As time progresses, the distribution becomes wider but its center stays closer to $0$. In other words, the mean of the distribution is independent of time, but the variance and standard deviation of the distribution scale with time. Such a process is called a **diffusive process**. ## Coding Exercise 1B: Random walk mean & variance Compute and then plot the mean and variance of our bacterium's random walk as a function of time. 
``` # Simulate random walks np.random.seed(2020) # set random seed sim = random_walk_simulator(5000, 1000, mu=0, sigma=1) ############################################################################## # TODO: Insert your code here to compute the mean and variance of trajectory positions # at every time point: raise NotImplementedError("Student exercise: need to compute mean and variance") ############################################################################## # Compute mean mu = ... # Compute variance var = ... # Visualize plot_mean_var_by_timestep(mu, var) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_796a6346.py) *Example output:* <img alt='Solution hint' align='left' width=1120.0 height=832.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_796a6346_0.png> The expected value of $x$ stays close to 0, even for random walks of very long time. Cool! The variance, on the other hand, clearly increases with time. In fact, the variance seems to increase linearly with time! ## Interactive Demo 1: Influence of Parameter Choice How do the parameters $\mu$ and $\sigma$ of the Gaussian distribution from which we choose the steps affect the mean and variance of the bacterium's random walk? ``` #@title #@markdown Make sure you execute this cell to enable the widget! 
@widgets.interact def plot_gaussian(mean=(-0.5, 0.5, .02), std=(.5, 10, .5)): sim = random_walk_simulator(5000, 1000, mu=mean, sigma=std) # compute the mean and variance of trajectory positions at every time point mu = np.mean(sim, axis=0) var = np.var(sim, axis=0) # make a figure fig, (ah1, ah2) = plt.subplots(2) # plot mean of distribution as a function of time ah1.plot(mu) ah1.set(ylabel='mean') # plot variance of distribution as a function of time ah2.plot(var) ah2.set(xlabel='time') ah2.set(ylabel='variance') ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_55aa7188.py) --- # Section 2: The Ornstein-Uhlenbeck (OU) process *Estimated timing to here from start of tutorial: 14 min* ``` # @title Video 2: Combining Deterministic & Stochastic Processes from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV1o5411Y7N2", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="pDNfs5p38fI", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` The random walk process we just explored is diffusive, and the distribution of possible trajectories _spreads_, taking on increasing variance with time. Even so, at least in one dimension, the mean remains close to the initial value (in the example above, 0). 
Our goal is now to build on this model to construct a **drift-diffusion** model (DDM). DDM is a popular model for memory, which as we all know, is often an exercise in hanging on to a value imperfectly. Decision-making and memory will be the topic for tomorrow, so here we build the mathematical foundations and develop some intuition for how such systems behave! To build such a model, let's combine the random walk model with the first differential equations we explored in Tutorial 1 earlier. Although those models had been written in continuous time as $\dot{x} = a x$, here let's consider the discrete version of the same system and write: $x_{k+1} = \lambda x_k$, whose solution can be written as $x_k = x_0 \lambda^k$, where $x_0$ is the value of $x$ at time $t=0$. Now, let's simulate and plot the solution of the discrete version of our first differential equation from Tutorial 1 below. **Run the code below.** ``` # parameters lam = 0.9 T = 100 # total Time duration in steps x0 = 4. # initial condition of x at time 0 # initiatialize variables t = np.arange(0, T, 1.) x = np.zeros_like(t) x[0] = x0 # Step through in time for k in range(len(t)-1): x[k+1] = lam * x[k] # plot x as it evolves in time plot_dynamics(x, t, lam) ``` Notice that this process decays towards position $x=0$. We can make it decay towards any position by adding another parameter $x_\infty$. The rate of decay is proportional to the difference between $x$ and $x_\infty$. Our new system is $x_{k+1} = x_\infty + \lambda(x_k - x_{\infty})$ We have to modify our analytic solution slightly to take this into account: $x_k = x_\infty(1 - \lambda^k) + x_0 \lambda^k$. Let's simulate and plot the dynamics of this process below. Hopefully, we see that it start at $x_0$ and decay towards $x_{\infty}.$ ``` # parameters lam = 0.9 # decay rate T = 100 # total Time duration in steps x0 = 4. # initial condition of x at time 0 xinfty = 1. 
# x drifts towards this value in long time # initiatialize variables t = np.arange(0, T, 1.) x = np.zeros_like(t) x[0] = x0 # Step through in time for k in range(len(t)-1): x[k+1] = xinfty + lam * (x[k] - xinfty) # plot x as it evolves in time plot_dynamics(x, t, lam, xinfty) ``` Now we are ready to take this basic, deterministic difference equation and add a diffusion process on top of it! Fun times in Python land. As a point of terminology: this type of process is commonly known as a **drift-diffusion model** or **Ornstein-Uhlenbeck (OU) process**. The model is a combination of a _drift_ term toward $x_{\infty}$ and a _diffusion_ term that walks randomly. You may sometimes see them written as continuous stochastic differential equations, but here we are doing the discrete version to maintain continuity in the tutorial. The discrete version of our OU process has the following form: $x_{k+1} = x_\infty + \lambda(x_k - x_{\infty}) + \sigma \eta$ where $\eta$ is sampled from a standard normal distribution ($\mu=0, \sigma=1$). ## Coding Exercise 2: Drift-diffusion model Modify the code below so that each step through time has a _deterministic_ part (_hint_: exactly like the code above) plus a _random, diffusive_ part that is drawn from from a normal distribution with standard deviation of $\sigma$ (sig in the code). It will plot the dynamics of this process. ``` def simulate_ddm(lam, sig, x0, xinfty, T): """ Simulate the drift-diffusion model with given parameters and initial condition. Args: lam (scalar): decay rate sig (scalar): standard deviation of normal distribution x0 (scalar): initial condition (x at time 0) xinfty (scalar): drift towards convergence in the limit T (scalar): total duration of the simulation (in steps) Returns: ndarray, ndarray: `x` for all simulation steps and the time `t` at each step """ # initiatialize variables t = np.arange(0, T, 1.) 
x = np.zeros_like(t) x[0] = x0 # Step through in time for k in range(len(t)-1): ############################################################################## ## TODO: Insert your code below then remove raise NotImplementedError("Student exercise: need to implement simulation") ############################################################################## # update x at time k+1 with a determinstic and a stochastic component # hint: the deterministic component will be like above, and # the stochastic component is drawn from a scaled normal distribution x[k+1] = ... return t, x lam = 0.9 # decay rate sig = 0.1 # standard deviation of diffusive process T = 500 # total Time duration in steps x0 = 4. # initial condition of x at time 0 xinfty = 1. # x drifts towards this value in long time # Plot x as it evolves in time np.random.seed(2020) t, x = simulate_ddm(lam, sig, x0, xinfty, T) plot_ddm(t, x, xinfty, lam, x0) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_c67c12d7.py) *Example output:* <img alt='Solution hint' align='left' width=1120.0 height=832.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_c67c12d7_0.png> ## Think! 2: Drift-Diffusion Simulation Observations Describe the behavior of your simulation by making some observations. How does it compare to the deterministic solution? How does it behave in the beginning of the stimulation? At the end? 
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_301f6f83.py) --- # Section 3: Variance of the OU process *Estimated timing to here from start of tutorial: 35 min* ``` # @title Video 3: Balance of Variances from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="BV15f4y1R7PU", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="49A-3kftau0", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) ``` As we can see, the **mean** of the process follows the solution to the deterministic part of the governing equation. So far, so good! But what about the **variance**? Unlike the random walk, because there's a decay process that "pulls" $x$ back towards $x_\infty$, the variance does not grow without bound with large $t$. Instead, when it gets far from $x_\infty$, the position of $x$ is restored, until an equilibrium is reached. The equilibrium variance for our drift-diffusion system is Var $= \frac{\sigma^2}{1 - \lambda^2}$. Notice that the value of this equilibrium variance depends on $\lambda$ and $\sigma$. It does not depend on $x_0$ and $x_\infty$. To convince ourselves that things are behaving sensibly, let's compare the empirical variances of the equilibrium solution to the OU equations with the expected formula. 
## Coding Exercise 3: Computing the variances empirically Write code to compute the analytical variance: Var $= \frac{\sigma^2}{1 - \lambda^2}$, and compare against the empirical variances (which is already provided for you using the helper function). You should see that they should be about equal to each other and lie close to the 45 degree ($y=x$) line. ``` def ddm(T, x0, xinfty, lam, sig): t = np.arange(0, T, 1.) x = np.zeros_like(t) x[0] = x0 for k in range(len(t)-1): x[k+1] = xinfty + lam * (x[k] - xinfty) + sig * np.random.standard_normal(size=1) return t, x # computes equilibrium variance of ddm # returns variance def ddm_eq_var(T, x0, xinfty, lam, sig): t, x = ddm(T, x0, xinfty, lam, sig) # returns variance of the second half of the simulation # this is a hack: assumes system has settled by second half return x[-round(T/2):].var() np.random.seed(2020) # set random seed # sweep through values for lambda lambdas = np.arange(0.05, 0.95, 0.01) empirical_variances = np.zeros_like(lambdas) analytical_variances = np.zeros_like(lambdas) sig = 0.87 # compute empirical equilibrium variance for i, lam in enumerate(lambdas): empirical_variances[i] = ddm_eq_var(5000, x0, xinfty, lambdas[i], sig) ############################################################################## ## Insert your code below to calculate the analytical variances raise NotImplementedError("Student exercise: need to compute variances") ############################################################################## # Hint: you can also do this in one line outside the loop! analytical_variances = ... 
# Plot the empirical variance vs analytical variance var_comparison_plot(empirical_variances, analytical_variances) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial3_Solution_b972f241.py) *Example output:* <img alt='Solution hint' align='left' width=1120.0 height=832.0 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D2_LinearSystems/static/W2D2_Tutorial3_Solution_b972f241_0.png> --- # Summary *Estimated timing of tutorial: 45 minutes* In this tutorial, we have built and observed OU systems, which have both deterministic and stochastic parts. We see that they behave, on average, similar to our expectations from analyzing deterministic dynamical systems. Importantly, **the interplay between the deterministic and stochastic parts** serve to _balance_ the tendency of purely stochastic processes (like the random walk) to increase in variance over time. This behavior is one of the properties of OU systems that make them popular choices for modeling cognitive functions, including short-term memory and decision-making.
github_jupyter
# String ## `print()` Fungsi `print()` mencetak seluruh argumennya sebagai *string*, dipisahkan dengan spasi dan diikuti dengan sebuah *line break*: ``` name = "Budi" print("Hello World") print("Hello", 'World') print("Hello", name) ``` > Catatan: Fungsi untuk mencetak di Python 2.7 dan Python 3 berbeda. Di Python 2.7, kita tidak perlu menggunakan tanda kurung di sekitar argumennya (contoh: `print "Hello World"`). ``` print("Hello", "World") ``` Fungsi `print()` memiliki argumen opsional untuk mengontrol di mana dan bagaimana statemen yang diberikan akan dicetak. Di antaranya adalah: - `sep`, yaitu pemisah antar kata (nilai *default*-nya adalah spasi) - `end`, yaitu karakter yang akan ditambahkan di akhir statemen (nilai *default*-nya adalah `\n` (karakter *newline*)) ``` print("Hello", "World", sep="...", end="!!") print("Good", "Morning", "Everyone", sep="...", end=":)") ``` ## Mengatur format string Ada banyak metode yang dapat digunakan untuk mengatur format dan memanipulasi string. Beberapa metode tersebut akan ditunjukkan di sini. *String concatenation* adalah penggabungan dari dua *string*. Perhatikan bahwa ketika kita melakukan penggabungan, tidak ada spasi di antara kedua *string*. ``` string1 = 'World' string2 = '!' print('Hello' + string1 + string2) ``` Operator `%` digunakan untuk melakukan format pada sebuah *string*, dengan cara menyisipkan nilai yang disertakan setelahnya. *String* tersebut harus memiliki penanda yang mengidentifikasikan di mana kita harus menyisipkan nilai tersebut. 
Penanda yang sering digunakan adalah: - `%s`: string - `%d`: integer - `%f`: float - `%o`: oktal - `%x`: heksadesimal - `%e`: eksponensial ``` print("Hello %s" % string1) print("Actual Number = %d" %18) print("Float of the number = %f" %18) print("Octal equivalent of the number = %o" %18) print("Hexadecimal equivalent of the number = %x" %18) print("Exponential equivalent of the number = %e" %18) ``` Ketika kita merujuk ke lebih dari satu variabel, kita harus menggunakan tanda kurung. Nilai-nilai disisipkan sesuai dengan urutan mereka di dalam tanda kurung. ``` print("Hello %s%s The meaning of life is %d" % (string1, string2, 42)) ``` ## Metode-metode terkait string lainnya Mengalikan sebuah *string* sebuah integer akan mengembalikan sebuah *string* dengan *string* asli yang diulang-ulang sebanyak nilai integer tersebut. ``` print("Hello World! " * 5) ``` *String* dapat ditransformasikan dengan menggunakan banyak fungsi: ``` s = "hello wOrld" print(s.capitalize()) # mengubah seluruh huruf di string menjadi huruf kecil, kecuali huruf pertama yang menjadi huruf kapital print(s.upper()) # mengubah seluruh huruf di string menjadi huruf besar print(s.lower()) # mengubah seluruh huruf di string menjadi huruf kecil print('|%s|'% " lots of space ".strip()) # menghilangkan spasi di awal dan akhir string print("Hello World".replace("World", "Class")) # mengganti kata "World" dengan kata "Class" ``` Python juga menyediakan banyak fungsi yang dapat kita gunakan untuk melakukan pengecekan pada *string*. 
``` s = "Hello World" print("The length of '%s' is" %s, len(s), "characters") # len() memberikan panjang string s.startswith("Hello") and s.endswith("World") # mengecek awal dan akhir print("There are %d 'l's but only %d World in %s" % (s.count('l'), s.count('World'), s)) # menghitung huruf di sebuah string print('"el" is at index', s.find('el'), "in", s) # mencari index potongan kata "el" di kalimat "Hello World" s.find('ab') # mencari index potongan kata "ab" di kalimat "Hello World". Apabila tidak ditemukan, maka fungsi akan mengembalikan -1 ``` ## Operator untuk perbandingan string *String* dapat dibandingkan satu sama lain sesuai dengan urutan leksikal/alfabet. ``` 'abc' < 'bbc' <= 'bbc' 'abc' > 'def' ``` Kita dapat menggunakan `in` untuk mengecek apakah sebuah *string* merupakan potongan (*substring*) dari *string* lainnya. ``` "ABC" in "This is the ABC of Python" ``` ## Mengakses bagian dari string Kita dapat mengakses bagian dari *string* dengan menggunakan indeks dan kurung siku. Indeks dimulai dari 0. ``` s = '123456789' print('The first character of', s, 'is', s[0]) print('The last character of', s, 'is', s[len(s)-1]) ``` Indeks negatif dapat digunakan untuk memulai perhitungan dari belakang. ``` print('The first character of', s, 'is', s[-len(s)]) print('The last character of', s, 'is', s[-1]) ``` *Substring* bisa didapatkan dengan menggunakan `a:b` untuk menandakan karakter dari indeks `a` sampai indeks `b-1`. Perhatikan bahwa karakter terakhir (indeks `b`) tidak diikutsertakan. ``` print("First three charcters", s[0:3]) print("Next three characters", s[3:6]) ``` Indeks awal yang kosong menandakan awal *string* (sama dengan indeks 0), sementara indeks akhir yang kosong menandakan akhir *string*. ``` print("First three characters", s[:3]) print("Last three characters", s[-3:]) ```
github_jupyter
<style>div.container { width: 100% }</style> <img style="float:left; vertical-align:text-bottom;" height="65" width="172" src="../assets/holoviz-logo-unstacked.svg" /> <div style="float:right; vertical-align:text-bottom;"><h2>Tutorial 5. Interactive Pipelines</h2></div> The plots built up over the first few tutorials were all highly interactive in the web browser, with interactivity provided by Bokeh plotting tools within the plots or in some cases by HoloViews generating a Bokeh widget to select for a `groupby` over a categorical variable. However, when you are exploring a dataset, you might want to see how _any_ aspect of the data or plot changes if varied interactively. Luckily, hvPlot makes it almost trivially easy to do this, so that you can very easily explore any parameter or setting in your code. ## Panel widgets To do this, we will need a widget library, and here we will be using [Panel](https://panel.holoviz.org/) to generate Bokeh widgets under user control, just as hvPlot uses Panel to generate widgets for a `groupby` as shown previously. Let's first get ahold of a Panel widget to see how they work. Here, let's create a Panel floating-point number slider to specify an earthquake magnitude between zero and nine: ``` import panel as pn pn.extension(sizing_mode='stretch_width') mag_slider = pn.widgets.FloatSlider(name='Minimum Magnitude', start=0, end=9, value=6) mag_slider ``` The widget is a JavaScript object, but there are bidirectional connections between JS and Python that let us see and change the value of this slider using its `value` parameter: ``` mag_slider.value mag_slider.value = 7 ``` #### Exercise Try moving the slider around and rerunning the `mag_slider.value` above to access the current slider value. As you can see, you can easily get the value of any widget to use in subsequent cells, but you'd need to re-run any cell that accesses that value for it to get updated. 
# hvPlot .interactive() hvPlot provides an easy way to connect widgets directly into an expression you want to control. First, let's read in our data: ``` import numpy as np import pandas as pd import holoviews as hv import hvplot.pandas # noqa %%time df = pd.read_parquet('../data/earthquakes-projected.parq') df = df.set_index('time').tz_localize(None) ``` Now, let's do a little filtering that we might want to control with such a widget, such as selecting the highest-magnitude events: ``` from holoviews.element.tiles import WEB_MERCATOR_LIMITS df2 = df[['mag', 'depth', 'latitude', 'longitude', 'place', 'type']][df['northing'] < WEB_MERCATOR_LIMITS[1]] df2[df2['mag'] > 5].head() ``` What if instead of '5', we want the output above always to reflect the current value of `mag_slider`? We can do that by using hvPlot's `.interactive()` support, passing in a widget almost anywhere we want in a pipeline: ``` dfi = df2.interactive() dfi[dfi['mag'] > mag_slider].head() ``` Here, `.interactive` is a wrapper around your DataFrame or Xarray object that lets you provide Panel widgets almost anywhere you'd otherwise be using a number. Just as importing `hvplot.pandas` provides a `.hvplot()` method or object on your dataframe, it also provides a `.interactive` method or object that gives you a general-purpose *interactive* `Dataframe` driven by widgets. `.interactive` stores a copy of your pipeline (series of method calls or other expressions on your data) and dynamically replays the pipeline whenever that widget changes. `.interactive` supports just about any output you might want to get out of such a pipeline, such as text or numbers: ``` dfi[dfi['mag'] > mag_slider].shape ``` Or Matplotlib plots: ``` dfi[dfi['mag'] > mag_slider].plot(y='depth', kind='hist', bins=np.linspace(0, 50, 51)) ``` Each time you drag the widget, hvPlot replays the pipeline and updates the output shown. 
Of course, `.interactive` also supports `.hvplot()`, here with a new copy of a widget so that it will be independent of the other cells above: ``` mag_slider2 = pn.widgets.FloatSlider(name='Minimum magnitude', start=0, end=9, value=6) dfi[dfi['mag'] > mag_slider2].hvplot(y='depth', kind='hist', bins=np.linspace(0, 50, 51)) ``` You can see that the depth distribution varies dramatically as you vary the minimum magnitude, with the lowest magnitude events apparently only detectable at short depths. There also seems to be some artifact at depth 10, which is the largest bin regardless of the filtering for all but the largest magnitudes. ## Date widgets A `.interactive()` pipeline can contain any number of widgets, including any from the Panel [reference gallery](https://panel.holoviz.org/reference/index.html#widgets). For instance, let's make a widget to specify a date range covering the dates found in this data: ``` date = pn.widgets.DateRangeSlider(name='Date', start=df.index[0], end=df.index[-1]) date ``` Now we can access the value of this slider: ``` date.value ``` As this widget is specifying a range, this time the value is returned as a tuple. If you prefer, you can get the components of the tuple directly via the `value_start` and `value_end` parameters respectively: ``` f'Start is at {date.value_start} and the end is at {date.value_end}' ``` Once again, try specifying different ranges with the widgets and rerunning the cell above. Now let's use this widget to expand our expression to filter by date as well as magnitude: ``` mag = pn.widgets.FloatSlider(name='Minimum magnitude', start=0, end=9, value=6) filtered = dfi[ (dfi['mag'] > mag) & (dfi.index >= date.param.value_start) & (dfi.index <= date.param.value_end)] filtered.head() ``` You can now use either the magnitude or the date range (or both) to filter the data, and the output will update. 
Note that here you want to move the start date of the range slider rather than the end; otherwise, you may not see the table change because the earthquakes are displayed in date order. #### Exercise To specify the minimum earthquake magnitude, notice that we supplied the whole `mag` widget but `.interactive()` used only the `value` parameter of this widget by default. To be explicit, you may use `mag.param.value` instead if you wish. Try it! #### Exercise For readability, six columns were chosen before displaying the `DataFrame`. Have a look at `df.columns` and pick a different set of columns for display. ## .interactive() and HoloViews `.interactive()` lets you work naturally with the compositional HoloViews plots provided by `.hvplot()`. Here, let's combine such plots using the HoloViews `+` operator: ``` mag_hist = filtered.hvplot(y='mag', kind='hist', responsive=True, min_height=200) depth_hist = filtered.hvplot(y='depth', kind='hist', responsive=True, min_height=200) mag_hist + depth_hist ``` These are the same two histograms we saw earlier, but now we can filter them on data dimensions like `time` that aren't even explicitly shown in the plot, using the Panel widgets. 
## Filtering earthquakes on a map To display the earthquakes on a map, we will first create a subset of the data to make it quick to update without needing Datashader.: ``` subset_df = df[ (df.northing < WEB_MERCATOR_LIMITS[1]) & (df.mag > 4) & (df.index >= pd.Timestamp('2017-01-01')) & (df.index <= pd.Timestamp('2018-01-01'))] ``` Now we can make a new interactive `DataFrame` from this new subselection: ``` subset_dfi = subset_df.interactive(sizing_mode='stretch_width') ``` And now we can declare our widgets and use them to filter the interactive `DataFrame` as before: ``` date_subrange = pn.widgets.DateRangeSlider( name='Date', start=subset_df.index[0], end=subset_df.index[-1]) mag_subrange = pn.widgets.FloatSlider(name='Magnitude', start=3, end=9, value=3) filtered_subrange = subset_dfi[ (subset_dfi.mag > mag_subrange) & (subset_dfi.index >= date_subrange.param.value_start) & (subset_dfi.index <= date_subrange.param.value_end)] ``` Now we can plot the earthquakes on an ESRI tilesource, including the filtering widgets as follows: ``` geo = filtered_subrange.hvplot( 'easting', 'northing', color='mag', kind='points', xaxis=None, yaxis=None, responsive=True, min_height=500, tiles='ESRI') geo ``` You'll likely notice some flickering as Panel updates the display when the widgets change in value. The flickering comes because the entire plot gets recreated each time the widget is dragged. You can get finer control over such updates, but doing so requires more advanced methods covered in later tutorials, so here, we will just accept that the plot flickers. ## Terminating methods for `.interactive` The examples above all illustrate cases where you can display the output of `.interactive()` and not worry about its type, which is no longer a DataFrame or a HoloViews object, but an `Interactive` object: ``` type(geo) ``` What if you need to work with some part of the interactive pipeline, e.g. 
to feed it to some function or object that does not understand `Interactive` objects? In such a case, you can use what is called a `terminating method` on your Interactive object to get at the underlying object for you to use. For instance, let's create magnitude and depth histograms on this subset of the data as in an earlier notebook and see if we can enable linked selections on them: ``` mag_subhist = filtered_subrange.hvplot(y='mag', kind='hist', responsive=True, min_height=200) depth_subhist = filtered_subrange.hvplot(y='depth', kind='hist', responsive=True, min_height=200) combined = mag_subhist + depth_subhist combined ``` Note that this looks like a HoloViews layout with some widgets, but this object is *not* a HoloViews object. Instead it is still an `Interactive` object: ``` type(combined) ``` `link_selections` does not currently understand `Interactive` objects, and so it will raise an exception when given one. If we need a HoloViews `Layout`, e.g. for calling `link_selections`, we can build a layout from the constituent objects using the `.holoviews()` terminating method on `Interactive`: ``` layout = mag_subhist.holoviews() + depth_subhist.holoviews() layout ``` This is now a HoloViews object, so we can use it with `link_selections`: ``` print(type(layout)) ls = hv.link_selections.instance() ls(mag_subhist.holoviews()) + ls(depth_subhist.holoviews()) ``` You can use the box selection tool to see how selections compare between these plots. However, you will note that the widgets are no longer displayed. 
To address this, we can display the widgets separately using a different terminating method, namely `.widgets()`: ``` filtered_subrange.widgets() ``` For reference, the terminating methods for an `Interactive` object are: - `.holoviews()`: Give me a HoloViews object - `.panel()`: Give me a Panel ParamFunction - `.widgets()`: Give me a layout of widgets associated with this interactive object - `.layout()`: Give me the layout of the widgets and display `pn.Column(obj.widgets(), obj.panel())` where `pn.Column` will be described in the [Dashboards notebook](./06_Dashboards.ipynb). ## Conclusion Using the techniques above, you can build up a collection of plots, and other outputs with Panel widgets to control individual bits of computation and display. What if you want to collect these pieces and put them together into a coherent app or dashboard? If so, then the next tutorial will show you how to do so!
github_jupyter
# Cell Basic Filtering ## Content The purpose of this step is to get rid of cells having **obvious** issues, including the cells with low mapping rate (potentially contaminated), low final reads (empty well or lost a large amount of DNA during library prep.), or abnormal methylation fractions (failed in bisulfite conversion or contaminated). We have two principles when applying these filters: 1. **We set the cutoff based on the distribution of the whole dataset**, where we assume the input dataset is largely successful (mostly > 80-90% cells will pass QC). The cutoffs below are typical values we used in brain methylome analysis. Still, you may need to adjust cutoffs based on different data quality or sample source. 2. **The cutoff is intended to be loose.** We do not use stringent cutoffs here to prevent potential data loss. Abnormal cells may remain after basic filtering, and will likely be identified in the analysis-based filtering (see later notebooks about doublet score and outliers in clustering) ## Input - Cell metadata table that contains mapping metrics for basic QC filtering. ## Output - Filtered cell metadata table that contains only cells that passed QC. ## About Cell Mapping Metrics We usually gather many mapping metrics from each processing step, but not all of the metrics are relevant to the cell filtering. Below are the most relevant metrics that we use to filter cells. The name of these metrics might be different in your dataset. Change it according to the file you have. If you use [YAP](https://hq-1.gitbook.io/mc) to do mapping, you can find up-to-date mapping metrics documentation for [key metrics](https://hq-1.gitbook.io/mc/mapping-metrics/key-mapping-metrics) and [all metrics](https://hq-1.gitbook.io/mc/mapping-metrics/all-mapping-metrics) in YAP doc. 
## Import ``` import pandas as pd import seaborn as sns sns.set_context(context='notebook', font_scale=1.3) ``` ## Parameters ``` # change this to the path to your metadata metadata_path = '../../../data/Brain/snmC-seq2/HIP.CellMetadata.csv.gz' # Basic filtering parameters mapping_rate_cutoff = 0.5 mapping_rate_col_name = 'MappingRate' # Name may change final_reads_cutoff = 500000 final_reads_col_name = 'FinalReads' # Name may change mccc_cutoff = 0.03 mccc_col_name = 'mCCCFrac' # Name may change mch_cutoff = 0.2 mch_col_name = 'mCHFrac' # Name may change mcg_cutoff = 0.5 mcg_col_name = 'mCGFrac' # Name may change ``` ## Load metadata ``` metadata = pd.read_csv(metadata_path, index_col=0) total_cells = metadata.shape[0] print(f'Metadata of {total_cells} cells') metadata.head() ``` ## Filter by key mapping metrics ### Bismark Mapping Rate - Low mapping rate indicates potential contamination. - Usually R1 mapping rate is 8-10% higher than R2 mapping rate for snmC based technologies, but they should be highly correlated. Here I am using the combined mapping rate. If you are using the R1MappingRate or R2MappingRate, change the cutoff accordingly. - Usually there is a peak on the left, which corresponds to the empty wells. ``` _cutoff = mapping_rate_cutoff _col_name = mapping_rate_col_name # plot distribution to make sure cutoff is appropriate g = sns.displot(metadata[_col_name], binrange=(0, 1)) g.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--') mapping_rate_judge = metadata[_col_name] > _cutoff _passed_cells = mapping_rate_judge.sum() print( f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) ' f'passed the {_col_name} cutoff {_cutoff}.') ``` ### Final Reads - The cutoff may change depending on how deep the library has been sequenced. - Usually there is a peak on the left, which corresponds to the empty wells. 
- There are also some cells having a small number of reads; these wells may have lost most of the DNA during library prep. Cells having too few reads can be hard to classify, since the methylome sequencing is an untargeted whole-genome sequencing. ``` _cutoff = final_reads_cutoff _col_name = final_reads_col_name # plot distribution to make sure cutoff is appropriate g = sns.displot(metadata[_col_name], binrange=(0, 5e6)) g.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--') final_reads_judge = metadata[_col_name] > _cutoff _passed_cells = final_reads_judge.sum() print( f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) ' f'passed the {_col_name} cutoff {_cutoff}.') ``` ### mCCC / CCC - The mCCC fraction is used as the proxy of the upper bound of the non-conversion rate for cell-level QC. The methylation level at CCC sites is the lowest among all of the different 3 base-contexts (CNN), and, in fact, it is very close to the unmethylated lambda mC fraction. - However, mCCC fraction is correlated with mCH (especially in brain data), so you can see a similar shape of distribution of mCCC and mCH, but the range is different. ``` _cutoff = mccc_cutoff _col_name = mccc_col_name # plot distribution to make sure cutoff is appropriate g = sns.displot(metadata[_col_name], binrange=(0, 0.05)) g.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--') mccc_judge = metadata[_col_name] < _cutoff _passed_cells = mccc_judge.sum() print( f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) ' f'passed the {_col_name} cutoff {_cutoff}.') ``` ### mCH / CH - Usually failed cells (empty well or contaminated) tend to have abnormal methylation level as well. 
``` _cutoff = mch_cutoff _col_name = mch_col_name # plot distribution to make sure cutoff is appropriate g = sns.displot(metadata[_col_name], binrange=(0, 0.3)) g.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--') mch_judge = metadata[_col_name] < _cutoff _passed_cells = mch_judge.sum() print( f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) ' f'passed the {_col_name} cutoff {_cutoff}.') ``` ### mCG - Usually failed cells (empty well or contaminated) tend to have abormal methylation level as well. ``` _cutoff = mcg_cutoff _col_name = mcg_col_name # plot distribution to make sure cutoff is appropriate g = sns.displot(metadata[_col_name], binrange=(0.3, 1)) g.ax.plot((_cutoff, _cutoff), g.ax.get_ylim(), c='r', linestyle='--') mcg_judge = metadata[_col_name] > _cutoff _passed_cells = mcg_judge.sum() print( f'{_passed_cells} / {total_cells} cells ({_passed_cells / total_cells * 100:.1f}%) ' f'passed the {_col_name} cutoff {_cutoff}.') ``` ## Combine filters ``` judge = mapping_rate_judge & final_reads_judge & mccc_judge & mch_judge & mcg_judge passed_cells = judge.sum() print( f'{passed_cells} / {total_cells} cells ({passed_cells / total_cells * 100:.1f}%) ' f'passed all the filters.') ``` ## Sanity Test ``` try: assert (passed_cells / total_cells) > 0.6 except AssertionError as e: e.args += ( 'A large amount of the cells do not pass filter, check your cutoffs or overall dataset quality.', ) raise e try: assert passed_cells > 0 except AssertionError as e: e.args += ('No cell remained after all the filters.', ) raise e print('Feel good') ``` ## Save filtered metadata ``` metadata_filtered = metadata[judge].copy() metadata_filtered.to_csv('CellMetadata.PassQC.csv.gz') metadata_filtered.head() ```
github_jupyter
``` # i 可能的取值:0、2、4、6、len(A) from collections import Counter class Solution: def canReorderDoubled(self, A): if not A: return True a_freq = Counter(A) seen = set() for a in A: if a in seen: continue if a_freq[a] == 0: seen.add(a) continue if a_freq[a * 2] >= a_freq[a] and a * 2 not in seen: a_freq[a * 2] -= a_freq[a] elif a % 2 == 0 and a_freq[a // 2] >= a_freq[a] and a // 2 not in seen: a_freq[a // 2] -= a_freq[a] else: return False return True from collections import Counter class Solution: def canReorderDoubled(self, A): if not A: return True a_freq = Counter(A) for a in sorted(a_freq.keys(), key=abs): if a_freq[a] == 0: continue if a == 0 and a_freq[0] % 2 == 0: a_freq[0] = 0 continue if a_freq[a * 2] > 0: min_val = min(a_freq[a * 2], a_freq[a]) a_freq[a * 2] -= min_val a_freq[a] -= min_val return all(not v for v in a_freq.values()) solution = Solution() solution.canReorderDoubled([-6,2,-6,4,-3,8,3,2,-2,6,1,-3,-4,-4,-8,4]) from collections import Counter class Solution: def canReorderDoubled(self, A): a_freq = Counter(A) for n in sorted(a_freq.keys(), key=abs): double = 2 * n while a_freq[n] > 0 and a_freq[double] > 0: double = 2 * n a_freq[n] -= 1 a_freq[double] -= 1 return all(not v for v in a_freq.values()) solution = Solution() solution.canReorderDoubled([-6,2,-6,4,-3,8,3,2,-2,6,1,-3,-4,-4,-8,4]) if 0: print(2) class Solution: def canReorderDoubled(self, A: List[int]) -> bool: c = Counter(A) for n in sorted(c.keys(), key=abs): while c[n] > 0 and c[(double := 2 * n)] > 0: c[n] -= 1 c[double] -= 1 return all(not v for v in c.values()) from collections import Counter class Solution: def canReorderDoubled(self, A): if not A: return True a_freq = Counter(A) for a in sorted(a_freq.keys(), key=abs): if a_freq[a] == 0: continue if a == 0 and a_freq[0] % 2 == 0: a_freq[0] = 0 continue if a_freq[a * 2] > 0: min_val = min(a_freq[a * 2], a_freq[a]) a_freq[a * 2] -= min_val a_freq[a] -= min_val return all(not v for v in a_freq.values()) solution = Solution() 
solution.canReorderDoubled([-6,2,-6,4,-3,8,3,2,-2,6,1,-3,-4,-4,-8,4]) ```
github_jupyter
# Recommendations with IBM In this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform. You may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way assure that your code passes the project [RUBRIC](https://review.udacity.com/#!/rubrics/2322/view). **Please save regularly.** By following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations. ## Table of Contents I. [Exploratory Data Analysis](#Exploratory-Data-Analysis)<br> II. [Rank Based Recommendations](#Rank)<br> III. [User-User Based Collaborative Filtering](#User-User)<br> IV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs)<br> V. [Matrix Factorization](#Matrix-Fact)<br> VI. [Extras & Concluding](#conclusions) At the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data. 
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import project_tests as t import pickle import seaborn as sns from scipy import stats import nltk from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords %matplotlib inline nltk.download('punkt') nltk.download('wordnet') nltk.download('stopwords') nltk.download('abc') df = pd.read_csv('data/user-item-interactions.csv') df_content = pd.read_csv('data/articles_community.csv') del df['Unnamed: 0'] del df_content['Unnamed: 0'] # Show df to get an idea of the data df.head() # inspect the first row df.iloc[0]['title'] # show df_content to get an idea of the data df_content.head() print(df_content.iloc[0]) ``` ### <a class="anchor" id="Exploratory-Data-Analysis">Part I : Exploratory Data Analysis</a> Use the dictionary and cells below to provide some insight into the descriptive statistics of the data. `1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article. ``` # group by email def group_by_title(df, column = "email"): """group the user-item interactions dataframe by column. Args: df (Dataframe): a dataframe object column (string): column to group by per column. 
Returns: Dataframe: dataframe of user article interaction counts sorted in descending order """ df_title_counts = df.groupby(['email']).size().reset_index(name='counts') df_title_counts = df_title_counts.sort_values(by=['counts'], ascending=False) return df_title_counts user_article_counts = group_by_title(df) user_article_counts.head() print( "The number of articles are {} and users interacted {} times these articles" \ .format(user_article_counts.shape[0], user_article_counts["counts"].sum())) def histogram (df, column = "counts", title="Distribution of user articles interactions"): """Create a distribution of user article interactions. Args: df (Dataframe): a dataframe object column (string): column that holds article counts title (string): the title of the distribution chart Returns: figure: a matplotlib distribution figure of article counts """ sns.set(color_codes=True) plt.figure(figsize=(15,8)) sns.distplot(df[column], kde=False, hist_kws=dict(edgecolor="k", linewidth=2)) plt.xlabel(title) plt.ylabel('Frequency'); histogram(user_article_counts) # fill in the median and maximum number of user_article interactios below median_val = user_article_counts["counts"].median() # 50% of individuals interact with 3 number of articles or fewer. max_views_by_user = user_article_counts["counts"].max() # The maximum number of user-article interactions by any 1 user is 364. ``` `2.` Explore and remove duplicate articles from the **df_content** dataframe. ``` # find and explore duplicate articles df_content.info() # get duplicate articles df_content[df_content.duplicated(subset="article_id")] # remove any rows that have the same article_id - only keep the first df_content_clean = df_content.drop_duplicates(subset="article_id") assert df_content_clean.shape[0] + 5 == df_content.shape[0] ``` `3.` Use the cells below to find: **a.** The number of unique articles that have an interaction with a user. 
**b.** The number of unique articles in the dataset (whether they have any interactions or not).<br> **c.** The number of unique users in the dataset. (excluding null values) <br> **d.** The number of user-article interactions in the dataset. ``` # the number of unique articles that have an interaction with a user. df.article_id.nunique() # the number of unique articles in the dataset (whether they have any interactions or not). df_content_clean.article_id.nunique() # the number of unique users in the dataset. (excluding null values) df.email.nunique() # the number of user-article interactions in the dataset. df.shape[0] unique_articles = 714 # The number of unique articles that have at least one interaction total_articles = 1051 # The number of unique articles on the IBM platform unique_users = 5148 # The number of unique users user_article_interactions = 45993 # The number of user-article interactions ``` `4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below). ``` # most viewed article article_counts = df.groupby(['article_id']).count() article_counts['email'].max() article_counts.sort_values("email", ascending=False).iloc[0,:] most_viewed_article_id = "1429.0" # the most viewed article in the dataset as a string with one value following the decimal max_views = 937 # the most viewed article in the dataset was viewed how many times? 
## No need to change the code here - this will be helpful for later parts of the notebook # Run this cell to map the user email to a user_id column and remove the email column def email_mapper(): coded_dict = dict() cter = 1 email_encoded = [] for val in df['email']: if val not in coded_dict: coded_dict[val] = cter cter+=1 email_encoded.append(coded_dict[val]) return email_encoded email_encoded = email_mapper() del df['email'] df['user_id'] = email_encoded # show header df.head() ## If you stored all your results in the variable names above, ## you shouldn't need to change anything in this cell sol_1_dict = { '`50% of individuals have _____ or fewer interactions.`': median_val, '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions, '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user, '`The most viewed article in the dataset was viewed _____ times.`': max_views, '`The article_id of the most viewed article is ______.`': most_viewed_article_id, '`The number of unique articles that have at least 1 rating ______.`': unique_articles, '`The number of unique users in the dataset is ______`': unique_users, '`The number of unique articles on the IBM platform`': total_articles } # Test your dictionary against the solution t.sol_1_test(sol_1_dict) ``` ### <a class="anchor" id="Rank">Part II: Rank-Based Recommendations</a> Unlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with. `1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below. 
``` df.head() def get_top_articles(n, df=df): ''' INPUT: n - (int) the number of top articles to return df - (pandas dataframe) df as defined at the top of the notebook OUTPUT: top_articles - (list) A list of the top 'n' article titles ''' # Your code here top_articles = df.groupby(['article_id', 'title']).size()\ .reset_index(name='counts').sort_values('counts', ascending=False)[:n].title.tolist() return top_articles # Return the top article titles from df (not df_content) def get_top_article_ids(n, df=df): ''' INPUT: n - (int) the number of top articles to return df - (pandas dataframe) df as defined at the top of the notebook OUTPUT: top_articles - (list) A list of the top 'n' article ids ''' # Your code here top_articles = df.groupby("article_id").count()["title"].sort_values(ascending=False).index[:n].astype('str') return top_articles.tolist() # Return the top article ids print(get_top_articles(10)) print(get_top_article_ids(10)) # Test your function by returning the top 5, 10, and 20 articles top_5 = get_top_articles(5) top_10 = get_top_articles(10) top_20 = get_top_articles(20) # Test each of your three lists from above t.sol_2_test(get_top_articles) ``` ### <a class="anchor" id="User-User">Part III: User-User Based Collaborative Filtering</a> `1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns. * Each **user** should only appear in each **row** once. * Each **article** should only show up in one **column**. * **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1. * **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**. Use the tests to make sure the basic structure of your matrix matches what is expected by the solution. 
``` # create the user-article matrix with 1's and 0's def create_user_item_matrix(df): ''' INPUT: df - pandas dataframe with article_id, title, user_id columns OUTPUT: user_item - user item matrix Description: Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with an article and a 0 otherwise ''' # Fill in the function here # unstack the user-item interaction dataframe user_item = df.drop_duplicates().groupby(['user_id', 'article_id']).size().unstack() # fill missing values with 0 user_item = user_item.fillna(0) # convert int user_item = user_item.astype('int') return user_item # return the user_item matrix user_item = create_user_item_matrix(df) ## Tests: You should just need to run this cell. Don't change the code. assert user_item.shape[0] == 5149, "Oops! The number of users in the user-article matrix doesn't look right." assert user_item.shape[1] == 714, "Oops! The number of articles in the user-article matrix doesn't look right." assert user_item.sum(axis=1)[1] == 36, "Oops! The number of articles seen by user 1 doesn't look right." print("You have passed our quick tests! Please proceed!") ``` `2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users. Use the tests to test your function. 
```
def find_similar_users(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user_id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    similar_users - (list) an ordered list where the closest users (largest dot product users)
                    are listed first

    Description:
    Computes the similarity of every pair of users based on the dot product
    Returns an ordered
    '''
    # compute similarity of each user to the provided user
    # (dot product of this user's 0/1 row with every other user's row)
    user_similr = user_item.loc[user_id,:].dot(user_item.T)
    # sort by similarity, largest dot product first
    user_similr = user_similr.sort_values(ascending=False)
    # create list of just the ids
    # remove the own user's id (a user is trivially most similar to itself)
    most_similar_users = user_similr.loc[~(user_similr.index==user_id)].index.values.tolist()

    return most_similar_users # return a list of the users in order from most to least similar

# Do a spot check of your function
print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10]))
print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5]))
print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))
```

`3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user.
```
def get_article_names(article_ids, df=df):
    '''
    INPUT:
    article_ids - (list) a list of article ids
    df - (pandas dataframe) df as defined at the top of the notebook

    OUTPUT:
    article_names - (list) a list of article names associated with the list of article ids
                    (this is identified by the title column)
    '''
    # Your code here
    article_names = []
    # select articles with the same article_id and drop duplicates
    # (an article appears once per interaction, so duplicates must be removed)
    article_names = df[df['article_id'].isin(article_ids)]['title'].drop_duplicates().values.tolist()

    return article_names # Return the article names associated with list of article ids

def get_user_articles(user_id, user_item=user_item):
    '''
    INPUT:
    user_id - (int) a user id
    user_item - (pandas dataframe) matrix of users by articles:
                1's when a user has interacted with an article, 0 otherwise

    OUTPUT:
    article_ids - (list) a list of the article ids seen by the user
    article_names - (list) a list of article names associated with the list of article ids
                    (this is identified by the doc_full_name column in df_content)

    Description:
    Provides a list of the article_ids and article titles that have been seen by a user
    '''
    # Your code here
    user_idx = user_item.loc[user_id, :] #get all articles for this user id
    # columns with a 1 are the articles this user interacted with; ids returned as strings
    article_ids = user_idx[user_idx == 1].index.values.astype('str').tolist() #get articles user interacted with
    article_names = get_article_names(article_ids) # get article names

    return article_ids, article_names # return the ids and names

def user_user_recs(user_id, m=10):
    '''
    INPUT:
    user_id - (int) a user id
    m - (int) the number of recommendations you want for the user

    OUTPUT:
    recs - (list) a list of recommendations for the user

    Description:
    Loops through the users based on closeness to the input user_id
    For each user - finds articles the user hasn't seen before and provides them as recs
    Does this until m recommendations are found

    Notes:
    Users who are the same closeness are chosen arbitrarily as the 'next' user

    For the user where the number of recommended articles starts below m
    and ends exceeding m, the last items are chosen arbitrarily
    '''
    # Your code here
    most_similar_users = find_similar_users(user_id) # get most similar users
    user_article_ids = set(get_user_articles(user_id)[0]) # get article ids
    recs = []
    # create recommendations for this user
    # walk neighbors from most to least similar, collecting articles the
    # target user has not seen, until at least m candidates are gathered
    for user_neighb in most_similar_users:
        neighb_article_ids = set(get_user_articles(user_neighb)[0])
        recs += list(set(neighb_article_ids) - set(user_article_ids))
        if len(recs) > m:
            break
    recs = recs[:m]

    return recs # return your recommendations for this user_id

# Check Results
get_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1

# Test your functions here - No need to change this code - just run this cell
assert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])
assert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])
assert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])
assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])

print("If this is all you see, you passed all of our tests! Nice job!")
```

`4.` Now we are going to improve the consistency of the **user_user_recs** function from above.

* Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.

* Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.
``` def get_top_sorted_users(user_id, df=df, user_item=user_item): ''' INPUT: user_id - (int) df - (pandas dataframe) df as defined at the top of the notebook user_item - (pandas dataframe) matrix of users by articles: 1's when a user has interacted with an article, 0 otherwise OUTPUT: neighbors_df - (pandas dataframe) a dataframe with: neighbor_id - is a neighbor user_id similarity - measure of the similarity of each user to the provided user_id num_interactions - the number of articles viewed by the user - if a u Other Details - sort the neighbors_df by the similarity and then by number of interactions where highest of each is higher in the dataframe ''' # Your code here colName = ['neighbor_id', 'similarity', 'num_interactions'] # column names neighbors_df = pd.DataFrame(columns= colName) # create dataframe to hold top users # populate the dataframe for id in user_item.index.values: if id != user_id: neighbor_id = id # get user to user similarity similarity = user_item[user_item.index == user_id].dot(user_item.loc[id].T).values[0] # get number of interactions for user ot article num_interactions = user_item.loc[id].values.sum() neighbors_df.loc[neighbor_id] = [neighbor_id, similarity, num_interactions] neighbors_df['similarity'] = neighbors_df['similarity'].astype('int') neighbors_df['neighbor_id'] = neighbors_df['neighbor_id'].astype('int') neighbors_df = neighbors_df.sort_values(by = ['similarity', 'neighbor_id'], ascending = [False, True]) return neighbors_df # return the dataframe def user_user_recs_part2(user_id, m=10): ''' INPUT: user_id - (int) a user id m - (int) the number of recommendations you want for the user OUTPUT: recs - (list) a list of recommendations for the user by article id rec_names - (list) a list of recommendations for the user by article title Description: Loops through the users based on closeness to the input user_id For each user - finds articles the user hasn't seen before and provides them as recs Does this until m recommendations 
are found Notes: * Choose the users that have the most total article interactions before choosing those with fewer article interactions. * Choose articles with the articles with the most total interactions before choosing those with fewer total interactions. ''' # Your code here # get similar users neighbours = get_top_sorted_users(user_id) top_similar_users = neighbours['neighbor_id'].values.tolist() recs = [] # recommended article Id's # get articles read by the user user_article_ids = list(set(get_user_articles(user_id)[0])) for neighbour_id in top_similar_users: recs += df[df['user_id'] == neighbour_id]['article_id'].values.tolist() recs = list(set(recs)) # selecting articles not seen by User_id recs = [ x for x in recs if x not in user_article_ids] recs_df = df[df.article_id.isin(recs)][['article_id', 'title']].drop_duplicates().head(m) recs = recs_df['article_id'].values.tolist() # get ids rec_names = recs_df['title'].values.tolist() # get title return recs, rec_names # Quick spot check - don't change this code - just use it to test your functions rec_ids, rec_names = user_user_recs_part2(20, 10) print("The top 10 recommendations for user 20 are the following article ids:") print(rec_ids) print() print("The top 10 recommendations for user 20 are the following article names:") print(rec_names) ``` `5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below. 
``` ### Tests with a dictionary of results user1_most_sim = get_top_sorted_users(1).iloc[0].neighbor_id # Find the user that is most similar to user 1 user131_10th_sim = get_top_sorted_users(131).iloc[9].neighbor_id # Find the 10th most similar user to user 131 ## Dictionary Test Here sol_5_dict = { 'The user that is most similar to user 1.': user1_most_sim, 'The user that is the 10th most similar to user 131': user131_10th_sim, } t.sol_5_test(sol_5_dict) ``` `6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users. **Provide your response here.** I will choose ```user_user_recs_part2```. It's a good start to recommend articles from the most active users and make sure these articles are the most interacted articles as well. For new users, we can ask them about their preferences, then recommend top articles that are matching this preference. Once we have more data on them, we can move to matrix factorization. `7.` Using your existing functions, provide the top 10 recommended articles you would provide for the a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation. ``` new_user = '0.0' # What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles. # Provide a list of the top 10 article ids you would give to new_user_recs = get_top_article_ids(10) # Your recommendations here assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), "Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users." print("That's right! 
Nice job!") ``` ### <a class="anchor" id="Content-Recs">Part IV: Content Based Recommendations (EXTRA - NOT REQUIRED)</a> Another method we might use to make recommendations is to perform a ranking of the highest ranked articles associated with some term. You might consider content to be the **doc_body**, **doc_description**, or **doc_full_name**. There isn't one way to create a content based recommendation, especially considering that each of these columns hold content related information. `1.` Use the function body below to create a content based recommender. Since there isn't one right answer for this recommendation tactic, no test functions are provided. Feel free to change the function inputs if you decide you want to try a method that requires more input values. The input values are currently set with one idea in mind that you may use to make content based recommendations. One additional idea is that you might want to choose the most popular recommendations that meet your 'content criteria', but again, there is a lot of flexibility in how you might make these recommendations. ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills. ``` def tokenize(x): ''' Tokenize a string into words. Args: x(string): string to tokezine. 
Returns: (list): list of lemmatized words ''' # get stop words stop_words = (set(stopwords.words('english')) | set(nltk.corpus.abc.words())) tokens = word_tokenize(x) # split each article title into individual words lemmatizer = WordNetLemmatizer() clean_tokens=[] for token in tokens: #clean each token from whitespace and punctuation, and conver to root word clean_token = lemmatizer.lemmatize(token).lower().strip() clean_tokens.append(clean_token) filtered = [word for word in clean_tokens if word not in stop_words and word.isalpha()] return filtered def make_content_recs(data_id, user_id=True, m=10, df=df): ''' This recommender goes through each article title and nltk library to finds the most common words (related to content) throughout all the articles. The recommender will look at the sums of words in the title of each article and based on the number of matches and popularity of an article. Args: data_id (str) - id of either user or article user_id (bool) - if true, make recs based on user m (int) - number of recommendations to give based on term Returns: recs (list) - list of article ids that are recommended rec_names (list) - list of article names that are recommended ''' if(user_id): user_id = data_id try: # get past articles read by the user article_ids, _ = get_user_articles(user_id) except KeyError: # user does not exist print('User Doesn\'t Exist, Recommending Top Articles') recs = get_top_article_ids(m) return recs, get_article_names(recs) else: article_ids = data_id title_data = df.drop_duplicates(subset='article_id') #drop duplicates titles = title_data[title_data.article_id.isin(list(map(float, article_ids)))].title # get article titles #tokenize the words in each article title title_words=[] tokenized = tokenize(titles.str.cat(sep=' ')) title_words.extend(tokenized) #find the highest occuring words common_words = pd.value_counts(title_words).sort_values(ascending=False)[:10].index top_matches={} # measure of similarity: count number of occurences of 
each common word in other article titles for word in common_words: word_count = pd.Series(title_data.title.str.count(word).fillna(0)) #gets occurences of each word in title top_matches[word] = word_count # most common words top_matches = pd.DataFrame(top_matches) top_matches['top_matches'] = top_matches.sum(axis=1) top_matches['article_id'] = title_data.article_id.astype(float) # get most interacted with articles article_occurences = pd.DataFrame({'occurences':df.article_id.value_counts()}) # sort matches by most popular articles top_matches = top_matches.merge(article_occurences, left_on='article_id', right_index=True) top_matches.sort_values(['top_matches', 'occurences'], ascending=False, inplace=True) # drop already read articles recs_df = top_matches[~top_matches.article_id.isin(list(map(float, article_ids)))] # get rec id and names recs = recs_df.article_id[:m].values.astype(str) rec_names = get_article_names(recs) return recs, rec_names ``` `2.` Now that you have put together your content-based recommendation system, use the cell below to write a summary explaining how your content based recommender works. Do you see any possible improvements that could be made to your function? Is there anything novel about your content based recommender? ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills. This content based recommender scans through previously interacted articles. The nltk library finds the most common words in the titles of each article. Based on these most common words, the recommender looks at the sums of words relevant words in the title of each article, and based on the number of matches in the titles as well as the general popularity of the article it gives back the best recommendations. If the user has not read any articles yet, then we can't really give any content based recommendations, and just return back some of the most popular articles. 
There is a lot of potential improvement and optimization for this recommender. For example, one could construct a custom NLTK corpus which would filter out article words. Currently I use a combination of a couple standard NLTK corpora. Furthermore, if df_content had information for all articles we could expand this recommender to look through not only the title but also the body of the articles.

**Write an explanation of your content based recommendation system here.**

`3.` Use your content-recommendation system to make recommendations for the below scenarios based on the comments. Again no tests are provided here, because there isn't one right answer that could be used to find these content based recommendations.

We are using the NLTK library to search for articles with similar keywords. If the user has no history yet, then no content-based recommendation is given, and we will return some of the most popular articles. We can improve this further by looking for semantically similar keywords, not just exact keyword matches. Also, if the user doesn't like a specific article because it has deep learning contents, it doesn't mean that he or she will dislike every article with deep learning content. It will be interesting to augment content-based recommendation with some ML algorithm that can handle such situations.

### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.

```
# make recommendations for a brand new user
make_content_recs('0.0', user_id=True)

# make a recommendations for a user who only has interacted with article id '1427.0'
make_content_recs(['1427.0'], user_id=False)
```

### <a class="anchor" id="Matrix-Fact">Part V: Matrix Factorization</a>

In this part of the notebook, you will use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.
`1.` You should have already created a **user_item** matrix above in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook. ``` # Load the matrix here user_item_matrix = pd.read_pickle('user_item_matrix.p') # quick look at the matrix user_item_matrix.head() ``` `2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson. ``` # Perform SVD on the User-Item Matrix Here u, s, vt = np.linalg.svd(user_item_matrix) # use the built in to get the three matrices print("Number of Nans in the users to item interactions matrix is: {}".format(np.isnan(user_item_matrix).sum().sum())) print("Number of Nans in the users to latent features matrix is: {}".format(np.isnan(u).sum().sum())) print("Number of Nans in the segma matrix is: {}".format(np.isnan(s).sum().sum())) print("Number of Nans in the items to latent features matrix is: {}".format(np.isnan(vt).sum().sum())) ``` **Provide your response here.** We can use Singular Value Decomposition because **there are no missing values (NANs) in our data.** `3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features. 
```
num_latent_feats = np.arange(10,700+10,20)
sum_errs = []

for k in num_latent_feats:
    # restructure with k latent features (truncate U, Sigma and V^T to rank k)
    s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]

    # take dot product to rebuild the rank-k approximation, rounded to 0/1
    user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))

    # compute error for each prediction to actual value
    diffs = np.subtract(user_item_matrix, user_item_est)

    # total errors and keep track of them
    err = np.sum(np.sum(np.abs(diffs)))
    sum_errs.append(err)


plt.figure(figsize=(15,10))
# NOTE(review): the denominator df.shape[0] is the number of interaction rows,
# not the number of cells in user_item_matrix — confirm this is the intended
# normalization (provided template code, left unchanged).
plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');
```

`4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below.

Use the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features.

Using the split below:

* How many users can we make predictions for in the test set?
* How many users are we not able to make predictions for because of the cold start problem?
* How many articles can we make predictions for in the test set?
* How many articles are we not able to make predictions for because of the cold start problem?
``` df_train = df.head(40000) df_test = df.tail(5993) def create_test_and_train_user_item(df_train, df_test): ''' INPUT: df_train - training dataframe df_test - test dataframe OUTPUT: user_item_train - a user-item matrix of the training dataframe (unique users for each row and unique articles for each column) user_item_test - a user-item matrix of the testing dataframe (unique users for each row and unique articles for each column) test_idx - all of the test user ids test_arts - all of the test article ids ''' # Your code here # create user item matrix for the train dataset user_item_train = create_user_item_matrix(df_train) # create the test dataset user_item_test = create_user_item_matrix(df_test) # get the ids of the train dataset and test dataset train_idx = set(user_item_train.index) test_idx = set(user_item_test.index) # get shared rows shared_rows = train_idx.intersection(test_idx) # get columns in train and test datasets train_arts = set(user_item_train.columns) test_arts = set(user_item_test.columns) # get shared columns shared_cols = train_arts.intersection(test_arts) # Creating new user-item matrix for tets with common values user_item_test = user_item_test.ix[shared_rows, shared_cols] return user_item_train, user_item_test, test_idx, test_arts user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test) print(user_item_test.shape[0]) print(len(test_idx) - user_item_test.shape[0]) print(user_item_test.shape[1]) print(len(test_arts) - user_item_test.shape[1]) # Replace the values in the dictionary below a = 662 b = 574 c = 20 d = 0 sol_4_dict = { 'How many users can we make predictions for in the test set?': c, 'How many users in the test set are we not able to make predictions for because of the cold start problem?': a, 'How many movies can we make predictions for in the test set?': b, 'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d } # this 
should be article not movies. it was bugging me and wasted some time on it t.sol_4_test(sol_4_dict) ``` `5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`. Use the cells below to explore how well SVD works towards making predictions for recommendations on the test data. ``` # fit SVD on the user_item_train matrix u_train, s_train, vt_train = np.linalg.svd(user_item_train) # fit svd similar to above then use the cells below # Use these cells to see how well you can use the training # decomposition to predict on test data def svd_algorithm(u_train, s_train, vt_train): """ Return the results of the svd algorithm. Args: u_train (np.array): user item interaction matrix s_train (np.array): sigma matrix vt_train (np.array): v transpose matrix Returns: Dataframe: dataframe of user article interaction counts sorted in descending order """ num_latent_feats = np.arange(10,700+10,20) sum_errs_train = [] sum_errs_test = [] all_errs = [] for k in num_latent_feats: # ge u_test and vt_test row_idxs = user_item_train.index.isin(test_idx) col_idxs = user_item_train.columns.isin(test_arts) u_test = u_train[row_idxs, :] vt_test = vt_train[:, col_idxs] # split data s_train_lat, u_train_lat, vt_train_lat = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :] u_test_lat, vt_test_lat = u_test[:, :k], vt_test[:k, :] # dot product: user_item_train_preds = np.around(np.dot(np.dot(u_train_lat, s_train_lat), vt_train_lat)) user_item_test_preds = np.around(np.dot(np.dot(u_test_lat, s_train_lat), vt_test_lat)) all_errs.append(1 - ((np.sum(user_item_test_preds)+np.sum(np.sum(user_item_test))) \ /(user_item_test.shape[0]*user_item_test.shape[1]))) # 
calculate the error of each prediction diffs_train = np.subtract(user_item_train, user_item_train_preds) diffs_test = np.subtract(user_item_test, user_item_test_preds) # get total Error err_train = np.sum(np.sum(np.abs(diffs_train))) err_test = np.sum(np.sum(np.abs(diffs_test))) sum_errs_train.append(err_train) sum_errs_test.append(err_test) # plot accuracy for train and test vs number of latent features plt.figure(figsize=(15,10)) # latent features and training plt.plot(num_latent_feats, 1 - np.array(sum_errs_train)/(user_item_train.shape[0]*user_item_test.shape[1]), label='Train', color='darkred') # latent features and testing plt.plot(num_latent_feats, 1 - np.array(sum_errs_test)/(user_item_test.shape[0]*user_item_test.shape[1]), label='Test', color='darkblue') plt.plot(num_latent_feats, all_errs, label='Total Error', color = "orange") plt.xlabel('Number of Latent Features') plt.ylabel('Accuracy') plt.legend(); # call the svd algorithm svd_algorithm(u_train, s_train, vt_train) ``` `6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of your results, discuss what you might do to determine if the recommendations you make with any of the above recommendation systems are an improvement to how users currently find articles? **Your response here.** - Test accuracy decreases as the number of latent features increases for the testing dataset. - In this project, only 20 users had records of old interactions. - To solve the cold-start problem, we can deploy rank based recommendation method or content based recommendation. <a id='conclusions'></a> ### Extras Using your workbook, you could now save your recommendations for each user, develop a class to make new predictions and update your results, and make a flask app to deploy your results. These tasks are beyond what is required for this project. 
However, from what you learned in the lessons, you are certainly capable of taking these tasks on to improve upon your work here!

## Conclusion

> Congratulations! You have reached the end of the Recommendations with IBM project!

> **Tip**: Once you are satisfied with your work here, check over your report to make sure that it satisfies all the areas of the [rubric](https://review.udacity.com/#!/rubrics/2322/view). You should also probably remove all of the "Tips" like this one so that the presentation is as polished as possible.

## Directions to Submit

> Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).

> Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.

> Once you've done this, you can submit your project by clicking on the "Submit Project" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations!

```
from subprocess import call
call(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])
```
github_jupyter
# ELG Signal-to-Noise Calculations This notebook provides a standardized calculation of the DESI emission-line galaxy (ELG) signal-to-noise (SNR) figure of merit, for tracking changes to simulation inputs and models. See the accompanying technical note [DESI-3977](https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=3977) for details. ``` %pylab inline import astropy.table import astropy.cosmology import astropy.io.fits as fits import astropy.units as u ``` Parts of this notebook assume that the [desimodel package](https://github.com/desihub/desimodel) is installed (both its git and svn components) and its `data/` directory is accessible via the `$DESIMODEL` environment variable: ``` import os.path assert 'DESIMODEL' in os.environ assert os.path.exists(os.path.join(os.getenv('DESIMODEL'), 'data', 'spectra', 'spec-sky.dat')) ``` Document relevant version numbers: ``` import desimodel import specsim print(f'Using desimodel {desimodel.__version__}, specsim {specsim.__version__}') ``` ## ELG Spectrum All peaks are assumed to have the same log-normal rest lineshape specified by a velocity dispersion $\sigma_v$, total flux $F_0$ and central wavelength $\lambda_0$ as: $$ f(\lambda; F_0, \lambda_0) = \frac{F_0}{\sqrt{2\pi}\,\lambda\,\sigma_{\log}}\, \exp\left[ -\frac{1}{2}\left( \frac{\log_{10}\lambda - \log_{10}\lambda_0}{\sigma_{\log}}\right)^2\right]\; , $$ where $$ \sigma_{\log} \equiv \frac{\sigma_v}{c \log 10} \; . $$ We use the pretabulated spectrum in `$DESIMODEL/data/spectra/spec-elg-o2flux-8e-17-average-line-ratios.dat` described in Section 2.3 of DESI-867-v1, which consists of only the following emission lines: - \[OII](3727A) and \[OII](3730A) - H-beta - \[OIII](4960A) and \[OIII](5008A) - H-alpha Note that H-alpha is never observable for $z > 0.5$, as is always the case for DESI ELG targets. Continuum is omitted since we are primarily interested in how well the \[OII] doublet can be identified and measured. 
All lines are assumed to have the same velocity dispersion of 70 km/s.

```
# Load the tabulated rest-frame ELG emission-line spectrum from $DESIMODEL.
elg_spec = astropy.table.Table.read(
    os.path.join(os.environ['DESIMODEL'], 'data', 'spectra',
                 'spec-elg-o2flux-8e-17-average-line-ratios.dat'),
    format='ascii')
# col1 = rest wavelength [Angstrom]; col2 = flux in units of 1e-17 erg/(s cm2 Angstrom).
elg_wlen0 = elg_spec['col1'].data
elg_flux0 = 1e-17 * elg_spec['col2'].data
```

## DESI ELG Sample

Look up the expected redshift distribution of DESI ELG targets from `$DESIMODEL/data/targets/nz_elg.dat`. Note that the [OII] doublet falls off the spectrograph around z = 1.63.

```
def get_elg_nz():
    """Return the ELG n(z) histogram (bin values, bin edges) from $DESIMODEL."""
    # Read the nz file from $DESIMODEL.
    full_name = os.path.join(os.environ['DESIMODEL'], 'data', 'targets', 'nz_elg.dat')
    table = astropy.table.Table.read(full_name, format='ascii')
    # Extract the n(z) histogram into numpy arrays.
    # Bins must be contiguous: each bin's upper edge equals the next bin's lower edge.
    z_lo, z_hi = table['col1'], table['col2']
    assert np.all(z_hi[:-1] == z_lo[1:])
    z_edge = np.hstack((z_lo, [z_hi[-1]]))
    nz = table['col3']
    # Trim to bins where n(z) > 0.
    non_zero = np.where(nz > 0)[0]
    lo, hi = non_zero[0], non_zero[-1] + 1
    nz = nz[lo: hi]
    z_edge = z_edge[lo: hi + 1]
    return nz, z_edge

elg_nz, elg_z_edge = get_elg_nz()
```

Calculate n(z) weights corresponding to an array of ELG redshifts:

```
def get_nz_weight(z):
    """Calculate n(z) weights corresponding to input z values.

    Redshifts outside the tabulated n(z) range get weight zero.
    """
    nz = np.zeros_like(z)
    # digitize returns 0 for z below the first edge and len(edges) above the last,
    # so only indices in [1, len(elg_nz)] map onto histogram bins.
    idx = np.digitize(z, elg_z_edge)
    sel = (idx > 0) & (idx <= len(elg_nz))
    nz[sel] = elg_nz[idx[sel] - 1]
    return nz
```

Sample random redshifts from n(z):

```
def generate_elg_z(n=100, seed=123):
    """Draw n random redshifts from the ELG n(z) via inverse-CDF sampling."""
    cdf = np.cumsum(elg_nz)
    cdf = np.hstack(([0], cdf / cdf[-1]))
    gen = np.random.RandomState(seed)
    # Invert the (piecewise-linear) CDF at uniform random deviates.
    return np.interp(gen.rand(n), cdf, elg_z_edge)

z=generate_elg_z(n=20000)
plt.hist(z, bins=elg_z_edge, histtype='stepfilled')
plt.xlim(elg_z_edge[0], elg_z_edge[-1])
print(f'Mean ELG redshift is {np.mean(z):.3f}')
```

Define a background cosmology for the angular-diameter distance used to scale galaxy angular sizes:

```
LCDM = astropy.cosmology.Planck15
```

Generate random ELG profiles for each target.
The mean half-light radius is 0.45" and scales with redshift. ``` def generate_elg_profiles(z, seed=123, verbose=False): """ELG profiles are assumed to be disk (Sersic n=1) only. """ gen = np.random.RandomState(seed) nsrc = len(z) source_fraction = np.zeros((nsrc, 2)) source_half_light_radius = np.zeros((nsrc, 2)) source_minor_major_axis_ratio = np.zeros((nsrc, 2)) source_position_angle = 360. * gen.normal(size=(nsrc, 2)) # Precompute cosmology scale factors. angscale = ( LCDM.angular_diameter_distance(1.0) / LCDM.angular_diameter_distance(z)).to(1).value if verbose: print(f'mean n(z) DA(1.0)/DA(z) = {np.mean(angscale):.3f}') # Disk only with random size and ellipticity. source_fraction[:, 0] = 1. source_half_light_radius[:, 0] = 0.427 * np.exp(0.25 * gen.normal(size=nsrc)) * angscale source_minor_major_axis_ratio[:, 0] = np.minimum(0.99, 0.50 * np.exp(0.15 * gen.normal(size=nsrc))) if verbose: print(f'mean HLR = {np.mean(source_half_light_radius[:, 0]):.3f}"') return dict( source_fraction=source_fraction, source_half_light_radius=source_half_light_radius, source_minor_major_axis_ratio=source_minor_major_axis_ratio, source_position_angle=source_position_angle) ``` Diagnostic plot showing the assumed ELG population (Figure 1 of DESI-3977): ``` def plot_elg_profiles(save=None): z = generate_elg_z(50000) sources = generate_elg_profiles(z, verbose=True) fig, ax = plt.subplots(2, 2, figsize=(8, 6)) ax = ax.flatten() ax[0].hist(sources['source_minor_major_axis_ratio'][:, 0], range=(0,1), bins=25) ax[0].set_xlabel('ELG minor/major axis ratio') ax[0].set_xlim(0, 1) ax[1].hist(z, bins=np.arange(0.6, 1.8, 0.1)) ax[1].set_xlim(0.6, 1.7) ax[1].set_xlabel('ELG redshift') ax[2].hist(sources['source_half_light_radius'][:, 0], bins=25) ax[2].set_xlabel('ELG half-light radius [arcsec]') ax[2].set_xlim(0.1, 1.1) ax[3].scatter(z, sources['source_half_light_radius'][:, 0], s=0.5, alpha=0.5) ax[3].set_xlabel('ELG redshift') ax[3].set_ylabel('ELG half-light radius [arcsec]') 
ax[3].set_xlim(0.6, 1.7) ax[3].set_ylim(0.1, 1.1) plt.tight_layout() if save: plt.savefig(save) plot_elg_profiles(save='elg-sample.png') ``` ## Simulated SNR Given an initialized simulator object, step through different redshifts and calculate the SNR recorded by all fibers for a fixed ELG spectrum. Save the results to a FITS file that can be used by `plot_elg_snr()`. ``` def calculate_elg_snr(simulator, save, description, z1=0.6, z2=1.65, dz=0.002, zref=1.20, seed=123, wlen=elg_wlen0, flux=elg_flux0): """Calculate the ELG [OII] SNR as a function of redshift. Parameters ---------- simulator : specsim.simulator.Simulator Instance of an initialized Simulator object to use. Each fiber will be simulated independently to study variations across the focal plane. save : str Filename to use for saving FITS results. description : str Short description for the saved file header, also used for plots later. z1 : float Minimum ELG redshift to calculate. z2 : float Maximum ELG redshift to calculate. dz : float Spacing of equally spaced grid to cover [z1, z2]. z2 will be increased by up to dz if necessary. zref : float Reference redshift used to save signal, noise and fiberloss. Must be on the grid specified by (z1, z2, dz). seed : int or None Random seed used to generate fiber positions and galaxy profiles. wlen : array 1D array of N rest wavelengths in Angstroms. flux : array 1D array of N corresponding rest fluxes in erg / (s cm2 Angstrom). """ zooms = (3715., 3742.), (4850., 4875.), (4950., 5020.) gen = np.random.RandomState(seed=seed) # Generate random focal plane (x,y) positions for each fiber in mm units. nfibers = simulator.num_fibers focal_r = np.sqrt(gen.uniform(size=nfibers)) * simulator.instrument.field_radius phi = 2 * np.pi * gen.uniform(size=nfibers) xy = (np.vstack([np.cos(phi), np.sin(phi)]) * focal_r).T # Build the grid of redshifts to simulate. 
nz = int(np.ceil((z2 - z1) / dz)) + 1 z2 = z1 + (nz - 1) * dz z_grid = np.linspace(z1, z2, nz) iref = np.argmin(np.abs(z_grid - zref)) assert np.abs(zref - z_grid[iref]) < 1e-5, 'zref not in z_grid' snr2 = np.zeros((4, nz, simulator.num_fibers)) # Initialize the results. hdus = fits.HDUList() hdus.append(fits.PrimaryHDU( header=fits.Header({'SEED': seed, 'NFIBERS': nfibers, 'DESCRIBE': description}))) # Zero-pad the input spectrum if necessary. wlo = 0.99 * desi.simulated['wavelength'][0] / (1 + z2) if wlen[0] > wlo: wlen = np.hstack([[wlo], wlen]) flux = np.hstack([[0.], flux]) # Simulate the specified rest-frame flux. simulator.source.update_in( 'ELG [OII] doublet', 'elg', wlen * u.Angstrom, flux * u.erg/(u.s * u.cm**2 * u.Angstrom), z_in=0.) # Simulate each redshift. for i, z in enumerate(z_grid): # Redshift the ELG spectrum. simulator.source.update_out(z_out=z) source_flux = np.tile(simulator.source.flux_out, [nfibers, 1]) # Generate source profiles for each target at this redshift. Since the seed is # fixed, only the redshift scaling of the HLR will change. sources = generate_elg_profiles(np.full(nfibers, z), seed=seed) # Simulate each source. simulator.simulate(source_fluxes=source_flux, focal_positions=xy, **sources) # Calculate the quadrature sum of SNR in each camera, by fiber. for output in simulator.camera_output: rest_wlen = output['wavelength'] / (1 + z) # Loop over emission lines. for j, (lo, hi) in enumerate(zooms): sel = (rest_wlen >= lo) & (rest_wlen < hi) if not np.any(sel): continue # Sum SNR2 over pixels. pixel_snr2 = output['num_source_electrons'][sel] ** 2 / output['variance_electrons'][sel] snr2[j, i] += pixel_snr2.sum(axis=0) if i == iref: # Save the fiberloss fraction and total variance tabulated on the simulation grid. 
table = astropy.table.Table(meta={'ZREF': zref}) sim = simulator.simulated table['WLEN'] = sim['wavelength'].data table['FLUX'] = sim['source_flux'].data table['FIBERLOSS'] = sim['fiberloss'].data table['NSRC'] = sim['num_source_electrons_b'] + sim['num_source_electrons_r'] + sim['num_source_electrons_z'] table['SKYVAR'] = sim['num_sky_electrons_b'] + sim['num_sky_electrons_r'] + sim['num_sky_electrons_z'] table['NOISEVAR'] = ( sim['read_noise_electrons_b'] ** 2 + sim['read_noise_electrons_r'] ** 2 + sim['read_noise_electrons_z'] ** 2 + sim['num_dark_electrons_b'] + sim['num_dark_electrons_r'] + sim['num_dark_electrons_z']) hdus.append(fits.table_to_hdu(table)) hdus[-1].name = 'REF' # Calculate the n(z) weighted mean SNR for [OII], using the median over fibers at each redshift. snr_oii = np.median(np.sqrt(snr2[0]), axis=-1) wgt = get_nz_weight(z_grid) snr_oii_eff = np.sum(snr_oii * wgt) / np.sum(wgt) print(f'n(z)-weighted effective [OII] SNR = {snr_oii_eff:.3f}') # Save the SNR vs redshift arrays for each emission line. 
table = astropy.table.Table(meta={'SNREFF': snr_oii_eff}) table['Z'] = z_grid table['ZWGT'] = wgt table['SNR_OII'] = np.sqrt(snr2[0]) table['SNR_HBETA'] = np.sqrt(snr2[1]) table['SNR_OIII'] = np.sqrt(snr2[2]) hdus.append(fits.table_to_hdu(table)) hdus[-1].name = 'SNR' hdus.writeto(save, overwrite=True) ``` Calculate flux limits in bins of redshift, to compare with SRD L3.1.3: ``` def get_flux_limits(z, snr, nominal_flux=8., nominal_snr=7., ax=None): fluxlim = np.zeros_like(snr) nonzero = snr > 0 fluxlim[nonzero] = nominal_flux * (nominal_snr / snr[nonzero]) bins = np.linspace(0.6, 1.6, 6) nlim = len(bins) - 1 medians = np.empty(nlim) for i in range(nlim): sel = (z >= bins[i]) & (z < bins[i + 1]) medians[i] = np.median(fluxlim[sel]) if ax is not None: zmid = 0.5 * (bins[1:] + bins[:-1]) dz = 0.5 * (bins[1] - bins[0]) ax.errorbar(zmid, medians, xerr=dz, color='b', fmt='o', zorder=10, capsize=3) return fluxlim, medians ``` Plot a summary of the results saved by `calculate_elg_snr()`. Shaded bands show the 5-95 percentile range, with the median drawn as a solid curve. The fiberloss in the lower plot is calculated at the redshift `zref` specified in `calculate_elg_snr()` (since the ELG size distribution is redshift dependent). ``` def plot_elg_snr(name, save=True): """Plot a summary of results saved by calculate_elg_snr(). Parameters ---------- name : str Name of the FITS file saved by calculate_elg_snr(). 
""" hdus = fits.open(name) hdr = hdus[0].header nfibers = hdr['NFIBERS'] description = hdr['DESCRIBE'] fig, axes = plt.subplots(2, 1, figsize=(8, 6)) plt.suptitle(description, fontsize=14) snr_table = astropy.table.Table.read(hdus['SNR']) snr_oii_eff = snr_table.meta['SNREFF'] ref_table = astropy.table.Table.read(hdus['REF']) zref = ref_table.meta['ZREF'] ax = axes[0] color = 'rgb' labels = '[OII]', 'H$\\beta$', '[OIII]' z_grid = snr_table['Z'].data for i, tag in enumerate(('SNR_OII', 'SNR_HBETA', 'SNR_OIII')): snr = snr_table[tag].data snr_q = np.percentile(snr, (5, 50, 95), axis=-1) ax.fill_between(z_grid, snr_q[0], snr_q[2], color=color[i], alpha=0.25, lw=0) ax.plot(z_grid, snr_q[1], c=color[i], ls='-', label=labels[i]) ax.plot([], [], 'k:', label='n(z)') ax.legend(ncol=4) ax.set_xlabel('ELG redshift') ax.set_ylabel(f'Total signal-to-noise ratio') ax.axhline(7, c='k', ls='--') rhs = ax.twinx() rhs.plot(z_grid, snr_table['ZWGT'], 'k:') rhs.set_yticks([]) ax.set_xlim(z_grid[0], z_grid[-1]) ax.set_ylim(0, 12) rhs.set_ylim(0, None) ax.text(0.02, 0.03, f'n(z)-wgtd [OII] SNR={snr_oii_eff:.3f}', fontsize=12, transform=ax.transAxes) # Calculate the median [OII] flux limits. _, fluxlim = get_flux_limits(z_grid, np.median(snr_table['SNR_OII'], axis=-1)) # Print latex-format results for DESI-3977 Table 2. 
print(f'&{snr_oii_eff:7.3f}', end='') for m in fluxlim: print(f' &{m:5.1f}', end='') print(' \\\\') ax = axes[1] wlen = ref_table['WLEN'].data dwlen = wlen[1] - wlen[0] sky_q = np.percentile(ref_table['SKYVAR'].data, (5, 50, 95), axis=-1) sky_q[sky_q > 0] = 1 / sky_q[sky_q > 0] ax.fill_between(wlen, sky_q[0], sky_q[2], color='b', alpha=0.5, lw=0) ax.plot([], [], 'b-', label='sky ivar') ax.plot(wlen, sky_q[1], 'b.', ms=0.25, alpha=0.5) noise_q = np.percentile(ref_table['NOISEVAR'].data, (5, 50, 95), axis=-1) noise_q[noise_q > 0] = 1 / noise_q[noise_q > 0] ax.fill_between(wlen, noise_q[0], noise_q[2], color='r', alpha=0.25, lw=0) ax.plot(wlen, noise_q[1], c='r', ls='-', label='noise ivar') floss_q = np.percentile(ref_table['FIBERLOSS'].data, (5, 50, 95), axis=-1) ax.plot([], [], 'k-', label='fiberloss') rhs = ax.twinx() rhs.fill_between(wlen, floss_q[0], floss_q[2], color='k', alpha=0.25, lw=0) rhs.plot(wlen, floss_q[1], 'k-') rhs.set_ylim(0.2, 0.6) rhs.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.1)) rhs.set_ylabel('Fiberloss') ax.set_xlabel('Wavelength [A]') ax.set_ylabel(f'Inverse Variance / {dwlen:.1f}A') ax.set_xlim(wlen[0], wlen[-1]) ax.set_ylim(0, 0.25) ax.legend(ncol=3) plt.subplots_adjust(wspace=0.1, top=0.95, bottom=0.08, left=0.10, right=0.92) if save: base, _ = os.path.splitext(name) plot_name = base + '.png' plt.savefig(plot_name) print(f'Saved {plot_name}') ``` ## Examples Demonstrate this calculation for the baseline DESI configuration with 100 fibers: ``` import specsim.simulator desi = specsim.simulator.Simulator('desi', num_fibers=100) ``` **NOTE: the next cell takes about 15 minutes to run.** ``` %time calculate_elg_snr(desi, save='desimodel-0.9.6.fits', description='desimodel 0.9.6') ``` Plot the results (Figure 2 of DESI-3977): ``` plot_elg_snr('desimodel-0.9.6.fits') ``` Check that the results with GalSim are compatible with those using the (default) fastsim mode of fiberloss calculations: ``` desi.instrument.fiberloss_method = 
'galsim' ``` **NOTE: the next cell takes about 30 minutes to run.** ``` %time calculate_elg_snr(desi, save='desimodel-0.9.6-galsim.fits', description='desimodel 0.9.6 (galsim)') plot_elg_snr('desimodel-0.9.6-galsim.fits') ``` This comparison shows that the "fastsim" fiberloss fractions are about 1% (absolute) higher than "galsim", leading to a slight increase in signal and therefore SNR. The reason for this increase is that "fastsim" assumes a fixed minor / major axis ratio of 0.7 while our ELG population has a distribution of ratios with a median of 0.5. The weighted [OII] SNR values are 6.764 (fastsim) and 6.572 (galsim), which agree at the few percent level. We use GalSim fiberloss calculations consistently in Figure 2 and Table 2 of DESI-3977. ### CDR Comparison Compare with the CDR forecasts based on desimodel 0.3.1 and documented in DESI-867, using data from this [FITS file](https://desi.lbl.gov/svn/docs/technotes/spectro/elg-snr/trunk/data/elg_snr2_desimodel-0-3-1.fits): ``` desi867 = astropy.table.Table.read('elg_snr2_desimodel-0-3-1.fits', hdu=1) ``` Check that we can reproduce the figures from DESI-867: ``` def desi_867_fig1(): z = desi867['Z'] snr_all = np.sqrt(desi867['SNR2']) snr_oii = np.sqrt(desi867['SNR2_OII']) fig = plt.figure(figsize=(6, 5)) plt.plot(z, snr_all, 'k-', lw=1, label='all lines') plt.plot(z, snr_oii, 'r-', lw=1, label='[OII] only') plt.legend(fontsize='large') plt.axhline(7, c='b', ls='--') plt.ylim(0, 22) plt.xlim(z[0], z[-1]) plt.xticks([0.5, 1.0, 1.5]) plt.xlabel('Redshift') plt.ylabel('S/N') desi_867_fig1() def desi_867_fig2(): z = desi867['Z'] snr_all = np.sqrt(desi867['SNR2']) snr_oii = np.sqrt(desi867['SNR2_OII']) flux_limit_all, _ = get_flux_limits(z, snr_all) flux_limit_oii, medians = get_flux_limits(z, snr_oii) fig = plt.figure(figsize=(6, 5)) plt.plot(z, flux_limit_all, 'k-', lw=1, label='all lines') plt.plot(z, flux_limit_oii, 'r-', lw=1, label='[OII] only') plt.legend(loc='upper right', fontsize='large') _, _ = 
get_flux_limits(z, snr_oii, ax=plt.gca()) plt.ylim(0, 40) plt.xlim(z[0], z[-1]) plt.xticks([0.5, 1.0, 1.5]) plt.xlabel('Redshift') plt.ylabel('[OII] Flux limit ($10^{-17}$ ergs cm$^{-2}$ s$^{-1}$)') desi_867_fig2() ``` Print a summary for Table 2 of DESI-3977: ``` def cdr_summary(): z = desi867['Z'] snr_oii = np.sqrt(desi867['SNR2_OII']) wgt = get_nz_weight(z) snreff = np.sum(wgt * snr_oii) / wgt.sum() _, medians = get_flux_limits(z, snr_oii) print(f'0.3.1 (CDR) & {snreff:6.3f}', end='') for m in medians: print(f' &{m:5.1f}', end='') print(' \\\\') cdr_summary() ```
---
# Face Generation In this project, you'll define and train a DCGAN on a dataset of faces. Your goal is to get a generator network to generate *new* images of faces that look as realistic as possible! The project will be broken down into a series of tasks from **loading in data to defining and training adversarial networks**. At the end of the notebook, you'll be able to visualize the results of your trained Generator to see how it performs; your generated samples should look like fairly realistic faces with small amounts of noise. ### Get the Data You'll be using the [CelebFaces Attributes Dataset (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) to train your adversarial networks. This dataset is more complex than the number datasets (like MNIST or SVHN) you've been working with, and so, you should prepare to define deeper networks and train them for a longer time to get good results. It is suggested that you utilize a GPU for training. ### Pre-processed Data Since the project's main focus is on building the GANs, we've done *some* of the pre-processing for you. Each of the CelebA images has been cropped to remove parts of the image that don't include a face, then resized down to 64x64x3 NumPy images. Some sample data is show below. <img src='assets/processed_face_data.png' width=60% /> > If you are working locally, you can download this data [by clicking here](https://s3.amazonaws.com/video.udacity-data.com/topher/2018/November/5be7eb6f_processed-celeba-small/processed-celeba-small.zip) This is a zip file that you'll need to extract in the home directory of this notebook for further loading and processing. 
After extracting the data, you should be left with a directory of data `processed_celeba_small/` ``` # can comment out after executing #!unzip processed_celeba_small.zip data_dir = 'processed_celeba_small/' """ DON'T MODIFY ANYTHING IN THIS CELL """ import pickle as pkl import matplotlib.pyplot as plt import numpy as np import problem_unittests as tests #import helper %matplotlib inline ``` ## Visualize the CelebA Data The [CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) dataset contains over 200,000 celebrity images with annotations. Since you're going to be generating faces, you won't need the annotations, you'll only need the images. Note that these are color images with [3 color channels (RGB)](https://en.wikipedia.org/wiki/Channel_(digital_image)#RGB_Images) each. ### Pre-process and Load the Data Since the project's main focus is on building the GANs, we've done *some* of the pre-processing for you. Each of the CelebA images has been cropped to remove parts of the image that don't include a face, then resized down to 64x64x3 NumPy images. This *pre-processed* dataset is a smaller subset of the very large CelebA data. > There are a few other steps that you'll need to **transform** this data and create a **DataLoader**. #### Exercise: Complete the following `get_dataloader` function, such that it satisfies these requirements: * Your images should be square, Tensor images of size `image_size x image_size` in the x and y dimension. * Your function should return a DataLoader that shuffles and batches these Tensor images. #### ImageFolder To create a dataset given a directory of images, it's recommended that you use PyTorch's [ImageFolder](https://pytorch.org/docs/stable/torchvision/datasets.html#imagefolder) wrapper, with a root directory `processed_celeba_small/` and data transformation passed in. 
``` # necessary imports import torch from torchvision import datasets from torchvision import transforms from torch.utils.data import DataLoader def get_dataloader(batch_size, image_size, data_dir='processed_celeba_small/'): """ Batch the neural network data using DataLoader :param batch_size: The size of each batch; the number of images in a batch :param img_size: The square size of the image data (x, y) :param data_dir: Directory where image data is located :return: DataLoader with batched data """ # TODO: Implement function and return a dataloader transform = transforms.Compose([transforms.Resize(image_size), # resize to 128x128 transforms.ToTensor()]) dataset = datasets.ImageFolder(data_dir, transform) loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True) return loader ``` ## Create a DataLoader #### Exercise: Create a DataLoader `celeba_train_loader` with appropriate hyperparameters. Call the above function and create a dataloader to view images. * You can decide on any reasonable `batch_size` parameter * Your `image_size` **must be** `32`. Resizing the data to a smaller size will make for faster training, while still creating convincing images of faces! ``` # Define function hyperparameters batch_size = 128 img_size = 32 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # Call your function and get a dataloader celeba_train_loader = get_dataloader(batch_size, img_size) ``` Next, you can view some images! You should seen square images of somewhat-centered faces. Note: You'll need to convert the Tensor images into a NumPy type and transpose the dimensions to correctly display an image, suggested `imshow` code is below, but it may not be perfect. 
``` # helper display function def imshow(img): npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # obtain one batch of training images dataiter = iter(celeba_train_loader) images, _ = dataiter.next() # _ for no labels # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(20, 4)) plot_size=20 for idx in np.arange(plot_size): ax = fig.add_subplot(2, plot_size/2, idx+1, xticks=[], yticks=[]) imshow(images[idx]) ``` #### Exercise: Pre-process your image data and scale it to a pixel range of -1 to 1 You need to do a bit of pre-processing; you know that the output of a `tanh` activated generator will contain pixel values in a range from -1 to 1, and so, we need to rescale our training images to a range of -1 to 1. (Right now, they are in a range from 0-1.) ``` # TODO: Complete the scale function def scale(x, feature_range=(-1, 1)): ''' Scale takes in an image x and returns that image, scaled with a feature_range of pixel values from -1 to 1. This function assumes that the input x is already scaled from 0-1.''' # assume x is scaled to (0, 1) # scale to feature_range and return scaled x min, max = feature_range x = x * (max - min) + min return x """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # check scaled range # should be close to -1 to 1 img = images[0] scaled_img = scale(img) print('Min: ', scaled_img.min()) print('Max: ', scaled_img.max()) ``` --- # Define the Model A GAN is comprised of two adversarial networks, a discriminator and a generator. ## Discriminator Your first task will be to define the discriminator. This is a convolutional classifier like you've built before, only without any maxpooling layers. To deal with this complex data, it's suggested you use a deep network with **normalization**. You are also allowed to create any helper functions that may be useful. 
#### Exercise: Complete the Discriminator class * The inputs to the discriminator are 32x32x3 tensor images * The output should be a single value that will indicate whether a given image is real or fake ``` import torch.nn as nn import torch.nn.functional as F # helper conv function def conv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True): """Creates a convolutional layer, with optional batch normalization. """ layers = [] conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False) # append conv layer layers.append(conv_layer) if batch_norm: # append batchnorm layer layers.append(nn.BatchNorm2d(out_channels)) # using Sequential container return nn.Sequential(*layers) class Discriminator(nn.Module): def __init__(self, conv_dim): """ Initialize the Discriminator Module :param conv_dim: The depth of the first convolutional layer """ super(Discriminator, self).__init__() # complete init function self.conv_dim = conv_dim # [3, 32, 32] input self.conv1 = conv(3, conv_dim, 4, batch_norm=False) # first layer, no batch_norm # [10, 16, 16] input self.conv2 = conv(conv_dim, conv_dim*2, 4) # [20, 8, 8] input self.conv3 = conv(conv_dim*2, conv_dim*4, 4) # [40, 4, 4] input self.conv4 = conv(conv_dim*4, conv_dim*8, 1, padding=0, stride=1) # [80, 4, 4] output self.conv5 = conv(conv_dim*8, conv_dim*16, 1, padding=0, stride=1) # [160, 4, 4] output self.out_dim = self.conv_dim *16*4*4 # final, fully-connected layer self.fc = nn.Linear(self.out_dim, 1) def forward(self, x): """ Forward propagation of the neural network :param x: The input to the neural network :return: Discriminator logits; the output of the neural network """ # define feedforward behavior #print(x.shape) x = F.leaky_relu(self.conv1(x)) #print(x.shape) x = F.leaky_relu(self.conv2(x)) #print(x.shape) x = F.leaky_relu(self.conv3(x)) #print(x.shape) x = F.leaky_relu(self.conv4(x)) #print(x.shape) x = F.leaky_relu(self.conv5(x)) #print(x.shape) x = 
x.view(-1,self.out_dim) #print(x.shape) x = self.fc(x) return x """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_discriminator(Discriminator) ``` ## Generator The generator should upsample an input and generate a *new* image of the same size as our training data `32x32x3`. This should be mostly transpose convolutional layers with normalization applied to the outputs. #### Exercise: Complete the Generator class * The inputs to the generator are vectors of some length `z_size` * The output should be a image of shape `32x32x3` ``` def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True): """Creates a transposed-convolutional layer, with optional batch normalization. """ ## TODO: Complete this function ## create a sequence of transpose + optional batch norm layers layers = [] transpose_conv_layer = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False) # append transpose convolutional layer layers.append(transpose_conv_layer) if batch_norm: # append batchnorm layer layers.append(nn.BatchNorm2d(out_channels)) return nn.Sequential(*layers) class Generator(nn.Module): def __init__(self, z_size, conv_dim): """ Initialize the Generator Module :param z_size: The length of the input latent vector, z :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer """ super(Generator, self).__init__() # complete init function self.conv_dim = conv_dim self.out_dim = self.conv_dim *16*4*4 # first, fully-connected layer self.fc = nn.Linear(z_size, self.out_dim) # transpose conv layers #[160, 4, 4] input self.dconv1 = deconv(conv_dim*16, conv_dim*8, 1, padding=0, stride=1) #[80, 4, 4] input self.dconv2 = deconv(conv_dim*8, conv_dim*4, 1, padding=0, stride=1) #[40, 4, 4] input self.dconv3 = deconv(conv_dim*4, conv_dim*2, 4) #[20, 8, 8] input self.dconv4 = deconv(conv_dim*2, conv_dim, 4) #[10, 16, 16] input self.dconv5 = deconv(conv_dim, 3, 4, batch_norm=False) #[3, 32, 
32] output def forward(self, x): """ Forward propagation of the neural network :param x: The input to the neural network :return: A 32x32x3 Tensor image as output """ # define feedforward behavior #print(x.shape) x = self.fc(x) #print(x.shape) x = x.view(-1, self.conv_dim*16, 4, 4) #print(x.shape) x = F.relu(self.dconv1(x)) #print(x.shape) x = F.relu(self.dconv2(x)) #print(x.shape) x = F.relu(self.dconv3(x)) #print(x.shape) x = F.relu(self.dconv4(x)) #print(x.shape) x = self.dconv5(x) #print(x.shape) x = F.tanh(x) return x """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_generator(Generator) ``` ## Initialize the weights of your networks To help your models converge, you should initialize the weights of the convolutional and linear layers in your model. From reading the [original DCGAN paper](https://arxiv.org/pdf/1511.06434.pdf), they say: > All weights were initialized from a zero-centered Normal distribution with standard deviation 0.02. So, your next task will be to define a weight initialization function that does just this! You can refer back to the lesson on weight initialization or even consult existing model code, such as that from [the `networks.py` file in CycleGAN Github repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py) to help you complete this function. #### Exercise: Complete the weight initialization function * This should initialize only **convolutional** and **linear** layers * Initialize the weights to a normal distribution, centered around 0, with a standard deviation of 0.02. * The bias terms, if they exist, may be left alone or set to 0. ``` def weights_init_normal(m): """ Applies initial weights to certain layers in a model . The weights are taken from a normal distribution with mean = 0, std dev = 0.02. :param m: A module or layer in a network """ # classname will be something like: # `Conv`, `BatchNorm2d`, `Linear`, etc. 
classname = m.__class__.__name__ # TODO: Apply initial weights to convolutional and linear layers if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): m.weight.data.normal_(mean=0,std=0.02) if hasattr(m, 'bias') and m.bias is not None: m.bias.data.fill_(0) ``` ## Build complete network Define your models' hyperparameters and instantiate the discriminator and generator from the classes defined above. Make sure you've passed in the correct input arguments. ``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ def build_network(d_conv_dim, g_conv_dim, z_size): # define discriminator and generator D = Discriminator(d_conv_dim) G = Generator(z_size=z_size, conv_dim=g_conv_dim) # initialize model weights D.apply(weights_init_normal) G.apply(weights_init_normal) print(D) print() print(G) return D, G ``` #### Exercise: Define model hyperparameters ``` # Define model hyperparams d_conv_dim = 32 g_conv_dim = 32 z_size = 100 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ D, G = build_network(d_conv_dim, g_conv_dim, z_size) ``` ### Training on GPU Check if you can train on GPU. Here, we'll set this as a boolean variable `train_on_gpu`. Later, you'll be responsible for making sure that >* Models, * Model inputs, and * Loss function arguments Are moved to GPU, where appropriate. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch # Check for a GPU train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('No GPU found. Please use a GPU to train your neural network.') else: print('Training on GPU!') ``` --- ## Discriminator and Generator Losses Now we need to calculate the losses for both types of adversarial networks. ### Discriminator Losses > * For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_real_loss + d_fake_loss`. 
* Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that. ### Generator Loss The generator loss will look similar only with flipped labels. The generator's goal is to get the discriminator to *think* its generated images are *real*. #### Exercise: Complete real and fake loss functions **You may choose to use either cross entropy or a least squares error loss to complete the following `real_loss` and `fake_loss` functions.** ``` def real_loss(D_out): '''Calculates how close discriminator outputs are to being real. param, D_out: discriminator logits return: real loss''' batch_size = D_out.size(0) labels = torch.ones(batch_size) # move labels to GPU if available if train_on_gpu: labels = labels.cuda() # binary cross entropy with logits loss criterion = nn.BCEWithLogitsLoss() # calculate loss loss = criterion(D_out.squeeze(), labels) return loss def fake_loss(D_out): '''Calculates how close discriminator outputs are to being fake. param, D_out: discriminator logits return: fake loss''' batch_size = D_out.size(0) labels = torch.zeros(batch_size) # fake labels = 0 if train_on_gpu: labels = labels.cuda() criterion = nn.BCEWithLogitsLoss() # calculate loss loss = criterion(D_out.squeeze(), labels) return loss ``` ## Optimizers #### Exercise: Define optimizers for your Discriminator (D) and Generator (G) Define optimizers for your models with appropriate hyperparameters. ``` import torch.optim as optim lr = .0002 beta1=0.5 beta2=0.999 # Create optimizers for the discriminator and generator d_optimizer = optim.Adam(D.parameters(), lr, [beta1, beta2]) g_optimizer = optim.Adam(G.parameters(), lr, [beta1, beta2]) ``` --- ## Training Training will involve alternating between training the discriminator and the generator. You'll use your functions `real_loss` and `fake_loss` to help you calculate the discriminator losses. 
def train(D, G, n_epochs, print_every=50):
    '''Trains adversarial networks for some number of epochs.

       param, D: the discriminator network
       param, G: the generator network
       param, n_epochs: number of epochs to train for
       param, print_every: when to print and record the models' losses
       return: list of (d_loss, g_loss) tuples, recorded every `print_every` batches

       Side effects: updates D and G in place via the module-level
       d_optimizer / g_optimizer, and pickles one batch of generator
       samples per epoch to 'train_samples.pkl'.
       NOTE(review): relies on notebook globals defined elsewhere
       (train_on_gpu, celeba_train_loader, scale, z_size, d_optimizer,
       g_optimizer, real_loss, fake_loss, pkl) -- confirm they exist
       before calling.'''

    # move models to GPU
    if train_on_gpu:
        D.cuda()
        G.cuda()

    # keep track of loss and generated, "fake" samples
    samples = []
    losses = []

    # Get some fixed data for sampling. These are images that are held
    # constant throughout training, and allow us to inspect the model's performance
    sample_size=16
    fixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))
    fixed_z = torch.from_numpy(fixed_z).float()
    # move z to GPU if available
    if train_on_gpu:
        fixed_z = fixed_z.cuda()

    # epoch training loop
    for epoch in range(n_epochs):

        # batch training loop
        for batch_i, (real_images, _) in enumerate(celeba_train_loader):

            batch_size = real_images.size(0)
            # rescale images to the generator's output range (tanh: -1..1)
            real_images = scale(real_images)

            # ===============================================
            #         TRAIN THE DISCRIMINATOR
            # ===============================================
            d_optimizer.zero_grad()

            # 1. Train with real images: discriminator should output ~1
            if train_on_gpu:
                real_images = real_images.cuda()

            D_real = D(real_images)
            d_real_loss = real_loss(D_real)

            # 2. Train with fake images: discriminator should output ~0
            # Generate fake images from uniform latent noise
            z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            z = torch.from_numpy(z).float()
            # move z to GPU, if available
            if train_on_gpu:
                z = z.cuda()
            fake_images = G(z)

            # Compute the discriminator losses on fake images
            D_fake = D(fake_images)
            d_fake_loss = fake_loss(D_fake)

            # add up loss and perform backprop
            d_loss = d_real_loss + d_fake_loss
            d_loss.backward()
            d_optimizer.step()

            # ===============================================
            #         TRAIN THE GENERATOR
            # ===============================================
            g_optimizer.zero_grad()

            # Train with fake images and flipped labels: the generator
            # "wins" when the discriminator classifies its fakes as real.
            z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            z = torch.from_numpy(z).float()
            if train_on_gpu:
                z = z.cuda()
            fake_images = G(z)

            # Compute the discriminator losses on fake images
            # using flipped labels!
            D_fake = D(fake_images)
            g_loss = real_loss(D_fake) # use real loss to flip labels

            # perform backprop
            g_loss.backward()
            g_optimizer.step()

            # Print some loss stats
            if batch_i % print_every == 0:
                # append discriminator loss and generator loss
                losses.append((d_loss.item(), g_loss.item()))
                # print discriminator and generator loss
                print('Epoch [{:5d}/{:5d}] | d_loss: {:6.4f} | g_loss: {:6.4f}'.format(
                        epoch+1, n_epochs, d_loss.item(), g_loss.item()))

        ## AFTER EACH EPOCH ##
        # this code assumes your generator is named G, feel free to change the name
        # generate and save sample, fake images
        G.eval() # for generating samples
        samples_z = G(fixed_z)
        samples.append(samples_z)
        G.train() # back to training mode

    # Save training generator samples
    with open('train_samples.pkl', 'wb') as f:
        pkl.dump(samples, f)

    # finally return losses
    return losses
# helper function for viewing a list of passed in sample images
def view_samples(epoch, samples):
    '''Displays a 2x8 grid of generator samples from one recorded epoch.

       param, epoch: index into `samples` (e.g. -1 for the last recorded epoch)
       param, samples: list of per-epoch sample batches as saved by train()
       NOTE(review): assumes each image tensor is (3, 32, 32) with values in
       the generator's tanh range [-1, 1] -- confirm against the model used.'''
    fig, axes = plt.subplots(figsize=(16,4), nrows=2, ncols=8, sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        img = img.detach().cpu().numpy()
        # channels-first (C, H, W) -> channels-last (H, W, C) for imshow
        img = np.transpose(img, (1, 2, 0))
        # rescale from [-1, 1] to uint8 pixel values [0, 255]
        img = ((img + 1)*255 / (2)).astype(np.uint8)
        # hide axis ticks; the grid is image-only
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        im = ax.imshow(img.reshape((32,32,3)))
github_jupyter
``` """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. """ # If you're using Google Colab and not running locally, run this cell # install NeMo BRANCH = 'v1.0.0b3' !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp] # If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error: # 'ImportError: IProgress not found. Please update jupyter and ipywidgets.' ! pip install ipywidgets ! jupyter nbextension enable --py widgetsnbextension # Please restart the kernel after running this cell from nemo.collections import nlp as nemo_nlp from nemo.utils.exp_manager import exp_manager import os import wget import torch import pytorch_lightning as pl from omegaconf import OmegaConf ``` In this tutorial, we are going to describe how to finetune a BERT-like model based on [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) on [GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding](https://openreview.net/pdf?id=rJ4km2R5t7). # GLUE tasks GLUE Benchmark includes 9 natural language understanding tasks: ## Single-Sentence Tasks * CoLA - [The Corpus of Linguistic Acceptability](https://arxiv.org/abs/1805.12471) is a set of English sentences from published linguistics literature. The task is to predict whether a given sentence is grammatically correct or not. 
* SST-2 - [The Stanford Sentiment Treebank](https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf) consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence: positive or negative. ## Similarity and Paraphrase tasks * MRPC - [The Microsoft Research Paraphrase Corpus](https://www.aclweb.org/anthology/I05-5002.pdf) is a corpus of sentence pairs automatically extracted from online news sources, with human annotations for whether the sentences in the pair are semantically equivalent. * QQP - [The Quora Question Pairs](https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs) dataset is a collection of question pairs from the community question-answering website Quora. The task is to determine whether a pair of questions are semantically equivalent. * STS-B - [The Semantic Textual Similarity Benchmark](https://arxiv.org/abs/1708.00055) is a collection of sentence pairs drawn from news headlines, video, and image captions, and natural language inference data. The task is to determine how similar two sentences are. ## Inference Tasks * MNLI - [The Multi-Genre Natural Language Inference Corpus](https://cims.nyu.edu/~sbowman/multinli/multinli_0.9.pdf) is a crowdsourced collection of sentence pairs with textual entailment annotations. Given a premise sentence and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The task has the matched (in-domain) and mismatched (cross-domain) sections. * QNLI - [The Stanford Question Answering Dataset](https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf) is a question-answering dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn from Wikipedia) contains the answer to the corresponding question. 
The task is to determine whether the context sentence contains the answer to the question. * RTE The Recognizing Textual Entailment (RTE) datasets come from a series of annual [textual entailment challenges](https://aclweb.org/aclwiki/Recognizing_Textual_Entailment). The task is to determine whether the second sentence is the entailment of the first one or not. * WNLI - The Winograd Schema Challenge is a reading comprehension task in which a system must read a sentence with a pronoun and select the referent of that pronoun from a list of choices (Hector Levesque, Ernest Davis, and Leora Morgenstern. The winograd schema challenge. In Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning. 2012). All tasks are classification tasks, except for the STS-B task which is a regression task. All classification tasks are 2-class problems, except for the MNLI task which has 3-classes. More details about GLUE benchmark could be found [here](https://gluebenchmark.com/). # Datasets **To proceed further, you need to download the GLUE data.** For example, you can download [this script](https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py) using `wget` and then execute it by running: `python download_glue_data.py` use `--tasks TASK` if datasets for only selected GLUE tasks are needed After running the above commands, you will have a folder `glue_data` with data folders for every GLUE task. For example, data for MRPC task would be under glue_data/MRPC. This tutorial and [examples/nlp/glue_benchmark/glue_benchmark.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/glue_benchmark/glue_benchmark.py) work with all GLUE tasks without any modifications. For this tutorial, we are going to use MRPC task. 
``` # supported task names: ["cola", "sst-2", "mrpc", "sts-b", "qqp", "mnli", "qnli", "rte", "wnli"] TASK = 'mrpc' DATA_DIR = 'glue_data/MRPC' WORK_DIR = "WORK_DIR" MODEL_CONFIG = 'glue_benchmark_config.yaml' ! ls -l $DATA_DIR ``` For each task, there are 3 files: `train.tsv, dev.tsv, and test.tsv`. Note, MNLI has 2 dev sets: matched and mismatched, evaluation on both dev sets will be done automatically. ``` # let's take a look at the training data ! head -n 5 {DATA_DIR}/train.tsv ``` # Model configuration Now, let's take a closer look at the model's configuration and learn to train the model. GLUE model is comprised of the pretrained [BERT](https://arxiv.org/pdf/1810.04805.pdf) model followed by a Sequence Regression module (for STS-B task) or Sequence classifier module (for the rest of the tasks). The model is defined in a config file which declares multiple important sections. They are: - **model**: All arguments that are related to the Model - language model, a classifier, optimizer and schedulers, datasets and any other related information - **trainer**: Any argument to be passed to PyTorch Lightning ``` # download the model's configuration file config_dir = WORK_DIR + '/configs/' os.makedirs(config_dir, exist_ok=True) if not os.path.exists(config_dir + MODEL_CONFIG): print('Downloading config file...') wget.download('https://raw.githubusercontent.com/NVIDIA/NeMo/v1.0.0b2/examples/nlp/glue_benchmark/' + MODEL_CONFIG, config_dir) else: print ('config file is already exists') # this line will print the entire config of the model config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}' print(config_path) config = OmegaConf.load(config_path) print(OmegaConf.to_yaml(config)) ``` # Model Training ## Setting up Data within the config Among other things, the config file contains dictionaries called **dataset**, **train_ds** and **validation_ds**. These are configurations used to setup the Dataset and DataLoaders of the corresponding config. 
We assume that both training and evaluation files are located in the same directory, and use the default names mentioned during the data download step. So, to start model training, we simply need to specify `model.dataset.data_dir`, like we are going to do below. Also notice that some config lines, including `model.dataset.data_dir`, have `???` in place of paths, this means that values for these fields are required to be specified by the user. Let's now add the data directory path, task name and output directory for saving predictions to the config. ``` config.model.task_name = TASK config.model.output_dir = WORK_DIR config.model.dataset.data_dir = DATA_DIR ``` ## Building the PyTorch Lightning Trainer NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem. Let's first instantiate a Trainer object ``` print("Trainer config - \n") print(OmegaConf.to_yaml(config.trainer)) # lets modify some trainer configs # checks if we have GPU available and uses it cuda = 1 if torch.cuda.is_available() else 0 config.trainer.gpus = cuda config.trainer.precision = 16 if torch.cuda.is_available() else 32 # for mixed precision training, uncomment the line below (precision should be set to 16 and amp_level to O1): # config.trainer.amp_level = O1 # remove distributed training flags config.trainer.accelerator = None # setup max number of steps to reduce training time for demonstration purposes of this tutorial config.trainer.max_steps = 128 trainer = pl.Trainer(**config.trainer) ``` ## Setting up a NeMo Experiment NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it: ``` exp_dir = exp_manager(trainer, config.get("exp_manager", None)) # the exp_dir provides a path to the current experiment for easy access exp_dir = str(exp_dir) exp_dir ``` Before initializing the model, we might want to modify some of the model configs. 
For example, we might want to modify the pretrained BERT model and use [Megatron-LM BERT](https://arxiv.org/abs/1909.08053) or [AlBERT model](https://arxiv.org/abs/1909.11942): ``` # get the list of supported BERT-like models, for the complete list of HugginFace models, see https://huggingface.co/models print(nemo_nlp.modules.get_pretrained_lm_models_list(include_external=True)) # specify BERT-like model, you want to use, for example, "megatron-bert-345m-uncased" or 'bert-base-uncased' PRETRAINED_BERT_MODEL = "albert-base-v1" # add the specified above model parameters to the config config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL ``` Now, we are ready to initialize our model. During the model initialization call, the dataset and data loaders we'll be prepared for training and evaluation. Also, the pretrained BERT model will be downloaded, note it can take up to a few minutes depending on the size of the chosen BERT model. ``` model = nemo_nlp.models.GLUEModel(cfg=config.model, trainer=trainer) ``` ## Monitoring training progress Optionally, you can create a Tensorboard visualization to monitor training progress. ``` try: from google import colab COLAB_ENV = True except (ImportError, ModuleNotFoundError): COLAB_ENV = False # Load the TensorBoard notebook extension if COLAB_ENV: %load_ext tensorboard %tensorboard --logdir {exp_dir} else: print("To use tensorboard, please use this notebook in a Google Colab environment.") ``` Note, it’s recommended to finetune the model on each task separately. Also, based on [GLUE Benchmark FAQ#12](https://gluebenchmark.com/faq), there are might be some differences in dev/test distributions for QQP task and in train/dev for WNLI task. ``` # start model training trainer.fit(model) ``` ## Training Script If you have NeMo installed locally, you can also train the model with [examples/nlp/glue_benchmark/glue_benchmark.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/glue_benchmark/glue_benchmark.py). 
To run training script, use: `python glue_benchmark.py \ model.dataset.data_dir=PATH_TO_DATA_DIR \ model.task_name=TASK` Average results after 3 runs: | Task | Metric | ALBERT-large | ALBERT-xlarge | Megatron-345m | BERT base paper | BERT large paper | |-------|--------------------------|--------------|---------------|---------------|-----------------|------------------| | CoLA | Matthew's correlation | 54.94 | 61.72 | 64.56 | 52.1 | 60.5 | | SST-2 | Accuracy | 92.74 | 91.86 | 95.87 | 93.5 | 94.9 | | MRPC | F1/Accuracy | 92.05/88.97 | 91.87/88.61 | 92.36/89.46 | 88.9/- | 89.3/- | | STS-B | Person/Spearman corr. | 90.41/90.21 | 90.07/90.10 | 91.51/91.61 | -/85.8 | -/86.5 | | QQP | F1/Accuracy | 88.26/91.26 | 88.80/91.65 | 89.18/91.91 | 71.2/- | 72.1/- | | MNLI | Matched /Mismatched acc. | 86.69/86.81 | 88.66/88.73 | 89.86/89.81 | 84.6/83.4 | 86.7/85.9 | | QNLI | Accuracy | 92.68 | 93.66 | 94.33 | 90.5 | 92.7 | | RTE | Accuracy | 80.87 | 82.86 | 83.39 | 66.4 | 70.1 | WNLI task was excluded from the experiments due to the problematic WNLI set. The dev sets were used for evaluation for ALBERT and Megatron models, and the test sets results for [the BERT paper](https://arxiv.org/abs/1810.04805). Hyperparameters used to get the results from the above table, could be found in the table below. Some tasks could be further finetuned to improve performance numbers, the tables are for a baseline reference only. Each cell in the table represents the following parameters: Number of GPUs used/ Batch Size/ Learning Rate/ Number of Epochs. For not specified parameters, please refer to the default parameters in the training script. 
| Task | ALBERT-large | ALBERT-xlarge | Megatron-345m | |-------|--------------|---------------|---------------| | CoLA | 1 / 32 / 1e-5 / 3 | 1 / 32 / 1e-5 / 10 | 4 / 16 / 2e-5 / 12 | | SST-2 | 4 / 16 / 2e-5 / 5 | 4 / 16 / 2e-5 /12 | 4 / 16 / 2e-5 / 12 | | MRPC | 1 / 32 / 1e-5 / 5 | 1 / 16 / 2e-5 / 5 | 1 / 16 / 2e-5 / 10 | | STS-B | 1 / 16 / 2e-5 / 5 | 1 / 16 / 4e-5 / 12 | 4 / 16 / 3e-5 / 12 | | QQP | 1 / 16 / 2e-5 / 5 | 4 / 16 / 1e-5 / 12 | 4 / 16 / 1e-5 / 12 | | MNLI | 4 / 64 / 1e-5 / 5 | 4 / 32 / 1e-5 / 5 | 4 / 32 / 1e-5 / 5 | | QNLI | 4 / 16 / 1e-5 / 5 | 4 / 16 / 1e-5 / 5 | 4 / 16 / 2e-5 / 5 | | RTE | 1 / 16 / 1e-5 / 5 | 1 / 16 / 1e-5 / 12 | 4 / 16 / 3e-5 / 12 |
github_jupyter
# 빠른 학습을 위한 tfrecords 데이터셋 생성 - 컴페티션 기본 데이터는 data/public 하위 폴더에 있다고 가정합니다. (train.csv, sample_submission.csv, etc) - 또한 train.zip, test.zip 역시 data/public 하위에 압축을 풀어놓았다고 가정하고 시작하겠습니다. ``` import os import os.path as pth import json import shutil import pandas as pd from tqdm import tqdm data_base_path = pth.join('data', 'public') os.makedirs(data_base_path, exist_ok=True) category_csv_name = 'category.csv' category_json_name = 'category.json' submission_csv_name = 'sample_submisstion.csv' train_csv_name = 'train.csv' train_zip_name = 'train.zip' test_zip_name = 'test.zip' ``` 일단 모든 jpg 파일을 한 경로에 놓고 작업하기 편하게 하는 방식입니다. 파일이 많다보니 파일 옮기는 작업을 쉘 한줄로 하려니 명령어가 너무 길어져 오류가 발생힙니다. 조금 번거롭더라도 하나씩 가져와서 한 경로 이하에 놓도록 하였습니다. ``` train_data_path = pth.join(data_base_path, 'train') test_data_path = pth.join(data_base_path, 'test') if not pth.exists(train_data_path): os.system('unzip {}/{} -d {}'.format(data_base_path, train_zip_name, train_data_path)) # os.system('mv {}/*/*/* {}'.format(train_data_path, train_data_path)) place_name_list = [name for name in os.listdir(train_data_path) if not name.endswith('.JPG')] for place_name in place_name_list: place_fullpath = pth.join(train_data_path, place_name) landmark_name_list = os.listdir(place_fullpath) for landmark_name in landmark_name_list: landmark_fullpath = pth.join(place_fullpath, landmark_name) image_name_list = os.listdir(landmark_fullpath) for image_name in image_name_list: image_fullpath = pth.join(landmark_fullpath, image_name) if not image_fullpath.endswith('.JPG'): continue shutil.move(image_fullpath, train_data_path) if not pth.exists(test_data_path): os.system('unzip {}/{} -d {}'.format(data_base_path, test_zip_name, test_data_path)) # os.system('mv {}/*/* {}'.format(test_data_path, test_data_path)) temp_name_list = [name for name in os.listdir(test_data_path) if not name.endswith('.JPG')] for temp_name in temp_name_list: temp_fullpath = pth.join(test_data_path, temp_name) image_name_list = os.listdir(temp_fullpath) for 
image_name in image_name_list: image_fullpath = pth.join(temp_fullpath, image_name) if not image_fullpath.endswith('.JPG'): continue shutil.move(image_fullpath, test_data_path) train_csv_path = pth.join(data_base_path, train_csv_name) train_df = pd.read_csv(train_csv_path) train_dict = {k:v for k, v in train_df.values} submission_csv_path = pth.join(data_base_path, submission_csv_name) submission_df = pd.read_csv(submission_csv_path) # submission_df.head() train_df.head() ### Check all file is exist for basename in tqdm(train_df['id']): if not pth.exists(pth.join(train_data_path, basename+'.JPG')): print(basename) for basename in tqdm(submission_df['id']): if not pth.exists(pth.join(test_data_path, basename+'.JPG')): print(basename) category_csv_path = pth.join(data_base_path, category_csv_name) category_df = pd.read_csv(category_csv_path) category_dict = {k:v for k, v in category_df.values} category_df.head() # category_json_path = pth.join(data_base_path, category_json_name) # with open(category_json_path) as f: # category_dict = json.load(f) # category_dict ``` ## 2. 추출한 csv와 생성한 이미지를 기반으로 tfrecord 생성 데이터를 읽는 오버헤드를 줄이기 위해 학습 데이터를 tfrecord형태로 새로 생성합니다 ``` !pip install tensorflow !pip install opencv-python import tensorflow as tf from tensorflow.keras.preprocessing import image import cv2 import matplotlib.pyplot as plt from PIL import Image from sklearn.model_selection import train_test_split, KFold, RepeatedKFold, GroupKFold, RepeatedStratifiedKFold from sklearn.utils import shuffle import numpy as np import pandas as pd import os import os.path as pth import shutil import time from tqdm import tqdm import numpy as np from PIL import Image from IPython.display import clear_output from multiprocessing import Process, Queue import datetime def _bytes_feature(value): """Returns a bytes_list from a string / byte.""" if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a string from an EagerTensor. 
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _float_feature(value): """Returns a float_list from a float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) def _floatarray_feature(array): """Returns a float_list from a float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=array)) def _int64_feature(value): """Returns an int64_list from a bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _validate_text(text): """If text is not str or unicode, then try to convert it to str.""" if isinstance(text, str): return text elif isinstance(text, 'unicode'): return text.encode('utf8', 'ignore') else: return str(text) def to_tfrecords(id_list, randmark_id_list, tfrecords_name): print("Start converting") options = tf.io.TFRecordOptions(compression_type = 'GZIP') with tf.io.TFRecordWriter(path=pth.join(tfrecords_name+'.tfrecords'), options=options) as writer: for id_, randmark_id in tqdm(zip(id_list, randmark_id_list), total=len(id_list), position=0, leave=True): image_path = pth.join(train_data_path, id_ + '.JPG') _binary_image = tf.io.read_file(image_path) string_set = tf.train.Example(features=tf.train.Features(feature={ 'image_raw': _bytes_feature(_binary_image), 'randmark_id': _int64_feature(randmark_id), 'id': _bytes_feature(id_.encode()), })) writer.write(string_set.SerializeToString()) ``` Training 때 사용할 validation을 분리합니다. (Train:0.8, Validation:0.2) ``` train_ids, val_ids, train_landmark_ids, val_landmark_ids = train_test_split(train_df['id'], train_df['landmark_id'], test_size=0.2, random_state=7777, shuffle=True) to_tfrecords(train_ids, train_landmark_ids, pth.join(data_base_path, 'all_train')) to_tfrecords(val_ids, val_landmark_ids, pth.join(data_base_path, 'all_val')) ``` Testset 또한 속도를 위해 tfrecord 형태로 변환해줍니다. 
def to_test_tfrecords(id_list, tfrecords_name):
    """Writes the unlabeled test images to a GZIP-compressed tfrecords file.

    param, id_list: iterable of image ids (file basenames without '.JPG')
    param, tfrecords_name: output path without the '.tfrecords' extension
    NOTE(review): reads images from the module-level test_data_path and,
    unlike to_tfrecords, stores no 'randmark_id' feature (the test set is
    unlabeled), so reader code must use a feature description without it.
    """
    print("Start converting")
    # GZIP keeps the record file small; readers must use the same compression_type.
    options = tf.io.TFRecordOptions(compression_type = 'GZIP')
    with tf.io.TFRecordWriter(path=pth.join(tfrecords_name+'.tfrecords'), options=options) as writer:
        for id_ in tqdm(id_list, total=len(id_list), position=0, leave=True):
            image_path = pth.join(test_data_path, id_+'.JPG')
            # store the raw JPEG bytes; decoding happens at dataset-read time
            _binary_image = tf.io.read_file(image_path)
            string_set = tf.train.Example(features=tf.train.Features(feature={
                'image_raw': _bytes_feature(_binary_image),
                # 'randmark_id': _int64_feature(randmark_id),
                'id': _bytes_feature(id_.encode()),
            }))
            writer.write(string_set.SerializeToString())
dataset.shuffle(BUFFER_SIZE) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.map(prep_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) target_class = np.argmax(batch_y.numpy(), axis=1) target_class for batch_x, batch_y in dataset: print(batch_x.shape, batch_y.shape) target_class = np.argmax(batch_y[0].numpy()) print(category_dict[target_class]) plt.figure() plt.imshow(batch_x[0].numpy()) # plt.title('{}'.format(category_dict[target_class])) plt.show() break ``` ### TFRecords vs Normal benchmark 동일한 조건에서 순수한 파일 I/O 속도만을 비교하기 위해서 cache, prepetch, multiprocess와 같은 속도에 영향을 줄 수 있는 요소는 제외하고 측정하였습니다. - TFRecords 사용 시 ``` get_file(pth.join(data_base_path, 'all_train.tfrecords')) get_file(pth.join(data_base_path, 'all_val.tfrecords')) get_file(pth.join(data_base_path, 'test.tfrecords')) dataset = tf.data.TFRecordDataset(train_tfrecord_path, compression_type='GZIP') dataset = dataset.map(_parse_image_function) for _ in tqdm(dataset, position=0, leave=True): pass ``` - 일반적인 jpg파일 사용 시 ``` train_ids, val_ids, train_landmark_ids, val_landmark_ids = train_test_split(train_df['id'], train_df['landmark_id'], test_size=0.2, random_state=7777, shuffle=True) def load_image(image_path, label): img = tf.io.read_file(image_path) img = tf.image.decode_jpeg(img, channels=3) img = tf.dtypes.cast(img, tf.float32) return img, label train_tfrecord_array = np.array([pth.join(data_base_path, 'train', img_name+'.JPG') for img_name in train_ids.values]) dataset = tf.data.Dataset.from_tensor_slices((train_tfrecord_array, train_landmark_ids)) dataset = dataset.map(load_image) for _ in tqdm(dataset, position=0, leave=True): pass ``` - 결과를 보았을 때, 5분 43초(TFRecords) vs 14분 40초(Normal)로 TFRecord를 사용하는 것이 3배 정도 더 빨랐습니다. - jpg 이미지가 속도가 더 오래 걸리는 이유는, jpg 방식으로 압축되어 있는 이미지를 raw 이미지로 해독하는데 걸리는 시간으로 인한 오버헤드로 추정됩니다. 
- 저 같은 경우 MobileNetV2 기반 모델이고, 코랩 T4 VGA 기준 학습 속도가 한 에폭에 8~9분정도 걸리는 상황이는 파일 I/O 속도가 전체 학습 속도에 미치는 영향은 상당히 큰 것으로 생각됩니다. - 또한 실제 사용에서는 Multiprocessing이나 prefetch와 같은 기능도 같이 사용하기 떄문에 이를 사용해서도 테스트 해보겠습니다. - TFRecords 사용 시 ``` dataset = tf.data.TFRecordDataset(train_tfrecord_path, compression_type='GZIP') dataset = dataset.map(_parse_image_function, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) for _ in tqdm(dataset, position=0, leave=True): pass ``` - 일반적인 jpg로 로딩 시 ``` dataset = tf.data.Dataset.from_tensor_slices((train_tfrecord_array, train_landmark_ids)) dataset = dataset.map(load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) for _ in tqdm(dataset, position=0, leave=True): pass ``` - 결과를 보았을 때, 놀랍게도 TFRecord를 사용하는 것과 일반 이미지 로딩 방식이 거의 동일한 시간을 보이는 것을 확인할 수 있습니다. - 코랩에서는 앞서 언급한 이미지 압축 해제에 대한 오버헤드를 tf.data의 부가기능을 활용하여 충분히 극복할 수 있는 것으로 보입니다. - 또한, TRRecord는 그냥 읽는 것과 동일한 시간이 걸리는 것으로 확인되는데, 이는 TFRecord로 읽는 방식 자체가 파일 I/O 이외에 별다른 오버헤드가 크게 없어서 그런 것으로 추정됩니다.
github_jupyter
## In situ data and trajectories incl. Bepi Colombo, PSP, Solar Orbiter https://github.com/cmoestl/heliocats Author: C. Moestl, IWF Graz, Austria twitter @chrisoutofspace, https://github.com/cmoestl last update: 2021 August 24 needs python 3.7 with the conda helio environment (see README.md) uses heliopy for generating spacecraft positions, for data source files see README.md --- MIT LICENSE Copyright 2020-2021, Christian Moestl Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
``` #change path for ffmpeg for animation production if needed ffmpeg_path='' import os import datetime from datetime import datetime, timedelta from sunpy.time import parse_time import matplotlib import matplotlib.pyplot as plt import matplotlib.dates as mdates import matplotlib.cm as cmap from scipy.signal import medfilt import numpy as np import pdb import pickle import seaborn as sns import sys import heliopy.data.spice as spicedata import heliopy.spice as spice import astropy import importlib import time import numba from numba import jit import multiprocessing import urllib import copy from astropy import constants as const import warnings warnings.filterwarnings('ignore') from heliocats import data as hd importlib.reload(hd) #reload again while debugging from heliocats import plot as hp importlib.reload(hp) #reload again while debugging #where the in situ data files are located is read #from config.py import config importlib.reload(config) from config import data_path ``` ## load HIGeoCAT ``` #load HIGeoCAT from heliocats import cats as hc importlib.reload(hc) #reload again while debugging #https://www.helcats-fp7.eu/ #LOAD HELCATS HIGeoCAT url_higeocat='https://www.helcats-fp7.eu/catalogues/data/HCME_WP3_V06.vot' try: urllib.request.urlretrieve(url_higeocat,'data/HCME_WP3_V06.vot') except urllib.error.URLError as e: print('higeocat not loaded') higeocat=hc.load_higeocat_vot('data/HCME_WP3_V06.vot') higeocat_time=parse_time(higeocat['Date']).datetime higeocat_t0=parse_time(higeocat['SSE Launch']).datetime #backprojected launch time sse_speed=higeocat['SSE Speed'] sse_lon=higeocat['SSE HEEQ Long'] sse_lat=higeocat['SSE HEEQ Lat'] higeocat_name=np.array(higeocat['SC'].astype(str)) print('done') ``` ## generate HIGeoCAT kinematics ``` print('generate kinematics for each SSEF30 CME') generate_hi_kin=False if generate_hi_kin: t0=higeocat_t0 kindays=60 #lists for all times, r, longitude, latitude all_time=[] all_r=[] all_lat=[] all_lon=[] all_name=[] #go through 
all HI CMEs for i in np.arange(len(higeocat)): #for i in np.arange(100): #times for each event kinematic time1=[] tstart1=copy.deepcopy(t0[i]) tend1=tstart1+timedelta(days=kindays) #make 30 min datetimes while tstart1 < tend1: time1.append(tstart1) tstart1 += timedelta(minutes=30) #make kinematics timestep=np.zeros(kindays*24*2) cme_r=np.zeros(kindays*24*2) cme_lon=np.zeros(kindays*24*2) cme_lat=np.zeros(kindays*24*2) cme_name=np.chararray(kindays*24*2) for j in np.arange(0,len(cme_r)-1,1): cme_r[j]=sse_speed[i]*timestep[j]/(const.au.value*1e-3) #km to AU cme_lon[j]=sse_lon[i] cme_lat[j]=sse_lat[i] timestep[j+1]=timestep[j]+30*60 #seconds cme_name[j]=higeocat_name[i] #### linear interpolate to 30 min resolution #find next full hour after t0 format_str = '%Y-%m-%d %H' t0r = datetime.strptime(datetime.strftime(t0[i], format_str), format_str) +timedelta(hours=1) time2=[] tstart2=copy.deepcopy(t0r) tend2=tstart2+timedelta(days=kindays) #make 30 min datetimes while tstart2 < tend2: time2.append(tstart2) tstart2 += timedelta(minutes=30) time2_num=parse_time(time2).plot_date time1_num=parse_time(time1).plot_date #linear interpolation to time_mat times cme_r = np.interp(time2_num, time1_num,cme_r ) cme_lat = np.interp(time2_num, time1_num,cme_lat ) cme_lon = np.interp(time2_num, time1_num,cme_lon ) #cut at 5 AU cutoff=np.where(cme_r<5)[0] #write to all #print(cutoff[0],cutoff[-1]) all_time.extend(time2[cutoff[0]:cutoff[-2]]) all_r.extend(cme_r[cutoff[0]:cutoff[-2]]) all_lat.extend(cme_lat[cutoff[0]:cutoff[-2]]) all_lon.extend(cme_lon[cutoff[0]:cutoff[-2]]) all_name.extend(cme_name[cutoff[0]:cutoff[-2]]) plt.figure(1) plt.plot(all_time,all_r) plt.figure(2) plt.plot(all_time,all_lat,'ok') plt.figure(3) plt.plot(all_time,all_lon,'ok') ################### sort all kinematics by time all_time_num=mdates.date2num(all_time) all_r=np.array(all_r) all_lat=np.array(all_lat) all_lon=np.array(all_lon) all_name=np.array(all_name) #get indices for sorting for time 
sortind=np.argsort(all_time_num,axis=0) #cme_time_sort=mdates.num2date(all_time_num[sortind]) cme_time_sort_num=all_time_num[sortind] cme_r_sort=all_r[sortind] cme_lat_sort=all_lat[sortind] cme_lon_sort=all_lon[sortind] cme_name_sort=all_name[sortind].astype(str) #plt.plot(cme_time_sort,cme_r_sort) #plt.plot(cme_time_sort,cme_r_sort) plt.figure(4) plt.plot(all_time,all_lon,'.k') plt.plot(cme_time_sort_num,cme_lon_sort,'.b') pickle.dump([cme_time_sort_num,cme_r_sort,cme_lat_sort,cme_lon_sort,cme_name_sort], open('data/higeocat_kinematics.p', "wb")) print('load HIGEOCAT kinematics') [hc_time_num,hc_r,hc_lat,hc_lon,hc_name]=pickle.load(open('data/higeocat_kinematics.p', "rb")) print('done') ``` ### define functions ``` def make_positions(): ############### PSP starttime =datetime(2018, 8,13) endtime = datetime(2025, 8, 31) psp_time = [] while starttime < endtime: psp_time.append(starttime) starttime += timedelta(days=res_in_days) psp_time_num=mdates.date2num(psp_time) spice.furnish(spicedata.get_kernel('psp_pred')) psp=spice.Trajectory('SPP') psp.generate_positions(psp_time,'Sun',frame) print('PSP pos') psp.change_units(astropy.units.AU) [psp_r, psp_lat, psp_lon]=hd.cart2sphere(psp.x,psp.y,psp.z) print('PSP conv') ############### BepiColombo starttime =datetime(2018, 10, 21) endtime = datetime(2025, 11, 2) bepi_time = [] while starttime < endtime: bepi_time.append(starttime) starttime += timedelta(days=res_in_days) bepi_time_num=mdates.date2num(bepi_time) spice.furnish(spicedata.get_kernel('bepi_pred')) bepi=spice.Trajectory('BEPICOLOMBO MPO') # or BEPICOLOMBO MMO bepi.generate_positions(bepi_time,'Sun',frame) bepi.change_units(astropy.units.AU) [bepi_r, bepi_lat, bepi_lon]=hd.cart2sphere(bepi.x,bepi.y,bepi.z) print('Bepi') ############### Solar Orbiter starttime = datetime(2020, 3, 1) endtime = datetime(2029, 12, 31) solo_time = [] while starttime < endtime: solo_time.append(starttime) starttime += timedelta(days=res_in_days) solo_time_num=mdates.date2num(solo_time) 
# make_positions (continued): Solar Orbiter trajectory, overview plots, planets, STEREO-A, saving
    spice.furnish(spicedata.get_kernel('solo_2020'))
    solo=spice.Trajectory('Solar Orbiter')
    solo.generate_positions(solo_time, 'Sun',frame)
    solo.change_units(astropy.units.AU)
    [solo_r, solo_lat, solo_lon]=hd.cart2sphere(solo.x,solo.y,solo.z)
    print('Solo')

    ########### plots

    #per-spacecraft overview: r / lat / lon vs time
    plt.figure(1, figsize=(12,9))
    plt.plot_date(psp_time,psp_r,'-', label='R')
    plt.plot_date(psp_time,psp_lat,'-',label='lat')
    plt.plot_date(psp_time,psp_lon,'-',label='lon')
    plt.ylabel('AU / RAD')
    plt.legend()

    plt.figure(2, figsize=(12,9))
    plt.plot_date(bepi_time,bepi_r,'-', label='R')
    plt.plot_date(bepi_time,bepi_lat,'-',label='lat')
    plt.plot_date(bepi_time,bepi_lon,'-',label='lon')
    plt.title('Bepi Colombo position '+frame)
    plt.ylabel('AU / RAD')
    plt.legend()

    plt.figure(3, figsize=(12,9))
    plt.plot_date(solo_time,solo_r,'-', label='R')
    plt.plot_date(solo_time,solo_lat,'-',label='lat')
    plt.plot_date(solo_time,solo_lon,'-',label='lon')
    plt.title('Solar Orbiter position '+frame)
    plt.ylabel('AU / RAD')
    plt.legend()

    ######## R with all three
    plt.figure(4, figsize=(16,10))
    plt.plot_date(psp_time,psp.r,'-',label='PSP')
    plt.plot_date(bepi_time,bepi.r,'-',label='Bepi Colombo')
    plt.plot_date(solo_time,solo.r,'-',label='Solar Orbiter')
    plt.legend()
    plt.title('Heliocentric distance of heliospheric observatories')
    plt.ylabel('AU')
    plt.savefig(positions_plot_directory+'/bepi_psp_solo_R.png')

    ##### Longitude all three
    plt.figure(5, figsize=(16,10))
    plt.plot_date(psp_time,psp_lon*180/np.pi,'-',label='PSP')
    plt.plot_date(bepi_time,bepi_lon*180/np.pi,'-',label='Bepi Colombo')
    plt.plot_date(solo_time,solo_lon*180/np.pi,'-',label='Solar Orbiter')
    plt.legend()
    plt.title(frame+' longitude')
    plt.ylabel('DEG')
    plt.savefig(positions_plot_directory+'/bepi_psp_solo_longitude_'+frame+'.png')

    ############# Earth, Mercury, Venus, STA
    #see https://docs.heliopy.org/en/stable/data/spice.html
    planet_kernel=spicedata.get_kernel('planet_trajectories')

    #one shared time grid for Earth and the other planets / STEREO-A
    starttime =datetime(2018, 1, 1)
    endtime = datetime(2029, 12, 31)
    earth_time = []
    while starttime < endtime:
        earth_time.append(starttime)
        starttime += timedelta(days=res_in_days)
    earth_time_num=mdates.date2num(earth_time)

    earth=spice.Trajectory('399')  #399 for Earth, not barycenter (because of moon)
    earth.generate_positions(earth_time,'Sun',frame)
    earth.change_units(astropy.units.AU)
    [earth_r, earth_lat, earth_lon]=hd.cart2sphere(earth.x,earth.y,earth.z)
    print('Earth')

    ################ mercury
    mercury_time_num=earth_time_num
    mercury=spice.Trajectory('1')  #barycenter
    mercury.generate_positions(earth_time,'Sun',frame)
    mercury.change_units(astropy.units.AU)
    [mercury_r, mercury_lat, mercury_lon]=hd.cart2sphere(mercury.x,mercury.y,mercury.z)
    print('mercury')

    ################# venus
    venus_time_num=earth_time_num
    venus=spice.Trajectory('2')
    venus.generate_positions(earth_time,'Sun',frame)
    venus.change_units(astropy.units.AU)
    [venus_r, venus_lat, venus_lon]=hd.cart2sphere(venus.x,venus.y,venus.z)
    print('venus')

    ############### Mars
    mars_time_num=earth_time_num
    mars=spice.Trajectory('4')
    mars.generate_positions(earth_time,'Sun',frame)
    mars.change_units(astropy.units.AU)
    [mars_r, mars_lat, mars_lon]=hd.cart2sphere(mars.x,mars.y,mars.z)
    print('mars')

    #############stereo-A
    sta_time_num=earth_time_num
    spice.furnish(spicedata.get_kernel('stereo_a_pred'))
    sta=spice.Trajectory('-234')
    sta.generate_positions(earth_time,'Sun',frame)
    sta.change_units(astropy.units.AU)
    [sta_r, sta_lat, sta_lon]=hd.cart2sphere(sta.x,sta.y,sta.z)
    print('STEREO-A')

    #save positions
    if high_res_mode:
        #high resolution: dump the raw lists/arrays for the three inner-heliosphere spacecraft only
        pickle.dump([psp_time,psp_time_num,psp_r,psp_lon,psp_lat,bepi_time,bepi_time_num,bepi_r,bepi_lon,bepi_lat,solo_time,solo_time_num,solo_r,solo_lon,solo_lat], open( 'positions_plots/psp_solo_bepi_'+frame+'_1min.p', "wb" ) )
    else:
        #standard resolution: pack each body into a recarray with fields time/r/lon/lat/x/y/z
        #(note: this rebinds the names psp, bepi, ... from Trajectory objects to recarrays)
        psp=np.rec.array([psp_time_num,psp_r,psp_lon,psp_lat, psp.x, psp.y,psp.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])
        bepi=np.rec.array([bepi_time_num,bepi_r,bepi_lon,bepi_lat,bepi.x, bepi.y,bepi.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])
        solo=np.rec.array([solo_time_num,solo_r,solo_lon,solo_lat,solo.x, solo.y,solo.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])
        sta=np.rec.array([sta_time_num,sta_r,sta_lon,sta_lat,sta.x, sta.y,sta.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])
        earth=np.rec.array([earth_time_num,earth_r,earth_lon,earth_lat, earth.x, earth.y,earth.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])
        venus=np.rec.array([venus_time_num,venus_r,venus_lon,venus_lat, venus.x, venus.y,venus.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])
        mars=np.rec.array([mars_time_num,mars_r,mars_lon,mars_lat, mars.x, mars.y,mars.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])
        mercury=np.rec.array([mercury_time_num,mercury_r,mercury_lon,mercury_lat,mercury.x, mercury.y,mercury.z],dtype=[('time','f8'),('r','f8'),('lon','f8'),('lat','f8'),('x','f8'),('y','f8'),('z','f8')])
        pickle.dump([psp, bepi, solo, sta, earth, venus, mars, mercury,frame], open( 'data/positions_psp_solo_bepi_sta_planets_'+frame+'_1hour.p', "wb" ) )
        #load with [psp, bepi, solo, sta, earth, venus, mars, mercury,frame]=pickle.load( open( 'positions_psp_solo_bepi_sta_planets_HCI_6hours_2018_2025.p', "rb" ) )

    end=time.time()
    #NOTE(review): 'start' is a global set elsewhere in the notebook -- confirm
    print( 'generate position took time in seconds:', round((end-start),1) )


def make_frame(k):
    '''
    Render animation frame k (called per frame, suitable for multiprocessing).

    Draws the polar position plot with HI CME circles plus the in situ data
    panels and saves the frame as a jpg into outputdirectory.

    NOTE(review): uses many module-level globals (frame_time_num, res_in_days,
    psp/bepi/solo/earth/... recarrays, w1/s1/p1/o1/b1 in situ arrays, fsize,
    symsize_*, days_window, outputdirectory, ...) -- confirm they are set.
    '''

    fig=plt.figure(1, figsize=(19.2,10.8), dpi=100) #full hd
    #fig=plt.figure(1, figsize=(19.2*2,10.8*2), dpi=100) #4k
    ax = plt.subplot2grid((7,2), (0, 0), rowspan=7, projection='polar')
    backcolor='black'
    psp_color='black'
    bepi_color='blue'
    solo_color='coral'

    #timestamp of this frame: global start time plus k frame steps
    frame_time_str=str(mdates.num2date(frame_time_num+k*res_in_days))
    #print( 'current frame_time_num', frame_time_str, ' ',k)
# make_frame (continued): time indices, polar positions, CME circles, Parker spiral, in situ panels
    #these have their own times: nearest-sample index for each spacecraft's own time grid
    dct=frame_time_num+k*res_in_days-psp.time
    psp_timeind=np.argmin(abs(dct))

    dct=frame_time_num+k*res_in_days-bepi.time
    bepi_timeind=np.argmin(abs(dct))

    dct=frame_time_num+k*res_in_days-solo.time
    solo_timeind=np.argmin(abs(dct))

    #all same times: planets and STEREO-A share the Earth time grid
    dct=frame_time_num+k*res_in_days-earth.time
    earth_timeind=np.argmin(abs(dct))

    #plot all positions including text R lon lat for some
    #white background
    #polar plot uses lon as angle and r*cos(lat) as radius (projection onto the ecliptic-like plane)
    ax.scatter(venus.lon[earth_timeind], venus.r[earth_timeind]*np.cos(venus.lat[earth_timeind]), s=symsize_planet, c='orange', alpha=1,lw=0,zorder=3)
    ax.scatter(mercury.lon[earth_timeind], mercury.r[earth_timeind]*np.cos(mercury.lat[earth_timeind]), s=symsize_planet, c='dimgrey', alpha=1,lw=0,zorder=3)
    ax.scatter(earth.lon[earth_timeind], earth.r[earth_timeind]*np.cos(earth.lat[earth_timeind]), s=symsize_planet, c='mediumseagreen', alpha=1,lw=0,zorder=3)
    ax.scatter(sta.lon[earth_timeind], sta.r[earth_timeind]*np.cos(sta.lat[earth_timeind]), s=symsize_spacecraft, c='red', marker='s', alpha=1,lw=0,zorder=3)
    ax.scatter(mars.lon[earth_timeind], mars.r[earth_timeind]*np.cos(mars.lat[earth_timeind]), s=symsize_planet, c='orangered', alpha=1,lw=0,zorder=3)

    #plot stereoa fov hi1/2
    hp.plot_stereo_hi_fov(sta,frame_time_num, earth_timeind, ax,'A')

    #positions text
    f10=plt.figtext(0.01,0.93,' R lon lat', fontsize=fsize+2, ha='left',color=backcolor)

    #in HEEQ the Earth longitude is 0 by definition, so print 0.0 directly
    if frame=='HEEQ':
        earth_text='Earth: '+str(f'{earth.r[earth_timeind]:6.2f}')+str(f'{0.0:8.1f}')+str(f'{np.rad2deg(earth.lat[earth_timeind]):8.1f}')
    else:
        earth_text='Earth: '+str(f'{earth.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(earth.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(earth.lat[earth_timeind]):8.1f}')

    mars_text='Mars: '+str(f'{mars.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(mars.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(mars.lat[earth_timeind]):8.1f}')
    sta_text='STA: '+str(f'{sta.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(sta.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(sta.lat[earth_timeind]):8.1f}')

    #position and text (timeind > 0 means the spacecraft time grid has started -- TODO confirm)
    if psp_timeind > 0:
        #plot trajectory
        ax.scatter(psp.lon[psp_timeind], psp.r[psp_timeind]*np.cos(psp.lat[psp_timeind]), s=symsize_spacecraft, c=psp_color, marker='s', alpha=1,lw=0,zorder=3)
        #plot position as text
        psp_text='PSP: '+str(f'{psp.r[psp_timeind]:6.2f}')+str(f'{np.rad2deg(psp.lon[psp_timeind]):8.1f}')+str(f'{np.rad2deg(psp.lat[psp_timeind]):8.1f}')
        f5=plt.figtext(0.01,0.78,psp_text, fontsize=fsize, ha='left',color=psp_color)
        if plot_orbit:
            #fading orbit trail: fadeind samples before and after the current time
            fadestart=psp_timeind-fadeind
            if fadestart < 0: fadestart=0
            ax.plot(psp.lon[fadestart:psp_timeind+fadeind], psp.r[fadestart:psp_timeind+fadeind]*np.cos(psp.lat[fadestart:psp_timeind+fadeind]), c=psp_color, alpha=0.6,lw=1,zorder=3)

    if bepi_timeind > 0:
        ax.scatter(bepi.lon[bepi_timeind], bepi.r[bepi_timeind]*np.cos(bepi.lat[bepi_timeind]), s=symsize_spacecraft, c=bepi_color, marker='s', alpha=1,lw=0,zorder=3)
        bepi_text='Bepi: '+str(f'{bepi.r[bepi_timeind]:6.2f}')+str(f'{np.rad2deg(bepi.lon[bepi_timeind]):8.1f}')+str(f'{np.rad2deg(bepi.lat[bepi_timeind]):8.1f}')
        f6=plt.figtext(0.01,0.74,bepi_text, fontsize=fsize, ha='left',color=bepi_color)
        if plot_orbit:
            fadestart=bepi_timeind-fadeind
            if fadestart < 0: fadestart=0
            ax.plot(bepi.lon[fadestart:bepi_timeind+fadeind], bepi.r[fadestart:bepi_timeind+fadeind]*np.cos(bepi.lat[fadestart:bepi_timeind+fadeind]), c=bepi_color, alpha=0.6,lw=1,zorder=3)

    if solo_timeind > 0:
        ax.scatter(solo.lon[solo_timeind], solo.r[solo_timeind]*np.cos(solo.lat[solo_timeind]), s=symsize_spacecraft, c=solo_color, marker='s', alpha=1,lw=0,zorder=3)
        solo_text='SolO: '+str(f'{solo.r[solo_timeind]:6.2f}')+str(f'{np.rad2deg(solo.lon[solo_timeind]):8.1f}')+str(f'{np.rad2deg(solo.lat[solo_timeind]):8.1f}')
        f7=plt.figtext(0.01,0.7,solo_text, fontsize=fsize, ha='left',color=solo_color)
        if plot_orbit:
            fadestart=solo_timeind-fadeind
            if fadestart < 0: fadestart=0
            ax.plot(solo.lon[fadestart:solo_timeind+fadeind], solo.r[fadestart:solo_timeind+fadeind]*np.cos(solo.lat[fadestart:solo_timeind+fadeind]), c=solo_color, alpha=0.6,lw=1,zorder=3)

    f10=plt.figtext(0.01,0.9,earth_text, fontsize=fsize, ha='left',color='mediumseagreen')
    f9=plt.figtext(0.01,0.86,mars_text, fontsize=fsize, ha='left',color='orangered')
    f8=plt.figtext(0.01,0.82,sta_text, fontsize=fsize, ha='left',color='red')

    ######################## 1 plot all active CME circles
    plot_hi_geo=True
    if plot_hi_geo:

        lamda=30   #SSEF30 half width of the circular CME front, degrees

        #check for active CME indices from HIGeoCAT (with the lists produced above in this notebook)
        #check where time is identical to frame time
        cmeind=np.where(hc_time_num == frame_time_num+k*res_in_days)
        #print(cmeind)

        #plot all active CME circles
        #if np.size(cmeind) >0:
        for p in range(0,np.size(cmeind)):
            #print p, h.all_apex_long[cmeind[0][p]], h.all_apex_r[cmeind[0][p]]

            #central d: unit vector of the CME apex direction scaled by apex distance
            dir=np.array([np.cos(hc_lon[cmeind[0][p]]*np.pi/180),np.sin(hc_lon[cmeind[0][p]]*np.pi/180)])*hc_r[cmeind[0][p]]

            #points on circle, correct for longitude
            circ_ang = ((np.arange(111)*2-20)*np.pi/180)-(hc_lon[cmeind[0][p]]*np.pi/180)

            #these equations are from moestl and davies 2013
            xc = 0+dir[0]/(1+np.sin(lamda*np.pi/180)) + (hc_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.sin(circ_ang)
            yc = 0+dir[1]/(1+np.sin(lamda*np.pi/180)) + (hc_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.cos(circ_ang)

            #now convert to polar coordinates
            rcirc=np.sqrt(xc**2+yc**2)
            longcirc=np.arctan2(yc,xc)

            #plot in correct color: red for STEREO-A detections, blue for STEREO-B
            if hc_name[cmeind[0][p]] == 'A':
                #make alpha dependent on distance to solar equatorial plane - maximum latitude is -40/+40 -
                #so to make also the -/+40 latitude CME visible, divide by 50 so alpha > 0 for these events
                ax.plot(longcirc,rcirc, c='red', alpha=1-abs(hc_lat[cmeind[0][p]]/50), lw=1.5)
            if hc_name[cmeind[0][p]] == 'B':
                ax.plot(longcirc,rcirc, c='royalblue', alpha=1-abs(hc_lat[cmeind[0][p]]/50), lw=1.5)

    #parker spiral
    if plot_parker:
        #12 spiral arms, co-rotated with the Sun as the animation advances
        #NOTE(review): sun_rot, AUkm, theta and black are globals set elsewhere -- confirm
        for q in np.arange(0,12):
            omega=2*np.pi/(sun_rot*60*60*24) #solar rotation in seconds
            v=400/AUkm #km/s
            r0=695000/AUkm
            r=v/omega*theta+r0*7
            if not black:
                ax.plot(-theta+np.deg2rad(0+(360/24.47)*res_in_days*k+360/12*q), r, alpha=0.4, lw=0.5,color='grey',zorder=2)
            if black:
                ax.plot(-theta+np.deg2rad(0+(360/24.47)*res_in_days*k+360/12*q), r, alpha=0.7, lw=0.7,color='grey',zorder=2)

    #set axes and grid
    ax.set_theta_zero_location('E')
    #plt.thetagrids(range(0,360,45),(u'0\u00b0 '+frame+' longitude',u'45\u00b0',u'90\u00b0',u'135\u00b0',u'+/- 180\u00b0',u'- 135\u00b0',u'- 90\u00b0',u'- 45\u00b0'), ha='right', fmt='%d',fontsize=fsize-1,color=backcolor, alpha=0.9)
    plt.thetagrids(range(0,360,45),(u'0\u00b0',u'45\u00b0',u'90\u00b0',u'135\u00b0',u'+/- 180\u00b0',u'- 135\u00b0',u'- 90\u00b0',u'- 45\u00b0'), ha='center', fmt='%d',fontsize=fsize-1,color=backcolor, alpha=0.9,zorder=4)
    #plt.rgrids((0.10,0.39,0.72,1.00,1.52),('0.10','0.39','0.72','1.0','1.52 AU'),angle=125, fontsize=fsize,alpha=0.9, color=backcolor)
    plt.rgrids((0.1,0.3,0.5,0.7,1.0),('0.10','0.3','0.5','0.7','1.0 AU'),angle=125, fontsize=fsize-3,alpha=0.5, color=backcolor)
    #ax.set_ylim(0, 1.75) #with Mars
    ax.set_ylim(0, 1.2)

    #Sun
    ax.scatter(0,0,s=100,c='yellow',alpha=1, edgecolors='black', linewidth=0.3)

    #------------------------------------------------ IN SITU DATA ------------------------------------------------------

    time_now=frame_time_num+k*res_in_days

    #cut data for plot window so faster (Wind and STEREO-A cover the whole interval)
    windex1=np.where(w_time_num > time_now-days_window)[0][0]
    windex2=np.where(w_time_num > time_now+days_window)[0][0]
    w=w1[windex1:windex2]

    sindex1=np.where(s_time_num > time_now-days_window)[0][0]
    sindex2=np.where(s_time_num > time_now+days_window)[0][0]
    s=s1[sindex1:sindex2]

    #is data available from new missions?
    #three cases: data covers the whole window / data ends inside the window / no data yet
    if p_time_num[-1] > time_now+days_window:
        pindex1=np.where(p_time_num > time_now-days_window)[0][0]
        pindex2=np.where(p_time_num > time_now+days_window)[0][0]
        #pindex2=np.size(p1)-1
        p=p1[pindex1:pindex2]
    elif np.logical_and((p_time_num[-1] < time_now+days_window),(p_time_num[-1] > time_now-days_window)):
        pindex1=np.where(p_time_num > time_now-days_window)[0][0]
        pindex2=np.size(p1)-1
        p=p1[pindex1:pindex2]
    else: p=[]

    if o_time_num[-1] > time_now+days_window:
        oindex1=np.where(o_time_num > time_now-days_window)[0][0]
        oindex2=np.where(o_time_num > time_now+days_window)[0][0]
        #use last index oindex2=np.size(o1)-1
        #NOTE(review): the flattened source is ambiguous here -- the override to the
        #last index may have been live code rather than a comment; verify upstream.
        o=o1[oindex1:oindex2]
    elif np.logical_and((o_time_num[-1] < time_now+days_window),(o_time_num[-1] > time_now-days_window)):
        oindex1=np.where(o_time_num > time_now-days_window)[0][0]
        oindex2=np.size(o1)-1
        o=o1[oindex1:oindex2]
    else: o=[]

    if b_time_num[-1] > time_now+days_window:
        bindex1=np.where(b_time_num > time_now-days_window)[0][0]
        bindex2=np.where(b_time_num > time_now+days_window)[0][0]
        #bindex2=np.size(b1)-1
        b=b1[bindex1:bindex2]
    else: b=[]

    #---------------- Wind mag
    ax4 = plt.subplot2grid((7,2), (0, 1))
    #plt.plot_date(w_tm,wbx,'-r',label='BR',linewidth=0.5)
    #plt.plot_date(w_tm,wby,'-g',label='BT',linewidth=0.5)
    #plt.plot_date(w_tm,wbz,'-b',label='BN',linewidth=0.5)
    #plt.plot_date(w_tm,wbt,'-k',label='Btotal',lw=0.5)
    plt.plot_date(w.time,w.bx,'-r',label='BR',linewidth=0.5)
    plt.plot_date(w.time,w.by,'-g',label='BT',linewidth=0.5)
    plt.plot_date(w.time,w.bz,'-b',label='BN',linewidth=0.5)
    plt.plot_date(w.time,w.bt,'-k',label='Btotal',lw=0.5)
    #vertical marker line at the current frame time
    ax4.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)
    ax4.set_ylabel('B [nT] HEEQ',fontsize=fsize-1)
    ax4.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )
    ax4.set_xlim(time_now-days_window,time_now+days_window)
    ax4.set_ylim(np.nanmin(-w.bt)-5, np.nanmax(w.bt)+5)
    #plt.ylim((-18, 18))
    plt.yticks(fontsize=fsize-1)
    ax4.set_xticklabels([])

    #---------------- STEREO-A mag
    ax6 = plt.subplot2grid((7,2), (1, 1))
    #plt.plot_date(s_tm,sbx,'-r',label='BR',linewidth=0.5)
    #plt.plot_date(s_tm,sby,'-g',label='BT',linewidth=0.5)
    #plt.plot_date(s_tm,sbz,'-b',label='BN',linewidth=0.5)
    #plt.plot_date(s_tm,sbt,'-k',label='Btotal')
    plt.plot_date(s.time,s.bx,'-r',label='BR',linewidth=0.5)
    plt.plot_date(s.time,s.by,'-g',label='BT',linewidth=0.5)
    plt.plot_date(s.time,s.bz,'-b',label='BN',linewidth=0.5)
    plt.plot_date(s.time,s.bt,'-k',label='Btotal',linewidth=0.5)
    ax6.set_ylabel('B [nT] RTN',fontsize=fsize-1)
    ax6.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)
    #ax6.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )
    ax6.set_xlim(time_now-days_window,time_now+days_window)
    ax6.set_xticklabels([])
    ax6.set_ylim(np.nanmin(-s.bt)-5, np.nanmax(s.bt)+5)
    plt.yticks(fontsize=fsize-1)
    plt.tick_params( axis='x', labelbottom='off')
    #plt.ylim((-18, 18))

    #---------------- STEREO, Wind speed
    ax5 = plt.subplot2grid((7,2), (2, 1))
    plt.plot_date(w.time,w.vt,'-g',label='Wind',linewidth=0.7)
    plt.plot_date(s.time,s.vt,'-r',label='STEREO-A',linewidth=0.7)
    #ax5.legend(loc=1, fontsize=10)
    ax5.plot_date([time_now,time_now], [0,900],'-k', lw=0.5, alpha=0.8)
    ax5.set_xlim(time_now-days_window,time_now+days_window)
    plt.ylabel('V [km/s]',fontsize=fsize-1)
    plt.ylim((240, 750))
    plt.yticks(fontsize=fsize-1)
    ax5.set_xticklabels([])

    #ax7 = plt.subplot2grid((6,2), (5, 1))
    #plt.plot_date(s.time,s.vt,'-k',label='V',linewidth=0.7)
    #ax7.plot_date([time_now,time_now], [0,800],'-k', lw=0.5, alpha=0.8)
    #ax7.set_xlim(time_now-days_window,time_now+days_window)
    #ax7.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )
    #plt.ylabel('V [km/s]',fontsize=fsize-1)
    #plt.tick_params(axis='x', labelbottom='off')
    #plt.ylim((240, 810))
    #plt.yticks(fontsize=fsize-1)
    #plt.xticks(fontsize=fsize)

    #---------------------- PSP speed
    ax3 = plt.subplot2grid((7,2), (3, 1))
    #axes are always drawn; the data trace is added only when PSP data exists (np.size(p)>0)
    ax3.plot_date([time_now,time_now], [0,1000],'-k', lw=0.5, alpha=0.8)
    ax3.set_xticklabels([])
    ax3.set_xlim(time_now-days_window,time_now+days_window)
    ax3.set_ylim((240, 810))
    plt.ylabel('V [km/s]',fontsize=fsize-1)
    plt.yticks(fontsize=fsize-1)
    ax3.set_xticklabels([])

    if np.size(p)>0:
        #plt.plot_date(p_tp,pv,'-k',label='V',linewidth=0.5)
        plt.plot_date(p.time,p.vt,'-k',label='V',linewidth=0.7)
        ax3.set_xlim(time_now-days_window,time_now+days_window)
        ax3.plot_date([time_now,time_now], [0,800],'-k', lw=0.5, alpha=0.8)
        ax3.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )
        plt.ylabel('V [km/s]',fontsize=fsize-1)
        plt.ylim((240, 750))
        plt.yticks(fontsize=fsize-1)
        ax3.set_xticklabels([])

    #---------------------- PSP mag
    ax2 = plt.subplot2grid((7,2), (4, 1))
    ax2.plot_date([time_now,time_now], [-1000,1000],'-k', lw=0.5, alpha=0.8)
    ax2.set_xticklabels([])
    ax2.set_xlim(time_now-days_window,time_now+days_window)
    ax2.set_ylim((-18, 18))
    ax2.set_ylabel('B [nT] RTN',fontsize=fsize-1)
    plt.yticks(fontsize=fsize-1)

    #when there is data, plot:
    if np.size(p)>0:
        plt.plot_date(p.time,p.bx,'-r',label='BR',linewidth=0.5)
        plt.plot_date(p.time,p.by,'-g',label='BT',linewidth=0.5)
        plt.plot_date(p.time,p.bz,'-b',label='BN',linewidth=0.5)
        plt.plot_date(p.time,p.bt,'-k',label='Btotal',lw=0.5)
        ax2.plot_date([time_now,time_now], [-1000,1000],'-k', lw=0.5, alpha=0.8)
        ax2.set_ylabel('B [nT] RTN',fontsize=fsize-1)
        ax2.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )
        ax2.set_xlim(time_now-days_window,time_now+days_window)
        #only autoscale when the window contains finite values (all-NaN windows would crash set_ylim)
        if np.isfinite(np.nanmin(-p.bt)):
            ax2.set_ylim(np.nanmin(-p.bt)-5, np.nanmax(p.bt)+5)
        ax2.set_xticklabels([])
        plt.yticks(fontsize=fsize-1)

    #---------------------- SolO mag
    ax7 = plt.subplot2grid((7,2), (5, 1))
    ax7.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)
    ax7.set_xticklabels([])
    ax7.set_xlim(time_now-days_window,time_now+days_window)
    ax7.set_ylim((-18, 18))
    ax7.set_ylabel('B [nT] RTN',fontsize=fsize-1)
    plt.yticks(fontsize=fsize-1)
    ax7.set_xticklabels([])

    #when there is data, plot:
    if np.size(o)>0:
        plt.plot_date(o.time,o.bx,'-r',label='BR',linewidth=0.5)
        plt.plot_date(o.time,o.by,'-g',label='BT',linewidth=0.5)
        plt.plot_date(o.time,o.bz,'-b',label='BN',linewidth=0.5)
        plt.plot_date(o.time,o.bt,'-k',label='Btotal',lw=0.5)
        ax7.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)
        ax7.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )
        ax7.set_xlim(time_now-days_window,time_now+days_window)
        if np.isfinite(np.nanmax(o.bt)):
            ax7.set_ylim((np.nanmin(-o.bt)-5, np.nanmax(o.bt)+5))
        else:
            ax7.set_ylim((-15, 15))
        ax7.set_xticklabels([])
        plt.yticks(fontsize=fsize-1)

    #---------------------- Bepi mag
    ax8 = plt.subplot2grid((7,2), (6, 1))
    ax8.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)
    ax8.set_xlim(time_now-days_window,time_now+days_window)
    ax8.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )
    ax8.set_ylim((-18, 18))
    ax8.set_ylabel('B [nT] RTN',fontsize=fsize-1)
    plt.yticks(fontsize=fsize-1)

    if np.size(b)>0:
        plt.plot_date(b.time,b.bx,'-r',label='BR',linewidth=0.5)
        plt.plot_date(b.time,b.by,'-g',label='BT',linewidth=0.5)
        plt.plot_date(b.time,b.bz,'-b',label='BN',linewidth=0.5)
        plt.plot_date(b.time,b.bt,'-k',label='Btotal',lw=0.5)
        ax8.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8)
        ax8.set_ylabel('B [nT] RTN',fontsize=fsize-1)
        ax8.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') )
        ax8.set_xlim(time_now-days_window,time_now+days_window)
        if np.isfinite(np.nanmax(b.bt)):
            ax8.set_ylim((np.nanmin(-b.bt)-5, np.nanmax(b.bt)+5))
        else:
            ax8.set_ylim((-15, 15))
        #ax8.set_ylim((np.nanmin(-b.bt)-5, np.nanmax(b.bt)+5))
        plt.yticks(fontsize=fsize-1)

    #panel labels along the right-hand column
    plt.figtext(0.95,0.82,'Wind', color='mediumseagreen', ha='center',fontsize=fsize+3)
    plt.figtext(0.95,0.71,'STEREO-A', color='red', ha='center',fontsize=fsize+3)
    plt.figtext(0.95,0.63,'Wind', color='mediumseagreen', ha='center',fontsize=fsize+3)
    plt.figtext(0.95,0.58,'STEREO-A', color='red', ha='center',fontsize=fsize+3)
    plt.figtext(0.95,0.49,'PSP ', color='black', ha='center',fontsize=fsize+3)
    plt.figtext(0.95,0.38,'PSP ', color='black', ha='center',fontsize=fsize+3)
    plt.figtext(0.95,0.28,'Solar Orbiter', color='coral', ha='center',fontsize=fsize+5)
    plt.figtext(0.95,0.16,'BepiColombo', color='blue', ha='center',fontsize=fsize+5)

    ############################
    #plot text for date extra so it does not move
    #year
    f1=plt.figtext(0.45,0.93,frame_time_str[0:4], ha='center',color=backcolor,fontsize=fsize+6)
    #month
    f2=plt.figtext(0.45+0.04,0.93,frame_time_str[5:7], ha='center',color=backcolor,fontsize=fsize+6)
    #day
    f3=plt.figtext(0.45+0.08,0.93,frame_time_str[8:10], ha='center',color=backcolor,fontsize=fsize+6)
    #hours
    f4=plt.figtext(0.45+0.12,0.93,frame_time_str[11:13], ha='center',color=backcolor,fontsize=fsize+6)

    plt.figtext(0.02, 0.02,'Spacecraft trajectories '+frame+' 2D projection', fontsize=fsize-1, ha='left',color=backcolor)
    plt.figtext(0.32,0.02,'――― trajectory from - 60 days to + 60 days', color='black', ha='center',fontsize=fsize-1)

    #signature
    #BC MPO-MAG (IGEP/IWF/ISAS/IC)
    #also for Solar Orbiter (MAG, IC), Parker (FIELDS, UCB), STA (IMPACT/PLASTIC, UNH, UCLA), Wind (MFI, SWE, NASA??)
STA-HI (RAL) plt.figtext(0.85,0.02,'Data sources: BepiColombo: MPO-MAG (IGEP/IWF/ISAS/IC), PSP (FIELDS, UCB), Solar Orbiter (MAG, IC)', fontsize=fsize-2, ha='right',color=backcolor) #signature plt.figtext(0.99,0.01/2,'Möstl, Weiss, Bailey, Reiss / Helio4Cast', fontsize=fsize-4, ha='right',color=backcolor) #save figure framestr = '%05i' % (k) filename=outputdirectory+'/pos_anim_'+framestr+'.jpg' if k==0: print(filename) plt.savefig(filename,dpi=200,facecolor=fig.get_facecolor(), edgecolor='none') #plt.clf() #if close==True: plt.close('all') plt.close('all') ########################################### loop end def make_frame2(k): ''' loop each frame in multiprocessing ''' fig=plt.figure(1, figsize=(19.2,10.8), dpi=100) #full hd #fig=plt.figure(1, figsize=(19.2*2,10.8*2), dpi=100) #4k ax = plt.subplot2grid((7,2), (0, 0), rowspan=7, projection='polar') backcolor='black' psp_color='black' bepi_color='blue' solo_color='coral' frame_time_str=str(mdates.num2date(frame_time_num+k*res_in_days)) print( 'current frame_time_num', frame_time_str, ' ',k) #these have their own times dct=frame_time_num+k*res_in_days-psp.time psp_timeind=np.argmin(abs(dct)) dct=frame_time_num+k*res_in_days-bepi.time bepi_timeind=np.argmin(abs(dct)) dct=frame_time_num+k*res_in_days-solo.time solo_timeind=np.argmin(abs(dct)) #all same times dct=frame_time_num+k*res_in_days-earth.time earth_timeind=np.argmin(abs(dct)) #plot all positions including text R lon lat for some #white background ax.scatter(venus.lon[earth_timeind], venus.r[earth_timeind]*np.cos(venus.lat[earth_timeind]), s=symsize_planet, c='orange', alpha=1,lw=0,zorder=3) ax.scatter(mercury.lon[earth_timeind], mercury.r[earth_timeind]*np.cos(mercury.lat[earth_timeind]), s=symsize_planet, c='dimgrey', alpha=1,lw=0,zorder=3) ax.scatter(earth.lon[earth_timeind], earth.r[earth_timeind]*np.cos(earth.lat[earth_timeind]), s=symsize_planet, c='mediumseagreen', alpha=1,lw=0,zorder=3) ax.scatter(sta.lon[earth_timeind], 
sta.r[earth_timeind]*np.cos(sta.lat[earth_timeind]), s=symsize_spacecraft, c='red', marker='s', alpha=1,lw=0,zorder=3) ax.scatter(mars.lon[earth_timeind], mars.r[earth_timeind]*np.cos(mars.lat[earth_timeind]), s=symsize_planet, c='orangered', alpha=1,lw=0,zorder=3) #plot stereoa fov hi1/2 hp.plot_stereo_hi_fov(sta,frame_time_num, earth_timeind, ax,'A') #positions text f10=plt.figtext(0.01,0.93,' R lon lat', fontsize=fsize+2, ha='left',color=backcolor) if frame=='HEEQ': earth_text='Earth: '+str(f'{earth.r[earth_timeind]:6.2f}')+str(f'{0.0:8.1f}')+str(f'{np.rad2deg(earth.lat[earth_timeind]):8.1f}') else: earth_text='Earth: '+str(f'{earth.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(earth.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(earth.lat[earth_timeind]):8.1f}') mars_text='Mars: '+str(f'{mars.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(mars.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(mars.lat[earth_timeind]):8.1f}') sta_text='STA: '+str(f'{sta.r[earth_timeind]:6.2f}')+str(f'{np.rad2deg(sta.lon[earth_timeind]):8.1f}')+str(f'{np.rad2deg(sta.lat[earth_timeind]):8.1f}') #position and text if psp_timeind > 0: #plot trajectorie ax.scatter(psp.lon[psp_timeind], psp.r[psp_timeind]*np.cos(psp.lat[psp_timeind]), s=symsize_spacecraft, c=psp_color, marker='s', alpha=1,lw=0,zorder=3) #plot positiona as text psp_text='PSP: '+str(f'{psp.r[psp_timeind]:6.2f}')+str(f'{np.rad2deg(psp.lon[psp_timeind]):8.1f}')+str(f'{np.rad2deg(psp.lat[psp_timeind]):8.1f}') f5=plt.figtext(0.01,0.78,psp_text, fontsize=fsize, ha='left',color=psp_color) if plot_orbit: fadestart=psp_timeind-fadeind if fadestart < 0: fadestart=0 ax.plot(psp.lon[fadestart:psp_timeind+fadeind], psp.r[fadestart:psp_timeind+fadeind]*np.cos(psp.lat[fadestart:psp_timeind+fadeind]), c=psp_color, alpha=0.6,lw=1,zorder=3) if bepi_timeind > 0: ax.scatter(bepi.lon[bepi_timeind], bepi.r[bepi_timeind]*np.cos(bepi.lat[bepi_timeind]), s=symsize_spacecraft, c=bepi_color, marker='s', alpha=1,lw=0,zorder=3) bepi_text='Bepi: 
'+str(f'{bepi.r[bepi_timeind]:6.2f}')+str(f'{np.rad2deg(bepi.lon[bepi_timeind]):8.1f}')+str(f'{np.rad2deg(bepi.lat[bepi_timeind]):8.1f}') f6=plt.figtext(0.01,0.74,bepi_text, fontsize=fsize, ha='left',color=bepi_color) if plot_orbit: fadestart=bepi_timeind-fadeind if fadestart < 0: fadestart=0 ax.plot(bepi.lon[fadestart:bepi_timeind+fadeind], bepi.r[fadestart:bepi_timeind+fadeind]*np.cos(bepi.lat[fadestart:bepi_timeind+fadeind]), c=bepi_color, alpha=0.6,lw=1,zorder=3) if solo_timeind > 0: ax.scatter(solo.lon[solo_timeind], solo.r[solo_timeind]*np.cos(solo.lat[solo_timeind]), s=symsize_spacecraft, c=solo_color, marker='s', alpha=1,lw=0,zorder=3) solo_text='SolO: '+str(f'{solo.r[solo_timeind]:6.2f}')+str(f'{np.rad2deg(solo.lon[solo_timeind]):8.1f}')+str(f'{np.rad2deg(solo.lat[solo_timeind]):8.1f}') f7=plt.figtext(0.01,0.7,solo_text, fontsize=fsize, ha='left',color=solo_color) if plot_orbit: fadestart=solo_timeind-fadeind if fadestart < 0: fadestart=0 ax.plot(solo.lon[fadestart:solo_timeind+fadeind], solo.r[fadestart:solo_timeind+fadeind]*np.cos(solo.lat[fadestart:solo_timeind+fadeind]), c=solo_color, alpha=0.6,lw=1,zorder=3) f10=plt.figtext(0.01,0.9,earth_text, fontsize=fsize, ha='left',color='mediumseagreen') f9=plt.figtext(0.01,0.86,mars_text, fontsize=fsize, ha='left',color='orangered') f8=plt.figtext(0.01,0.82,sta_text, fontsize=fsize, ha='left',color='red') ######################## 1 plot all active CME circles plot_hi_geo=True if plot_hi_geo: lamda=30 #check for active CME indices from HIGeoCAT (with the lists produced above in this notebook) #check where time is identical to frame time cmeind=np.where(hc_time_num == frame_time_num+k*res_in_days) #print(cmeind) #plot all active CME circles #if np.size(cmeind) >0: for p in range(0,np.size(cmeind)): #print p, h.all_apex_long[cmeind[0][p]], h.all_apex_r[cmeind[0][p]] #central d dir=np.array([np.cos(hc_lon[cmeind[0][p]]*np.pi/180),np.sin(hc_lon[cmeind[0][p]]*np.pi/180)])*hc_r[cmeind[0][p]] #points on circle, correct 
for longitude circ_ang = ((np.arange(111)*2-20)*np.pi/180)-(hc_lon[cmeind[0][p]]*np.pi/180) #these equations are from moestl and davies 2013 xc = 0+dir[0]/(1+np.sin(lamda*np.pi/180)) + (hc_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.sin(circ_ang) yc = 0+dir[1]/(1+np.sin(lamda*np.pi/180)) + (hc_r[cmeind[0][p]]*np.sin(lamda*np.pi/180)/(1+np.sin(lamda*np.pi/180)))*np.cos(circ_ang) #now convert to polar coordinates rcirc=np.sqrt(xc**2+yc**2) longcirc=np.arctan2(yc,xc) #plot in correct color if hc_name[cmeind[0][p]] == 'A': #make alpha dependent on distance to solar equatorial plane - maximum latitude is -40/+40 - #so to make also the -/+40 latitude CME visible, divide by 50 so alpha > 0 for these events ax.plot(longcirc,rcirc, c='red', alpha=1-abs(hc_lat[cmeind[0][p]]/50), lw=1.5) if hc_name[cmeind[0][p]] == 'B': ax.plot(longcirc,rcirc, c='royalblue', alpha=1-abs(hc_lat[cmeind[0][p]]/50), lw=1.5) #set axes and grid ax.set_theta_zero_location('E') #plt.thetagrids(range(0,360,45),(u'0\u00b0 '+frame+' longitude',u'45\u00b0',u'90\u00b0',u'135\u00b0',u'+/- 180\u00b0',u'- 135\u00b0',u'- 90\u00b0',u'- 45\u00b0'), ha='right', fmt='%d',fontsize=fsize-1,color=backcolor, alpha=0.9) plt.thetagrids(range(0,360,45),(u'0\u00b0',u'45\u00b0',u'90\u00b0',u'135\u00b0',u'+/- 180\u00b0',u'- 135\u00b0',u'- 90\u00b0',u'- 45\u00b0'), ha='center', fmt='%d',fontsize=fsize-1,color=backcolor, alpha=0.9,zorder=4) #plt.rgrids((0.10,0.39,0.72,1.00,1.52),('0.10','0.39','0.72','1.0','1.52 AU'),angle=125, fontsize=fsize,alpha=0.9, color=backcolor) plt.rgrids((0.1,0.3,0.5,0.7,1.0),('0.10','0.3','0.5','0.7','1.0 AU'),angle=125, fontsize=fsize-3,alpha=0.5, color=backcolor) #ax.set_ylim(0, 1.75) #with Mars ax.set_ylim(0, 1.2) #Sun ax.scatter(0,0,s=100,c='yellow',alpha=1, edgecolors='black', linewidth=0.3) #------------------------------------------------ IN SITU DATA ------------------------------------------------------ time_now=frame_time_num+k*res_in_days #cut data for plot 
window so faster windex1=np.where(w_time_num > time_now-days_window)[0][0] windex2=np.where(w_time_num > time_now+days_window)[0][0] w=w1[windex1:windex2] sindex1=np.where(s_time_num > time_now-days_window)[0][0] sindex2=np.where(s_time_num > time_now+days_window)[0][0] s=s1[sindex1:sindex2] #is data available from new missions? if p_time_num[-1] > time_now+days_window: pindex1=np.where(p_time_num > time_now-days_window)[0][0] pindex2=np.where(p_time_num > time_now+days_window)[0][0] #pindex2=np.size(p1)-1 p=p1[pindex1:pindex2] elif np.logical_and((p_time_num[-1] < time_now+days_window),(p_time_num[-1] > time_now-days_window)): pindex1=np.where(p_time_num > time_now-days_window)[0][0] pindex2=np.size(p1)-1 p=p1[pindex1:pindex2] else: p=[] if o_time_num[-1] > time_now+days_window: oindex1=np.where(o_time_num > time_now-days_window)[0][0] oindex2=np.where(o_time_num > time_now+days_window)[0][0] #use last index oindex2=np.size(o1)-1 o=o1[oindex1:oindex2] elif np.logical_and((o_time_num[-1] < time_now+days_window),(o_time_num[-1] > time_now-days_window)): oindex1=np.where(o_time_num > time_now-days_window)[0][0] oindex2=np.size(o1)-1 o=o1[oindex1:oindex2] else: o=[] if b_time_num[-1] > time_now+days_window: bindex1=np.where(b_time_num > time_now-days_window)[0][0] bindex2=np.where(b_time_num > time_now+days_window)[0][0] #bindex2=np.size(b1)-1 b=b1[bindex1:bindex2] else: b=[] #---------------- Wind mag ax4 = plt.subplot2grid((7,2), (0, 1)) #plt.plot_date(w_tm,wbx,'-r',label='BR',linewidth=0.5) #plt.plot_date(w_tm,wby,'-g',label='BT',linewidth=0.5) #plt.plot_date(w_tm,wbz,'-b',label='BN',linewidth=0.5) #plt.plot_date(w_tm,wbt,'-k',label='Btotal',lw=0.5) plt.plot_date(w.time,w.bx,'-r',label='BR',linewidth=0.5) plt.plot_date(w.time,w.by,'-g',label='BT',linewidth=0.5) plt.plot_date(w.time,w.bz,'-b',label='BN',linewidth=0.5) plt.plot_date(w.time,w.bt,'-k',label='Btotal',lw=0.5) ax4.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8) ax4.set_ylabel('B [nT] 
HEEQ',fontsize=fsize-1) ax4.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') ) ax4.set_xlim(time_now-days_window,time_now+days_window) ax4.set_ylim(np.nanmin(-w.bt)-5, np.nanmax(w.bt)+5) #plt.ylim((-18, 18)) plt.yticks(fontsize=fsize-1) ax4.set_xticklabels([]) #---------------- STEREO-A mag ax6 = plt.subplot2grid((7,2), (1, 1)) #plt.plot_date(s_tm,sbx,'-r',label='BR',linewidth=0.5) #plt.plot_date(s_tm,sby,'-g',label='BT',linewidth=0.5) #plt.plot_date(s_tm,sbz,'-b',label='BN',linewidth=0.5) #plt.plot_date(s_tm,sbt,'-k',label='Btotal') plt.plot_date(s.time,s.bx,'-r',label='BR',linewidth=0.5) plt.plot_date(s.time,s.by,'-g',label='BT',linewidth=0.5) plt.plot_date(s.time,s.bz,'-b',label='BN',linewidth=0.5) plt.plot_date(s.time,s.bt,'-k',label='Btotal',linewidth=0.5) ax6.set_ylabel('B [nT] RTN',fontsize=fsize-1) ax6.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8) #ax6.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') ) ax6.set_xlim(time_now-days_window,time_now+days_window) ax6.set_xticklabels([]) ax6.set_ylim(np.nanmin(-s.bt)-5, np.nanmax(s.bt)+5) plt.yticks(fontsize=fsize-1) plt.tick_params( axis='x', labelbottom='off') #plt.ylim((-18, 18)) #---------------- STEREO, Wind speed ax5 = plt.subplot2grid((7,2), (2, 1)) plt.plot_date(w.time,w.vt,'-g',label='Wind',linewidth=0.7) plt.plot_date(s.time,s.vt,'-r',label='STEREO-A',linewidth=0.7) #ax5.legend(loc=1, fontsize=10) ax5.plot_date([time_now,time_now], [0,900],'-k', lw=0.5, alpha=0.8) ax5.set_xlim(time_now-days_window,time_now+days_window) plt.ylabel('V [km/s]',fontsize=fsize-1) plt.ylim((240, 750)) plt.yticks(fontsize=fsize-1) ax5.set_xticklabels([]) #ax7 = plt.subplot2grid((6,2), (5, 1)) #plt.plot_date(s.time,s.vt,'-k',label='V',linewidth=0.7) #ax7.plot_date([time_now,time_now], [0,800],'-k', lw=0.5, alpha=0.8) #ax7.set_xlim(time_now-days_window,time_now+days_window) #ax7.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') ) #plt.ylabel('V 
[km/s]',fontsize=fsize-1) #plt.tick_params(axis='x', labelbottom='off') #plt.ylim((240, 810)) #plt.yticks(fontsize=fsize-1) #plt.xticks(fontsize=fsize) #---------------------- PSP speed ax3 = plt.subplot2grid((7,2), (3, 1)) ax3.plot_date([time_now,time_now], [0,1000],'-k', lw=0.5, alpha=0.8) ax3.set_xticklabels([]) ax3.set_xlim(time_now-days_window,time_now+days_window) ax3.set_ylim((240, 810)) plt.ylabel('V [km/s]',fontsize=fsize-1) plt.yticks(fontsize=fsize-1) ax3.set_xticklabels([]) if np.size(p)>0: #plt.plot_date(p_tp,pv,'-k',label='V',linewidth=0.5) plt.plot_date(p.time,p.vt,'-k',label='V',linewidth=0.7) ax3.set_xlim(time_now-days_window,time_now+days_window) ax3.plot_date([time_now,time_now], [0,800],'-k', lw=0.5, alpha=0.8) ax3.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') ) plt.ylabel('V [km/s]',fontsize=fsize-1) plt.ylim((240, 750)) plt.yticks(fontsize=fsize-1) ax3.set_xticklabels([]) #---------------------- PSP mag ax2 = plt.subplot2grid((7,2), (4, 1)) ax2.plot_date([time_now,time_now], [-1000,1000],'-k', lw=0.5, alpha=0.8) ax2.set_xticklabels([]) ax2.set_xlim(time_now-days_window,time_now+days_window) ax2.set_ylim((-18, 18)) ax2.set_ylabel('B [nT] RTN',fontsize=fsize-1) plt.yticks(fontsize=fsize-1) #when there is data, plot: if np.size(p)>0: plt.plot_date(p.time,p.bx,'-r',label='BR',linewidth=0.5) plt.plot_date(p.time,p.by,'-g',label='BT',linewidth=0.5) plt.plot_date(p.time,p.bz,'-b',label='BN',linewidth=0.5) plt.plot_date(p.time,p.bt,'-k',label='Btotal',lw=0.5) ax2.plot_date([time_now,time_now], [-1000,1000],'-k', lw=0.5, alpha=0.8) ax2.set_ylabel('B [nT] RTN',fontsize=fsize-1) ax2.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') ) ax2.set_xlim(time_now-days_window,time_now+days_window) if np.isfinite(np.nanmin(-p.bt)): ax2.set_ylim(np.nanmin(-p.bt)-5, np.nanmax(p.bt)+5) ax2.set_xticklabels([]) plt.yticks(fontsize=fsize-1) #---------------------- SolO mag ax7 = plt.subplot2grid((7,2), (5, 1)) 
ax7.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8) ax7.set_xticklabels([]) ax7.set_xlim(time_now-days_window,time_now+days_window) ax7.set_ylim((-18, 18)) ax7.set_ylabel('B [nT] RTN',fontsize=fsize-1) plt.yticks(fontsize=fsize-1) ax7.set_xticklabels([]) #when there is data, plot: if np.size(o)>0: plt.plot_date(o.time,o.bx,'-r',label='BR',linewidth=0.5) plt.plot_date(o.time,o.by,'-g',label='BT',linewidth=0.5) plt.plot_date(o.time,o.bz,'-b',label='BN',linewidth=0.5) plt.plot_date(o.time,o.bt,'-k',label='Btotal',lw=0.5) ax7.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8) ax7.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') ) ax7.set_xlim(time_now-days_window,time_now+days_window) if np.isfinite(np.nanmax(o.bt)): ax7.set_ylim((np.nanmin(-o.bt)-5, np.nanmax(o.bt)+5)) else: ax7.set_ylim((-15, 15)) ax7.set_xticklabels([]) plt.yticks(fontsize=fsize-1) #---------------------- Bepi mag ax8 = plt.subplot2grid((7,2), (6, 1)) ax8.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8) ax8.set_xlim(time_now-days_window,time_now+days_window) ax8.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') ) ax8.set_ylim((-18, 18)) ax8.set_ylabel('B [nT] RTN',fontsize=fsize-1) plt.yticks(fontsize=fsize-1) if np.size(b)>0: plt.plot_date(b.time,b.bx,'-r',label='BR',linewidth=0.5) plt.plot_date(b.time,b.by,'-g',label='BT',linewidth=0.5) plt.plot_date(b.time,b.bz,'-b',label='BN',linewidth=0.5) plt.plot_date(b.time,b.bt,'-k',label='Btotal',lw=0.5) ax8.plot_date([time_now,time_now], [-100,100],'-k', lw=0.5, alpha=0.8) ax8.set_ylabel('B [nT] RTN',fontsize=fsize-1) ax8.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%b-%d') ) ax8.set_xlim(time_now-days_window,time_now+days_window) if np.isfinite(np.nanmax(b.bt)): ax8.set_ylim((np.nanmin(-b.bt)-5, np.nanmax(b.bt)+5)) else: ax8.set_ylim((-15, 15)) #ax8.set_ylim((np.nanmin(-b.bt)-5, np.nanmax(b.bt)+5)) plt.yticks(fontsize=fsize-1) 
plt.figtext(0.95,0.82,'Wind', color='mediumseagreen', ha='center',fontsize=fsize+3) plt.figtext(0.95,0.71,'STEREO-A', color='red', ha='center',fontsize=fsize+3) plt.figtext(0.95,0.63,'Wind', color='mediumseagreen', ha='center',fontsize=fsize+3) plt.figtext(0.95,0.58,'STEREO-A', color='red', ha='center',fontsize=fsize+3) plt.figtext(0.95,0.49,'PSP ', color='black', ha='center',fontsize=fsize+3) plt.figtext(0.95,0.38,'PSP ', color='black', ha='center',fontsize=fsize+3) plt.figtext(0.95,0.28,'Solar Orbiter', color='coral', ha='center',fontsize=fsize+5) plt.figtext(0.95,0.16,'BepiColombo', color='blue', ha='center',fontsize=fsize+5) ############################ #plot text for date extra so it does not move #year f1=plt.figtext(0.45,0.93,frame_time_str[0:4], ha='center',color=backcolor,fontsize=fsize+6) #month f2=plt.figtext(0.45+0.04,0.93,frame_time_str[5:7], ha='center',color=backcolor,fontsize=fsize+6) #day f3=plt.figtext(0.45+0.08,0.93,frame_time_str[8:10], ha='center',color=backcolor,fontsize=fsize+6) #hours f4=plt.figtext(0.45+0.12,0.93,frame_time_str[11:13], ha='center',color=backcolor,fontsize=fsize+6) plt.figtext(0.02, 0.02,'Spacecraft trajectories in '+frame+' coordinates', fontsize=fsize-1, ha='left',color=backcolor) plt.figtext(0.32,0.02,'――― trajectory from - 60 days to + 60 days', color='black', ha='center',fontsize=fsize-1) #signature #BC MPO-MAG (IGEP/IWF/ISAS/IC) #auch für Solar Orbiter (MAG, IC), Parker (FIELDS, UCB), STA (IMPACT/PLASTIC, UNH, UCLA), Wind (MFI, SWE, NASA??) 
STA-HI (RAL) plt.figtext(0.85,0.02,'Data sources: BepiColombo: MPO-MAG (IGEP/IWF/ISAS/IC), PSP (FIELDS, UCB), Solar Orbiter (MAG, IC)', fontsize=fsize-2, ha='right',color=backcolor) #signature plt.figtext(0.99,0.01/2,'Möstl, Weiss, Bailey, Reiss / Helio4Cast', fontsize=fsize-4, ha='right',color=backcolor) categories = np.array([0, 2, 1, 1, 1, 2, 0, 0]) colormap = np.array(['r', 'g', 'b']) steps=60 #parker spiral if plot_parker: for q in np.arange(0,steps): omega=2*np.pi/(sun_rot*60*60*24) #solar rotation in seconds v=400/AUkm #km/s r0=695000/AUkm r=v/omega*theta+r0*7 windcolor=cmap.hot(w.vt[2315]/5) #print(windcolor) #print(w.vt[2315+q*10]) ax.plot(-theta+np.deg2rad(0+(360/24.47)*res_in_days*k+360/steps*q), r, alpha=0.1, lw=5.0,color=windcolor, zorder=2) #print(theta) #save figure framestr = '%05i' % (k) filename=outputdirectory+'/pos_anim_'+framestr+'.jpg' if k==0: print(filename) plt.savefig(filename,dpi=200,facecolor=fig.get_facecolor(), edgecolor='none') #plt.clf() #if close==True: plt.close('all') filename='lineups/pos_anim_'+framestr+'.png' plt.savefig(filename,dpi=200,facecolor=fig.get_facecolor(), edgecolor='none') filename='lineups/pos_anim_'+framestr+'.jpg' plt.savefig(filename,dpi=100,facecolor=fig.get_facecolor(), edgecolor='none') #plt.close('all') ########################################### loop end #for multipoint lineup paper #june event #make_frame2(3810) #nov event make_frame2(10910) ``` ### get data ``` get_data=1 if get_data > 0: file=data_path+'wind_2018_now_heeq.p' [w,wh]=pickle.load(open(file, "rb" ) ) #function for spike removal, see list with times in that function w=hd.remove_wind_spikes_gaps(w) #cut with 2018 Oct 1 wcut=np.where(w.time> parse_time('2018-10-01').datetime)[0][0] w=w[wcut:-1] #file=data_path+'stereoa_2007_2019_sceq.p' #[s,sh]=pickle.load(open(file, "rb" ) ) #file=data_path+'stereoa_2019_now_sceq.p' ########### STA print('load and merge STEREO-A data SCEQ') #yearly magplasma files from stereo science center, conversion to 
SCEQ filesta1='stereoa_2007_2020_rtn.p' sta1=pickle.load(open(data_path+filesta1, "rb" ) ) #beacon data #filesta2="stereoa_2019_2020_sceq_beacon.p" #filesta2='stereoa_2019_2020_sept_sceq_beacon.p' #filesta2='stereoa_2019_now_sceq_beacon.p' #filesta2="stereoa_2020_august_november_rtn_beacon.p" filesta2='stereoa_2020_now_sceq_beacon.p' [sta2,hsta2]=pickle.load(open(data_path+filesta2, "rb" ) ) #sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]] #make array sta=np.zeros(np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float),\ ('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\ ('x', float),('y', float),('z', float),\ ('r', float),('lat', float),('lon', float)]) #convert to recarray sta = sta.view(np.recarray) sta.time=np.hstack((sta1.time,sta2.time)) sta.bx=np.hstack((sta1.bx,sta2.bx)) sta.by=np.hstack((sta1.by,sta2.by)) sta.bz=np.hstack((sta1.bz,sta2.bz)) sta.bt=np.hstack((sta1.bt,sta2.bt)) sta.vt=np.hstack((sta1.vt,sta2.vt)) sta.np=np.hstack((sta1.np,sta2.np)) sta.tp=np.hstack((sta1.tp,sta2.tp)) sta.x=np.hstack((sta1.x,sta2.x)) sta.y=np.hstack((sta1.y,sta2.y)) sta.z=np.hstack((sta1.z,sta2.z)) sta.r=np.hstack((sta1.r,sta2.r)) sta.lon=np.hstack((sta1.lon,sta2.lon)) sta.lat=np.hstack((sta1.lat,sta2.lat)) print('STA Merging done') #cut with 2018 Oct 1 scut=np.where(sta.time> parse_time('2018-10-01').datetime)[0][0] s=sta[scut:-1] ######### Bepi file=data_path+'bepi_2019_2021_rtn.p' b1=pickle.load(open(file, "rb" ) ) file=data_path+'bepi_2021_ib_rtn.p' b2=pickle.load(open(file, "rb" ) ) #make array b=np.zeros(np.size(b1.time)+np.size(b2.time),dtype=[('time',object),('bx', float),('by', float),\ ('bz', float),('bt', float),\ ('x', float),('y', float),('z', float),\ ('r', float),('lat', float),('lon', float)]) #convert to recarray b = b.view(np.recarray) b.time=np.hstack((b1.time,b2.time)) b.bx=np.hstack((b1.bx,b2.bx)) b.by=np.hstack((b1.by,b2.by)) b.bz=np.hstack((b1.bz,b2.bz)) 
b.bt=np.hstack((b1.bt,b2.bt)) b.x=np.hstack((b1.x,b2.x)) b.y=np.hstack((b1.y,b2.y)) b.z=np.hstack((b1.z,b2.z)) b.r=np.hstack((b1.r,b2.r)) b.lon=np.hstack((b1.lon,b2.lon)) b.lat=np.hstack((b1.lat,b2.lat)) print('Bepi Merging done') #################################### PSP, SolO file=data_path+'psp_2018_2021_rtn.p' [p,ph]=pickle.load(open(file, "rb" ) ) file=data_path+'solo_2020_april_2021_july_rtn.p' o=pickle.load(open(file, "rb" ) ) #save data for faster use file='data/movie_data_aug21.p' pickle.dump([p,w,s,o,b], open(file, 'wb')) print('load data from data/movie_data_aug21.p') [p1,w1,s1,o1,b1]=pickle.load(open('data/movie_data_aug21.p', "rb" ) ) p_time_num=parse_time(p1.time).plot_date w_time_num=parse_time(w1.time).plot_date s_time_num=parse_time(s1.time).plot_date o_time_num=parse_time(o1.time).plot_date b_time_num=parse_time(b1.time).plot_date #median filter psp speed because of spikes p1.vt=medfilt(p1.vt,31) print('done') ``` # Make movie ### Settings ``` plt.close('all') #Coordinate System #frame='HCI' frame='HEEQ' print(frame) #sidereal solar rotation rate if frame=='HCI': sun_rot=24.47 #synodic if frame=='HEEQ': sun_rot=26.24 AUkm=149597870.7 #black background on or off #black=True black=False #animation settings plot_orbit=True #plot_orbit=False plot_parker=True #plot_parker=False high_res_mode=False #orbit 1 #outputdirectory='results/anim_plots_sc_insitu_final_orbit1' #animdirectory='results/anim_movie_sc_insitu_final_orbit1' #t_start ='2018-Oct-15' #t_end ='2018-Dec-06' #t_start ='2018-Dec-03' #t_end ='2018-Dec-06' #orbit all #from Parker start #outputdirectory='results/overview_movie_nov_2020_frames_2' #animdirectory='results/overview_movie_nov_2020_2' #t_start ='2018-Oct-25' #t_end ='2020-Apr-15' #res_in_days=1/24. 
#1hour =1/24 #make time range #time_array = [ parse_time(t_start).datetime + timedelta(hours=1*n) \ # for n in range(int ((parse_time(t_end).datetime - parse_time(t_start).datetime).days*24))] ######## from Solar Orbiter Start outputdirectory='results/overview_movie_apr21_sep21_frames' animdirectory='results/overview_movie_apr21_sep21' t_start ='2021-Apr-1' t_end ='2021-Sep-30' #t_end ='2021-Jun-20' res_in_days=1/48. #1hour =1/24 #make time range to see how much frames are needed starttime = parse_time(t_start).datetime endtime = parse_time(t_end).datetime alltimes = [] while starttime < endtime: alltimes.append(starttime) starttime += timedelta(days=res_in_days) k_all=np.size(alltimes) days_window=3 #size of in situ timerange if os.path.isdir(outputdirectory) == False: os.mkdir(outputdirectory) if os.path.isdir(animdirectory) == False: os.mkdir(animdirectory) positions_plot_directory='results/plots_positions/' if os.path.isdir(positions_plot_directory) == False: os.mkdir(positions_plot_directory) print(k_all) ########## MAKE TRAJECTORIES #make_positions() print('load positions') #load positions [psp, bepi, solo, sta, stb, messenger, ulysses, earth, venus, mars, mercury,jupiter, saturn, uranus, neptune,frame]=pickle.load( open( 'results/positions_HEEQ_1hr.p', "rb" ) ) print('load HIGEOCAT kinematics') [hc_time,hc_r,hc_lat,hc_lon,hc_name]=pickle.load(open('data/higeocat_kinematics.p', "rb")) print('done') ``` ## test animation frames ``` #for server #matplotlib.use('Qt5Agg') %matplotlib inline start_time=time.time() print() print('make animation') #animation start time in matplotlib format frame_time_num=parse_time(t_start).plot_date sns.set_context('talk') if not black: sns.set_style('darkgrid'),#{'grid.linestyle': ':', 'grid.color': '.35'}) if black: sns.set_style('white',{'grid.linestyle': ':', 'grid.color': '.35'}) # animation settings fsize=13 fadeind=int(60/res_in_days) symsize_planet=110 symsize_spacecraft=80 #for parker spiral 
theta=np.arange(0,np.deg2rad(180),0.01) ######################## make frames #for debugging #don't close plot in make_frame when testing make_frame2(5500) #for i in np.arange(6454,6576,1): # make_frame(i) print('done') ``` ## Make full movie ``` matplotlib.use('Agg') print(k_all,' frames in total') print() #number of processes depends on your machines memory; check with command line "top" #how much memory is used by all your processesii nr_of_processes_used=100 print('Using multiprocessing, nr of cores',multiprocessing.cpu_count(), \ 'with nr of processes used: ',nr_of_processes_used) #run multiprocessing pool to make all movie frames, depending only on frame number pool = multiprocessing.Pool(processes=nr_of_processes_used) input=[i for i in range(k_all)] #input=[i for i in np.arange(6721,6851,1)] pool.map(make_frame, input) pool.close() # pool.join() print('time in min: ',np.round((time.time()-start_time)/60)) print('plots done, frames saved in ',outputdirectory) #os.system(ffmpeg_path+'ffmpeg -r 30 -i '+str(outputdirectory)+'/pos_anim_%05d.jpg -b 5000k \ # -r 30 '+str(animdirectory)+'/overview_27nov_2020_from2018.mp4 -y -loglevel quiet') #os.system(ffmpeg_path+'ffmpeg -r 30 -i '+str(outputdirectory)+'/pos_anim_%05d.jpg -b 5000k \ # -r 30 '+str(animdirectory)+'/overview_apr2020_jul2021.mp4 -y -loglevel quiet') os.system(ffmpeg_path+'ffmpeg -r 30 -i '+str(outputdirectory)+'/pos_anim_%05d.jpg -b 5000k \ -r 30 '+str(animdirectory)+'/overview_apr2021_sep2021.mp4 -y -loglevel quiet') print('movie done, saved in ',animdirectory) ``` ## Lineup event images ``` #load lineup catalog url='lineups/HELIO4CAST_multipoint_v10.csv' lineups=pd.read_csv(url) #alltimes are the movie frame times #time of event 1 etime1=parse_time(lineups['event_start_time'][1]).datetime eframe1=np.where(np.array(alltimes)> etime1)[0][0] make_frame2(eframe1) plt.close('all') etime2=parse_time(lineups['event_start_time'][6]).datetime eframe2=np.where(np.array(alltimes)> etime2)[0][0] 
make_frame2(eframe2) plt.close('all') etime4=parse_time(lineups['event_start_time'][12]).datetime eframe4=np.where(np.array(alltimes)> etime4)[0][0] make_frame2(eframe4) plt.close('all') etime4_2=parse_time(lineups['event_start_time'][11]).datetime eframe4_2=np.where(np.array(alltimes)> etime4_2)[0][0] make_frame2(eframe4_2) plt.close('all') etime12=parse_time(lineups['event_start_time'][29]).datetime eframe12=np.where(np.array(alltimes)> etime12)[0][0] make_frame2(eframe12) plt.close('all') ```
github_jupyter
``` import pandas as pd import numpy as np from matplotlib import pyplot as plt import seaborn as sns import torch from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from xgboost import XGBRegressor from lightgbm import LGBMRegressor from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error as mse from scipy.stats import entropy import warnings import logging from causalml.inference.meta import BaseXRegressor, BaseRRegressor, BaseSRegressor, BaseTRegressor from causalml.inference.nn import CEVAE from causalml.propensity import ElasticNetPropensityModel from causalml.metrics import * from causalml.dataset import simulate_hidden_confounder %matplotlib inline warnings.filterwarnings('ignore') logger = logging.getLogger('causalml') logger.setLevel(logging.DEBUG) plt.style.use('fivethirtyeight') sns.set_palette('Paired') plt.rcParams['figure.figsize'] = (12,8) ``` # IHDP semi-synthetic dataset Hill introduced a semi-synthetic dataset constructed from the Infant Health and Development Program (IHDP). This dataset is based on a randomized experiment investigating the effect of home visits by specialists on future cognitive scores. The IHDP simulation is considered the de-facto standard benchmark for neural network treatment effect estimation methods. 
``` # load all ihadp data df = pd.DataFrame() for i in range(1, 10): data = pd.read_csv('./data/ihdp_npci_' + str(i) + '.csv', header=None) df = pd.concat([data, df]) cols = ["treatment", "y_factual", "y_cfactual", "mu0", "mu1"] + [i for i in range(25)] df.columns = cols print(df.shape) # replicate the data 100 times replications = 100 df = pd.concat([df]*replications, ignore_index=True) print(df.shape) # set which features are binary binfeats = [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] # set which features are continuous contfeats = [i for i in range(25) if i not in binfeats] # reorder features with binary first and continuous after perm = binfeats + contfeats df = df.reset_index(drop=True) df.head() X = df[perm].values treatment = df['treatment'].values y = df['y_factual'].values y_cf = df['y_cfactual'].values tau = df.apply(lambda d: d['y_factual'] - d['y_cfactual'] if d['treatment']==1 else d['y_cfactual'] - d['y_factual'], axis=1) mu_0 = df['mu0'].values mu_1 = df['mu1'].values # seperate for train and test itr, ite = train_test_split(np.arange(X.shape[0]), test_size=0.2, random_state=1) X_train, treatment_train, y_train, y_cf_train, tau_train, mu_0_train, mu_1_train = X[itr], treatment[itr], y[itr], y_cf[itr], tau[itr], mu_0[itr], mu_1[itr] X_val, treatment_val, y_val, y_cf_val, tau_val, mu_0_val, mu_1_val = X[ite], treatment[ite], y[ite], y_cf[ite], tau[ite], mu_0[ite], mu_1[ite] ``` ## CEVAE Model ``` # cevae model settings outcome_dist = "normal" latent_dim = 20 hidden_dim = 200 num_epochs = 5 batch_size = 1000 learning_rate = 0.001 learning_rate_decay = 0.01 num_layers = 2 cevae = CEVAE(outcome_dist=outcome_dist, latent_dim=latent_dim, hidden_dim=hidden_dim, num_epochs=num_epochs, batch_size=batch_size, learning_rate=learning_rate, learning_rate_decay=learning_rate_decay, num_layers=num_layers) # fit losses = cevae.fit(X=torch.tensor(X_train, dtype=torch.float), treatment=torch.tensor(treatment_train, dtype=torch.float), 
y=torch.tensor(y_train, dtype=torch.float)) # predict ite_train = cevae.predict(X_train) ite_val = cevae.predict(X_val) ate_train = ite_train.mean() ate_val = ite_val.mean() print(ate_train, ate_val) ``` ## Meta Learners ``` # fit propensity model p_model = ElasticNetPropensityModel() p_train = p_model.fit_predict(X_train, treatment_train) p_val = p_model.fit_predict(X_val, treatment_val) s_learner = BaseSRegressor(LGBMRegressor()) s_ate = s_learner.estimate_ate(X_train, treatment_train, y_train)[0] s_ite_train = s_learner.fit_predict(X_train, treatment_train, y_train) s_ite_val = s_learner.predict(X_val) t_learner = BaseTRegressor(LGBMRegressor()) t_ate = t_learner.estimate_ate(X_train, treatment_train, y_train)[0][0] t_ite_train = t_learner.fit_predict(X_train, treatment_train, y_train) t_ite_val = t_learner.predict(X_val, treatment_val, y_val) x_learner = BaseXRegressor(LGBMRegressor()) x_ate = x_learner.estimate_ate(X_train, treatment_train, y_train, p_train)[0][0] x_ite_train = x_learner.fit_predict(X_train, treatment_train, y_train, p_train) x_ite_val = x_learner.predict(X_val, treatment_val, y_val, p_val) r_learner = BaseRRegressor(LGBMRegressor()) r_ate = r_learner.estimate_ate(X_train, treatment_train, y_train, p_train)[0][0] r_ite_train = r_learner.fit_predict(X_train, treatment_train, y_train, p_train) r_ite_val = r_learner.predict(X_val) ``` ## Model Results Comparsion ### Training ``` df_preds_train = pd.DataFrame([s_ite_train.ravel(), t_ite_train.ravel(), x_ite_train.ravel(), r_ite_train.ravel(), ite_train.ravel(), tau_train.ravel(), treatment_train.ravel(), y_train.ravel()], index=['S','T','X','R','CEVAE','tau','w','y']).T df_cumgain_train = get_cumgain(df_preds_train) df_result_train = pd.DataFrame([s_ate, t_ate, x_ate, r_ate, ate_train, tau_train.mean()], index=['S','T','X','R','CEVAE','actual'], columns=['ATE']) df_result_train['MAE'] = [mean_absolute_error(t,p) for t,p in zip([s_ite_train, t_ite_train, x_ite_train, r_ite_train, ite_train], 
[tau_train.values.reshape(-1,1)]*5 ) ] + [None] df_result_train['AUUC'] = auuc_score(df_preds_train) df_result_train plot_gain(df_preds_train) ``` ### Validation ``` df_preds_val = pd.DataFrame([s_ite_val.ravel(), t_ite_val.ravel(), x_ite_val.ravel(), r_ite_val.ravel(), ite_val.ravel(), tau_val.ravel(), treatment_val.ravel(), y_val.ravel()], index=['S','T','X','R','CEVAE','tau','w','y']).T df_cumgain_val = get_cumgain(df_preds_val) df_result_val = pd.DataFrame([s_ite_val.mean(), t_ite_val.mean(), x_ite_val.mean(), r_ite_val.mean(), ate_val, tau_val.mean()], index=['S','T','X','R','CEVAE','actual'], columns=['ATE']) df_result_val['MAE'] = [mean_absolute_error(t,p) for t,p in zip([s_ite_val, t_ite_val, x_ite_val, r_ite_val, ite_val], [tau_val.values.reshape(-1,1)]*5 ) ] + [None] df_result_val['AUUC'] = auuc_score(df_preds_val) df_result_val plot_gain(df_preds_val) ``` # Synthetic Data ``` y, X, w, tau, b, e = simulate_hidden_confounder(n=100000, p=5, sigma=1.0, adj=0.) X_train, X_val, y_train, y_val, w_train, w_val, tau_train, tau_val, b_train, b_val, e_train, e_val = \ train_test_split(X, y, w, tau, b, e, test_size=0.2, random_state=123, shuffle=True) preds_dict_train = {} preds_dict_valid = {} preds_dict_train['Actuals'] = tau_train preds_dict_valid['Actuals'] = tau_val preds_dict_train['generated_data'] = { 'y': y_train, 'X': X_train, 'w': w_train, 'tau': tau_train, 'b': b_train, 'e': e_train} preds_dict_valid['generated_data'] = { 'y': y_val, 'X': X_val, 'w': w_val, 'tau': tau_val, 'b': b_val, 'e': e_val} # Predict p_hat because e would not be directly observed in real-life p_model = ElasticNetPropensityModel() p_hat_train = p_model.fit_predict(X_train, w_train) p_hat_val = p_model.fit_predict(X_val, w_val) for base_learner, label_l in zip([BaseSRegressor, BaseTRegressor, BaseXRegressor, BaseRRegressor], ['S', 'T', 'X', 'R']): for model, label_m in zip([LinearRegression, XGBRegressor], ['LR', 'XGB']): # RLearner will need to fit on the p_hat if label_l != 'R': 
learner = base_learner(model()) # fit the model on training data only learner.fit(X=X_train, treatment=w_train, y=y_train) try: preds_dict_train['{} Learner ({})'.format( label_l, label_m)] = learner.predict(X=X_train, p=p_hat_train).flatten() preds_dict_valid['{} Learner ({})'.format( label_l, label_m)] = learner.predict(X=X_val, p=p_hat_val).flatten() except TypeError: preds_dict_train['{} Learner ({})'.format( label_l, label_m)] = learner.predict(X=X_train, treatment=w_train, y=y_train).flatten() preds_dict_valid['{} Learner ({})'.format( label_l, label_m)] = learner.predict(X=X_val, treatment=w_val, y=y_val).flatten() else: learner = base_learner(model()) learner.fit(X=X_train, p=p_hat_train, treatment=w_train, y=y_train) preds_dict_train['{} Learner ({})'.format( label_l, label_m)] = learner.predict(X=X_train).flatten() preds_dict_valid['{} Learner ({})'.format( label_l, label_m)] = learner.predict(X=X_val).flatten() # cevae model settings outcome_dist = "normal" latent_dim = 20 hidden_dim = 200 num_epochs = 5 batch_size = 1000 learning_rate = 1e-3 learning_rate_decay = 0.1 num_layers = 3 num_samples = 10 cevae = CEVAE(outcome_dist=outcome_dist, latent_dim=latent_dim, hidden_dim=hidden_dim, num_epochs=num_epochs, batch_size=batch_size, learning_rate=learning_rate, learning_rate_decay=learning_rate_decay, num_layers=num_layers, num_samples=num_samples) # fit losses = cevae.fit(X=torch.tensor(X_train, dtype=torch.float), treatment=torch.tensor(w_train, dtype=torch.float), y=torch.tensor(y_train, dtype=torch.float)) preds_dict_train['CEVAE'] = cevae.predict(X_train).flatten() preds_dict_valid['CEVAE'] = cevae.predict(X_val).flatten() actuals_train = preds_dict_train['Actuals'] actuals_validation = preds_dict_valid['Actuals'] synthetic_summary_train = pd.DataFrame({label: [preds.mean(), mse(preds, actuals_train)] for label, preds in preds_dict_train.items() if 'generated' not in label.lower()}, index=['ATE', 'MSE']).T synthetic_summary_train['Abs % Error of ATE'] 
= np.abs( (synthetic_summary_train['ATE']/synthetic_summary_train.loc['Actuals', 'ATE']) - 1) synthetic_summary_validation = pd.DataFrame({label: [preds.mean(), mse(preds, actuals_validation)] for label, preds in preds_dict_valid.items() if 'generated' not in label.lower()}, index=['ATE', 'MSE']).T synthetic_summary_validation['Abs % Error of ATE'] = np.abs( (synthetic_summary_validation['ATE']/synthetic_summary_validation.loc['Actuals', 'ATE']) - 1) # calculate kl divergence for training for label in synthetic_summary_train.index: stacked_values = np.hstack((preds_dict_train[label], actuals_train)) stacked_low = np.percentile(stacked_values, 0.1) stacked_high = np.percentile(stacked_values, 99.9) bins = np.linspace(stacked_low, stacked_high, 100) distr = np.histogram(preds_dict_train[label], bins=bins)[0] distr = np.clip(distr/distr.sum(), 0.001, 0.999) true_distr = np.histogram(actuals_train, bins=bins)[0] true_distr = np.clip(true_distr/true_distr.sum(), 0.001, 0.999) kl = entropy(distr, true_distr) synthetic_summary_train.loc[label, 'KL Divergence'] = kl # calculate kl divergence for validation for label in synthetic_summary_validation.index: stacked_values = np.hstack((preds_dict_valid[label], actuals_validation)) stacked_low = np.percentile(stacked_values, 0.1) stacked_high = np.percentile(stacked_values, 99.9) bins = np.linspace(stacked_low, stacked_high, 100) distr = np.histogram(preds_dict_valid[label], bins=bins)[0] distr = np.clip(distr/distr.sum(), 0.001, 0.999) true_distr = np.histogram(actuals_validation, bins=bins)[0] true_distr = np.clip(true_distr/true_distr.sum(), 0.001, 0.999) kl = entropy(distr, true_distr) synthetic_summary_validation.loc[label, 'KL Divergence'] = kl df_preds_train = pd.DataFrame([preds_dict_train['S Learner (LR)'].ravel(), preds_dict_train['S Learner (XGB)'].ravel(), preds_dict_train['T Learner (LR)'].ravel(), preds_dict_train['T Learner (XGB)'].ravel(), preds_dict_train['X Learner (LR)'].ravel(), preds_dict_train['X Learner 
(XGB)'].ravel(), preds_dict_train['R Learner (LR)'].ravel(), preds_dict_train['R Learner (XGB)'].ravel(), preds_dict_train['CEVAE'].ravel(), preds_dict_train['generated_data']['tau'].ravel(), preds_dict_train['generated_data']['w'].ravel(), preds_dict_train['generated_data']['y'].ravel()], index=['S Learner (LR)','S Learner (XGB)', 'T Learner (LR)','T Learner (XGB)', 'X Learner (LR)','X Learner (XGB)', 'R Learner (LR)','R Learner (XGB)', 'CEVAE','tau','w','y']).T synthetic_summary_train['AUUC'] = auuc_score(df_preds_train).iloc[:-1] df_preds_validation = pd.DataFrame([preds_dict_valid['S Learner (LR)'].ravel(), preds_dict_valid['S Learner (XGB)'].ravel(), preds_dict_valid['T Learner (LR)'].ravel(), preds_dict_valid['T Learner (XGB)'].ravel(), preds_dict_valid['X Learner (LR)'].ravel(), preds_dict_valid['X Learner (XGB)'].ravel(), preds_dict_valid['R Learner (LR)'].ravel(), preds_dict_valid['R Learner (XGB)'].ravel(), preds_dict_valid['CEVAE'].ravel(), preds_dict_valid['generated_data']['tau'].ravel(), preds_dict_valid['generated_data']['w'].ravel(), preds_dict_valid['generated_data']['y'].ravel()], index=['S Learner (LR)','S Learner (XGB)', 'T Learner (LR)','T Learner (XGB)', 'X Learner (LR)','X Learner (XGB)', 'R Learner (LR)','R Learner (XGB)', 'CEVAE','tau','w','y']).T synthetic_summary_validation['AUUC'] = auuc_score(df_preds_validation).iloc[:-1] synthetic_summary_train synthetic_summary_validation plot_gain(df_preds_train) plot_gain(df_preds_validation) ```
github_jupyter
# 深度学习工具 PyTorch 简介 在此 notebook 中,你将了解 [PyTorch](http://pytorch.org/),一款用于构建和训练神经网络的框架。PyTorch 在很多方面都和 Numpy 数组很像。毕竟,这些 Numpy 数组也是张量。PyTorch 会将这些张量当做输入并使我们能够轻松地将张量移到 GPU 中,以便在训练神经网络时加快处理速度。它还提供了一个自动计算梯度的模块(用于反向传播),以及另一个专门用于构建神经网络的模块。总之,与 TensorFlow 和其他框架相比,PyTorch 与 Python 和 Numpy/Scipy 堆栈更协调。 ## 神经网络 深度学习以人工神经网络为基础。人工神经网络大致产生于上世纪 50 年代末。神经网络由多个像神经元一样的单个部分组成,这些部分通常称为单元或直接叫做“神经元”。每个单元都具有一定数量的加权输入。我们对这些加权输入求和,然后将结果传递给激活函数,以获得单元的输出。 <img src="assets/simple_neuron.png" width=400px> 数学公式如下所示: $$ \begin{align} y &= f(w_1 x_1 + w_2 x_2 + b) \\ y &= f\left(\sum_i w_i x_i +b \right) \end{align} $$ 对于向量来说,为两个向量的点积/内积: $$ h = \begin{bmatrix} x_1 \, x_2 \cdots x_n \end{bmatrix} \cdot \begin{bmatrix} w_1 \\ w_2 \\ \vdots \\ w_n \end{bmatrix} $$ ## 张量 实际上神经网络计算只是对*张量*进行一系列线性代数运算,张量是矩阵的泛化形式。向量是一维张量,矩阵是二维张量,包含 3 个索引的数组是三维张量(例如 RGB 彩色图像)。神经网络的基本数据结构是张量,PyTorch(以及几乎所有其他深度学习框架)都是以张量为基础。 <img src="assets/tensor_examples.svg" width=600px> 这些是基本知识,我们现在来看 PyTorch 如何构建简单的神经网络。 ``` # First, import PyTorch import torch def activation(x): """ Sigmoid activation function Arguments --------- x: torch.Tensor """ return 1/(1+torch.exp(-x)) ### Generate some data torch.manual_seed(7) # Set the random seed so things are predictable # Features are 3 random normal variables features = torch.randn((1, 5)) # True weights for our data, random normal variables again weights = torch.randn_like(features) # and a true bias term bias = torch.randn((1, 1)) ``` 我在上面生成了一些数据,我们可以使用该数据获取这个简单网络的输出。这些暂时只是随机数据,之后我们将使用正常数据。我们来看看: `features = torch.randn((1, 5))` 创建一个形状为 `(1, 5)` 的张量,其中有 1 行和 5 列,包含根据正态分布(均值为 0,标准偏差为 1)随机分布的值。 `weights = torch.randn_like(features)` 创建另一个形状和 `features` 一样的张量,同样包含来自正态分布的值。 最后,`bias = torch.randn((1, 1))` 根据正态分布创建一个值。 和 Numpy 数组一样,PyTorch 张量可以相加、相乘、相减。行为都很类似。但是 PyTorch 张量具有一些优势,例如 GPU 加速,稍后我们会讲解。请计算这个简单单层网络的输出。 > **练习**:计算网络的输出:输入特征为 `features`,权重为 `weights`,偏差为 `bias`。和 Numpy 类似,PyTorch 也有一个对张量求和的 [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) 函数和 `.sum()` 
方法。请使用上面定义的函数 `activation` 作为激活函数。 ``` ## Calculate the output of this network using the weights and bias tensors ``` 你可以在同一运算里使用矩阵乘法进行乘法和加法运算。推荐使用矩阵乘法,因为在 GPU 上使用现代库和高效计算资源使矩阵乘法更高效。 如何对特征和权重进行矩阵乘法运算?我们可以使用 [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) 或 [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul),后者更复杂,并支持广播。如果不对`features` 和 `weights` 进行处理,就会报错: ``` >> torch.mm(features, weights) --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-13-15d592eb5279> in <module>() ----> 1 torch.mm(features, weights) RuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033 ``` 在任何框架中构建神经网络时,我们都会频繁遇到这种情况。原因是我们的张量不是进行矩阵乘法的正确形状。注意,对于矩阵乘法,第一个张量里的列数必须等于第二个张量里的行数。`features` 和 `weights` 具有相同的形状,即 `(1, 5)`。意味着我们需要更改 `weights` 的形状,以便进行矩阵乘法运算。 **注意:**要查看张量 `tensor` 的形状,请使用 `tensor.shape`。以后也会经常用到。 现在我们有以下几个选择:[`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape)、[`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_) 和 [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view)。 * `weights.reshape(a, b)` 有时候将返回一个新的张量,数据和 `weights` 的一样,大小为 `(a, b)`;有时候返回克隆版,将数据复制到内存的另一个部分。 * `weights.resize_(a, b)` 返回形状不同的相同张量。但是,如果新形状的元素数量比原始张量的少,则会从张量里删除某些元素(但是不会从内存中删除)。如果新形状的元素比原始张量的多,则新元素在内存里未初始化。注意,方法末尾的下划线表示这个方法是**原地**运算。要详细了解如何在 PyTorch 中进行原地运算,请参阅[此论坛话题](https://discuss.pytorch.org/t/what-is-in-place-operation/16244)。 * `weights.view(a, b)` 将返回一个张量,数据和 `weights` 的一样,大小为 `(a, b)`。 我通常使用 `.view()`,但这三个方法对此示例来说都可行。现在,我们可以通过 `weights.view(5, 1)` 变形 `weights`,使其具有 5 行和 1 列。 > **练习**:请使用矩阵乘法计算网络的输出 ``` ## Calculate the output of this network using matrix multiplication ``` ### 堆叠 
这就是计算单个神经元的输出的方式。当你将单个单元堆叠为层,并将层堆叠为神经元网络后,你就会发现这个算法的强大之处。一个神经元层的输出变成下一层的输入。对于多个输入单元和输出单元,我们现在需要将权重表示为矩阵。 <img src='assets/multilayer_diagram_weights.png' width=450px> 底部显示的第一个层级是输入,称为**输入层**。中间层称为**隐藏层**,最后一层(右侧)是**输出层**。我们可以再次使用矩阵从数学角度来描述这个网络,然后使用矩阵乘法将每个单元线性组合到一起。例如,可以这样计算隐藏层($h_1$ 和 $h_2$): $$ \vec{h} = [h_1 \, h_2] = \begin{bmatrix} x_1 \, x_2 \cdots \, x_n \end{bmatrix} \cdot \begin{bmatrix} w_{11} & w_{12} \\ w_{21} &w_{22} \\ \vdots &\vdots \\ w_{n1} &w_{n2} \end{bmatrix} $$ 我们可以将隐藏层当做输出单元的输入,从而得出这个小网络的输出,简单表示为: $$ y = f_2 \! \left(\, f_1 \! \left(\vec{x} \, \mathbf{W_1}\right) \mathbf{W_2} \right) $$ ``` ### Generate some data torch.manual_seed(7) # Set the random seed so things are predictable # Features are 3 random normal variables features = torch.randn((1, 3)) # Define the size of each layer in our network n_input = features.shape[1] # Number of input units, must match number of input features n_hidden = 2 # Number of hidden units n_output = 1 # Number of output units # Weights for inputs to hidden layer W1 = torch.randn(n_input, n_hidden) # Weights for hidden layer to output layer W2 = torch.randn(n_hidden, n_output) # and bias terms for hidden and output layers B1 = torch.randn((1, n_hidden)) B2 = torch.randn((1, n_output)) ``` > **练习:**使用权重 `W1` 和 `W2` 以及偏差 `B1` 和 `B2` 计算此多层网络的输出。 ``` ## Your solution here ``` 如果计算正确,输出应该为 `tensor([[ 0.3171]])`。 隐藏层数量是网络的参数,通常称为**超参数**,以便与权重和偏差参数区分开。稍后当我们讨论如何训练网络时会提到,层级越多,网络越能够从数据中学习规律并作出准确的预测。 ## Numpy 和 Torch 相互转换 加分题!PyTorch 可以实现 Numpy 数组和 Torch 张量之间的转换。Numpy 数组转换为张量数据,可以用 `torch.from_numpy()`。张量数据转换为 Numpy 数组,可以用 `.numpy()` 。 ``` import numpy as np a = np.random.rand(4,3) a b = torch.from_numpy(a) b b.numpy() ``` Numpy 数组与 Torch 张量之间共享内存,因此如果你原地更改一个对象的值,另一个对象的值也会更改。 ``` # Multiply PyTorch Tensor by 2, in place b.mul_(2) ``` ```python # Numpy array matches new values from Tensor a ```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from bokeh.plotting import * from sklearn.cluster.bicluster import SpectralCoclustering from bokeh.models import HoverTool, ColumnDataSource from itertools import product whisky = pd.read_csv('whiskies.txt') whisky["Region"] = pd.read_csv('regions.txt') whisky.head() whisky.tail() flavors = whisky.iloc[:, 2:14] flavors corr_flavors = pd.DataFrame.corr(flavors) print(corr_flavors) plt.figure(figsize=(10,10)) plt.pcolor(corr_flavors) plt.colorbar() plt.savefig("corr_flavors.pdf") corr_whisky = pd.DataFrame.corr(flavors.transpose()) plt.figure(figsize=(10,10)) plt.pcolor(corr_whisky) plt.colorbar() plt.savefig("corr_whisky.pdf") model = SpectralCoclustering(n_clusters=6, random_state=0) model.fit(corr_whisky) model.rows_ np.sum(model.rows_, axis=0) model.row_labels_ whisky['Group'] = pd.Series(model.row_labels_, index=whisky.index) whisky = whisky.iloc[np.argsort(model.row_labels_)] whisky = whisky.reset_index(drop=True) correlations = pd.DataFrame.corr(whisky.iloc[:,2:14].transpose()) correlations = np.array(correlations) plt.figure(figsize = (14,7)) plt.subplot(121) plt.pcolor(corr_whisky) plt.title('Original') plt.axis('tight') plt.subplot(122) plt.pcolor(correlations) plt.title('Rearranged') plt.axis('tight') plt.savefig('correlations.pdf') # Let's plot a simple 5x5 grid of squares, alternating in color as red and blue. plot_values = [1, 2, 3, 4, 5] plot_colors = ["red", "blue"] # How do we tell Bokeh to plot each point in a grid? Let's use a function that # finds each combination of values from 1-5. from itertools import product grid = list(product(plot_values, plot_values)) print(grid) # The first value is the x coordinate, and the second value is the y coordinate. # Let's store these in separate lists. xs, ys = zip(*grid) print(xs) print(ys) # Now we will make a list of colors, alternating between red and blue. 
colors = [plot_colors[i % 2] for i in range(len(grid))] print(colors) # Finally, let's determine the strength of transparency (alpha) for each point, # where 0 is completely transparent. alphas = np.linspace(0, 1, len(grid)) # Bokeh likes each of these to be stored in a special dataframe, called # ColumnDataSource. Let's store our coordinates, colors, and alpha values. source = ColumnDataSource( data={ "x": xs, "y": ys, "colors": colors, "alphas": alphas, } ) # We are ready to make our interactive Bokeh plot! output_file("Basic_Example.html", title="Basic Example") fig = figure(tools="hover, save") fig.rect("x", "y", 0.9, 0.9, source=source, color="colors", alpha="alphas") hover = fig.select(dict(type=HoverTool)) hover.tooltips = { "Value": "@x, @y", } show(fig) cluster_colors = ["red", "orange", "green", "blue", "purple", "gray"] regions = ["Speyside", "Highlands", "Lowlands", "Islands", "Campbelltown", "Islay"] region_colors = dict(zip(regions, cluster_colors)) region_colors distilleries = list(whisky.Distillery) correlation_colors = [] for i in range(len(distilleries)): for j in range(len(distilleries)): if correlations[i, j] < 0.70: # if low correlation, correlation_colors.append('white') # just use white. else: # otherwise, if whisky.Group[i] == whisky.Group[j]: # if the groups match, correlation_colors.append(cluster_colors[whisky.Group[i]]) # color them by their mutual group. else: # otherwise correlation_colors.append('lightgray') # color them lightgray. 
source = ColumnDataSource( data = { "x": np.repeat(distilleries,len(distilleries)), "y": list(distilleries)*len(distilleries), "colors": correlation_colors, "alphas": correlations.flatten(), "correlations": correlations.flatten(), } ) output_file("Whisky Correlations.html", title="Whisky Correlations") fig = figure(title="Whisky Correlations", x_axis_location="above", tools="hover,save", x_range=list(reversed(distilleries)), y_range=distilleries) fig.grid.grid_line_color = None fig.axis.axis_line_color = None fig.axis.major_tick_line_color = None fig.axis.major_label_text_font_size = "5pt" fig.xaxis.major_label_orientation = np.pi / 3 fig.rect('x', 'y', .9, .9, source=source, color='colors', alpha='correlations') hover = fig.select(dict(type=HoverTool)) hover.tooltips = { "Whiskies": "@x, @y", "Correlation": "@correlations", } show(fig) points = [(0,0), (1,2), (3,1)] xs, ys = zip(*points) colors = ["red", "blue", "green"] output_file("Spatial_Example.html", title="Regional Example") location_source = ColumnDataSource( data={ "x": xs, "y": ys, "colors": colors, } ) fig = figure(title = "Title", x_axis_location = "above", tools="hover, save") fig.plot_width = 300 fig.plot_height = 380 fig.circle("x", "y",size=10, source=location_source, color='colors', line_color=None) hover = fig.select(dict(type = HoverTool)) hover.tooltips = { "Location": "(@x, @y)" } show(fig) def location_plot(title, colors): output_file(title + ".html") location_source = ColumnDataSource( data={ "x": whisky[" Latitude"], "y": whisky[" Longitude"], "colors": colors, "regions": whisky.Region, "distilleries": whisky.Distillery } ) fig = figure(title=title, x_axis_location="above", tools="hover, save") fig.plot_width = 400 fig.plot_height = 500 fig.circle("x", "y", size=9, source=location_source, color='colors', line_color=None) fig.xaxis.major_label_orientation = np.pi / 3 hover = fig.select(dict(type=HoverTool)) hover.tooltips = { "Distillery": "@distilleries", "Location": "(@x, @y)" } show(fig) 
region_cols = [region_colors[i] for i in list(whisky["Region"])] location_plot("Whisky Locations and Regions", region_cols) region_cols = [region_colors[i] for i in list(whisky.Region)] classification_cols = [cluster_colors[i] for i in list(whisky.Group)] location_plot("Whisky Locations and Regions", region_cols) location_plot("Whisky Locations and Groups", classification_cols) ```
github_jupyter
``` import datetime as dt import panel as pn pn.extension() ``` The ``DateRangeSlider`` widget allows selecting a date range using a slider with two handles. For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). #### Parameters: For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). ##### Core * **``start``** (datetime): The range's lower bound * **``end``** (datetime): The range's upper bound * **``value``** (tuple): Tuple of upper and lower bounds of the selected range expressed as datetime types * **``value_throttled``** (tuple): Tuple of upper and lower bounds of the selected range expressed as datetime types throttled until mouseup ##### Display * **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value * **``callback_policy``** (str, **DEPRECATED**): Policy to determine when slider events are triggered (one of 'continuous', 'throttle', 'mouseup') * **``callback_throttle``** (int): Number of milliseconds to pause between callback calls as the slider is moved * **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl') * **``disabled``** (boolean): Whether the widget is editable * **``name``** (str): The title of the widget * **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation. 
* **``tooltips``** (boolean): Whether to display tooltips on the slider handle ___ The slider start and end can be adjusted by dragging the handles and whole range can be shifted by dragging the selected range. ``` date_range_slider = pn.widgets.DateRangeSlider( name='Date Range Slider', start=dt.datetime(2017, 1, 1), end=dt.datetime(2019, 1, 1), value=(dt.datetime(2017, 1, 1), dt.datetime(2018, 1, 10)) ) date_range_slider ``` ``DateRangeSlider.value`` returns a tuple of datetime values that can be read out and set like other widgets: ``` date_range_slider.value ``` ### Controls The `DateRangeSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively: ``` pn.Row(date_range_slider.controls(jslink=True), date_range_slider) ```
github_jupyter
# MBZ-XML-TO-EXCEL First pubished version May 22, 2019. This is version 0.0004 (revision July 26, 2019) Licensed under the NCSA Open source license Copyright (c) 2019 Lawrence Angrave All rights reserved. Developed by: Lawrence Angrave Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. Neither the names of Lawrence Angrave, University of Illinois nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. # Citations and acknowledgements welcomed! In a presentation, report or paper please recognise and acknowledge the the use of this software. Please contact angrave@illinois.edu for a Bibliography citation. 
For presentations, the following is sufficient MBZ-XML-TO-EXCEL (https://github.com/angrave/Moodle-mbz-to-excel) by Lawrence Angrave. MBZ-XML-TO-EXCEL is an iLearn project, supported by an Institute of Education Sciences Award R305A180211 If also using Geo-IP data, please cite IP2Location. For example, "This report uses geo-ip location data from IP2Location.com" # Known limitations and issues The assessment sheet (generated from workshop.xml) may generate URLs that are longer than 255 characters, the largested supported by Excel. These very long URLs will be excluded No verification of the data has been performed. It is unknown if the inferred timestamps based on the Unix Epoch timestamp require a timezone adjustment. # Requirements This project uses Python3, Jupiter notebooks and Pandas. # Set up ``` #import xml.etree.ElementTree as ET #lxml supports line numbers import lxml.etree as ET from collections import OrderedDict import pandas as pd import numpy as np import re import os import urllib import datetime import glob import tarfile import tempfile import base64 # geoip support import bisect import ipaddress # timestamp support from datetime import datetime # Extract text from html messages from bs4 import BeautifulSoup import uuid import traceback import xlsxwriter excelengine = 'xlsxwriter' # 'xlsxwriter' is currently recommended though it did not improve the write speed using generic pandas interface) # Todo Perhaps using workbook interface directly will be faster? 
(https://xlsxwriter.readthedocs.io/) # io.excel.xlsx.writer' (default, allegedly slow), # 'pyexcelerate' (untested) ``` # Load GeoIP data (optional) ``` def load_geoip_data(geoip_datadir): global geoip_all_colnames, geoip_geo_columns,geoipv4_df,geoipv4_ipvalues geoip_all_colnames = ['geoip_ipfrom' ,'geoip_ipto' ,'geoip_country_code' ,'geoip_country_name' ,'geoip_region_name' ,'geoip_city_name' ,'geoip_latitude' ,'geoip_longitude' ,'geoip_zip_code' ,'geoip_time_zone'] geoip_geo_columns = geoip_all_colnames[2:] #geoip_datadir = 'geoip' #change to your local directory of where the downloaded zip has been unpacked geoipv4_csv = os.path.join(geoip_datadir,'IP2LOCATION-LITE-DB11.CSV') if os.path.exists(geoipv4_csv): print("Reading geoip csv",geoipv4_csv) geoipv4_df = pd.read_csv(geoipv4_csv, names= geoip_all_colnames) geoipv4_ipvalues = geoipv4_df['geoip_ipfrom'].values # bisect searching assumes geoipv4_ipvalues are in increasing order else: geoipv4_df = None geoipv4_ipvalues = None print("No GeoIP csv data at ",geoipv4_csv) print("IP addresses will not be converted into geographic locations") print("Free Geo-IP data can be downloaded from IP2LOCATION.com") ``` # Phase 1 - Extract XMLs from mbz file and create hundreds of Excel files ``` # Each file can generate a list of tables (dataframes) # Recursively process each element. # For each non-leaf element we build an ordered dictionary of key-value pairs and attach this to an array for the particular element name # <foo id='1' j='a'> becomes data['foo'] = [ {'id':'1', j:'a'} ] # The exception is for leaf elements (no-child elements) in the form e.g. 
<blah>123</blah> # We treat these equivalently to attributes on the surrounding (parent) xml element # <foo id='1'><blah>123</blah></foo> becomes data['foo'] = [ {'id':'1', 'blah':'123'} ] # and no data['blah'] is created AUTOMATIC_IMPLICIT_XML_COLUMNS = 4 #SOURCE_LINE,PARENT_SHEET,PARENT_INDEX def process_element(data,dest_basedir, tablename_list, context, e): #deprecated has_no_children = len(e.getchildren()) == 0 has_no_children = len(e) == 0 has_no_attribs = len(e.attrib.keys()) == 0 text = e.text has_text = text is not None if has_text: text = text.strip() has_text = len(text) > 0 # Is this a leaf element e.g. <blah>123</blah> # For the datasets we care about, leaves should not be tables; we only want their value ignore_attribs_on_leaves = True # This could be refactored to return a dictionary, so multiple attributes can be attached to the parent if has_no_children and (has_no_attribs or ignore_attribs_on_leaves): if not has_no_attribs: print() print("Warning: Ignoring attributes on leaf element:" + e.tag+ ":"+ str(e.attrib)) print() return [e.tag,e.text] # Early return, attach the value to the parent (using the tag as the attribute name) table_name = e.tag if table_name not in data: tablename_list.append(table_name) data[table_name] = [] key_value_pairs = OrderedDict() key_value_pairs['SOURCE_LINE'] = e.sourceline key_value_pairs['PARENT_SHEET'] = context[0] key_value_pairs['PARENT_ROW_INDEX'] = context[1] key_value_pairs['PARENT_ID'] = context[2] #print(e.sourceline) # For correctness child_context needs to be after this line and before recursion data[table_name].append(key_value_pairs) myid = '' if 'id' in e.attrib: myid = e.attrib['id'] child_context = [table_name, len(data[table_name])-1, myid] # Used above context[0] during recursive call for key in sorted(e.attrib.keys()): key_value_pairs[key] = e.attrib[key] for child in e.iterchildren(): # Could refactor here to use dictionary to enable multiple key-values from a discarded leaf key,value = 
process_element(data,dest_basedir, tablename_list, child_context, child) if value: if key in key_value_pairs: key_value_pairs[key] += ',' + str(value) else: key_value_pairs[key] = str(value) if has_text: key_value_pairs['TEXT'] = e.text # If at least some non-whitespace text, then use original text return [e.tag,None] def tablename_to_sheetname(elided_sheetnames, tablename): sheetname = tablename # Future: There may be characters that are invalid. If so, remove them here.. #Excel sheetnames are limited to 31 characters. max_excel_sheetname_length = 31 if len(sheetname) <= max_excel_sheetname_length: return sheetname sheetname = sheetname[0:5] + '...' + sheetname[-20:] elided_sheetnames.append(sheetname) if elided_sheetnames.count(sheetname)>1: sheetname += str( elided_sheetnames.count(sheetname) + 1) return sheetname def decode_base64_to_latin1(encoded_val): try: return str(base64.b64decode(encoded_val) , 'latin-1') except Exception as e: traceback.print_exc() print("Not base64 latin1?", e) return '??Not-latin1 text' def decode_geoip(ip): try: ip = ip.strip() if not ip or geoipv4_df is None: return pd.Series(None, index=geoip_geo_columns) ipv4 = int(ipaddress.IPv4Address(ip)) index = bisect.bisect(geoipv4_ipvalues, ipv4) - 1 entry = geoipv4_df.iloc[index] assert entry.geoip_ipfrom <= ipv4 and entry.geoip_ipto >= ipv4 return entry[2:] # [geoip_geo_columns] # Drop ip_from and ip_to except Exception as e: traceback.print_exc() print("Bad ip?",ip, e) return pd.Series(None, index=geoip_geo_columns) def decode_unixtimestamp_to_UTC(seconds): if seconds == '': return '' try: return datetime.utcfromtimestamp(int(seconds)).strftime('%Y-%m-%d %H:%M:%S') except Exception as e: traceback.print_exc() print("Bad unix timestamp?", seconds , e) return '' def decode_html_to_text(html): if html is np.nan: return '' try: soup = BeautifulSoup(html,"lxml") return soup.get_text() except Exception as e: traceback.print_exc() print('Bad html?',html, e) return '???' 
def validate_anonid_data(anonid_df): #Expected columns for c in ['anonid','userid']: if c not in anonid_df.columns: raise ('anonid_csv_file\'' + anonid_csv_file + '\'should have a column named '+c) # No duplicate userid entries check_for_duplicates = anonid_df['userid'].duplicated(keep=False) if check_for_duplicates.any(): print(anonid_df[check_for_duplicates]) raise Exception('See above - fix the duplicates userid entries found in \'' + anonid_csv_file +'\'') anonid_df['userid'] = anonid_df['userid'].astype(str) def userid_to_anonid(userid): global anonid_df, generate_missing_anonid if userid is np.nan or len(userid) == 0: return '' row = anonid_df[ anonid_df['userid'] == userid ] if len( row ) == 1: return row['anonid'].values[0] if generate_missing_anonid: result = uuid.uuid4().hex anonid_df = anonid_df.append({ 'userid':userid, 'anonid':result}, ignore_index=True) else: result = '' return result def to_dataframe(table_name, table_data): df = pd.DataFrame(table_data) # Moodle dumps use $@NULL@$ for nulls df.replace('$@NULL@$','',inplace = True) # We found two base64 encoded columns in Moodle data- for col in df.columns & ['other','configdata']: df[ str(col) + '_base64'] = df[str(col)].map(decode_base64_to_latin1) for col in df.columns & ['timestart','timefinish','added','backup_date','original_course_startdate','original_course_enddate','timeadded','firstaccess','lastaccess','lastlogin','currentlogin','timecreated','timemodified','created','modified']: df[ str(col) + '_utc'] = df[str(col)].map(decode_unixtimestamp_to_UTC) # Extract text from html content for col in df.columns & ['message', 'description','commenttext','intro','conclusion','summary','feedbacktext','content','feedback','info', 'questiontext' , 'answertext']: df[ str(col) + '_text'] = df[str(col)].map(decode_html_to_text) # Moodle data has 'ip' and 'lastip' that are ipv4 dotted # Currently only ipv4 is implemented. 
geoipv4_df is None if the cvs file was not found if geoipv4_df is None: for col in df.columns & ['ip','lastip']: df = df.join( df[str(col)].apply(decode_geoip) ) for col in df.columns & ['userid','relateduserid' , 'realuserid']: col=str(col) if col == 'userid': out = 'anondid' else: out = col[0:-6] + '_anonid' df[ out ] = df[col].map(userid_to_anonid) if delete_userids: df.drop(columns=[col],inplace=True) if table_name == 'user': df['anonid'] = df['id'].map(userid_to_anonid) # Can add more MOODLE PROCESSING HERE :-) return df def to_absolute_file_url(filepath): return urllib.parse.urljoin( 'file:', urllib.request.pathname2url(os.path.abspath(filepath))) def write_excel_sheets(source_file, excelwriter, data, tablename_list): elided_sheetnames = [] table_sheet_mapping = dict() table_sheet_mapping[''] = '' # Top level parents have empty PARENT_SHEET for tablename in tablename_list: sheetname = tablename_to_sheetname(elided_sheetnames, tablename) table_sheet_mapping[tablename] = sheetname for tablename in tablename_list: df = to_dataframe(tablename, data[tablename]) #Convert table (=original xml tag) into real sheet name (not tag name) if 'PARENT_SHEET' in df.columns: df['PARENT_SHEET'] = df['PARENT_SHEET'].apply(lambda x: table_sheet_mapping[x]) df.index.rename(tablename, inplace=True) df.insert(0, 'SOURCE_FILE',source_file ,allow_duplicates=True) df.insert(1, 'SOURCE_TAG', tablename, allow_duplicates=True) sheetname = table_sheet_mapping[tablename] if sheetname != tablename: print("Writing "+ tablename + " as sheet "+ sheetname) else: print("Writing sheet "+ sheetname) df.to_excel(excelwriter, sheet_name=sheetname, index_label=tablename) return table_sheet_mapping def re_adopt_child_table(data, parent_tablename, parent_table, child_tablename): child_table = data[child_tablename] for row in child_table: if 'PARENT_SHEET' not in row.keys(): continue if row['PARENT_SHEET'] == parent_tablename: idx = row['PARENT_ROW_INDEX'] # Time to follow the pointer parent_row = 
parent_table[idx] #row['PARENT_TAG'] = parent_row['PARENT_TAG'] row['PARENT_ROW_INDEX'] = parent_row['PARENT_ROW_INDEX'] row['PARENT_ID'] = parent_row['PARENT_ID'] row['PARENT_SHEET'] = parent_row['PARENT_SHEET'] def discard_empty_tables(data,tablename_list): nonempty_tables = [] for tablename in tablename_list: table = data[tablename] # print(tablename, len(table),'rows') if len(table) == 0: # print("Skipping empty table",tablename) continue include = False for row in table: if len(row) > AUTOMATIC_IMPLICIT_XML_COLUMNS: # Found more than just PARENT_TAG,... columns include = True break if include: # print("Including",tablename) nonempty_tables.append(tablename) else: # print("Skipping unnecessary table",tablename) # Will need to fixup child items that still think this is their container # More efficient if we kept a mapping of child tables, rather than iterate over tables for childname in tablename_list: re_adopt_child_table(data, tablename, table, childname) pass return nonempty_tables def process_one_file(dest_basedir, relative_sub_dir, xml_filename, dry_run): print('process_one_file(\''+dest_basedir+'\',\''+relative_sub_dir+'\',\''+xml_filename+'\')') #print("Reading XML " + xml_filename) #Original parser xmlroot = ET.parse(xml_filename).getroot() # Use lxml #xmlroot = etree.parse(xml_filename) #print("Processing...") data = dict() tablename_list = [] initial_context = ['','',''] # Todo : Consider missing integer index e.g. 
['',None,''] process_element(data, dest_basedir ,tablename_list, initial_context, xmlroot) nonempty_tables = discard_empty_tables(data,tablename_list) if len(nonempty_tables) == 0: #print("no tables left to write") return # We use underscore to collate source subdirectories basename = os.path.basename(xml_filename).replace('.xml','').replace('_','') use_sub_dirs = False if use_sub_dirs: output_dir = os.path.join(dest_basedir, relative_sub_dir) if not os.path.exists(output_dir): os.mkdirs(output_dir) output_filename = os.path.join(output_dir, basename + '.xlsx') else: sub = relative_sub_dir.replace(os.sep,'_').replace('.','') if (len(sub) > 0) and sub[-1] != '_': sub = sub + '_' output_filename = os.path.join(dest_basedir, sub + basename + '.xlsx') if dry_run: # For debugging return print("** Writing ", output_filename) if os.path.exists(output_filename): os.remove(output_filename) excelwriter = pd.ExcelWriter(output_filename, engine= excelengine) # absolute path is useful to open original files on local machine if(False): source_file = to_absolute_file_url(xml_filename) else: source_file = os.path.normpath(xml_filename) try: write_excel_sheets(source_file, excelwriter, data,nonempty_tables) excelwriter.close() except Exception as ex: traceback.print_exc() print(type(ex)) print(ex) pass finally: excelwriter = None print() def process_directory(xml_basedir, out_basedir, relative_sub_dir,toplevel_xml_only, dry_run): xml_dir = os.path.join(xml_basedir, relative_sub_dir) file_list = sorted(os.listdir(xml_dir)) for filename in file_list: if filename.endswith('.xml'): print("Processing", filename) process_one_file(out_basedir, relative_sub_dir, os.path.join(xml_dir,filename), dry_run) if toplevel_xml_only: return # No recursion into subdirs(e.g. 
for testing) # Recurse for filename in file_list: candidate_sub_dir = os.path.join(relative_sub_dir, filename) if os.path.isdir( os.path.join(xml_basedir, candidate_sub_dir)) : process_directory(xml_basedir, out_basedir, candidate_sub_dir,toplevel_xml_only, dry_run) def extract_xml_files_in_tar(tar_file, extract_dir): os.makedirs(extract_dir) extract_count = 0 for tarinfo in tar_file: if os.path.splitext(tarinfo.name)[1] == ".xml": #print(extract_dir, tarinfo.name) tar_file.extract( tarinfo, path = extract_dir) extract_count = extract_count + 1 return extract_count def archive_file_to_output_dir(archive_file): return os.path.splitext(archive_file)[0] + '-out' def archive_file_to_xml_dir(archive_file): return os.path.splitext(archive_file)[0] + '-xml' def lazy_extract_mbz(archive_source_file,expanded_archive_directory,skip_expanding_if_xml_files_found): has_xml_files = len( glob.glob( os.path.join(expanded_archive_directory,'*.xml') ) ) > 0 if has_xml_files and skip_expanding_if_xml_files_found: print("*** Reusing existing xml files in", expanded_archive_directory) return if os.path.isdir(expanded_archive_directory): print("*** Deleting existing files in", expanded_archive_directory) raise "Comment out this line if it is going to delete the correct directory" shutil.rmtree(expanded_archive_directory) with tarfile.open(archive_source_file, mode='r|*') as tf: print("*** Expanding",archive_source_file, "to", expanded_archive_directory) extract_count = extract_xml_files_in_tar(tf, expanded_archive_directory) print('***',extract_count,' xml files extracted') def process_xml_files(expanded_archive_directory,out_basedir,toplevel_xml_only,dry_run, anonid_output_csv): global anonid_df print("*** Source xml directory :", expanded_archive_directory) print("*** Output directory:", out_basedir) if not os.path.isdir(out_basedir): os.makedirs(out_basedir) process_directory(expanded_archive_directory, out_basedir,'.',toplevel_xml_only,dry_run) if anonid_output_csv: filepath = 
os.path.join(out_basedir,anonid_output_csv) print("Writing ",filepath,len(anonid_df.index),'rows') anonid_df.to_csv( filepath, index = None, header=True) print("*** Finished processing XML") ``` # Phase 2 - Aggregate Excel documents ``` def list_xlsx_files_in_dir(xlsx_dir): xlsx_files = sorted(glob.glob(os.path.join(xlsx_dir,'*.xlsx'))) xlsx_files = [file for file in xlsx_files if os.path.basename(file)[0] != '~' ] return xlsx_files # Phase 2 - Aggregate multiple xlsx that are split across multiple course sections into a single Excel file def create_aggregate_sections_map(xlsx_dir): xlsx_files = list_xlsx_files_in_dir(xlsx_dir) sections_map = dict() for source_file in xlsx_files: path = source_file.split(os.path.sep) # TODO os.path.sep nameparts = path[-1].split('_') target = nameparts[:] subnumber = None if len(nameparts)>3 and nameparts[-3].isdigit(): subnumber = -3 # probably unnecessary as _ are removed from basename if len(nameparts)>2 and nameparts[-2].isdigit(): subnumber = -2 if not subnumber: continue target[subnumber] = 'ALLSECTIONS' key = (os.path.sep.join(path[:-1])) + os.path.sep+ ( '_'.join(target)) if key not in sections_map.keys(): sections_map[key] = [] sections_map[key].append(source_file) return sections_map # Phase 3 - Aggregate over common objects def create_aggregate_common_objects_map(xlsx_dir): xlsx_files = list_xlsx_files_in_dir(xlsx_dir) combined_map = dict() # path/_activities_workshop_ALLSECTIONS_logstores.xlsx will map to key=logstores.xlsx for source_file in xlsx_files: path = source_file.split(os.path.sep) # TODO os.path.sep nameparts = path[-1].split('_') target = nameparts[-1] if 'ALL_' == path[-1][:4]: continue # Guard against restarts key = (os.path.sep.join(path[:-1])) + os.path.sep+ ('ALL_' + target) if key not in combined_map.keys(): combined_map[key] = [] combined_map[key].append(source_file) return combined_map def rebase_row(row,rebase_map): if isinstance(row['PARENT_SHEET'] , str): return str(int(row['PARENT_ROW_INDEX']) + 
int(rebase_map[ row['XLSX_SOURCEFILE'] + '#' + row['PARENT_SHEET'] ])) else: return '' def check_no_open_Excel_documents_in_Excel(dir): # Excel creates temporary backup files that start with tilde when an Excel file is open in Excel if not os.path.isdir(dir): return open_files = glob.glob(os.path.join(dir,'~*.xlsx')) if len(open_files): print( 'Please close ' + '\n'.join(open_files) + '\nin directory\n'+dir) raise IOError('Excel files '+('\n'.join(open_files))+' are currently open in Excel') def aggregate_multiple_excel_files(source_filenames): allsheets = OrderedDict() rebase_map = {} # !! Poor sort - it assumes the integers are the same char length. Todo improve so that filename_5_ < filename_10_ for filename in sorted(source_filenames): print('Reading and aggregating sheets in' , filename) xl = pd.ExcelFile(filename) for sheet in xl.sheet_names: df = xl.parse(sheet) df['XLSX_SOURCEFILE'] = filename if sheet not in allsheets.keys(): allsheets[sheet] = df rebase_map[filename+'#'+sheet] = 0 else: row_offset = len(allsheets[sheet]) rebase_map[filename+'#'+sheet] = row_offset # We will need this to rebase parent values df[ df.columns[0] ] += row_offset allsheets[sheet] = allsheets[sheet].append(df, ignore_index =True, sort = False) xl.close() # print('rebase_map',rebase_map) # The row index of the parent no longer starts at zero print('Rebasing parent index entries in all sheets') for sheet in xl.sheet_names: df = allsheets[sheet] df['PARENT_ROW_INDEX'] = df.apply( lambda row: rebase_row( row,rebase_map), axis = 1) df.drop('XLSX_SOURCEFILE', axis = 1, inplace = True) return allsheets def write_aggregated_model(output_filename, allsheets, dry_run): print("Writing",output_filename) if dry_run: print("Dry run. 
Skipping ", allsheets.keys()) return excelwriter = pd.ExcelWriter(output_filename, engine = excelengine) try: print("Writing Sheets ", allsheets.keys()) for sheetname,df in allsheets.items(): df.to_excel(excelwriter, sheet_name = sheetname, index = 'INDEX') excelwriter.close() except Exception as ex: print(type(ex)) print(ex) pass finally: excelwriter.close() print('Writing finished\n') def move_old_files(xlsx_dir, filemap, subdirname,dry_run): xlsxpartsdir = os.path.join(xlsx_dir,subdirname) if dry_run: print('Dry run. Skipping move_old_files', filemap.items(),' to ', subdirname) return if not os.path.isdir(xlsxpartsdir): os.mkdir(xlsxpartsdir) for targetfile,sources in filemap.items(): for file in sources: dest=os.path.join(xlsxpartsdir, os.path.basename(file)) print(dest) os.rename(file, dest) def aggreate_over_sections(xlsx_dir,dry_run): sections_map= create_aggregate_sections_map(xlsx_dir) for targetfile,sources in sections_map.items(): allsheets = aggregate_multiple_excel_files(sources) write_aggregated_model(targetfile, allsheets, dry_run) move_old_files(xlsx_dir, sections_map,'_EACH_SECTION_', dry_run) def aggreate_over_common_objects(xlsx_dir,dry_run): combined_map = create_aggregate_common_objects_map(xlsx_dir) for targetfile,sources in combined_map.items(): allsheets = aggregate_multiple_excel_files(sources) write_aggregated_model(targetfile, allsheets, dry_run) move_old_files(xlsx_dir, combined_map, '_ALL_SECTIONS_', dry_run) def create_column_metalist(xlsx_dir,dry_run): xlsx_files = list_xlsx_files_in_dir(xlsx_dir) metalist = [] for filename in xlsx_files: print(filename) xl = pd.ExcelFile(filename) filename_local = os.path.basename(filename) for sheet in xl.sheet_names: df = xl.parse(sheet,nrows=1) for column_name in df.columns: metalist.append([filename_local,sheet,column_name]) xl.close() meta_df = pd.DataFrame(metalist, columns=['file','sheet','column']) meta_filename = os.path.join(xlsx_dir,'__All_COLUMNS.csv') if dry_run: print('Dry run. 
Skipping',meta_filename) else: meta_df.to_csv(meta_filename,sep='\t',index=False) ``` # Run ``` # Configuration / settings here archive_source_file = None expanded_archive_directory = None skip_expanding_if_xml_files_found = True output_directory = None generate_missing_anonid = True geoip_datadir = None anonid_csv_file = None # A simple csv file with header 'userid','anonid' anonid_output_filename='userids_anonids.csv' # None if mapping should not be written delete_userids = False # User table will still have an 'id' column #relateduserids,realuserid andu userid columns in other tables are dropped # Internal testing options toplevel_xml_only = False # Don't process subdirectories. Occasionally useful for internal testing dry_run = False # Don't write Excel files. Occasionally useful for internal testing # Override the above here with the path to your mbz file (or expanded contents) archive_source_file = os.path.join('..','example.mbz') # ... or use expanded_archive_directory to point to an mbz file that has already been expanded into XML files anonid_csv_file = None # os.path.join('..', 'example-userid-to-anonid.csv') generate_missing_anonid = True delete_userids = True geoip_datadir= './geoip' # Some typical numbers: # A 400 student 15 week course with 16 sections # Created a 4GB mbz which expanded to 367 MB of xml. (the non-xml files were not extracted) # 30 total minutes processing time: 15 minutes to process xml, # 6 minutes for each aggegration step, 2 minutes for the column summary # Final output: 60MB of 'ALL_' Excel 29 files (largest: ALL_quiz.xlsx 35MB, ALL_logstores 10MB, ALL_forum 5MB) # The initial section output (moved to _EACH_SECTION_/) has 334 xlsx files, # which is futher reduced (see _ALL_SECTIONS_ ) 67 files. 
if not archive_source_file and not expanded_archive_directory: raise ValueError('Nothing to do: No mbz archive file or archive directory (with .xml files) specified') if archive_source_file and not os.path.isfile(archive_source_file) : raise ValueError('archive_source_file (' + os.path.abspath(archive_source_file) + ") does not refer to an existing archive") if not expanded_archive_directory: expanded_archive_directory = archive_file_to_xml_dir(archive_source_file) if not output_directory: if archive_source_file: output_directory = archive_file_to_output_dir(archive_source_file) else: raise ValueError('Please specify output_directory') if anonid_csv_file: print ('Using ' + anonid_csv_file + ' mapping') anonid_df = pd.read_csv(anonid_csv_file) validate_anonid_data(anonid_df) else: anonid_df = pd.DataFrame([{'userid':'-1','anonid':'example1234'}]) start_time = datetime.now() print(start_time) if(geoip_datadir and 'geoipv4_df' not in globals()): load_geoip_data(geoip_datadir) if archive_source_file: lazy_extract_mbz(archive_source_file,expanded_archive_directory,skip_expanding_if_xml_files_found) check_no_open_Excel_documents_in_Excel(output_directory) # Now the actual processing can begin process_xml_files(expanded_archive_directory,output_directory, toplevel_xml_only, dry_run, anonid_output_filename) # At this point we have 100s of Excel documents (one per xml file), each with several sheets (~ one per xml tag)! # We can aggregate over all of the course sections aggreate_over_sections(output_directory, dry_run) # Workshops, assignments etc have a similar structure, so we also aggregate over similar top-level objects aggreate_over_common_objects(output_directory, dry_run) create_column_metalist(output_directory, dry_run) end_time = datetime.now() print(end_time) print(end_time-start_time) ```
github_jupyter
![qiskit_header.png](attachment:qiskit_header.png) # _*Qiskit Finance: Loading and Processing Stock-Market Time-Series Data*_ The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial. *** ### Contributors Jakub Marecek<sup>[1]</sup> ### Affiliation - <sup>[1]</sup>IBMQ ### Introduction Across many problems in finance, one starts with time series. Here, we showcase how to generate pseudo-random time-series, download actual stock-market time series from a number of common providers, and how to compute time-series similarity measures. ``` %matplotlib inline from qiskit.finance.data_providers import * import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import datetime import matplotlib.pyplot as plt from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() data = RandomDataProvider(tickers=["TICKER1", "TICKER2"], start = datetime.datetime(2016, 1, 1), end = datetime.datetime(2016, 1, 30), seed = 1) data.run() ``` Once the data are loaded, you can run a variety of algorithms on those to aggregate the data. Notably, you can compute the covariance matrix or a variant, which would consider alternative time-series similarity measures based on <a target="_blank" href="https://en.wikipedia.org/wiki/Dynamic_time_warping">dynamic time warping</a> (DTW). In DTW, changes that vary in speed, e.g., one stock's price following another stock's price with a small delay, can be accommodated. ``` means = data.get_mean_vector() print("Means:") print(means) rho = data.get_similarity_matrix() print("A time-series similarity measure:") print(rho) plt.imshow(rho) plt.show() cov = data.get_covariance_matrix() print("A covariance matrix:") print(cov) plt.imshow(cov) plt.show() ``` If you wish, you can look into the underlying pseudo-random time-series using. Please note that the private class members (starting with underscore) may change in future releases of Qiskit. 
``` print("The underlying evolution of stock prices:") for (cnt, s) in enumerate(data._tickers): plt.plot(data._data[cnt], label=s) plt.legend() plt.xticks(rotation=90) plt.show() for (cnt, s) in enumerate(data._tickers): print(s) print(data._data[cnt]) ``` Clearly, you can adapt the number and names of tickers and the range of dates: ``` data = RandomDataProvider(tickers=["CompanyA", "CompanyB", "CompanyC"], start = datetime.datetime(2015, 1, 1), end = datetime.datetime(2016, 1, 30), seed = 1) data.run() for (cnt, s) in enumerate(data._tickers): plt.plot(data._data[cnt], label=s) plt.legend() plt.xticks(rotation=90) plt.show() ``` ### Access to closing-price time-series While the access to real-time data usually requires a payment, it is possible to access historical (adjusted) closing prices via Wikipedia and Quandl free of charge, following registration at: https://www.quandl.com/?modal=register In the code below, one needs to specify actual tickers of actual NASDAQ issues and the access token you obtain from Quandl; by running the code below, you agree to the Quandl terms and conditions, including a liability waiver. Notice that at least two tickers are required for the computation of covariance and time-series matrices, but hundreds of tickers may go beyond the fair usage limits of Quandl. ``` stocks = ["REPLACEME1", "REPLACEME2"] wiki = WikipediaDataProvider( token = "REPLACEME", tickers = stocks, stockmarket = StockMarket.NASDAQ, start = datetime.datetime(2016,1,1), end = datetime.datetime(2016,1,30)) wiki.run() ``` Once the data are loaded, you can again compute the covariance matrix or its DTW variants. ``` if wiki._n <= 1: raise Exception("Not enough data to plot covariance or time-series similarity. 
Please use at least two tickers.") rho = wiki.get_similarity_matrix() print("A time-series similarity measure:") print(rho) plt.imshow(rho) plt.show() cov = wiki.get_covariance_matrix() print("A covariance matrix:") print(cov) plt.imshow(cov) plt.show() ``` If you wish, you can look into the underlying time-series using: ``` print("The underlying evolution of stock prices:") for (cnt, s) in enumerate(stocks): plt.plot(wiki._data[cnt], label=s) plt.legend() plt.xticks(rotation=90) plt.show() for (cnt, s) in enumerate(stocks): print(s) print(wiki._data[cnt]) ``` ### [Optional] Setup token to access recent, fine-grained time-series If you would like to download professional data, you will have to set-up a token with one of the major providers. Let us now illustrate the data with NASDAQ Data on Demand, which can supply bid and ask prices in arbitrary resolution, as well as aggregates such as daily adjusted closing prices, for NASDAQ and NYSE issues. If you don't have NASDAQ Data on Demand license, you can contact NASDAQ (cf. https://business.nasdaq.com/intel/GIS/Nasdaq-Data-on-Demand.html) to obtain a trial or paid license. If and when you have access to NASDAQ Data on Demand using your own token, you should replace REPLACE-ME below with the token. To assure the security of the connection, you should also have your own means of validating NASDAQ's certificates. The DataOnDemandProvider constructor has an optional argument `verify`, which can be `None` or a string or a boolean. If it is `None`, certify certificates will be used (default). If verify is a string, it should be pointing to a certificate for the HTTPS connection to NASDAQ (dataondemand.nasdaq.com), either in the form of a CA_BUNDLE file or a directory wherein to look. 
``` from qiskit.finance.data_providers.data_on_demand_provider import StockMarket try: nasdaq = DataOnDemandProvider(token = "REPLACE-ME", tickers = stocks, stockmarket = StockMarket.NASDAQ, start = datetime.datetime(2016,1,1), end = datetime.datetime(2016,1,2)) nasdaq.run() nasdaq.plot() except QiskitFinanceError as e: print(e) print("You need to replace REPLACE-ME with a valid token.") ``` Another major vendor of stock market data is Exchange Data International (EDI), whose API can be used to query over 100 emerging and frontier markets that are Africa, Asia, Far East, Latin America and Middle East, as well as the more established ones. See: https://www.exchange-data.com/pricing-data/adjusted-prices.php#exchange-coverage for an overview of the coverage. The access again requires a valid access token to replace REPLACE-ME below. The token can be obtained on a trial or paid-for basis at: https://www.quandl.com/ In the following example, you need to replace TICKER1 and TICKER2 with valid tickers at the London Stock Exchange. ``` from qiskit.finance.data_providers.exchangedataprovider import StockMarket try: lse = ExchangeDataProvider(token = "REPLACE-ME", tickers = ["TICKER1", "TICKER2"], stockmarket = StockMarket.LONDON, start = datetime.datetime(2019,1,1), end = datetime.datetime(2019,1,30)) lse.run() lse.plot() except QiskitFinanceError as e: print(e) print("You need to replace REPLACE-ME with a valid token.") ``` For the actual use of the data, please see the <a href="../optimization/portfolio_optimization.ipynb">portfolio_optimization</a> or <a href="../optimization/portfolio_diversification.ipynb">portfolio_diversification</a> notebooks. ``` import qiskit.tools.jupyter %qiskit_version_table %qiskit_copyright ```
github_jupyter
Azure ML & Azure Databricks notebooks by Parashar Shah. Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster. **install azureml-sdk** * Source: Upload Python Egg or PyPi * PyPi Name: `azureml-sdk[databricks]` * Select Install Library ``` import azureml.core # Check core SDK version number - based on build number of preview/master. print("SDK version:", azureml.core.VERSION) ``` Please specify the Azure subscription Id, resource group name, workspace name, and the region in which you want to create the Azure Machine Learning Workspace. You can get the value of your Azure subscription ID from the Azure Portal, and then selecting Subscriptions from the menu on the left. For the resource_group, use the name of the resource group that contains your Azure Databricks Workspace. NOTE: If you provide a resource group name that does not exist, the resource group will be automatically created. This may or may not succeed in your environment, depending on the permissions you have on your Azure Subscription. ``` # subscription_id = "<your-subscription-id>" # resource_group = "<your-existing-resource-group>" # workspace_name = "<a-new-or-existing-workspace; it is unrelated to Databricks workspace>" # workspace_region = "<your-resource group-region>" # Set auth to be used by workspace related APIs. # For automation or CI/CD ServicePrincipalAuthentication can be used. # https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py auth = None # import the Workspace class and check the azureml SDK version # exist_ok checks if workspace exists or not. 
from azureml.core import Workspace ws = Workspace.create(name = workspace_name, subscription_id = subscription_id, resource_group = resource_group, location = workspace_region, auth = auth, exist_ok=True) #get workspace details ws.get_details() ws = Workspace(workspace_name = workspace_name, subscription_id = subscription_id, resource_group = resource_group, auth = auth) # persist the subscription id, resource group name, and workspace name in aml_config/config.json. ws.write_config() #if you need to give a different path/filename please use this #write_config(path="/databricks/driver/aml_config/",file_name=<alias_conf.cfg>) help(Workspace) # import the Workspace class and check the azureml SDK version from azureml.core import Workspace ws = Workspace.from_config(auth = auth) #ws = Workspace.from_config(<full path>) print('Workspace name: ' + ws.name, 'Azure region: ' + ws.location, 'Subscription id: ' + ws.subscription_id, 'Resource group: ' + ws.resource_group, sep = '\n') ```
github_jupyter
# 🔢 Vectorizing Guide Firstly, we must import what we need from Relevance AI ``` from relevanceai import Client from relevanceai.utils.datasets import ( get_iris_dataset, get_palmer_penguins_dataset, get_online_ecommerce_dataset, ) client = Client() ``` ## Example 1 For this first example we going to work with a purely numeric dataset. The Iris dataset contains 4 numeric features and another text column with the label ``` iris_documents = get_iris_dataset() dataset = client.Dataset("iris") dataset.insert_documents(iris_documents, create_id=True) ``` Here we can see the dataset schema, pre-vectorization ``` dataset.schema ``` Vectorizing is as simple specifying `create_feature_vector=True` While species is a text feature, we do not need to vectorize this. Besides, smart typechecking recognises this field as a text field we would not usually vectorize. `create_feature_vector=True` is what creates our "document" vectors. This concatenates all numeric/vector fields in a single "document" vector. This new vector_field is always called `f"_dim{n_dims}_feature_vector_"`, with n_dims being the size of the concatenated vector. Furthermore, for nuermic stability accross algorithms, sklearn's StandardScaler is applied to the concatenated vector field. If the concatenated size of a vector field is >512 dims, PCA is automatically applied. ``` dataset.vectorize(create_feature_vector=True) ``` ### or ``` dataset.vectorize(fields=["numeric"], create_feature_vector=True) ``` You can see below that the dataset schema has been altered accordingly ``` dataset.schema ``` ## Example 2 For this second example we going to work with a mixed numeric and text dataset. 
The Palmer Penguins dataset contains several numeric features and another text column called "Comments" ``` penguins_documents = get_palmer_penguins_dataset() dataset.insert_documents(penguins_documents, create_id=True) ``` We must install the default Encoders for text vectorizing from vectorhub ``` !pip install vectorhub[encoders-text-tfhub-windows] # If you are on windows !pip install vectorhub[encoders-text-tfhub] # other ``` No arguments automatically detects what text and image fieds are presetn in your dataset. Since this is a new function, its typechecking could be faulty. If need be, specifiy the data types in the same format as the schema with `_text_` denoting text_fields and `_image_` denoting image fields. ``` dataset.vectorize() ``` ### or ``` dataset.vectorize(fields=["Comments"], create_feature_vector=True) ```
github_jupyter
1. Recap == In the last mission, we explored how to use a simple k-nearest neighbors machine learning model that used just one feature, or attribute, of the listing to predict the rent price. We first relied on the <span style="background-color: #F9EBEA; color:##C0392B">accommodates</span> column, which describes the number of people a living space can comfortably accommodate. Then, we switched to the <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span> column and observed an improvement in accuracy. While these were good features to become familiar with the basics of machine learning, it's clear that using just a single feature to compare listings doesn't reflect the reality of the market. An apartment that can accommodate 4 guests in a popular part of Washington D.C. will rent for much higher than one that can accommodate 4 guests in a crime ridden area. There are 2 ways we can tweak the model to try to improve the accuracy (decrease the RMSE during validation): - increase the number of attributes the model uses to calculate similarity when ranking the closest neighbors - increase <span style="background-color: #F9EBEA; color:##C0392B">k</span>, the number of nearby neighbors the model uses when computing the prediction In this mission, we'll focus on increasing the number of attributes the model uses. When selecting more attributes to use in the model, we need to watch out for columns that don't work well with the distance equation. This includes columns containing: - non-numerical values (e.g. city or state) - Euclidean distance equation expects numerical values - missing values - distance equation expects a value for each observation and attribute - non-ordinal values (e.g. 
latitude or longitude) - ranking by Euclidean distance doesn't make sense if all attributes aren't ordinal In the following code screen, we've read the <span style="background-color: #F9EBEA; color:##C0392B">dc_airbnb.csv</span> dataset from the last mission into pandas and brought over the data cleaning changes we made. Let's first look at the first row's values to identify any columns containing non-numerical or non-ordinal values. In the next screen, we'll drop those columns and then look for missing values in each of the remaining columns. <br> <div class="alert alert-info"> <b>Exercise Start.</b> </div> **Description**: 1. Use the <span style="background-color: #F9EBEA; color:##C0392B">DataFrame.info()</span> method to return the number of non-null values in each column. ``` import pandas as pd import numpy as np np.random.seed(1) dc_listings = pd.read_csv('dc_airbnb.csv') dc_listings = dc_listings.loc[np.random.permutation(len(dc_listings))] stripped_commas = dc_listings['price'].str.replace(',', '') stripped_dollars = stripped_commas.str.replace('$', '') dc_listings['price'] = stripped_dollars.astype('float') dc_listings.info() ``` 2. Removing features == The following columns contain non-numerical values: - <span style="background-color: #F9EBEA; color:##C0392B">room_type</span>: e.g. **Private room** - <span style="background-color: #F9EBEA; color:##C0392B">city</span>: e.g. **Washington** - <span style="background-color: #F9EBEA; color:##C0392B">state</span>: e.g. **DC** while these columns contain numerical but non-ordinal values: - <span style="background-color: #F9EBEA; color:##C0392B">latitude</span>: e.g. **38.913458** - <span style="background-color: #F9EBEA; color:##C0392B">longitude</span>: e.g. **-77.031** - <span style="background-color: #F9EBEA; color:##C0392B">zipcode</span>: e.g. **20009** Geographic values like these aren't ordinal, because a smaller numerical value doesn't directly correspond to a smaller value in a meaningful way. 
For example, the zip code 20009 isn't smaller or larger than the zip code 75023 and instead both are unique, identifier values. Latitude and longitude value pairs describe a point on a geographic coordinate system and different equations are used in those cases (e.g. [haversine](https://en.wikipedia.org/wiki/Haversine_formula)). While we could convert the <span style="background-color: #F9EBEA; color:##C0392B">host_response_rate</span> and <span style="background-color: #F9EBEA; color:##C0392B">host_acceptance_rate</span> columns to be numerical (right now they're object data types and contain the <span style="background-color: #F9EBEA; color:##C0392B">%</span> sign), these columns describe the host and not the living space itself. Since a host could have many living spaces and we don't have enough information to uniquely group living spaces to the hosts themselves, let's avoid using any columns that don't directly describe the living space or the listing itself: - <span style="background-color: #F9EBEA; color:##C0392B">host_response_rate</span> - <span style="background-color: #F9EBEA; color:##C0392B">host_acceptance_rate</span> - <span style="background-color: #F9EBEA; color:##C0392B">host_listings_count</span> Let's remove these 9 columns from the Dataframe <br> <div class="alert alert-info"> <b>Exercise Start.</b> </div> **Description**: 1. Remove the 9 columns we discussed above from <span style="background-color: #F9EBEA; color:##C0392B">dc_listings</span>: - 3 containing non-numerical values - 3 containing numerical but non-ordinal values - 3 describing the host instead of the living space itself 2. 
Verify the number of null values of each remain columns ``` import pandas as pd import numpy as np np.random.seed(1) dc_listings = pd.read_csv('dc_airbnb.csv') dc_listings = dc_listings.loc[np.random.permutation(len(dc_listings))] stripped_commas = dc_listings['price'].str.replace(',', '') stripped_dollars = stripped_commas.str.replace('$', '') dc_listings['price'] = stripped_dollars.astype('float') columns = ['room_type', 'city', 'state', 'latitude', 'longitude', 'zipcode', 'host_response_rate','host_acceptance_rate','host_listings_count'] dc_listings.drop(columns, inplace=True, axis=1) dc_listings.info() ``` 3. Handling missing values == Of the remaining columns, 3 columns have a few missing values (less than 1% of the total number of rows): - <span style="background-color: #F9EBEA; color:##C0392B">bedrooms</span> - <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span> - <span style="background-color: #F9EBEA; color:##C0392B">beds</span> Since the number of rows containing missing values for one of these 3 columns is low, we can select and remove those rows without losing much information. There are also 2 columns have a large number of missing values: - <span style="background-color: #F9EBEA; color:##C0392B">cleaning_fee</span> - 37.3% of the rows - <span style="background-color: #F9EBEA; color:##C0392B">security_deposit</span> - 61.7% of the rows and we can't handle these easily. We can't just remove the rows containing missing values for these 2 columns because we'd miss out on the majority of the observations in the dataset. Instead, let's remove these 2 columns entirely from consideration. <br> <div class="alert alert-info"> <b>Exercise Start.</b> </div> **Description**: 1. Drop the <span style="background-color: #F9EBEA; color:##C0392B">cleaning_fee</span> and <span style="background-color: #F9EBEA; color:##C0392B">security_deposit</span> columns from <span style="background-color: #F9EBEA; color:##C0392B">dc_listings</span>. 2. 
Then, remove all rows that contain a missing value for the <span style="background-color: #F9EBEA; color:##C0392B">bedrooms</span>, <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span>, or <span style="background-color: #F9EBEA; color:##C0392B">beds</span> column from <span style="background-color: #F9EBEA; color:##C0392B">dc_listings</span>. - You can accomplish this by using the [Dataframe method dropna()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html) and setting the <span style="background-color: #F9EBEA; color:##C0392B">axis</span> parameter to **0**. - Since only the <span style="background-color: #F9EBEA; color:##C0392B">bedrooms</span>, <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span> and <span style="background-color: #F9EBEA; color:##C0392B">beds</span> columns contain any missing values, rows containing missing values in these columns will be removed. 3. Display the null value counts for the updated <span style="background-color: #F9EBEA; color:##C0392B">dc_listings</span> Dataframe to confirm that there are no missing values left. ``` dc_listings.drop(['cleaning_fee','security_deposit'], inplace=True, axis=1) dc_listings = dc_listings.replace('', np.nan) dc_listings.dropna(how='any', inplace=True) dc_listings.info() ``` 4. 
Normalize columns == Here's how the <span style="background-color: #F9EBEA; color:##C0392B">dc_listings</span> Dataframe looks like after all the changes we made: | accommodates | bedrooms | bathrooms | beds | price | minimum_nights | maximum_nights | number_of_reviews | |--------------|----------|-----------|------|-------|----------------|----------------|-------------------| | 2 | 1.0 | 1.0 | 1.0 | 125.0 | 1 | 4 | 149 | | 2 | 1.0 | 1.5 | 1.0 | 85.0 | 1 | 30 | 49 | | 1 | 1.0 | 0.5 | 1.0 | 50.0 | 1 | 1125 | 1 | | 2 | 1.0 | 1.0 | 1.0 | 209.0 | 4 | 730 | 2 | | 12 | 5.0 | 2.0 | 5.0 | 215.0 | 2 | 1825 | 34 | You may have noticed that while the <span style="background-color: #F9EBEA; color:##C0392B">accommodates</span>, <span style="background-color: #F9EBEA; color:##C0392B">bedrooms</span>, <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span>, <span style="background-color: #F9EBEA; color:##C0392B">beds</span>, and <span style="background-color: #F9EBEA; color:##C0392B">minimum_nights</span> columns hover between 0 and 12 (at least in the first few rows), the values in the <span style="background-color: #F9EBEA; color:##C0392B">maximum_nights</span> and <span style="background-color: #F9EBEA; color:##C0392B">number_of_reviews</span> columns span much larger ranges. For example, the <span style="background-color: #F9EBEA; color:##C0392B">maximum_nights</span> column has values as low as 4 and high as 1825, in the first few rows itself. If we use these 2 columns as part of a k-nearest neighbors model, these attributes could end up having an outsized effect on the distance calculations because of the largeness of the values. For example, 2 living spaces could be identical across every attribute but be vastly different just on the <span style="background-color: #F9EBEA; color:##C0392B">maximum_nights</span> column. 
Normalizing the values in each column to the [standard normal distribution](https://en.wikipedia.org/wiki/Normal_distribution#Standard_normal_distribution) (mean of 0, standard deviation of 1) preserves the distribution of the values in each column while aligning the scales.
normalized_col = first_transform / dc_listings['maximum_nights'].std() ``` To apply this transformation across all of the columns in a Dataframe, you can use the corresponding Dataframe methods mean() and std(): >```python normalized_listings = (dc_listings - dc_listings.mean()) / (dc_listings.std()) ``` These methods were written with mass column transformation in mind and when you call <span style="background-color: #F9EBEA; color:##C0392B">mean()</span> or <span style="background-color: #F9EBEA; color:##C0392B">std()</span>, the appropriate column means and column standard deviations are used for each value in the Dataframe. Let's now normalize all of the feature columns in <span style="background-color: #F9EBEA; color:##C0392B">dc_listings</span>. <br> <div class="alert alert-info"> <b>Exercise Start.</b> </div> **Description**: 1. Normalize all of the feature columns in <span style="background-color: #F9EBEA; color:##C0392B">dc_listings</span> and assign the new Dataframe containing just the normalized feature columns to <span style="background-color: #F9EBEA; color:##C0392B">normalized_listings</span>. 2. Add the price column from <span style="background-color: #F9EBEA; color:##C0392B">dc_listings</span> to <span style="background-color: #F9EBEA; color:##C0392B">normalized_listings</span>. 3. Display the first 3 rows in <span style="background-color: #F9EBEA; color:##C0392B">normalized_listings</span>. ``` normalized_listings = (dc_listings - dc_listings.mean()) / (dc_listings.std()) normalized_listings['price'] = dc_listings['price'] normalized_listings.head(3) ``` 5. Euclidean distance for multivariate case == In the last mission, we trained 2 univariate k-nearest neighbors models. The first one used the <span style="background-color: #F9EBEA; color:##C0392B">accommodates</span> attribute while the second one used the <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span> attribute. 
$\displaystyle d = \sqrt{(accommodates_1 - accommodates_2)^2 + (bathrooms_1 - bathrooms_2)^2}$
from scipy.spatial import distance

# Euclidean distance between the first (iloc[0]) and fifth (iloc[4]) rows,
# using only the two normalized features the exercise asks for.
vector1 = normalized_listings[['accommodates', 'bathrooms']].iloc[0]
vector2 = normalized_listings[['accommodates', 'bathrooms']].iloc[4]
first_fifth_distance = distance.euclidean(vector1, vector2)
print(first_fifth_distance)
In this screen, we'll learn about the [scikit-learn library](http://scikit-learn.org/), which is the most popular machine learning library in Python.
Scikit-learn uses a similar object-oriented style to Matplotlib and you need to instantiate an empty model first by calling the constructor: >```python from sklearn.neighbors import KNeighborsRegressor knn = KNeighborsRegressor() ``` If you refer to the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor), you'll notice that by default: - <span style="background-color: #F9EBEA; color:##C0392B">n_neighbors:</span> the number of neighbors, is set to **5** - <span style="background-color: #F9EBEA; color:##C0392B">algorithm:</span> for computing nearest neighbors, is set to **auto** - <span style="background-color: #F9EBEA; color:##C0392B">p:</span> set to **2**, corresponding to Euclidean distance Let's set the <span style="background-color: #F9EBEA; color:##C0392B">algorithm</span> parameter to <span style="background-color: #F9EBEA; color:##C0392B">brute</span> and leave the <span style="background-color: #F9EBEA; color:##C0392B">n_neighbors</span> value as **5**, which matches the implementation we wrote in the last mission. If we leave the <span style="background-color: #F9EBEA; color:##C0392B">algorithm</span> parameter set to the default value of <span style="background-color: #F9EBEA; color:##C0392B">auto</span>, scikit-learn will try to use tree-based optimizations to improve performance (which are outside of the scope of this mission): >```python knn = KNeighborsRegressor(algorithm='brute') ``` 7. Fitting a model and making predictions == Now, we can fit the model to the data using the [fit method](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor.fit). For all models, the <span style="background-color: #F9EBEA; color:##C0392B">fit</span> method takes in 2 required parameters: - matrix-like object, containing the feature columns we want to use from the training set. 
# List-like object, containing just the target column, `price`, from the training set.
train_target = train_df['price']
Now that we specified the training data we want used to make predictions, we can use the [predict method](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor.predict) to make predictions on the test set. The <span style="background-color: #F9EBEA; color:##C0392B">predict</span> method has only one required parameter: - matrix-like object, containing the feature columns from the dataset we want to make predictions on The number of feature columns you use during both training and testing need to match or scikit-learn will return an error: >```python predictions = knn.predict(test_df[['accommodates', 'bathrooms']]) ``` The <span style="background-color: #F9EBEA; color:##C0392B">predict()</span> method returns a NumPy array containing the predicted <span style="background-color: #F9EBEA; color:##C0392B">price</span> values for the test set. You now have everything you need to practice the entire scikit-learn workflow. <br> <div class="alert alert-info"> <b>Exercise Start.</b> </div> **Description**: 1. Create an instance of the [KNeighborsRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor) class with the following parameters: - <span style="background-color: #F9EBEA; color:##C0392B">n_neighbors</span>: 5 - <span style="background-color: #F9EBEA; color:##C0392B">algorithm</span>: brute 2. Use the <span style="background-color: #F9EBEA; color:##C0392B">fit</span> method to specify the data we want the k-nearest neighbor model to use. Use the following parameters: - training data, feature columns: just the <span style="background-color: #F9EBEA; color:##C0392B">accommodates</span> and <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span> columns, in that order, from <span style="background-color: #F9EBEA; color:##C0392B">train_df</span>. 
- training data, target column: the <span style="background-color: #F9EBEA; color:##C0392B">price</span> column from <span style="background-color: #F9EBEA; color:##C0392B">train_df</span>. 3. Call the <span style="background-color: #F9EBEA; color:##C0392B">predict</span> method to make predictions on: - the <span style="background-color: #F9EBEA; color:##C0392B">accommodates</span> and <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span> columns from <span style="background-color: #F9EBEA; color:##C0392B">test_df</span> - assign the resulting NumPy array of predicted price values to <span style="background-color: #F9EBEA; color:##C0392B">predictions</span>. ``` from sklearn.neighbors import KNeighborsRegressor train_df = normalized_listings.iloc[0:2792] test_df = normalized_listings.iloc[2792:] knn = KNeighborsRegressor(n_neighbors=5, algorithm='brute') train_features = train_df[['accommodates', 'bathrooms']] train_target = train_df['price'] knn.fit(train_features, train_target) predictions = knn.predict(test_df[['accommodates', 'bathrooms']]) print(predictions) ``` 8. Calculating MSE using Scikit-Learn == Earlier in this mission, we calculated the MSE and RMSE values using the pandas arithmetic operators to compare each predicted value with the actual value from the <span style="background-color: #F9EBEA; color:##C0392B">price</span> column of our test set. Alternatively, we can instead use the [sklearn.metrics.mean_squared_error function()](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html#sklearn.metrics.mean_squared_error). Once you become familiar with the different machine learning concepts, unifying your workflow using scikit-learn helps save you a lot of time and avoid mistakes. 
The <span style="background-color: #F9EBEA; color:##C0392B">mean_squared_error()</span> function takes in 2 inputs: - list-like object, representing the true values - list-like object, representing the predicted values using the model For this function, we won't show any sample code and will leave it to you to understand the function [from the documentation](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html#sklearn.metrics.mean_squared_error) itself to calculate the MSE and RMSE values for the predictions we just made. <br> <div class="alert alert-info"> <b>Exercise Start.</b> </div> **Description**: 1. Use the <span style="background-color: #F9EBEA; color:##C0392B">mean_squared_error</span> function to calculate the MSE value for the predictions we made in the previous screen. 2. Assign the MSE value to <span style="background-color: #F9EBEA; color:##C0392B">two_features_mse</span>. 3. Calculate the RMSE value by taking the square root of the MSE value and assign to <span style="background-color: #F9EBEA; color:##C0392B">two_features_rmse</span>. 4. Display both of these error scores using the <span style="background-color: #F9EBEA; color:##C0392B">print</span> function. ``` from sklearn.metrics import mean_squared_error two_features_mse = mean_squared_error(test_df['price'], predictions) two_features_rmse = np.sqrt(two_features_mse) print('MSE two features:',two_features_mse, '\nRMSE two features:',two_features_rmse) ``` 9. 
Using more features == Here's a table comparing the MSE and RMSE values for the 2 univariate models from the last mission and the multivariate model we just trained: | feature(s) | MSE | RMSE | |-------------------------|---------|-------| | accommodates | 18646.5 | 136.6 | | bathrooms | 17333.4 | 131.7 | | accommodates, bathrooms | 15660.4 | 125.1 | As you can tell, the model we trained using both features ended up performing better (lower error score) than either of the univariate models from the last mission. Let's now train a model using the following 4 features: - <span style="background-color: #F9EBEA; color:##C0392B">accommodates</span> - <span style="background-color: #F9EBEA; color:##C0392B">bedrooms</span> - <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span> - <span style="background-color: #F9EBEA; color:##C0392B">number_of_reviews</span> Scikit-learn makes it incredibly easy to swap the columns used during training and testing. We're going to leave this for you as a challenge to train and test a k-nearest neighbors model using these columns instead. Use the code you wrote in the last screen as a guide. <br> <div class="alert alert-info"> <b>Exercise Start.</b> </div> **Description**: 1. Create a new instance of the [KNeighborsRegressor class](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor) with the following parameters: - <span style="background-color: #F9EBEA; color:##C0392B">n_neighbors</span>: 5 - <span style="background-color: #F9EBEA; color:##C0392B">algorithm</span>: brute 2. 
Fit a model that uses the following columns from our training set (**train_df**): - <span style="background-color: #F9EBEA; color:##C0392B">accommodates</span> - <span style="background-color: #F9EBEA; color:##C0392B">bedrooms</span> - <span style="background-color: #F9EBEA; color:##C0392B">bathrooms</span> - <span style="background-color: #F9EBEA; color:##C0392B">number_of_reviews</span> 3. Use the model to make predictions on the test set (**test_df**) using the same columns. Assign the NumPy array of predictions to <span style="background-color: #F9EBEA; color:##C0392B">four_predictions</span>. 4. Use the <span style="background-color: #F9EBEA; color:##C0392B">mean_squared_error()</span> function to calculate the MSE value for these predictions by comparing <span style="background-color: #F9EBEA; color:##C0392B">four_predictions</span> with the price column from **test_df**. Assign the computed MSE value to <span style="background-color: #F9EBEA; color:##C0392B">four_mse</span>. 5. Calculate the RMSE value and assign to <span style="background-color: #F9EBEA; color:##C0392B">four_rmse</span>. 6. Display <span style="background-color: #F9EBEA; color:##C0392B">four_mse</span> and <span style="background-color: #F9EBEA; color:##C0392B">four_rmse</span> using the print function. ``` from sklearn.neighbors import KNeighborsRegressor features = ['accommodates', 'bedrooms', 'bathrooms', 'number_of_reviews'] knn = KNeighborsRegressor(n_neighbors=5, algorithm='brute') knn.fit(train_df[features], train_df['price']) four_predictions = knn.predict(test_df[features]) four_mse = mean_squared_error(test_df['price'], four_predictions) four_rmse = four_mse** (1/2) print('MSE four features:', four_mse,'\nRMSE four features:', four_rmse) ``` 10. Using all features == So far so good! 
features = ['accommodates', 'bedrooms', 'bathrooms', 'number_of_reviews',
            'minimum_nights', 'maximum_nights', 'beds']
knn = KNeighborsRegressor(n_neighbors=5, algorithm='brute')
knn.fit(train_df[features], train_df['price'])
all_features_predictions = knn.predict(test_df[features])
all_features_mse = mean_squared_error(test_df['price'], all_features_predictions)
all_features_rmse = all_features_mse ** (1/2)
# Labels say "all features" — this cell trains on every feature column,
# unlike the previous four-feature model.
print('MSE all features:', all_features_mse, '\nRMSE all features:', all_features_rmse)
Next steps == Interestingly enough, the RMSE value actually increased to **125.1** when we used all of the features available to us. This means that selecting the right features is important and that using more features doesn't automatically improve prediction accuracy. We should re-phrase the lever we mentioned earlier from: - increase the number of attributes the model uses to calculate similarity when ranking the closest neighbors to: - select the relevant attributes the model uses to calculate similarity when ranking the closest neighbors The process of selecting features to use in a model is known as **feature selection**. In this mission, we prepared the data to be able to use more features, trained a few models using multiple features, and evaluated the different performance tradeoffs. We explored how using more features doesn't always improve the accuracy of a k-nearest neighbors model. In the next mission, we'll explore another knob for tuning k-nearest neighbor models - the k value.
github_jupyter
def _convert_boxes_to_roi_format(boxes):
    """
    Convert rois into the torchvision format.

    :param boxes: The roi boxes as a native tensor[B, K, 4].
    :return: The roi boxes in the format that roi pooling and roi align in
        torchvision require. Native tensor[B*K, 5], where column 0 is the
        batch index of each box and columns 1-4 are the box coordinates.
    """
    import torch
    batch_size, boxes_per_image = boxes.shape[0], boxes.shape[1]
    concat_boxes = boxes.view((-1, 4))
    # Build the batch-index column without a Python loop: repeat each batch
    # id once per box in that image (matches the original loop's output).
    ids = torch.arange(batch_size, dtype=boxes.dtype, device=boxes.device)
    ids = ids.repeat_interleave(boxes_per_image).unsqueeze(1)
    rois = torch.cat([ids, concat_boxes], dim=1)
    return rois
Default: 1.0 """ super().__init__() self.output_size = output_size self.spatial_scale = spatial_scale @RunOnlyOnce def _build_pytorch(self, features, rois): pass def _call_pytorch(self, features, rois): from torchvision.ops import roi_pool as _roi_pool torchvision_rois = _convert_boxes_to_roi_format(rois) result = _roi_pool(features, torchvision_rois, self.output_size, self.spatial_scale) # Fix output shape N, C, _, _ = features.shape result = result.view((N, -1, C, self.output_size[0], self.output_size[1])) return result @RunOnlyOnce def _build_tf(self, features, rois): # TODO implement raise NotImplementedError() def _call_tf(self, features, rois): # TODO implement raise NotImplementedError() from babilim.core.tensor import Tensor import numpy as np roi = RoiPool(output_size=(7, 4)) tensor = Tensor(data=np.zeros((2,3,24,24), dtype=np.float32), trainable=False) rois = Tensor(data=np.array([[[0,0,12,12],[4,7,6,23]], [[0,0,12,12], [4,7,6,23]]], dtype=np.float32), trainable=False) print(rois.shape) print(tensor.shape) result = roi(tensor, rois) print(result.shape) #export class RoiAlign(ModuleNative): def __init__(self, output_size, spatial_scale=1.0): """ Performs Region of Interest (RoI) Align operator described in Mask R-CNN. Creates a callable object, when calling you can use these Arguments: * **features**: (Tensor[N, C, H, W]) input tensor * **rois**: (Tensor[N, K, 4]) the box coordinates in (x1, y1, x2, y2) format where the regions will be taken from. * **return**: (Tensor[N, K, C, output_size[0], output_size[1]]) The feature maps crops corresponding to the input rois. Parameters to RoiAlign constructor: :param output_size: (Tuple[int, int]) the size of the output after the cropping is performed, as (height, width) :param spatial_scale: (float) a scaling factor that maps the input coordinates to the box coordinates. 
Default: 1.0 """ super().__init__() self.output_size = output_size self.spatial_scale = spatial_scale @RunOnlyOnce def _build_pytorch(self, features, rois): pass def _call_pytorch(self, features, rois): from torchvision.ops import roi_align as _roi_align torchvision_rois = _convert_boxes_to_roi_format(rois) # :param aligned: (bool) If False, use the legacy implementation. # If True, pixel shift it by -0.5 for align more perfectly about two neighboring pixel indices. # This version in Detectron2 result = _roi_align(features, torchvision_rois, self.output_size, self.spatial_scale, aligned=True) # Fix output shape N, C, _, _ = features.shape result = result.view((N, -1, C, self.output_size[0], self.output_size[1])) return result @RunOnlyOnce def _build_tf(self, features, rois): # TODO implement raise NotImplementedError() def _call_tf(self, features, rois): # TODO implement raise NotImplementedError() from babilim.core.tensor import Tensor import numpy as np roi = RoiAlign(output_size=(7, 4)) tensor = Tensor(data=np.zeros((2,3,24,24), dtype=np.float32), trainable=False) rois = Tensor(data=np.array([[[0,0,12,12],[4,7,6,23]], [[0,0,12,12], [4,7,6,23]]], dtype=np.float32), trainable=False) print(rois.shape) print(tensor.shape) result = roi(tensor, rois) print(result.shape) ```
github_jupyter
``` %load_ext autoreload %autoreload 2 import sys import pathlib sys.path.append(str(pathlib.Path().cwd().parent)) from typing import Tuple from load_dataset import Dataset from plotting import plot_ts dataset = Dataset('../data/dataset/') ``` ### В чем заключаются недостатки полносвязных сетей? * невозможность улавливать временные закономерности в контексте предыдущих точек (архитектурное ограничение) * фиксированный размер входных данных * фиксированный размер выходных данных ### Область применимости рекуретных сетей для задачи анализа временных рядов * большое количество экзогенных признаков, имеющих сложную нелинейную зависимость с целевым рядом * очень сложная временная структура имеющая наложение разных сезонных и цикличных паттернов * ряды с часто меняющимся паттерном, или большим количеством аномалий * когда есть необходимость в нефиксированной длине входных и выходных данных (например многомерные ряды, где для разных компонент хочется предоставить разное количество лагов) ### Особенности подготовки данных - необходима нормализация данных, иначе сеть будет плохо сходиться и медленно обучаться. ``` import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler data = np.array(range(0, 100, 10)).reshape(-1, 1) scaler = MinMaxScaler((0, 1)) scaler.fit(data) transformed = scaler.transform(data) transformed inverse = scaler.inverse_transform(transformed) inverse ``` ### Особенность подготвки данных - обработка последовательностей разной длины. ``` from keras.preprocessing.sequence import pad_sequences sequences = [ [1, 2, 3, 4], [3, 4, 5], [5, 6], [3] ] pad_sequences(sequences, padding='pre') pad_sequences(sequences, padding='post') pad_sequences(sequences, maxlen=2) pad_sequences(sequences, maxlen=2, truncating='post') ``` ### Какие архитектуры lstm нас интересуют в контексте временных рядов? 
def transform_into_matrix(ts: pd.Series, num_lags: int) -> Tuple[np.ndarray, np.ndarray]:
    """
    Transforms a time series into a lags matrix to allow applying
    supervised learning algorithms

    Parameters
    ------------
    ts
        Time series to transform
    num_lags
        Number of lags to use

    Returns
    --------
    X, y: np.arrays of shapes (len(ts) - num_lags, num_lags) and
        (len(ts) - num_lags,) — the lag feature matrix and the target values
    """
    ts_values = ts.values
    data = {}
    # lag_k holds the value k steps before the target column lag_0.
    for i in range(num_lags + 1):
        data[f'lag_{num_lags - i}'] = np.roll(ts_values, -i)
    # np.roll wraps around, so the last num_lags rows are invalid and dropped.
    lags_matrix = pd.DataFrame(data)[:-num_lags]
    lags_matrix.index = ts.index[num_lags:]
    return lags_matrix.drop('lag_0', axis=1).values, lags_matrix['lag_0'].values
model.fit(X_train, y_train, epochs=100, verbose=0) y_pred = model.predict(X_test) pd.Series(y_test.flatten())[-50:].plot() pd.Series(y_pred.flatten())[-50:].plot() ``` ### Bidirectional LSTM #### Сделаем LSTM слой сети Bidirectional при помощи доп слоя Biderectional и сравним качество ``` from keras.layers import Bidirectional model = Sequential() # your code here model.compile(optimizer='adam', loss='mse') model.fit(X_train, y_train, epochs=10, verbose=0) y_pred = model.predict(X_test) ``` ### Seq2Seq LSTM - когда нужно сделать предсказание на несколько точек вперед #### Подготовим данные ``` from typing import Tuple def transform_ts_into_matrix(ts: pd.Series, num_lags_in: int, num_lags_out: int) -> Tuple[np.array, np.array]: """ Данная функция должна пройтись скользящим окном по временному ряду и для каждых num_lags_in точек в качестве признаков собрать num_lags_out следующих точек в качестве таргета. Вернуть два np.array массива из X_train и y_train соответственно """ sequence = ts.values X, y = list(), list() i = 0 outer_idx = num_lags_out while outer_idx < len(sequence): inner_idx = i + num_lags_in outer_idx = inner_idx + num_lags_out X_, y_ = sequence[i:inner_idx], sequence[inner_idx:outer_idx] X.append(X_) y.append(y_) i += 1 return np.array(X), np.array(y) # получим X и y при помощи предыдущей функции и разбейте на трейн и тест NUM_LAGS_IN = 28 NUM_LAGS_OUT = 7 X, y = transform_ts_into_matrix(ts, NUM_LAGS_IN, NUM_LAGS_OUT) X = X.reshape((X.shape[0], X.shape[1], 1)) split_idx = int(len(X)*0.8) X_train, X_test = X[:split_idx], X[split_idx:] y_train, y_test = y[:split_idx], y[split_idx:] # объявим енкодер model = Sequential() model.add(LSTM(100, activation='relu', input_shape=(NUM_LAGS_IN, 1))) # добавим промежуточный слой, преобразующий выход с енкодера для входного слоя в декодер from keras.layers import RepeatVector model.add(RepeatVector(NUM_LAGS_OUT)) # обьявим декодер model.add(LSTM(50, activation='relu', return_sequences=True)) # обьявим выходной слой - 
размерность на выходе получается при помощи дополнительного слоя TimeDistributed from keras.layers import TimeDistributed model.add(TimeDistributed(Dense(1))) ``` #### Обучим модель и получим предсказание на тесте ``` model.compile(optimizer='adam', loss='mse') model.fit(X_train, y_train, epochs=10, verbose=0) y_pred = model.predict(X_test) ``` ### Пример с многомерным рядом. ``` ts_multi = pd.read_csv('../data/stability_index.csv', index_col='timestamp', parse_dates=True) ts_multi.fillna(ts_multi.mean(), axis=0, inplace=True) def transform_multi_ts_into_matrix(ts: pd.DataFrame, num_lags: int): """ Данная функция должна пройтись скользящим окном по временному ряду и собрать в качестве признаков X np.array размерности (len(ts)-num_lags, n_dims, num_lags), а в качестве y np.array размерности (len(ts)-num_lags, n_dims), где n_dims - размерность многомерного ряда. То есть для всех компонент временного ряда мы должны взять num_lags предыдущих точек каждой компонент в качестве признаков и все компоненты текущей точки в качестве target """ sequence = ts.values X, y = list(), list() i = 0 end_i = num_lags while end_i < len(sequence): seq_x, seq_y = sequence[i:end_i], sequence[end_i] X.append(seq_x) y.append(seq_y) i += 1 end_i = i + num_lags return np.array(X), np.array(y) NUM_LAGS = 14 N_DIMS = ts_multi.shape[1] X, y = transform_multi_ts_into_matrix(ts_multi, NUM_LAGS) X[0].shape # объявим енкодер model = Sequential() model.add(LSTM(100, activation='relu', input_shape=(NUM_LAGS, N_DIMS))) # добавим промежуточный слой, преобразующий выход с енкодера для входного слоя в декодер from keras.layers import RepeatVector model.add(RepeatVector(N_DIMS)) # обьявим декодер model.add(LSTM(50, activation='relu', return_sequences=True)) # обьявим выходной слой - размерность на выходе получается при помощи дополнительного слоя TimeDistributed from keras.layers import TimeDistributed model.add(TimeDistributed(Dense(1))) model.compile(optimizer='adam', loss='mse') model.fit(X, y, 
epochs=50) ```
github_jupyter
# Milestone2 Document ## Feedback - Introduction: A nice introduction! - Background -0.5: It would be hard for users to understand automatic differentiation, computational graph, and evaluation trace if you don't give the corresponding illustrations in the Background section **Revision: provided a concrete example of evaluation trace and computational graph** - How to use -0.5: didn't show how the users can get the package from online. Is AutodiffCST the name of a python file or the package? Please give different names to avoid confusion. **Revision: added instructions for installation, and change the python file name to AD.py** - Implementation: Using a tree as the core data structure sounds new. It would be better if you could explain it with more details. **Revision: Changed core data structure to AD object, and updated the implementation part accordingly.** ## Section 1: Introduction This package autodiffCST implements automatic differentiation. It can be used to automatically differentiate functions via forward mode and reverse mode, depending on the user's choice. It also provides an option of performing second order differentiation. Differentiation, namely, the process of finding the derivatives of functions, is very prevalent in various areas of science and engineering. It can often be used to find the extrema of functions with single or multiple variables. With the advance of technology, more complicated functions and larger dataset are developed. The difficulty of performing differentiation has greatly increased and we are more dependent on computers to take derivates. Nowadays, we have three major ways of performing differentiation: symbolic, numerical and automatic (algorithmic) differentiation. We will focus on automatic differentiation for the rest of this document. 
## Section 2: Background

### 2.1 An Overview of Auto Differentiation

Automatic differentiation (AD) uses algorithms to efficiently and accurately evaluate derivatives of numeric functions. It has the advantage of avoiding symbolic manipulation of functions while reaching an accuracy close to machine precision. Application of automatic differentiation includes but is not limited to astronomy, dynamic systems, numerical analysis research, optimization in finance and engineering.

The idea behind AD is to break down a function into a sequence of elementary operations and functions that have easily attained derivatives, and then sequentially apply the chain rule to evaluate the derivatives of these operations to compute the derivative of the whole function. The two main methods of performing automatic differentiation are forward mode and reverse mode. Some other AD algorithms implement a combination of forward mode and reverse mode, but this package will implement them separately.

To better understand automatic differentiation, it is necessary to get familiar with some key concepts that are used in the algorithms of AD. We will use the rest of this section to briefly introduce them.

### 2.2 Elementary operations and functions

The algorithm of automatic differentiation breaks down functions into elementary arithmetic operations and elementary functions. Elementary arithmetic operations include addition, subtraction, multiplication, division and raising to a power (we can also consider taking roots of a number as raising it to powers less than $1$). Elementary functions include exponential, logarithmic, and trigonometric functions. All of the operations and functions mentioned here have derivatives that are easy to compute, so we use them as elementary steps in the evaluation trace of AD.

### 2.3 The Chain Rule

The chain rule can be used to calculate the derivative of nested functions, such as those of the form $u(v(t))$. 
For this function, the derivative of $u$ with respect to $t$ is $$\dfrac{\partial u}{\partial t} = \dfrac{\partial u}{\partial v}\dfrac{\partial v}{\partial t}.$$

A more general form of the chain rule applies when a function $h$ has several arguments, or when its argument is a vector. Suppose we have $h = h(y(t))$ where $y \in R^n$ and $t \in R^m $. Here, $h$ is the combination of $n$ functions, each of which has $m$ variables. Using the chain rule, the derivative of $h$ with respect to $t$, now called the gradient of $h$, is

$$ \nabla_{t}h = \sum_{i=1}^{n}{\frac{\partial h}{\partial y_{i}}\nabla y_{i}\left(t\right)}.$$

The chain rule enables us to break down complicated and nested functions into layers and operations. Our automatic differentiation algorithm sequentially uses the chain rule to compute the derivatives of functions.

### 2.4 Evaluation Trace and Computational Graph

These two concepts are the core of our automatic differentiation algorithm. Since they are so important and can be created at the same time, creating them would be the first thing to do when a function is inputted into the algorithm.

The evaluation trace tracks each layer of operations while evaluating the input function and its derivative. At each step the evaluation trace holds the traces, elementary operations, numerical values, elementary derivatives and partial derivatives.

The computational graph is a graphical visualization of the evaluation trace. It holds the traces and elementary operations of the steps, connecting them via arrows pointing from input to output for each step. The computational graph helps us to better understand the structure of the function and its evaluation trace.

Forward mode performs the operations from the start to the end of the graph or evaluation trace. Reverse mode performs the operations backwards, applying the chain rule each time it determines the derivative of a trace. 
Here, we provide an example of a evaluation trace and a computational graph of the function $f(x,y)=exp(−(sin(x)−cos(y))^2)$, with derivatives evaluated at $f(π/2,π/3)$. Evaluation trace: |Trace|Elementary Function| &nbsp; &nbsp; &nbsp; &nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;Current Value &nbsp; &nbsp; &nbsp; &nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;|Elementary Function Derivative| &nbsp; &nbsp; &nbsp; &nbsp;&nbsp; &nbsp; &nbsp; &nbsp; &nbsp; &nbsp;$\nabla_x$ &nbsp; &nbsp; &nbsp; &nbsp;&nbsp;&nbsp; &nbsp; &nbsp; &nbsp;|&nbsp; &nbsp; &nbsp;&nbsp; &nbsp; &nbsp;&nbsp; &nbsp; &nbsp; &nbsp;$\nabla_y$ &nbsp; &nbsp; &nbsp; &nbsp;&nbsp; &nbsp;| | :---: | :-----------: | :-------: | :-------------: | :----------: | :-----------: | | $x_{1}$ | $x_{1}$ | $\frac{\pi}{2}$ | $\dot{x}_{1}$ | $1$ | $0$ | | $y_{1}$ | $y_{1}$ | $\frac{\pi}{3}$ | $\dot{y}_{1}$ | $0$ | $1$ | | $v_{1}$ | $sin(x_{1})$ | $1$ | $cos(x_{1})\dot{x}_{1}$ | $0$ | $0$ | | $v_{2}$ | $cos(y_{1})$ | $0.5$ | $-sin(y_{1})\dot{y}_{1}$| $0$ | $-0.866$ | | $v_{3}$ | $v_{1}-v_{2}$ | $0.5$ | $\dot{v}_{1}-\dot{v}_{2}$| $0$ | $0.866$ | | $v_{4}$ | $v_{3}^2$ | $0.25$ | $2v_{3}\dot{v}_{3}$ | $0$ | $0.866$ | | $v_{5}$ | $-v_{4}$ | $-0.25$| $-\dot{v}_{4}$ | $0$ | $-0.866$ | | $v_{6}$ | $exp(v_{5})$ | $0.779$| $exp(v_{5})\dot{v}_{5}$ | $0$ | $-0.6746$ | | $f$ | $v_{6}$ | $0.779$| $\dot{v}_{6}$ | $0$ | $-0.6746$ | Computational graph: ![2.4 Graph](C_graph_example.jpg "Computational Graph") ## Section 3: How to Use AutodiffCST **Installation** Our package is for Python 3 only. To install AutodiffCST, you need to have pip3 installed first. If you don't, please install pip3 following these instructions https://pip.pypa.io/en/stable/installing/. Then, you could install this package by running ```pip3 install AutodiffCST``` from the command line. 
An alternative is to clone our repository by running ```git clone https://github.com/auto-differentiaters-in-CST/cs107-FinalProject.git``` from the command line and then ```cd <AD directory>```(directory name will be determined later), ```pip install -r requirements.txt```.

**User Guide**

After installation, users could import this package by ```from AutodiffCST import AD``` and ```from autodiffcst import admath```. These two modules would allow the users to do differentiation on functions with most mathematical operations. Then, they could simply initiate the AD object by giving the point where they wish to differentiate. Moreover, they could also try other supplementary features as in the code demo provided below.

``` python
# import modules
import numpy as np
from AutodiffCST import AD as ad
from autodiffcst import admath as admath

# base case: initialize AD object with scalar values
x = ad(5, tag = "x")   # initialize AD object called "x" with the value 5
y = ad(3, tag = "y")   # initialize AD object called "y" with the value 3

f = x*y + 1            # build a function with AD objects, the function will also be an AD object
print(f)               # print 9.0

dfdx = f.diff(direction = "x")   # returns the derivative with respect to x
print(dfdx)                      # print 3

jacobian = ad.jacobian(f)        # returns a gradient vector of f
print(jacobian)                  # print [5,3]

f2 = x + admath.sin(y)           # build a function with AD objects
print(f2)                        # print AD(value: 5.141120008059867, derivatives: {'x': 1, 'y': -0.9899924966004454})

dfdy = f2.diff(direction = "y")  # returns the derivative with respect to y
print(dfdy)                      # print -0.9899924966004454

jacobian2 = ad.jacobian(f2)      # returns a gradient vector of f2
print(jacobian2)                 # print [1, -0.9899924966004454]

# These are the most important features for our forward AD. Would add more later
...
```

## Section 4: Software Organization

The home directory of our software package would be structured as follows. 
- LICENSE - README.md - requirements.txt - docs/ * quickstart_tutotial.md * model_documentation.md * testing_guidelines.md * concepts_explanation.md * references.md - setup.py - autodiffcst/ * \_\_init\_\_.py * AD.py * admath.py - tests/ * test_core.py * test_extension.py - TravisCI.yml - CodeCov.yml Specificly speaking, the README file would contain a general package description and the necessary information for users to navigate in the subdirectories. Besides, we would place our documentation, testing guidelines, a simple tutorial and relative references in the doc directory. Moreover, to package our model with PyPI, we need to include setup.py and a src directory, where stores the source code about our model. Furthermore, we would put a collection of test cases in tests directory. Last but not least, we would include TravisCI.yml and CodeCov.yml in our home directory for integrated test. In this package, we plan to use the following public modules. - Modules for mathmatical calculation: * Numpy: we would use it for matrix operations, and basic math functions and values, such as sin, cos, \pi, e, etc. - Modules for testing: * pydoc * doctest * Pytest - Other modules: * sys * setuptools: we would use is for publishing our model with PyPI. To distribute our package, we would use PyPI so that users could easily install the package with *pip install*. After installing the package, users can use ```from AutodiffCST import AD``` and ```from autodiffcst import admath``` to import the package. These two modules are where the core of this package resides: * AD: defines the AD object class that we use to perform automatic differentiation and overwrites basic math operation dunder methods for AD. Also provides two core functions to perform on AD: diff() and jacobian(). * admath: defines functions that perform elementary math operations on AD, which include those that cannot be performed by overwriting dunder methods, such as logarithm and trigonometry. 
To better organize our software, we plan to use PyScaffold and Sphinx. The former could help us set up the project while the latter would polish our documentation.

## Section 5: Implementation

Our main data structure is the AD object, which has the attributes of a value, a derivative and a tag. In terms of the classes, our main class is the AD object, and we would probably have several inherited classes for our extensions. In the AD class, we would have the following methods:

- a constructor

``` python
def __init__(self, val, tags, ders=1, mode = "forward"):
    self.val = val
    if (isinstance(tags, list)) and (isinstance(ders, dict)):
        self.tags = tags
        self.ders = ders
    else:
        self.tags = [tags]
        self.ders = {tags: ders}
    self.mode = mode
```

- overloaded dunder methods as follows:

``` python
__add__
__sub__
__pow__
__mul__
__mod__
__div__
__iadd__
```

&ensp; and more basic operations according to https://www.python-course.eu/python3_magic_methods.php

- a diff method, which takes in a direction, and returns the derivative of the function.

``` python
def diff(self, direction):
    if direction in self.ders:
        return self.ders[direction]
    else:
        return 0
```

- a gradient method, which takes in a vector of directions, and returns a vector of the partial derivatives at each direction.

- a jacobian method, which takes in a vector of AD functions and a vector of directions, and returns the Jacobian matrix.

In our implementation, we would use some external dependencies such as Numpy and Math. To deal with elementary functions, we would allow users to enter functions that can be recognized by Python, factor an input function into a series of basic operations/functions (such as sin, sqrt, log, and exp) and use if-statements to check functions and return their symbolic derivatives. These operations are handled in admath.py. The functions in admath take an AD object as input and perform the corresponding operations on the AD objects by updating their values and derivatives.

# Future Features

1. 
Differentiate a list of functions. Our package now can deal with one function with multiple varaibles. In the future we plan to take a list of functions as input and output its Jacobian accordingly. Using Numpy array as the data structure to keep the Jacobian would be ideal, so we will need to change the implementation of our current jacobian method. 2. Higher order derivatives. A starting point would be allowing second order derivatives taken on our AD objects and returning the correct Jacobian matrix accordingly. Note that this cannot be achieved by simply applying diff() to an AD object twices, since the Jacobian matrix would be different and the datatype would be different. We would need to store the values of the second derivatives of our AD object at each elementary steps in the evaluation trace. Then we would need another function to return the second derivatives (possibly named second_diff()), which functions similarly to diff(), but returns the second derivatives of the AD object. The jacobian() function will also be modified accordingly. It will include an optional input (possibly initialized as second_order = False for defult and second_order = True for second derivatives), which signals that the function will return the Jacobian containing the second order derivatives of the AD object. Backup extensions: 3. Backward Mode. Right now our mode for doing automatic differetiation is defaulted to forward mode, because we have not implemented backward mode yet. We would need new functions that use the AD object class to implement backward mode. To keep track of the traces, we need to create a trace table, possibly using Numpy array, in the function that runs backward mode. 4. Newton's method. We would like to use our AD package to solve meaningful problems. One way to achieve this is to use it in an implementation of Newton's method. This will be a script that imports our AD package to calculate the derivatives in Newton's method. 
# Building Timeline - Nov.4: Finish M2A and M2B - Nov.7: Finish basics dunder methods for one variable - Nov.14: Finish Test Suite - Nov.19: Submit M2
github_jupyter
``` import pandas as pd from joblib import dump, load import os #set up directory #os.chdir() #Drug dic #open file df_drugs=pd.read_csv(r"C:\Users\mese4\Documents\The Data incubator\project\Drugmap\drugbank vocabulary.csv", encoding='ISO-8859-1') synonyms = [] drug_names = df_drugs['Common_name'].tolist() drug_names = [item.lower() for item in drug_names] #get synonims into a list for row in df_drugs['Synonyms']: row=str(row).lower() words = row.split(' | ') synonyms.append(words) #add names to synonims for x, y in zip(synonyms, drug_names): x.append(y) #make tuple list drug_lists= list(zip(drug_names, synonyms)) #make dict drug_dic = dict(drug_lists) #remove 'nan' drug_dic = {k:[elem for elem in v if elem != 'nan' ] for k,v in drug_dic.items()} #search engine keys = [key for key, value in drug_dic.items() if 'Cetuximab' in value] drug_dic #Save/open dump(drug_dic, 'drug_dic.joblib') drug_dic = load('drug_dic.joblib') #Gene dic df_genes=pd.read_csv(r"C:\Users\mese4\Documents\The Data incubator\project\genes_dataset\G-SynMiner_miner-geneHUGO.tsv",sep='\t') gene_tag = df_genes['symbol'].tolist() gene_tag = [item.lower() for item in gene_tag] gene_name = df_genes['name'].tolist() gene_name = [item.lower() for item in gene_name] #split synonims into a list synonyms_gene = [] for row in df_genes['alias_symbol']: row=str(row).lower() words = row.split('|') synonyms_gene.append(words) #split alias_name into a list synonyms_alias_name = [] for row in df_genes['alias_name']: row=str(row).lower() words = row.split('|') synonyms_alias_name.append(words) #split prev_symbol into a list synonyms_prev_symbol = [] for row in df_genes['prev_symbol']: row=str(row).lower() words = row.split('|') synonyms_prev_symbol.append(words) #all_combined = list(zip(gene_tag, gene_name, synonyms_gene,synonyms_alias_name,synonyms_prev_symbol )) #add tags for x, y in zip(synonyms_gene, gene_tag): x.append(y) #add name for x, y in zip(synonyms_gene, gene_name): x.append(y) #add alias_name for x, y 
in zip(synonyms_gene, synonyms_alias_name): x.append(y[0]) #add synonyms_prev_symbol for x, y in zip(synonyms_gene, synonyms_prev_symbol): x.append(y[0]) #make tuple list gene_lists= list(zip(gene_tag, synonyms_gene)) #make dict gene_dic = dict(gene_lists) #remove 'nan' gene_dic = {k:[elem for elem in v if elem != 'nan' ] for k,v in gene_dic.items()} #search engine keys = [key for key, value in gene_dic.items() if 'LORSDH' in value] #save open dump(gene_dic, 'gene_dic.joblib') gene_dic = load('gene_dic.joblib') [key for key, value in gene_dic.items() if 'nrf2' in value] ```
github_jupyter