text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` import sys import numpy as np import scipy.io as sio import keras import numpy as np import os import matplotlib import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from keras.optimizers import SGD from keras.optimizers import Adam, Adadelta from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau from keras import backend as K from keras.datasets import cifar10 from keras.models import load_model from data_input.data_input import getDataGenerator from model.DenseNet import createSTNDenseNet def flip(data): y_4 = np.zeros_like(data) y_1 = y_4 y_2 = y_4 first = np.concatenate((y_1, y_2, y_1), axis=1) second = np.concatenate((y_4, data, y_4), axis=1) third = first Data = np.concatenate((first, second, third), axis=0) return Data ``` # 数据集1 ``` matfn1 = 'data/dsm/dsm.mat' data1 = sio.loadmat(matfn1) dsm = data1['dsm'] matfn2 = 'data/dsm/truemap.mat' data2 = sio.loadmat(matfn2) groundtruth = data2['groundtruth'] ``` # 数据集2 ``` matfn1 = 'data/recology/Recology_data_DSM_cube.mat' data1 = sio.loadmat(matfn1) dsm = data1['Recology_data_DSM_cube'] matfn2 = 'data/recology/Recology_truthmap.mat' data2 = sio.loadmat(matfn2) groundtruth = data2['groundtruth'] dsm = 1 * ((dsm - np.min(dsm)) / (np.max(dsm) - np.min(dsm)) - 0.5) [nRow, nCol] = dsm.shape nTrain = 3000 nTest = 2000 num_class = int(np.max(groundtruth)) dsm=flip(dsm) groundtruth = flip(groundtruth) HalfWidth = 16 Wid = 2 * HalfWidth G = groundtruth[nRow-HalfWidth : 2 * nRow + HalfWidth, nCol - HalfWidth : 2 * nCol + HalfWidth] data = dsm[nRow-HalfWidth : 2 * nRow + HalfWidth, nCol - HalfWidth : 2 * nCol + HalfWidth] [row, col] = G.shape NotZeroMask = np.zeros([row, col]) Wid = 2 * HalfWidth NotZeroMask[HalfWidth + 1: -1 - HalfWidth + 1, HalfWidth + 1: -1 - HalfWidth + 1] = 1 G = G * NotZeroMask [Row, Col] = np.nonzero(G) nSample = np.size(Row) imdb = {} imdb['data'] = np.zeros([nTrain + nTest, 2 * HalfWidth, 2 * HalfWidth],dtype=np.float64) imdb['Labels'] = np.zeros([nTrain + 
nTest], dtype=np.int64) imdb['set'] = np.zeros([nTrain + nTest], dtype=np.int64) RandPerm = np.random.permutation(nSample) for iSample in range(nTrain + nTest): imdb['data'][iSample,:, :] = data[Row[RandPerm[iSample]] - HalfWidth: Row[RandPerm[iSample]] + HalfWidth, \ Col[RandPerm[iSample]] - HalfWidth: Col[RandPerm[iSample]] + HalfWidth].astype(np.float64) imdb['Labels'][iSample] = G[Row[RandPerm[iSample]], Col[RandPerm[iSample]]].astype(np.int64) print('Data is OK.') imdb['data'].shape num_class #define DenseNet parms ROWS = 32 COLS = 32 CHANNELS = 1 nb_classes = num_class batch_size = 4 nb_epoch = 100 img_dim = (ROWS,COLS,CHANNELS) densenet_depth = 40 densenet_growth_rate = 12 #define filepath parms check_point_file = r"./densenet_check_point.h5" loss_trend_graph_path = r"./loss.jpg" acc_trend_graph_path = r"./acc.jpg" resume = False print('Now,we start compiling DenseNet model...') model = createSTNDenseNet(nb_classes=nb_classes,img_dim=img_dim,depth=densenet_depth, growth_rate = densenet_growth_rate) if resume == True: try: model.load_weights(check_point_file) except: pass # optimizer = Adam() optimizer = SGD(lr=0.001) #optimizer = Adadelta() model.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy']) print('Now,we start loading data...') data_x = imdb['data'] data_x = np.expand_dims(data_x, axis=3) data_y = imdb['Labels']-1 x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=42) x_train = x_train.astype('float32') x_test = x_test.astype('float32') y_train = keras.utils.to_categorical(y_train, nb_classes) y_test= keras.utils.to_categorical(y_test, nb_classes) print('Now,we start defining callback functions...') """ lr_reducer = ReduceLROnPlateau(monitor='val_acc', factor=np.sqrt(0.1), cooldown=0, patience=3, min_lr=1e-6) """ model_checkpoint = ModelCheckpoint(check_point_file, monitor="val_acc", save_best_only=True, save_weights_only=True, verbose=1) 
#callbacks=[lr_reducer,model_checkpoint] callbacks=[model_checkpoint] print("Now,we start training...") history = model.fit(x_train, y_train, epochs=nb_epoch, batch_size = batch_size, callbacks=callbacks, validation_data=(x_test,y_test), verbose=1) print("Now,we start drawing the loss and acc trends graph...") #summarize history for accuracy fig = plt.figure(1) plt.plot(history.history["acc"]) plt.plot(history.history["val_acc"]) plt.title("Model accuracy") plt.ylabel("accuracy") plt.xlabel("epoch") plt.legend(["train","test"],loc="upper left") plt.savefig(acc_trend_graph_path) plt.close(1) #summarize history for loss fig = plt.figure(2) plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.title("Model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train","test"],loc="upper left") plt.savefig(loss_trend_graph_path) plt.close(2) print("We are done, everything seems OK...") data_x.shape data_y.shape ```
github_jupyter
# CycleGAN, Image-to-Image Translation In this notebook, we're going to define and train a CycleGAN to read in an image from a set $X$ and transform it so that it looks as if it belongs in set $Y$. Specifically, we'll look at a set of images of [Yosemite national park](https://en.wikipedia.org/wiki/Yosemite_National_Park) taken either during the summer of winter. The seasons are our two domains! >The objective will be to train generators that learn to transform an image from domain $X$ into an image that looks like it came from domain $Y$ (and vice versa). Some examples of image data in both sets are pictured below. <img src='notebook_images/XY_season_images.png' width=50% /> ### Unpaired Training Data These images do not come with labels, but CycleGANs give us a way to learn the mapping between one image domain and another using an **unsupervised** approach. A CycleGAN is designed for image-to-image translation and it learns from unpaired training data. This means that in order to train a generator to translate images from domain $X$ to domain $Y$, we do not have to have exact correspondences between individual images in those domains. For example, in [the paper that introduced CycleGANs](https://arxiv.org/abs/1703.10593), the authors are able to translate between images of horses and zebras, even though there are no images of a zebra in exactly the same position as a horse or with exactly the same background, etc. Thus, CycleGANs enable learning a mapping from one domain $X$ to another domain $Y$ without having to find perfectly-matched, training pairs! <img src='notebook_images/horse2zebra.jpg' width=50% /> ### CycleGAN and Notebook Structure A CycleGAN is made of two types of networks: **discriminators, and generators**. In this example, the discriminators are responsible for classifying images as real or fake (for both $X$ and $Y$ kinds of images). The generators are responsible for generating convincing, fake images for both kinds of images. 
This notebook will detail the steps you should take to define and train such a CycleGAN. >1. You'll load in the image data using PyTorch's DataLoader class to efficiently read in images from a specified directory. 2. Then, you'll be tasked with defining the CycleGAN architecture according to provided specifications. You'll define the discriminator and the generator models. 3. You'll complete the training cycle by calculating the adversarial and cycle consistency losses for the generator and discriminator network and completing a number of training epochs. *It's suggested that you enable GPU usage for training.* 4. Finally, you'll evaluate your model by looking at the loss over time and looking at sample, generated images. --- ## Load and Visualize the Data We'll first load in and visualize the training data, importing the necessary libraries to do so. > If you are working locally, you'll need to download the data as a zip file by [clicking here](https://s3.amazonaws.com/video.udacity-data.com/topher/2018/November/5be66e78_summer2winter-yosemite/summer2winter-yosemite.zip). It may be named `summer2winter-yosemite/` with a dash or an underscore, so take note, extract the data to your home directory and make sure the below `image_dir` matches. Then you can proceed with the following loading code. ``` # loading in and transforming data import os import torch from torch.utils.data import DataLoader import torchvision import torchvision.datasets as datasets import torchvision.transforms as transforms # visualizing data import matplotlib.pyplot as plt import numpy as np import warnings %matplotlib inline ``` ### DataLoaders The `get_data_loader` function returns training and test DataLoaders that can load data efficiently and in specified batches. 
The function has the following parameters: * `image_type`: `summer` or `winter`, the names of the directories where the X and Y images are stored * `image_dir`: name of the main image directory, which holds all training and test images * `image_size`: resized, square image dimension (all images will be resized to this dim) * `batch_size`: number of images in one batch of data The test data is strictly for feeding to our generators, later on, so we can visualize some generated samples on fixed, test data. You can see that this function is also responsible for making sure our images are of the right, square size (128x128x3) and converted into Tensor image types. **It's suggested that you use the default values of these parameters.** Note: If you are trying this code on a different set of data, you may get better results with larger `image_size` and `batch_size` parameters. If you change the `batch_size`, make sure that you create complete batches in the training loop otherwise you may get an error when trying to save sample data. ``` def get_data_loader(image_type, image_dir='summer2winter-yosemite', image_size=128, batch_size=16, num_workers=0): """Returns training and test data loaders for a given image type, either 'summer' or 'winter'. These images will be resized to 128x128x3, by default, converted into Tensors, and normalized. 
""" # resize and normalize the images transform = transforms.Compose([transforms.Resize(image_size), # resize to 128x128 transforms.ToTensor()]) # get training and test directories image_path = './' + image_dir train_path = os.path.join(image_path, image_type) test_path = os.path.join(image_path, 'test_{}'.format(image_type)) # define datasets using ImageFolder train_dataset = datasets.ImageFolder(train_path, transform) test_dataset = datasets.ImageFolder(test_path, transform) # create and return DataLoaders train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers) test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) return train_loader, test_loader # Create train and test dataloaders for images from the two domains X and Y # image_type = directory names for our data dataloader_X, test_dataloader_X = get_data_loader(image_type='summer') dataloader_Y, test_dataloader_Y = get_data_loader(image_type='winter') ``` ## Display some Training Images Below we provide a function `imshow` that reshape some given images and converts them to NumPy images so that they can be displayed by `plt`. This cell should display a grid that contains a batch of image data from set $X$. ``` # helper imshow function def imshow(img): npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) # get some images from X dataiter = iter(dataloader_X) # the "_" is a placeholder for no labels images, _ = dataiter.next() # show images fig = plt.figure(figsize=(12, 8)) imshow(torchvision.utils.make_grid(images)) ``` Next, let's visualize a batch of images from set $Y$. 
``` # get some images from Y dataiter = iter(dataloader_Y) images, _ = dataiter.next() # show images fig = plt.figure(figsize=(12,8)) imshow(torchvision.utils.make_grid(images)) ``` ### Pre-processing: scaling from -1 to 1 We need to do a bit of pre-processing; we know that the output of our `tanh` activated generator will contain pixel values in a range from -1 to 1, and so, we need to rescale our training images to a range of -1 to 1. (Right now, they are in a range from 0-1.) ``` # current range img = images[0] print('Min: ', img.min()) print('Max: ', img.max()) # helper scale function def scale(x, feature_range=(-1, 1)): ''' Scale takes in an image x and returns that image, scaled with a feature_range of pixel values from -1 to 1. This function assumes that the input x is already scaled from 0-255.''' # scale from 0-1 to feature_range min, max = feature_range x = x * (max - min) + min return x # scaled range scaled_img = scale(img) print('Scaled min: ', scaled_img.min()) print('Scaled max: ', scaled_img.max()) ``` --- ## Define the Model A CycleGAN is made of two discriminator and two generator networks. ## Discriminators The discriminators, $D_X$ and $D_Y$, in this CycleGAN are convolutional neural networks that see an image and attempt to classify it as real or fake. In this case, real is indicated by an output close to 1 and fake as close to 0. The discriminators have the following architecture: <img src='notebook_images/discriminator_layers.png' width=80% /> This network sees a 128x128x3 image, and passes it through 5 convolutional layers that downsample the image by a factor of 2. The first four convolutional layers have a BatchNorm and ReLu activation function applied to their output, and the last acts as a classification layer that outputs one value. ### Convolutional Helper Function To define the discriminators, you're expected to use the provided `conv` function, which creates a convolutional layer + an optional batch norm layer. 
``` import torch.nn as nn import torch.nn.functional as F # helper conv function def conv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True): """Creates a convolutional layer, with optional batch normalization. """ layers = [] conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) layers.append(conv_layer) if batch_norm: layers.append(nn.BatchNorm2d(out_channels)) return nn.Sequential(*layers) ``` ### Define the Discriminator Architecture Your task is to fill in the `__init__` function with the specified 5 layer conv net architecture. Both $D_X$ and $D_Y$ have the same architecture, so we only need to define one class, and later instantiate two discriminators. > It's recommended that you use a **kernel size of 4x4** and use that to determine the correct stride and padding size for each layer. [This Stanford resource](http://cs231n.github.io/convolutional-networks/#conv) may also help in determining stride and padding sizes. * Define your convolutional layers in `__init__` * Then fill in the forward behavior of the network The `forward` function defines how an input image moves through the discriminator, and the most important thing is to pass it through your convolutional layers in order, with a **ReLu** activation function applied to all but the last layer. You should **not** apply a sigmoid activation function to the output, here, and that is because we are planning on using a squared error loss for training. And you can read more about this loss function, later in the notebook. 
``` class Discriminator(nn.Module): def __init__(self, conv_dim=64): super(Discriminator, self).__init__() # Define all convolutional layers # Should accept an RGB image as input and output a single value def forward(self, x): # define feedforward behavior return x ``` ## Generators The generators, `G_XtoY` and `G_YtoX` (sometimes called F), are made of an **encoder**, a conv net that is responsible for turning an image into a smaller feature representation, and a **decoder**, a *transpose_conv* net that is responsible for turning that representation into an transformed image. These generators, one from XtoY and one from YtoX, have the following architecture: <img src='notebook_images/cyclegan_generator_ex.png' width=90% /> This network sees a 128x128x3 image, compresses it into a feature representation as it goes through three convolutional layers and reaches a series of residual blocks. It goes through a few (typically 6 or more) of these residual blocks, then it goes through three transpose convolutional layers (sometimes called *de-conv* layers) which upsample the output of the resnet blocks and create a new image! Note that most of the convolutional and transpose-convolutional layers have BatchNorm and ReLu functions applied to their outputs with the exception of the final transpose convolutional layer, which has a `tanh` activation function applied to the output. Also, the residual blocks are made of convolutional and batch normalization layers, which we'll go over in more detail, next. --- ### Residual Block Class To define the generators, you're expected to define a `ResidualBlock` class which will help you connect the encoder and decoder portions of the generators. You might be wondering, what exactly is a Resnet block? It may sound familiar from something like ResNet50 for image classification, pictured below. <img src='notebook_images/resnet_50.png' width=90%/> ResNet blocks rely on connecting the output of one layer with the input of an earlier layer. 
The motivation for this structure is as follows: very deep neural networks can be difficult to train. Deeper networks are more likely to have vanishing or exploding gradients and, therefore, have trouble reaching convergence; batch normalization helps with this a bit. However, during training, we often see that deep networks respond with a kind of training degradation. Essentially, the training accuracy stops improving and gets saturated at some point during training. In the worst cases, deep models would see their training accuracy actually worsen over time! One solution to this problem is to use **Resnet blocks** that allow us to learn so-called *residual functions* as they are applied to layer inputs. You can read more about this proposed architecture in the paper, [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385.pdf) by Kaiming He et. al, and the below image is from that paper. <img src='notebook_images/resnet_block.png' width=40%/> ### Residual Functions Usually, when we create a deep learning model, the model (several layers with activations applied) is responsible for learning a mapping, `M`, from an input `x` to an output `y`. >`M(x) = y` (Equation 1) Instead of learning a direct mapping from `x` to `y`, we can instead define a **residual function** > `F(x) = M(x) - x` This looks at the difference between a mapping applied to x and the original input, x. `F(x)` is, typically, two convolutional layers + normalization layer and a ReLu in between. These convolutional layers should have the same number of inputs as outputs. This mapping can then be written as the following; a function of the residual function and the input x. The addition step creates a kind of loop that connects the input x to the output, y: >`M(x) = F(x) + x` (Equation 2) or >`y = F(x) + x` (Equation 3) #### Optimizing a Residual Function The idea is that it is easier to optimize this residual function `F(x)` than it is to optimize the original mapping `M(x)`. 
Consider an example; what if we want `y = x`? From our first, direct mapping equation, **Equation 1**, we could set `M(x) = x` but it is easier to solve the residual equation `F(x) = 0`, which, when plugged in to **Equation 3**, yields `y = x`. ### Defining the `ResidualBlock` Class To define the `ResidualBlock` class, we'll define residual functions (a series of layers), apply them to an input x and add them to that same input. This is defined just like any other neural network, with an `__init__` function and the addition step in the `forward` function. In our case, you'll want to define the residual block as: * Two convolutional layers with the same size input and output * Batch normalization applied to the outputs of the convolutional layers * A ReLu function on the output of the *first* convolutional layer Then, in the `forward` function, add the input x to this residual block. Feel free to use the helper `conv` function from above to create this block. ``` # residual block class class ResidualBlock(nn.Module): """Defines a residual block. This adds an input x to a convolutional layer (applied to x) with the same size input and output. These blocks allow a model to learn an effective transformation from one domain to another. """ def __init__(self, conv_dim): super(ResidualBlock, self).__init__() # conv_dim = number of inputs # define two convolutional layers + batch normalization that will act as our residual function, F(x) # layers should have the same shape input as output; I suggest a kernel_size of 3 def forward(self, x): # apply a ReLu activation the outputs of the first layer # return a summed output, x + resnet_block(x) return x ``` ### Transpose Convolutional Helper Function To define the generators, you're expected to use the above `conv` function, `ResidualBlock` class, and the below `deconv` helper function, which creates a transpose convolutional layer + an optional batchnorm layer. 
``` # helper deconv function def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True): """Creates a transpose convolutional layer, with optional batch normalization. """ layers = [] # append transpose conv layer layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False)) # optional batch norm layer if batch_norm: layers.append(nn.BatchNorm2d(out_channels)) return nn.Sequential(*layers) ``` --- ## Define the Generator Architecture * Complete the `__init__` function with the specified 3 layer **encoder** convolutional net, a series of residual blocks (the number of which is given by `n_res_blocks`), and then a 3 layer **decoder** transpose convolutional net. * Then complete the `forward` function to define the forward behavior of the generators. Recall that the last layer has a `tanh` activation function. Both $G_{XtoY}$ and $G_{YtoX}$ have the same architecture, so we only need to define one class, and later instantiate two generators. ``` class CycleGenerator(nn.Module): def __init__(self, conv_dim=64, n_res_blocks=6): super(CycleGenerator, self).__init__() # 1. Define the encoder part of the generator # 2. Define the resnet part of the generator # 3. Define the decoder part of the generator def forward(self, x): """Given an image x, returns a transformed image.""" # define feedforward behavior, applying activations as necessary return x ``` --- ## Create the complete network Using the classes you defined earlier, you can define the discriminators and generators necessary to create a complete CycleGAN. The given parameters should work for training. First, create two discriminators, one for checking if $X$ sample images are real, and one for checking if $Y$ sample images are real. Then the generators. Instantiate two of them, one for transforming a painting into a realistic photo and one for transforming a photo into into a painting. 
``` def create_model(g_conv_dim=64, d_conv_dim=64, n_res_blocks=6): """Builds the generators and discriminators.""" # Instantiate generators G_XtoY = G_YtoX = # Instantiate discriminators D_X = D_Y = # move models to GPU, if available if torch.cuda.is_available(): device = torch.device("cuda:0") G_XtoY.to(device) G_YtoX.to(device) D_X.to(device) D_Y.to(device) print('Models moved to GPU.') else: print('Only CPU available.') return G_XtoY, G_YtoX, D_X, D_Y # call the function to get models G_XtoY, G_YtoX, D_X, D_Y = create_model() ``` ## Check that you've implemented this correctly The function `create_model` should return the two generator and two discriminator networks. After you've defined these discriminator and generator components, it's good practice to check your work. The easiest way to do this is to print out your model architecture and read through it to make sure the parameters are what you expected. The next cell will print out their architectures. ``` # helper function for printing the model architecture def print_models(G_XtoY, G_YtoX, D_X, D_Y): """Prints model information for the generators and discriminators. """ print(" G_XtoY ") print("-----------------------------------------------") print(G_XtoY) print() print(" G_YtoX ") print("-----------------------------------------------") print(G_YtoX) print() print(" D_X ") print("-----------------------------------------------") print(D_X) print() print(" D_Y ") print("-----------------------------------------------") print(D_Y) print() # print all of the models print_models(G_XtoY, G_YtoX, D_X, D_Y) ``` ## Discriminator and Generator Losses Computing the discriminator and the generator losses are key to getting a CycleGAN to train. <img src='notebook_images/CycleGAN_loss.png' width=90% height=90% /> **Image from [original paper](https://arxiv.org/abs/1703.10593) by Jun-Yan Zhu et. 
al.** * The CycleGAN contains two mapping functions $G: X \rightarrow Y$ and $F: Y \rightarrow X$, and associated adversarial discriminators $D_Y$ and $D_X$. **(a)** $D_Y$ encourages $G$ to translate $X$ into outputs indistinguishable from domain $Y$, and vice versa for $D_X$ and $F$. * To further regularize the mappings, we introduce two cycle consistency losses that capture the intuition that if we translate from one domain to the other and back again we should arrive at where we started. **(b)** Forward cycle-consistency loss and **(c)** backward cycle-consistency loss. ## Least Squares GANs We've seen that regular GANs treat the discriminator as a classifier with the sigmoid cross entropy loss function. However, this loss function may lead to the vanishing gradients problem during the learning process. To overcome such a problem, we'll use a least squares loss function for the discriminator. This structure is also referred to as a least squares GAN or LSGAN, and you can [read the original paper on LSGANs, here](https://arxiv.org/pdf/1611.04076.pdf). The authors show that LSGANs are able to generate higher quality images than regular GANs and that this loss type is a bit more stable during training! ### Discriminator Losses The discriminator losses will be mean squared errors between the output of the discriminator, given an image, and the target value, 0 or 1, depending on whether it should classify that image as fake or real. For example, for a *real* image, `x`, we can train $D_X$ by looking at how close it is to recognizing and image `x` as real using the mean squared error: ``` out_x = D_X(x) real_err = torch.mean((out_x-1)**2) ``` ### Generator Losses Calculating the generator losses will look somewhat similar to calculating the discriminator loss; there will still be steps in which you generate fake images that look like they belong to the set of $X$ images but are based on real images in set $Y$, and vice versa. 
You'll compute the "real loss" on those generated images by looking at the output of the discriminator as it's applied to these _fake_ images; this time, your generator aims to make the discriminator classify these fake images as *real* images. #### Cycle Consistency Loss In addition to the adversarial losses, the generator loss terms will also include the **cycle consistency loss**. This loss is a measure of how good a reconstructed image is, when compared to an original image. Say you have a fake, generated image, `x_hat`, and a real image, `y`. You can get a reconstructed `y_hat` by applying `G_XtoY(x_hat) = y_hat` and then check to see if this reconstruction `y_hat` and the orginal image `y` match. For this, we recommed calculating the L1 loss, which is an absolute difference, between reconstructed and real images. You may also choose to multiply this loss by some weight value `lambda_weight` to convey its importance. <img src='notebook_images/reconstruction_error.png' width=40% height=40% /> The total generator loss will be the sum of the generator losses and the forward and backward cycle consistency losses. --- ### Define Loss Functions To help us calculate the discriminator and gnerator losses during training, let's define some helpful loss functions. Here, we'll define three. 1. `real_mse_loss` that looks at the output of a discriminator and returns the error based on how close that output is to being classified as real. This should be a mean squared error. 2. `fake_mse_loss` that looks at the output of a discriminator and returns the error based on how close that output is to being classified as fake. This should be a mean squared error. 3. `cycle_consistency_loss` that looks at a set of real image and a set of reconstructed/generated images, and returns the mean absolute error between them. This has a `lambda_weight` parameter that will weight the mean absolute error in a batch. 
It's recommended that you take a [look at the original, CycleGAN paper](https://arxiv.org/pdf/1703.10593.pdf) to get a starting value for `lambda_weight`. ``` def real_mse_loss(D_out): # how close is the produced output from being "real"? def fake_mse_loss(D_out): # how close is the produced output from being "false"? def cycle_consistency_loss(real_im, reconstructed_im, lambda_weight): # calculate reconstruction loss # return weighted loss ``` ### Define the Optimizers Next, let's define how this model will update its weights. This, like the GANs you may have seen before, uses [Adam](https://pytorch.org/docs/stable/optim.html#algorithms) optimizers for the discriminator and generator. It's again recommended that you take a [look at the original, CycleGAN paper](https://arxiv.org/pdf/1703.10593.pdf) to get starting hyperparameter values. ``` import torch.optim as optim # hyperparams for Adam optimizers lr= beta1= beta2= g_params = list(G_XtoY.parameters()) + list(G_YtoX.parameters()) # Get generator parameters # Create optimizers for the generators and discriminators g_optimizer = optim.Adam(g_params, lr, [beta1, beta2]) d_x_optimizer = optim.Adam(D_X.parameters(), lr, [beta1, beta2]) d_y_optimizer = optim.Adam(D_Y.parameters(), lr, [beta1, beta2]) ``` --- ## Training a CycleGAN When a CycleGAN trains, and sees one batch of real images from set $X$ and $Y$, it trains by performing the following steps: **Training the Discriminators** 1. Compute the discriminator $D_X$ loss on real images 2. Generate fake images that look like domain $X$ based on real images in domain $Y$ 3. Compute the fake loss for $D_X$ 4. Compute the total loss and perform backpropagation and $D_X$ optimization 5. Repeat steps 1-4 only with $D_Y$ and your domains switched! **Training the Generators** 1. Generate fake images that look like domain $X$ based on real images in domain $Y$ 2. Compute the generator loss based on how $D_X$ responds to fake $X$ 3. 
Generate *reconstructed* $\hat{Y}$ images based on the fake $X$ images generated in step 1 4. Compute the cycle consistency loss by comparing the reconstructions with real $Y$ images 5. Repeat steps 1-4 only swapping domains 6. Add up all the generator and reconstruction losses and perform backpropagation + optimization <img src='notebook_images/cycle_consistency_ex.png' width=70% /> ### Saving Your Progress A CycleGAN repeats its training process, alternating between training the discriminators and the generators, for a specified number of training iterations. You've been given code that will save some example generated images that the CycleGAN has learned to generate after a certain number of training iterations. Along with looking at the losses, these example generations should give you an idea of how well your network has trained. Below, you may choose to keep all default parameters; your only task is to calculate the appropriate losses and complete the training cycle. ``` # import save code from helpers import save_samples, checkpoint # train the network def training_loop(dataloader_X, dataloader_Y, test_dataloader_X, test_dataloader_Y, n_epochs=1000): print_every=10 # keep track of losses over time losses = [] test_iter_X = iter(test_dataloader_X) test_iter_Y = iter(test_dataloader_Y) # Get some fixed data from domains X and Y for sampling. These are images that are held # constant throughout training, that allow us to inspect the model's performance. 
fixed_X = test_iter_X.next()[0] fixed_Y = test_iter_Y.next()[0] fixed_X = scale(fixed_X) # make sure to scale to a range -1 to 1 fixed_Y = scale(fixed_Y) # batches per epoch iter_X = iter(dataloader_X) iter_Y = iter(dataloader_Y) batches_per_epoch = min(len(iter_X), len(iter_Y)) for epoch in range(1, n_epochs+1): # Reset iterators for each epoch if epoch % batches_per_epoch == 0: iter_X = iter(dataloader_X) iter_Y = iter(dataloader_Y) images_X, _ = iter_X.next() images_X = scale(images_X) # make sure to scale to a range -1 to 1 images_Y, _ = iter_Y.next() images_Y = scale(images_Y) # move images to GPU if available (otherwise stay on CPU) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") images_X = images_X.to(device) images_Y = images_Y.to(device) # ============================================ # TRAIN THE DISCRIMINATORS # ============================================ ## First: D_X, real and fake loss components ## # 1. Compute the discriminator losses on real images # 2. Generate fake images that look like domain X based on real images in domain Y # 3. Compute the fake loss for D_X # 4. Compute the total loss and perform backprop d_x_loss = ## Second: D_Y, real and fake loss components ## d_y_loss = # ========================================= # TRAIN THE GENERATORS # ========================================= ## First: generate fake X images and reconstructed Y images ## # 1. Generate fake images that look like domain X based on real images in domain Y # 2. Compute the generator loss based on domain X # 3. Create a reconstructed y # 4. Compute the cycle consistency loss (the reconstruction loss) ## Second: generate fake Y images and reconstructed X images ## # 5. 
Add up all generator and reconstructed losses and perform backprop g_total_loss = # Print the log info if epoch % print_every == 0: # append real and fake discriminator losses and the generator loss losses.append((d_x_loss.item(), d_y_loss.item(), g_total_loss.item())) print('Epoch [{:5d}/{:5d}] | d_X_loss: {:6.4f} | d_Y_loss: {:6.4f} | g_total_loss: {:6.4f}'.format( epoch, n_epochs, d_x_loss.item(), d_y_loss.item(), g_total_loss.item())) sample_every=100 # Save the generated samples if epoch % sample_every == 0: G_YtoX.eval() # set generators to eval mode for sample generation G_XtoY.eval() save_samples(epoch, fixed_Y, fixed_X, G_YtoX, G_XtoY, batch_size=16) G_YtoX.train() G_XtoY.train() # uncomment these lines, if you want to save your model # checkpoint_every=1000 # # Save the model parameters # if epoch % checkpoint_every == 0: # checkpoint(epoch, G_XtoY, G_YtoX, D_X, D_Y) return losses n_epochs = 1000 # keep this small when testing if a model first works, then increase it to >=1000 losses = training_loop(dataloader_X, dataloader_Y, test_dataloader_X, test_dataloader_Y, n_epochs=n_epochs) ``` ## Tips on Training and Loss Patterns A lot of experimentation goes into finding the best hyperparameters such that the generators and discriminators don't overpower each other. It's often a good starting point to look at existing papers to find what has worked in previous experiments, I'd recommend this [DCGAN paper](https://arxiv.org/pdf/1511.06434.pdf) in addition to the original [CycleGAN paper](https://arxiv.org/pdf/1703.10593.pdf) to see what worked for them. Then, you can try your own experiments based off of a good foundation. #### Discriminator Losses When you display the generator and discriminator losses you should see that there is always some discriminator loss; recall that we are trying to design a model that can generate good "fake" images. 
So, the ideal discriminator will not be able to tell the difference between real and fake images and, as such, will always have some loss. You should also see that $D_X$ and $D_Y$ are roughly at the same loss levels; if they are not, this indicates that your training is favoring one type of discriminator over the other, and you may need to look at biases in your models or data. #### Generator Loss The generator's loss should start significantly higher than the discriminator losses because it is accounting for the loss of both generators *and* weighted reconstruction errors. You should see this loss decrease a lot at the start of training because initial, generated images are often far-off from being good fakes. After some time it may level off; this is normal since the generator and discriminator are both improving as they train. If you see that the loss is jumping around a lot, over time, you may want to try decreasing your learning rates or changing your cycle consistency loss to be a little more/less weighted. ``` fig, ax = plt.subplots(figsize=(12,8)) losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator, X', alpha=0.5) plt.plot(losses.T[1], label='Discriminator, Y', alpha=0.5) plt.plot(losses.T[2], label='Generators', alpha=0.5) plt.title("Training Losses") plt.legend() ``` --- ## Evaluate the Result! As you trained this model, you may have chosen to sample and save the results of your generated images after a certain number of training iterations. This gives you a way to see whether or not your Generators are creating *good* fake images. For example, the image below depicts real images in the $Y$ set, and the corresponding generated images during different points in the training process. You can see that the generator starts out creating very noisy, fake images, but begins to converge to better representations as it trains (though, not perfect). 
<img src='notebook_images/sample-004000-summer2winter.png' width=50% /> Below, you've been given a helper function for displaying generated samples based on the passed in training iteration. ``` import matplotlib.image as mpimg # helper visualization code def view_samples(iteration, sample_dir='samples_cyclegan'): # samples are named by iteration path_XtoY = os.path.join(sample_dir, 'sample-{:06d}-X-Y.png'.format(iteration)) path_YtoX = os.path.join(sample_dir, 'sample-{:06d}-Y-X.png'.format(iteration)) # read in those samples try: x2y = mpimg.imread(path_XtoY) y2x = mpimg.imread(path_YtoX) except: print('Invalid number of iterations.') fig, (ax1, ax2) = plt.subplots(figsize=(18,20), nrows=2, ncols=1, sharey=True, sharex=True) ax1.imshow(x2y) ax1.set_title('X to Y') ax2.imshow(y2x) ax2.set_title('Y to X') # view samples at iteration 100 view_samples(100, 'samples_cyclegan') # view samples at iteration 1000 view_samples(1000, 'samples_cyclegan') ``` --- ## Further Challenges and Directions * One shortcoming of this model is that it produces fairly low-resolution images; this is an ongoing area of research; you can read about a higher-resolution formulation that uses a multi-scale generator model, in [this paper](https://arxiv.org/abs/1711.11585). * Relatedly, we may want to process these as larger (say 256x256) images at first, to take advantage of high-res data. * It may help your model to converge faster, if you initialize the weights in your network. * This model struggles with matching colors exactly. This is because, if $G_{YtoX}$ and $G_{XtoY}$ may change the tint of an image; the cycle consistency loss may not be affected and can still be small. You could choose to introduce a new, color-based loss term that compares $G_{YtoX}(y)$ and $y$, and $G_{XtoY}(x)$ and $x$, but then this becomes a supervised learning approach. 
* This unsupervised approach also struggles with geometric changes, like changing the apparent size of individual objects in an image, so it is best suited for stylistic transformations. * For creating different kinds of models or trying out the Pix2Pix Architecture, [this Github repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/) which implements CycleGAN *and* Pix2Pix in PyTorch is a great resource. **Once you are satisfied with your model, you are encouraged to test it on a different dataset to see if it can find different types of mappings!** --- ### Different datasets for download You can download a variety of datasets used in the Pix2Pix and CycleGAN papers, by following instructions in the [associated Github repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/README.md). You'll just need to make sure that the data directories are named and organized correctly to load in that data.
github_jupyter
# TensorFlow In this notebook, we'll learn the basics of [TensorFlow + Keras](https://tensorflow.org), which is a machine learning library used to build dynamic neural networks. We'll learn about the basics, like creating and using Tensors. # Set seeds ``` %tensorflow_version 2.x import numpy as np import tensorflow as tf SEED = 1234 # Set seed for reproducibility np.random.seed(seed=SEED) tf.random.set_seed(SEED) ``` # Basics ``` # Constants x = tf.constant(1) print (x) # Creating a random tensor x = tf.random.uniform((2,3)) print(f"Type: {x.dtype}") print(f"Size: {x.shape}") print(f"Values: \n{x}") # Zero and Ones tensor x = tf.zeros((2, 3)) print (x) x = tf.ones((2, 3)) print (x) # List → Tensor x = tf.convert_to_tensor([[1, 2, 3],[4, 5, 6]], dtype='int32') print(f"Size: {x.shape}") print(f"Values: \n{x}") # NumPy array → Tensor x = tf.convert_to_tensor(np.random.rand(2, 3), dtype='float32') print(f"Size: {x.shape}") print(f"Values: \n{x}") # Changing tensor type x = tf.random.uniform((2,3)) print(f"Type: {x.dtype}") x = tf.random.uniform((2,3), dtype='float64') print(f"Type: {x.dtype}") ``` # Operations ``` # Addition x = tf.random.uniform((2,3)) y = tf.random.uniform((2,3)) z = x + y print(f"Size: {z.shape}") print(f"Values: \n{z}") # Dot product x = tf.random.uniform((2,3)) y = tf.random.uniform((3,2)) z = tf.matmul(x, y) print(f"Size: {z.shape}") print(f"Values: \n{z}") # Transpose x = tf.random.uniform((2,3)) print(f"Size: {x.shape}") print(f"Values: \n{x}") y = tf.transpose(x) print(f"Size: {y.shape}") print(f"Values: \n{y}") # Reshape x = tf.random.uniform((2,3)) z = tf.reshape(x, (1, 6)) print(f"Size: {z.shape}") print(f"Values: \n{z}") # Dimensional operations x = tf.random.uniform((2,3)) print(f"Values: \n{x}") y = tf.math.reduce_sum(x, axis=0) # add each row's value for every column print(f"Values: \n{y}") z = tf.math.reduce_sum(x, axis=1) # add each columns's value for every row print(f"Values: \n{z}") ``` # Indexing, Splicing and Joining ``` x = 
tf.random.uniform((2,3)) print (f"x: \n{x}") print (f"x[:1]: \n{x[:1]}") print (f"x[:1, 1:3]: \n{x[:1, 1:3]}") # Select with dimensional indicies x = tf.random.uniform((2,3)) print(f"Values: \n{x}") col_indices = tf.convert_to_tensor([0, 2]) chosen = tf.gather(x, indices=col_indices, axis=1) # values from column 0 & 2 print(f"Values: \n{chosen}") # Concatenation x = tf.random.uniform((2,3)) print(f"Values: \n{x}") y = tf.concat([x, x], axis=0) # stack by rows (axis=1 to stack by columns) print(f"Values: \n{y}") ``` # Gradients ``` # Tensors with gradient bookkeeping x = tf.constant(3.0) with tf.GradientTape() as g: g.watch(x) y = 3 * x + 2 g.gradient(y, x).numpy() ``` * $ y = 3x + 2 $ * $ \frac{\partial(y)}{\partial(x)} = 3 $ # GPUs ``` # Is CUDA available? print (tf.config.list_physical_devices('GPU')) ``` If False (CUDA is not available), let's change that by following these steps: Go to *Runtime* > *Change runtime type* > Change *Hardware accelerator* to *GPU* > Click *Save* ``` %tensorflow_version 2.x import tensorflow as tf # Is CUDA available now? print (tf.config.list_physical_devices('GPU')) ```
github_jupyter
``` %load_ext watermark import numpy as np import pandas as pd ``` ## Load the JupyterHub logs ``` columns = ['user', 'machine', 'session_start', 'session_end', 'session_length', 'log_file'] df_all = pd.read_csv("../data/jhub_logs.csv.gz", parse_dates=["session_start", "session_end"]) df_all["session_length"] = (df_all.session_end - df_all.session_start).dt.total_seconds().div(60) df_all = df_all[columns] df_all.head() df_all["machine"].unique() dfs = {'all': df_all, 'Cheyenne': df_all[df_all["machine"] == 'cheyenne'], 'Casper': df_all[df_all["machine"] == 'casper']} ``` ## Basic Statistics Some basic statistics from the logs. ``` print(f'Total number of sessions:') for name in dfs: print(f'{name.rjust(10)}: {len(dfs[name])}') def print_span(name): print(f'Total span of {name} logs: {dfs[name].session_end.max() - dfs[name].session_start.min()}') print(f' From: {dfs[name].session_start.min()}') print(f' To: {dfs[name].session_end.max()}') for name in dfs: print_span(name) print() def print_lengths(name): print(f'Session Lengths on {name} [minutes]:') print(f' Minimum: {dfs[name].session_length.min()}') print(f' Maximum: {dfs[name].session_length.max()}') print(f' Mean: {dfs[name].session_length.mean()}') print(f' Median: {dfs[name].session_length.median()}') for name in dfs: print_lengths(name) print() print(f'Total Number of Unique Users:') for name in dfs: print(f'{name.rjust(10)}: {dfs[name].user.nunique()}') ``` ## Tutorials Three tutorials have been given during the duration of the logs. We note the dates for these tutorials so that we can reference their times in the plots below. ``` tutorials = {'jun': ["2019-06-03", "2019-06-05"], 'sep': ["2019-09-18", "2019-09-20"], 'oct': ["2019-10-16", "2019-10-18"]} ``` ## Sessions by Day, Week & Month First, we take a look at the number of sessions per day, week, and month over the span of the logs. 
``` days = pd.date_range(dfs['all'].session_start.min(), dfs['all'].session_end.max() + pd.DateOffset(days=1), freq='D').normalize() wks = pd.date_range(dfs['all'].session_start.min() - pd.DateOffset(months=1), dfs['all'].session_end.max() + pd.DateOffset(months=1), freq='W').normalize() mons = pd.date_range(dfs['all'].session_start.min() - pd.DateOffset(months=1), dfs['all'].session_end.max() + pd.DateOffset(months=1), freq='MS').normalize() sess_per_day = pd.DataFrame() sess_per_wk = pd.DataFrame() sess_per_mon = pd.DataFrame() for name in dfs: sess_per_day[name] = 0.5 * (dfs[name].groupby(pd.cut(dfs[name].session_start, days)).size() + dfs[name].groupby(pd.cut(dfs[name].session_end, days)).size()).rename(f'Sessions per Day ({name})') sess_per_wk[name] = 0.5 * (dfs[name].groupby(pd.cut(dfs[name].session_start, wks)).size() + dfs[name].groupby(pd.cut(dfs[name].session_end, wks)).size()).rename(f'Sessions per Week ({name})') sess_per_mon[name] = 0.5 * (dfs[name].groupby(pd.cut(dfs[name].session_start, mons)).size() + dfs[name].groupby(pd.cut(dfs[name].session_end, mons)).size()).rename(f'Sessions per Month ({name})') ax = sess_per_day.plot.area(figsize=(14,5), stacked=False) xticks = np.linspace(0, len(days)-1, 10, dtype=int) ax.set_xticks(xticks) ax.set_xticklabels([days[i].strftime('%Y-%m-%d') for i in xticks]) ax.set_title('Number of JupyterHub Sessions per Day', fontsize=16) for tutorial in tutorials: t1, t2 = tutorials[tutorial] ax.axvspan(days.get_loc(t1)-1, days.get_loc(t2)+1, facecolor='r', alpha=.3, edgecolor='none') ax = sess_per_wk.plot.area(figsize=(14,5)) xticks = np.linspace(0, len(wks)-1, 10, dtype=int) ax.set_xticks(xticks) ax.set_xticklabels([wks[i].strftime('%Y W%U') for i in xticks]) ax.set_title('Number of JupyterHub Sessions per Week', fontsize=16); ax = sess_per_mon.plot.bar(figsize=(14,5)) xticks = np.linspace(0, len(mons)-1, 10, dtype=int) ax.set_xticks(xticks) ax.set_xticklabels([mons[i].strftime('%Y-%m') for i in xticks]) 
ax.set_title('Number of JupyterHub Sessions per Month', fontsize=16); ``` **NOTE:** You can see a definite up-tick in the number of sessions following the September and October tutorials. During this period of time, from August through October, the average number of sessions per day and month more than doubles. ## Unique Users by Day, Week & Month Next, we look at the number of unique users per day, week and month. ``` users_per_day = pd.DataFrame() users_per_wk = pd.DataFrame() users_per_mon = pd.DataFrame() for name in dfs: users_per_day[name] = 0.5 * (dfs[name].user.groupby(pd.cut(dfs[name].session_start, days)).nunique() + dfs[name].user.groupby(pd.cut(dfs[name].session_end, days)).nunique()).rename(f'Unique Users per Day ({name})') users_per_wk[name] = 0.5 * (dfs[name].user.groupby(pd.cut(dfs[name].session_start, wks)).nunique() + dfs[name].user.groupby(pd.cut(dfs[name].session_end, wks)).nunique()).rename(f'Unique Users per Week ({name})') users_per_mon[name] = 0.5 * (dfs[name].user.groupby(pd.cut(dfs[name].session_start, mons)).nunique() + dfs[name].user.groupby(pd.cut(dfs[name].session_end, mons)).nunique()).rename(f'Unique Users per Month ({name})') ax = users_per_day.plot.area(figsize=(14,5), stacked=False) xticks = np.linspace(0, len(days)-1, 10, dtype=int) ax.set_xticks(xticks) ax.set_xticklabels([days[i].strftime('%Y-%m-%d') for i in xticks]) ax.set_title('Number of Unqiue JupyterHub Users per Day', fontsize=16) for tutorial in tutorials: t1, t2 = tutorials[tutorial] ax.axvspan(days.get_loc(t1)-1, days.get_loc(t2)+1, facecolor='r', alpha=.3, edgecolor='none') ax = users_per_wk.plot.area(figsize=(14,5)) xticks = np.linspace(0, len(wks)-1, 10, dtype=int) ax.set_xticks(xticks) ax.set_xticklabels([wks[i].strftime('%Y W%U') for i in xticks]) ax.set_title('Number of Unique JupyterHub Users per Week', fontsize=16); ax = users_per_mon.plot.bar(figsize=(14,5)) xticks = np.linspace(0, len(mons)-1, 10, dtype=int) ax.set_xticks(xticks) 
ax.set_xticklabels([mons[i].strftime('%Y-%m') for i in xticks]) ax.set_title('Number of Unique JupyterHub Users per Month', fontsize=16); ``` **NOTE:** Again, you can note a growth in users following each tutorial/hackathon (September and October). ``` %watermark -iv -d -u ```
github_jupyter
``` %serialconnect # below is the esp8266 version # RST | GPIO1 TX # A0 | GPIO3 RX # D0 GPIO16 | GPIO5 D1 SCL # SCK D5 GPIO14 | GPIO4 D2 SDA # MISO D6 GPIO12 | GPIO0 D3 # MOSI D7 GPIO13 | GPIO2 D4 LED # SS D8 GPIO15 | GND # 3V3 | 5V # You need the esp8266 code built with mqtt_as libraries # See https://github.com/goatchurchprime/jupyter_micropython_developer_notebooks/blob/master/mqtt_async_projects/esp8266_commissioning_mqtt_as.ipynb #HW-166 # 5V VM | PWMA D8 # 3V VCC | AIN2 D0 # GND | AIN1 D3 # MotL A01 | STBY 3V # MotL A02 | BIN1 D5 # MotR B02 | BIN2 D6 # MotR B01 | PWMB D7 # G GND | GND G %sendtofile --source stdmqttas.py %sendtofile --quiet --source utils.py %sendtofile config.txt wifiname DoESLiverpool wifipassword decafbad00 mqttbroker sensorcity.io pinled 2 boardname miniwheels1 mAen 15 mAfore 0 mAback 16 mBen 13 mBfore 14 mBback 12 %serialconnect %sendtofile main.py import os, time, sys, machine, itertools, time from machine import Pin, PWM import uasyncio as asyncio from stdmqttas import fconfig, config, mqttconnecttask, callbackcmdtask from mqtt_as import MQTTClient from stdmqttas import pinled, flashpinled, shortmac #flashpinled(5, 300, 300) import network network.WLAN().active(0) # disable the connection at startup from machine import Pin, PWM mApwm = PWM(Pin(int(fconfig["mAen"]))) mAfore = Pin(int(fconfig["mAfore"]), Pin.OUT) mAback = Pin(int(fconfig["mAback"]), Pin.OUT) mApwm.freq(10) mApwm.duty(0) mBpwm = PWM(Pin(int(fconfig["mBen"]))) mBfore = Pin(int(fconfig["mBfore"]), Pin.OUT) mBback = Pin(int(fconfig["mBback"]), Pin.OUT) mBpwm.freq(10) mBpwm.duty(0) timeZeroBy = 1 async def zeroingtask(): global timeZeroBy while True: if timeZeroBy != 0: if time.ticks_ms() > timeZeroBy: timeZeroBy = 0 mAfore.value(0) mAback.value(0) mBfore.value(0) mBback.value(0) pinled.value(1) await asyncio.sleep_ms(50) def updatewheels(msg): global timeZeroBy try: s = msg.split() timeZeroBy = time.ticks_ms() + int(s[6]) mApwm.duty(int(s[0])) mAfore.value(int(s[1])) 
mAback.value(int(s[2])) mBpwm.duty(int(s[3])) mBfore.value(int(s[4])) mBback.value(int(s[5])) pinled.value(0) except Exception as e: print("Bad wheels", msg, e) topicstem = fconfig["boardname"].encode() topicboardcmd = topicstem + b"/cmd" topicboardwheels = topicstem + b"/wheels" topicstatus = topicstem+b"/status" print("topicstem", topicstem) import network network.WLAN().active(0) # disable the connection at startup for l in ["500 1 0 500 1 0 1", "500 0 1 500 0 1 1", "500 1 0 500 0 1 1", "500 0 1 500 1 0 1", "500 0 0 500 0 0 1"]: updatewheels(l) time.sleep_ms(500) def callbackcmd(topic, msg, retained): print(topic, msg) if topic == topicboardcmd: aloop.create_task(callbackcmdtask(client, topicreply, msg)) else: updatewheels(msg) async def onconnecttask(client): ipnumber = client._sta_if.ifconfig()[0] await client.publish(topicstatus, ipnumber, retain=True) await client.subscribe(topiccmd, 1) async def onconnecttask(client): print("subscribing") ipnumber = client._sta_if.ifconfig()[0] print("ipnumber", ipnumber) await client.publish(topicstatus, ipnumber, retain=True) print("subscribing to", topicboardcmd, topicboardwheels) await client.subscribe(topicboardcmd, 1) await client.subscribe(topicboardwheels, 1) config['subs_cb'] = callbackcmd config['connect_coro'] = onconnecttask config['will'] = (topicstatus, "offline", True) client = MQTTClient(config) client.DEBUG = True aloop = asyncio.get_event_loop() aloop.create_task(zeroingtask()) aloop.create_task(mqttconnecttask(client)) aloop.run_forever() # leftover attempts at light controls from machine import Pin, I2C i2c = I2C(scl=Pin(15), sda=Pin(4), freq=450000) print(list(map(hex, i2c.scan()))) # TCS3472 i2c.writeto_mem(0x29, 0x80, b'\x03') print(hex(i2c.readfrom_mem(0x29, 0x92, 1)[0])) x = i2c.readfrom_mem(0x29, 0x8F, 1) # gain of 1x print(x) x = i2c.readfrom_mem(0x29, 0x81, 1) print(x) print(hex(i2c.readfrom_mem(0x29, 0x92, 1)[0])) i2c.writeto_mem(0x29, 0x8F, b'\x00') # gain of 1x i2c.writeto_mem(0x29, 0x81, 
b'\x00') # integration time 700ms i2c.writeto_mem(0x29, 0x8F, b'\x01') # gain of 2x i2c.writeto_mem(0x29, 0x81, b'\xC0') # integration time 154ms i2c.writeto_mem(0x29, 0x8F, b'\x02') # gain of 4x i2c.writeto_mem(0x29, 0x81, b'\xEB') # integration time 50ms import struct, time t0 = time.ticks_ms() for i in range(20): print(time.ticks_ms() - t0, struct.unpack("<HHHH", i2c.readfrom_mem(0x29, 0x94, 8))) time.sleep(0.01) ```
github_jupyter
# MAPEM de Pierro algorithm for the Bowsher prior One of the more popular methods for guiding a reconstruction based on a high quality image was suggested by Bowsher. This notebook explores this prior. We highly recommend you look at the [PET/MAPEM](../PET/MAPEM.ipynb) notebook first. This example extends upon the quadratic prior used in that notebook to use an anatomical prior. Authors: Kris Thielemans, Sam Ellis, Richard Brown, Casper da Costa-Luis First version: 22nd of October 2019 Second version: 27th of October 2019 Third version: June 2021 CCP SyneRBI Synergistic Image Reconstruction Framework (SIRF) Copyright 2019, 2021 University College London Copyright 2019 King's College London This is software developed for the Collaborative Computational Project in Synergistic Reconstruction for Biomedical Imaging. (http://www.synerbi.ac.uk/). Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Brief description of the Bowsher prior The "usual" quadratic prior penalises differences between neighbouring voxels (using the square of the difference). This tends to oversmooth parts of the image where you know there should be an edge. To overcome this, it is natural to not penalise the difference between those "edge" voxels. This can be done after segmentation of the anatomical image for instance. Bowsher suggested a segmentation-free approach to use an anatomical (or any "side" image) as follows: - compute edge information on the anatomical image. 
- for each voxel, consider only the $N_B$ neighbours which have the lowest difference in the anatomical image. The paper is Bowsher, J. E., Hong Yuan, L. W. Hedlund, T. G. Turkington, G. Akabani, A. Badea, W. C. Kurylo, et al. ‘Utilizing MRI Information to Estimate F18-FDG Distributions in Rat Flank Tumors’. In IEEE Symposium Conference Record Nuclear Science 2004., 4:2488-2492 Vol. 4, 2004. https://doi.org/10.1109/NSSMIC.2004.1462760. # All the normal imports and handy functions ``` %matplotlib notebook # Setup the working directory for the notebook import notebook_setup from sirf_exercises import cd_to_working_dir cd_to_working_dir('Synergistic', 'MAPEM_Bowsher') #%% Initial imports etc import numpy import matplotlib.pyplot as plt import os import sys import shutil from tqdm.auto import tqdm, trange import time from scipy.ndimage.filters import gaussian_filter import sirf.STIR as pet from numba import jit from sirf_exercises import exercises_data_path brainweb_sim_data_path = exercises_data_path('working_folder', 'Synergistic', 'BrainWeb') # set-up redirection of STIR messages to files msg_red = pet.MessageRedirector('info.txt', 'warnings.txt', 'errors.txt') # plotting settings plt.ion() # interactive 'on' such that plots appear during loops #%% some handy function definitions def imshow(image, limits=None, title=''): """Usage: imshow(image, [min,max], title)""" plt.title(title) bitmap = plt.imshow(image) if limits is None: limits = [image.min(), image.max()] plt.clim(limits[0], limits[1]) plt.colorbar(shrink=.6) plt.axis('off') return bitmap def make_cylindrical_FOV(image): """truncate to cylindrical FOV""" filt = pet.TruncateToCylinderProcessor() filt.apply(image) #%% define a function for plotting images and the updates # This is the same function as in `ML_reconstruction` def plot_progress(all_images, title, subiterations, cmax): if len(subiterations)==0: num_subiters = all_images[0].shape[0]-1 subiterations = range(1, num_subiters+1) num_rows = 
len(all_images); slice_show = 60 for it in subiterations: plt.figure() for r in range(num_rows): plt.subplot(num_rows,2,2*r+1) imshow(all_images[r][it,slice_show,:,:], [0,cmax], '%s at %d' % (title[r], it)) plt.subplot(num_rows,2,2*r+2) imshow(all_images[r][it,slice_show,:,:]-all_images[r][it-1,slice_show,:,:],[-cmax*.1,cmax*.1], 'update') plt.show(); def subplot_(idx,vol,title,clims=None,cmap="viridis"): plt.subplot(*idx) plt.imshow(vol,cmap=cmap) if not clims is None: plt.clim(clims) plt.colorbar() plt.title(title) plt.axis("off") ``` # Load the data To generate the data needed for this notebook, run the [BrainWeb](./BrainWeb.ipynb) notebook first. ``` full_acquired_data = pet.AcquisitionData(os.path.join(brainweb_sim_data_path, 'FDG_sino_noisy.hs')) atten = pet.ImageData(os.path.join(brainweb_sim_data_path, 'uMap_small.hv')) # Anatomical image anatomical = pet.ImageData(os.path.join(brainweb_sim_data_path, 'T1_small.hv')) # could be T2_small.hv anatomical_arr = anatomical.as_array() # create initial image init_image=atten.get_uniform_copy(atten.as_array().max()*.1) make_cylindrical_FOV(init_image) plt.figure() imshow(anatomical.as_array()[64, :, :]) plt.show() plt.figure() imshow(full_acquired_data.as_array()[0, 64, :, :]) plt.show() ``` # Code from first MAPEM notebook The following chunk of code is copied and pasted more-or-less directly from the other notebook as a starting point. 
First, run the code chunk to get the objective functions etc ### construction of Likelihood objective functions and OSEM ``` def get_obj_fun(acquired_data, atten): print('\n------------- Setting up objective function') # #%% create objective function #%% create acquisition model am = pet.AcquisitionModelUsingRayTracingMatrix() am.set_num_tangential_LORs(5) # Set up sensitivity due to attenuation asm_attn = pet.AcquisitionSensitivityModel(atten, am) asm_attn.set_up(acquired_data) bin_eff = pet.AcquisitionData(acquired_data) bin_eff.fill(1.0) asm_attn.unnormalise(bin_eff) asm_attn = pet.AcquisitionSensitivityModel(bin_eff) # Set sensitivity of the model and set up am.set_acquisition_sensitivity(asm_attn) am.set_up(acquired_data,atten); #%% create objective function obj_fun = pet.make_Poisson_loglikelihood(acquired_data) obj_fun.set_acquisition_model(am) print('\n------------- Finished setting up objective function') return obj_fun def get_reconstructor(num_subsets, num_subiters, obj_fun, init_image): print('\n------------- Setting up reconstructor') #%% create OSEM reconstructor OSEM_reconstructor = pet.OSMAPOSLReconstructor() OSEM_reconstructor.set_objective_function(obj_fun) OSEM_reconstructor.set_num_subsets(num_subsets) OSEM_reconstructor.set_num_subiterations(num_subiters) #%% initialise OSEM_reconstructor.set_up(init_image) print('\n------------- Finished setting up reconstructor') return OSEM_reconstructor # Use rebin to create a smaller sinogram to speed up calculations acquired_data = full_acquired_data.clone() acquired_data = acquired_data.rebin(3) # Get the objective function obj_fun = get_obj_fun(acquired_data, atten) ``` # Implement de Pierro MAP-EM for a quadratic prior with arbitrary weights The following code is almost a copy-paste of the implementation by A. Mehranian and S. Ellis [contributed during one of our hackathons](https://github.com/SyneRBI/SIRF-Contribs/tree/master/src/Python/sirf/contrib/kcl). 
It is copied here for you to have an easier look. Note that the code avoids the `for` loops in our simplistic version above and hence should be faster (however, the construction of the neighbourhood is still slow, but you should have to do this only once). Also, this is a Python reimplementation of MATLAB code (hence the use of "Fortran order" below). ``` def dePierroReg(image,weights,nhoodIndVec): """Get the de Pierro regularisation image (xreg)""" imSize = image.shape # vectorise image for indexing imageVec = image.reshape(-1,order='F') # retrieve voxel intensities for neighbourhoods resultVec = imageVec[nhoodIndVec] result = resultVec.reshape(weights.shape,order='F') # compute xreg imageReg = 0.5*numpy.sum(weights*(result + image.reshape(-1,1,order='F')),axis=1)/numpy.sum(weights,axis=1) imageReg = imageReg.reshape(imSize,order='F') return imageReg def compute_nhoodIndVec(imageSize,weightsSize): """Get the neigbourhoods of each voxel""" w = int(round(weightsSize[1]**(1.0/3))) # side length of neighbourhood nhoodInd = neighbourExtract(imageSize,w) return nhoodInd.reshape(-1,order='F') def neighbourExtract(imageSize,w): """Adapted from kcl.Prior class""" n = imageSize[0] m = imageSize[1] h = imageSize[2] wlen = 2*numpy.floor(w/2) widx = xidx = yidx = numpy.arange(-wlen/2,wlen/2+1) if h==1: zidx = [0] nN = w*w else: zidx = widx nN = w*w*w Y,X,Z = numpy.meshgrid(numpy.arange(0,m), numpy.arange(0,n), numpy.arange(0,h)) N = numpy.zeros([n*m*h, nN],dtype='int32') l = 0 for x in xidx: Xnew = setBoundary(X + x,n) for y in yidx: Ynew = setBoundary(Y + y,m) for z in zidx: Znew = setBoundary(Z + z,h) N[:,l] = ((Xnew + (Ynew)*n + (Znew)*n*m)).reshape(-1,1).flatten('F') l += 1 return N def setBoundary(X,n): """Boundary conditions for neighbourExtract. 
Adapted from kcl.Prior class""" idx = X<0 X[idx] = X[idx] + n idx = X>n-1 X[idx] = X[idx] - n return X.flatten('F') @jit def dePierroUpdate(xEM, imageReg, beta): """Update the image based on the de Pierro regularisation image""" return (2*xEM)/(((1 - beta*imageReg)**2 + 4*beta*xEM)**0.5 + (1 - beta*imageReg) + 0.00001) def MAPEM_iteration(OSEM_reconstructor,current_image,weights,nhoodIndVec,beta): image_reg = dePierroReg(current_image.as_array(),weights,nhoodIndVec) # compute xreg OSEM_reconstructor.update(current_image); # compute EM update image_EM=current_image.as_array() # get xEM as a numpy array updated = dePierroUpdate(image_EM, image_reg, beta) # compute new uxpdate current_image.fill(updated) # store for next iteration return current_image ``` ## Create uniform and Bowsher weights We will use the `kcl.Prior` class here to construct the Bowsher weights given an anatomical image. The `kcl.Prior` class (and the above code) assumes that the `weights` are returned an $N_v \times N_n$ array, with $N_v$ the number of voxels and $N_n$ the number of neighbours (here 27 as the implementation is in 3D). ``` import sirf.contrib.kcl.Prior as pr def update_bowsher_weights(prior,side_image,num_bowsher_neighbours): return prior.BowshserWeights\ (side_image.as_array(),num_bowsher_neighbours) ``` For illustration, we will keep only a few neighbours in the Bowsher prior. This makes the contrast with "uniform" weights higher of course. ``` num_bowsher_neighbours = 3 myPrior = pr.Prior(anatomical_arr.shape) BowsherWeights = update_bowsher_weights(myPrior,anatomical,num_bowsher_neighbours) ``` Ignore the warning about `divide by zero`, it is actually handled in the `kcl.Prior` class. 
``` # compute indices of the neighbourhood for each voxel nhoodIndVec=compute_nhoodIndVec(anatomical_arr.shape,BowsherWeights.shape) # illustrate that only a few of the weights in the neighbourhood are kept # (taking an arbitrary voxel) print(BowsherWeights[500,:]) ``` You could try to understand the neighbourhood structure using the following, but it is quite complicated due to the Fortran order and linear indices. ``` toLinearIndices=nhoodIndVec.reshape(BowsherWeights.shape,order='F') print(toLinearIndices[500,:]) ``` We will also use uniform weights where every neighbour is counted the same (often people will use 1/distance between voxels as weighting, but this isn't implemented here). ``` uniformWeights=BowsherWeights.copy() uniformWeights[:,:]=1 # set "self-weight" of the voxel to zero uniformWeights[:,27//2]=0 print(uniformWeights[500,:]) ``` # Run some experiments ``` num_subsets = 21 num_subiters = 42 ``` ## Do a normal OSEM (for comparison and initialisation) ``` # Do initial OSEM recon OSEM_reconstructor = get_reconstructor(num_subsets, num_subiters, obj_fun, init_image) osem_image = init_image.clone() OSEM_reconstructor.reconstruct(osem_image) plt.figure() imshow(osem_image.as_array()[60,:,:]) plt.show(); ``` ## Run MAP-EM with the 2 different sets of weights To save some time, we will initialise the algorithms with the OSEM image. This makes sense of course as in the initial iterations, the penalty will just slow everything down (as it smooths an already too smooth image even more!). ``` # arbitrary value for the weight of the penalty. 
You might have to tune it beta=1 ``` Compute with Bowsher penalty ``` current_image=osem_image.clone() for it in trange(1, num_subiters+1): current_image = MAPEM_iteration(OSEM_reconstructor,current_image,BowsherWeights,nhoodIndVec,beta) Bowsher=current_image.clone() ``` Compute with uniform weights (we'll call the result UQP for "uniform quadratic penalty") ``` current_image=osem_image.clone() for it in trange(1, num_subiters+1): current_image = MAPEM_iteration(OSEM_reconstructor,current_image,uniformWeights,nhoodIndVec,beta) UQP=current_image.clone() # Plot the anatomical, OSEM, and two MAPEM images plt.figure() cmax=osem_image.max()*.6 clim=[0,cmax] subplot_([1,2,1],anatomical.as_array()[60,:,:],"anatomical") subplot_([1,2,2],osem_image.as_array()[60,:,:],"OSEM",clim) plt.figure() subplot_([1,2,1],UQP.as_array()[60,:,:],"Uniform Quadratic prior",clim) subplot_([1,2,2],Bowsher.as_array()[60,:,:],"Bowsher Quadratic prior",clim) plt.figure() y_idx=osem_image.dimensions()[1]//2 plt.plot(osem_image.as_array()[60,y_idx,:],label="OSEM") plt.plot(UQP.as_array()[60,y_idx,:],label="Uniform Quadratic prior") plt.plot(Bowsher.as_array()[60,y_idx,:],label="Bowsher Quadratic prior") plt.legend() ``` You will probably see that the MAP-EM are quite smooth, and that there is very little difference between the "uniform" and "Bowsher" weights after this number of updates. The difference will get larger with higher number of updates (try it!). Also, with the Bowsher weights you should be able to increase `beta` more than for the uniform weights without oversmoothing the image too much. # Misalignment between anatomical and emission images What happens if you want to use an anatomical prior but the image isn't aligned with the image you're trying to reconstruct? You'll have to register them of course! Have a look at the [registration notebook](../Reg/sirf_registration.ipynb) if you haven't already. 
The idea here would be to run an initial reconstruction (say, OSEM), and then register the anatomical image to the resulting reconstruction... Once we've got the anatomical image in the correct space, we can calculate the Bowsher weights. ``` import sirf.Reg as Reg registration = Reg.NiftyAladinSym() registration.set_reference_image(osem_image) registration.set_floating_image(anatomical) registration.set_parameter('SetPerformRigid','1') registration.set_parameter('SetPerformAffine','0') registration.process() anatomical_in_emission_space = registration.get_output() Bweights = update_bowsher_weights(myPrior,anatomical_in_emission_space,num_bowsher_neighbours) ```
github_jupyter
``` %matplotlib inline ``` ************************* Text rendering With LaTeX ************************* Rendering text with LaTeX in Matplotlib. Matplotlib has the option to use LaTeX to manage all text layout. This option is available with the following backends: * Agg * PS * PDF The LaTeX option is activated by setting ``text.usetex : True`` in your rc settings. Text handling with matplotlib's LaTeX support is slower than matplotlib's very capable :doc:`mathtext </tutorials/text/mathtext>`, but is more flexible, since different LaTeX packages (font packages, math packages, etc.) can be used. The results can be striking, especially when you take care to use the same fonts in your figures as in the main document. Matplotlib's LaTeX support requires a working LaTeX_ installation, dvipng_ (which may be included with your LaTeX installation), and Ghostscript_ (GPL Ghostscript 9.0 or later is required). The executables for these external dependencies must all be located on your :envvar:`PATH`. There are a couple of options to mention, which can be changed using :doc:`rc settings </tutorials/introductory/customizing>`. Here is an example matplotlibrc file:: font.family : serif font.serif : Times, Palatino, New Century Schoolbook, Bookman, Computer Modern Roman font.sans-serif : Helvetica, Avant Garde, Computer Modern Sans serif font.cursive : Zapf Chancery font.monospace : Courier, Computer Modern Typewriter text.usetex : true The first valid font in each family is the one that will be loaded. If the fonts are not specified, the Computer Modern fonts are used by default. All of the other fonts are Adobe fonts. Times and Palatino each have their own accompanying math fonts, while the other Adobe serif fonts make use of the Computer Modern math fonts. See the PSNFSS_ documentation for more details. 
To use LaTeX and select Helvetica as the default font, without editing matplotlibrc use:: from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) ## for Palatino and other serif fonts use: #rc('font',**{'family':'serif','serif':['Palatino']}) rc('text', usetex=True) Here is the standard example, :file:`/gallery/text_labels_and_annotations/tex_demo`: .. figure:: ../../gallery/text_labels_and_annotations/images/sphx_glr_tex_demo_001.png :target: ../../gallery/text_labels_and_annotations/tex_demo.html :align: center :scale: 50 TeX Demo Note that display math mode (``$$ e=mc^2 $$``) is not supported, but adding the command ``\displaystyle``, as in the above demo, will produce the same results. <div class="alert alert-info"><h4>Note</h4><p>Certain characters require special escaping in TeX, such as:: # $ % & ~ _ ^ \ { } \( \) \[ \] Therefore, these characters will behave differently depending on the rcParam ``text.usetex`` flag.</p></div> usetex with unicode =================== It is also possible to use unicode strings with the LaTeX text manager, here is an example taken from :file:`/gallery/text_labels_and_annotations/tex_demo`. The axis labels include Unicode text: .. figure:: ../../gallery/text_labels_and_annotations/images/sphx_glr_tex_demo_001.png :target: ../../gallery/text_labels_and_annotations/tex_demo.html :align: center :scale: 50 TeX Unicode Demo Postscript options ================== In order to produce encapsulated postscript files that can be embedded in a new LaTeX document, the default behavior of matplotlib is to distill the output, which removes some postscript operators used by LaTeX that are illegal in an eps file. This step produces results which may be unacceptable to some users, because the text is coarsely rasterized and converted to bitmaps, which are not scalable like standard postscript, and the text is not searchable. 
One workaround is to set ``ps.distiller.res`` to a higher value (perhaps 6000) in your rc settings, which will produce larger files but may look better and scale reasonably. A better workaround, which requires Poppler_ or Xpdf_, can be activated by changing the ``ps.usedistiller`` rc setting to ``xpdf``. This alternative produces postscript without rasterizing text, so it scales properly, can be edited in Adobe Illustrator, and searched text in pdf documents. Possible hangups ================ * On Windows, the :envvar:`PATH` environment variable may need to be modified to include the directories containing the latex, dvipng and ghostscript executables. See `environment-variables` and `setting-windows-environment-variables` for details. * Using MiKTeX with Computer Modern fonts, if you get odd \*Agg and PNG results, go to MiKTeX/Options and update your format files * On Ubuntu and Gentoo, the base texlive install does not ship with the type1cm package. You may need to install some of the extra packages to get all the goodies that come bundled with other latex distributions. * Some progress has been made so matplotlib uses the dvi files directly for text layout. This allows latex to be used for text layout with the pdf and svg backends, as well as the \*Agg and PS backends. In the future, a latex installation may be the only external dependency. Troubleshooting =============== * Try deleting your :file:`.matplotlib/tex.cache` directory. If you don't know where to find :file:`.matplotlib`, see `locating-matplotlib-config-dir`. * Make sure LaTeX, dvipng and ghostscript are each working and on your :envvar:`PATH`. * Make sure what you are trying to do is possible in a LaTeX document, that your LaTeX syntax is valid and that you are using raw strings if necessary to avoid unintended escape sequences. * Most problems reported on the mailing list have been cleared up by upgrading Ghostscript_. 
If possible, please try upgrading to the latest release before reporting problems to the list. * The ``text.latex.preamble`` rc setting is not officially supported. This option provides lots of flexibility, and lots of ways to cause problems. Please disable this option before reporting problems to the mailing list. * If you still need help, please see `reporting-problems`
github_jupyter
``` #all_slow ``` # Tutorial - Migrating from Lightning > Incrementally adding fastai goodness to your Lightning training We're going to use the MNIST training code from Lightning's 'Quick Start' (as at August 2020), converted to a module. See `migrating_lightning.py` for the Lightning code we are importing here. ``` from migrating_lightning import * from fastai.vision.all import * ``` ## Using fastai's training loop We can use the Lightning module directly: ``` model = LitModel() ``` To use it in fastai, we first pull the DataLoaders from the module into a `DataLoaders` object: ``` data = DataLoaders(model.train_dataloader(), model.val_dataloader()).cuda() ``` We can now create a `Learner` and fit: ``` learn = Learner(data, model, loss_func=F.cross_entropy, opt_func=Adam, metrics=accuracy) learn.fit_one_cycle(1, 0.001) ``` As you can see, migrating from Lightning allowed us to reduce the amount of code, and doesn't require you to change any of your existing data pipelines, optimizers, loss functions, models, etc. Once you've made this change, you can then benefit from fastai's rich set of callbacks, transforms, visualizations, and so forth. For instance, in the Lightning example, Tensorboard support was defined as a special-case "logger". In fastai, Tensorboard is just another `Callback` that you can add, with the parameter `cbs=Tensorboard`, when you create your `Learner`. The callbacks all work together, so you can add and remove any schedulers, loggers, visualizers, and so forth. You don't have to learn about special types of functionality for each - they are all just plain callbacks. Note that fastai is very different from Lightning, in that it is much more than just a training loop (although we're only using the training loop in this example) - it is a complete framework including GPU-accelerated transformations, end-to-end inference, integrated applications for vision, text, tabular, and collaborative filtering, and so forth. 
You can use any part of the framework on its own, or combine them together, as described in the [fastai paper](https://arxiv.org/abs/2002.04688). ### Taking advantage of fastai Data Blocks One problem in the Lightning example is that it doesn't actually use a validation set - it's just using the training set a second time as a validation set. You might prefer to use fastai's Data Block API, which makes it really easy to create, visualize, and test your input data processing. Here's how you can create input data for MNIST, for instance: ``` mnist = DataBlock(blocks=(ImageBlock(cls=PILImageBW), CategoryBlock), get_items=get_image_files, splitter=GrandparentSplitter(), get_y=parent_label) ``` Here, we're telling `DataBlock` that we have a B&W image input, and a category output, our input items are file names of images, the images are labeled based on the name of the parent folder, and they are split by training vs validation based on the grandparent folder name. It's important to actually look at your data, so fastai also makes it easy to visualize your inputs and outputs, for instance: ``` dls = mnist.dataloaders(untar_data(URLs.MNIST_TINY)) dls.show_batch(max_n=9, figsize=(4,4)) ```
github_jupyter
``` %load_ext autoreload %autoreload 2 ``` # LDLS Demo This notebook demonstrates how to use LDLS to perform instance segmentation of a LiDAR point cloud. This demo uses Frame 571 from the KITTI object detection dataset. ## Setup Import LiDAR segmentation modules: ``` import numpy as np from pathlib import Path import skimage from lidar_segmentation.detections import MaskRCNNDetections from lidar_segmentation.segmentation import LidarSegmentation from lidar_segmentation.kitti_utils import load_kitti_lidar_data, load_kitti_object_calib from lidar_segmentation.utils import load_image from mask_rcnn.mask_rcnn import MaskRCNNDetector ``` # Load input data Load the following files: - Calibration data (relates the LiDAR and camera sensor coordinate frames) - Image - LiDAR point cloud ``` # Define file paths calib_path = Path("data/") / "kitti_demo" / "calib" / "000571.txt" image_path = Path("data/") / "kitti_demo" / "image_2" / "000571.png" lidar_path = Path("data/") / "kitti_demo" / "velodyne" / "000571.bin" # Load calibration data projection = load_kitti_object_calib(calib_path) # Load image image = load_image(image_path) skimage.io.imshow(image) # Load lidar lidar = load_kitti_lidar_data(lidar_path, load_reflectance=False) print("Loaded LiDAR point cloud with %d points" % lidar.shape[0]) ``` # Run Mask-RCNN detector on image The first step in the LDLS pipeline is to run Mask-RCNN on the input image to generate 2D segmentation masks. The following code block runs Mask-RCNN and visualizes results on the input image. ``` detector = MaskRCNNDetector() detections = detector.detect(image) detections.visualize(image) ``` # Perform LiDAR segmentation Next, perform 3D segmentation using a LidarSegmentation object. The LidarSegmentation.run() method takes as inputs a LiDAR point cloud, Mask-RCNN detections, and a maximum number of iterations parameter. 
``` lidarseg = LidarSegmentation(projection) # Be sure to set save_all=False when running segmentation # If set to true, returns label diffusion results at each iteration in the results # This is useful for analysis or visualizing the diffusion, but slow. results = lidarseg.run(lidar, detections, max_iters=50, save_all=False) %timeit lidarseg.run(lidar, detections, max_iters=50, save_all=False) ``` # Visualize results using Plotly Plot the resulting labeled pointcloud using [Plotly](https://plot.ly/). You can visualize the results with points colored according to class labels (Person, Car, ...), or instance labels (Person 1, Person 2, Car 1, ...). ``` from lidar_segmentation.plotting import plot_segmentation_result # Show points colored by class label plot_segmentation_result(results, label_type='class') # Show points colored by instance label plot_segmentation_result(results, label_type='instance') ``` You can also visualize the label diffusion over time. This requires running the lidar segmentation with the `save_all` parameter set to `true` (note that this is significantly slower due to saving the full diffusion results in an array). Run the following code block to visualize this. You can use the slide bar on the bottom to see results at different iterations. ``` from lidar_segmentation.plotting import plot_diffusion results_all = lidarseg.run(lidar, detections, max_iters=50, save_all=True) plot_diffusion(results_all) ```
github_jupyter
# Background Computation with "ofilter" This notebook illustrates background calculations using the `ofilter` algorithm adapted from IRAF. ``` import matplotlib.pyplot as plt import numpy as np import scipy import ofiltsky %matplotlib inline ``` ### Generate Data ``` # Set the seed for reproducibility np.random.seed(0) # Random Poisson data data = np.random.poisson(lam=1, size=(50, 50)) plt.imshow(data, cmap='viridis', interpolation='none') plt.colorbar() h = plt.hist(data.flatten(), bins=5, histtype='stepfilled') ``` ### Quick IRAF Detour To write this data out for IRAF, uncomment and run the following: ``` # from astropy.io import fits # # hdu0 = fits.PrimaryHDU() # hdu1 = fits.ImageHDU(data.astype(np.float32)) # hdu = fits.HDUList([hdu0, hdu1]) # hdu.writeto('im_poisson_0.fits', clobber=True) ``` In addition to the FITS file, you also have to create a one-liner `im_poisson_0.coo` file with the following contents: 25 25 Inside your IRAF session, set the following parameters (more or less) using `epar`: datapars.sigma = 1. fitskypars.salgo = "ofilter" fitskypars.annulus = 1. fitskypars.dannulus = 20. fitskypars.smaxiter = 10 fitskypars.snreject = 10 Then, run the following command: fitsky im_poisson_0.fits im_poisson_0.coo The result would be in `im_poisson_0.fits1.sky.1`: MSKY = 0.7501966 STDEV = 0.7515768 SSKEW = 0.5060266 ### Back to Python For this dataset, the Python version of the `ofilter` algorithm gives slightly lower sky and skew values, but comparable sigma. The Python version uses third-party libraries like Numpy, Scipy, and Astropy. Thus, it is not shocking that we are not getting complete agreement here. Some questions that could be pursued: 1. Is it good enough? (Also see the next sub-section.) 2. Do we even care about the skew? Maybe not? In Python, it is calculated using `scipy.stats.skew()`. ``` np.ceil(data.std()) # NOTE: Sigma clipping does not matter much for this dataset. 
ofil_results = ofiltsky.fitsky_ofilter(data, binsize=data.std(), sigclip_sigma=None) print('MSKY =', ofil_results[0]) print('STDEV =', ofil_results[1]) print('SSKEW =', ofil_results[2]) ``` We can also compare with some other available statistics: ``` sky_mean = data.mean() sky_med = np.median(data) sky_mode = scipy.stats.mode(data, axis=None).mode[0] print('MEAN =', sky_mean) print('MEDIAN =', sky_med) print('MODE =', sky_mode) ``` ### Comparing Results This sub-section attempts to generate a plot not unlike what was published in WFPC2 ISR 1996-03 (Ferguson 1996). Perhaps the plot here can answer, "Is it good enough?" ``` # Populate desired background values vals = np.arange(0, 4.5, 0.1) # Initialize arrays to store results sky_vals = [] sky_ofil = [] sky_med = [] sky_mean = [] # Generate results for i, val in enumerate(vals): np.random.seed(i) data = np.random.poisson(lam=val, size=(50, 50)) try: msky = ofiltsky.fitsky_ofilter(data, sigclip_sigma=None)[0] except ValueError as e: print('i={0}, val={1:.1f}, errmsg={2}'.format(i, val, str(e))) continue sky_vals.append(val) sky_ofil.append(msky) sky_med.append(np.median(data)) sky_mean.append(data.mean()) # Convert result to Numpy arrays sky_vals = np.asarray(sky_vals) sky_ofil = np.asarray(sky_ofil) sky_med = np.asarray(sky_med) sky_mean = np.asarray(sky_mean) print() print('Number of data points for plotting:', sky_ofil.size) plt.scatter(sky_mean, sky_med - sky_mean, color='b', marker='x', label='median') plt.scatter(sky_mean, sky_ofil - sky_mean, color='r', marker='o', label='ofilter (Python)') plt.scatter([0.9868], [0.7501699 - 0.9868], color='k', marker='o', label='ofilter (IRAF)') plt.xlabel('mean') plt.ylabel('X - mean') plt.axhline(0, color='k', linestyle='--') plt.legend(bbox_to_anchor=(1.01, 1), loc='upper left', scatterpoints=1) plt.scatter(sky_med, sky_mean - sky_med, color='b', marker='x', label='mean') plt.scatter(sky_med, sky_ofil - sky_med, color='r', marker='o', label='ofilter (Python)') 
plt.scatter([1], [0.7501699 - 1], color='k', marker='o', label='ofilter (IRAF)') plt.xlabel('median') plt.ylabel('X - median') plt.axhline(0, color='k', linestyle='--') plt.legend(bbox_to_anchor=(1.01, 1), loc='upper left', scatterpoints=1) plt.scatter(sky_vals, sky_mean - sky_vals, color='b', marker='x', label='mean') plt.scatter(sky_vals, sky_med - sky_vals, color='g', marker='*', label='median') plt.scatter(sky_vals, sky_ofil - sky_vals, color='r', marker='o', label='ofilter (Python)') plt.scatter([1], [0.7501699 - 1], color='k', marker='o', label='ofilter (IRAF)') plt.xlabel('vals') plt.ylabel('X - vals') plt.axhline(0, color='k', linestyle='--') plt.legend(bbox_to_anchor=(1.01, 1), loc='upper left', scatterpoints=1) ``` ### Try with Other Skewed Distribution ``` from scipy.stats import gumbel_r # Populate desired background values vals = np.arange(0.1, 4.6, 0.1) # Initialize arrays to store results sky_vals = [] sky_ofil = [] sky_med = [] sky_mean = [] # Generate results for i, val in enumerate(vals): np.random.seed(i) # Does this control Scipy? 
data = gumbel_r.rvs(loc=val, size=(50, 50)) try: msky = ofiltsky.fitsky_ofilter(data, sigclip_sigma=None)[0] except ValueError as e: print('i={0}, val={1:.1f}, errmsg={2}'.format(i, val, str(e))) continue sky_vals.append(val) sky_ofil.append(msky) sky_med.append(np.median(data)) sky_mean.append(data.mean()) # Convert result to Numpy arrays sky_vals = np.asarray(sky_vals) sky_ofil = np.asarray(sky_ofil) sky_med = np.asarray(sky_med) sky_mean = np.asarray(sky_mean) print() print('Number of data points for plotting:', sky_ofil.size) plt.scatter(sky_vals, sky_mean - sky_vals, color='b', marker='x', label='mean') plt.scatter(sky_vals, sky_med - sky_vals, color='g', marker='*', label='median') plt.scatter(sky_vals, sky_ofil - sky_vals, color='r', marker='o', label='ofilter (Python)') plt.xlabel('vals') plt.ylabel('X - vals') plt.axhline(0, color='k', linestyle='--') plt.legend(bbox_to_anchor=(1.01, 1), loc='upper left', scatterpoints=1) ``` Display histogram of the data from the last iteration above: ``` h = plt.hist(data.flatten(), bins=20, histtype='stepfilled') plt.axvline(msky, color='r') plt.axvline(np.median(data), color='g') plt.axvline(data.mean(), color='b') plt.axvline(val, color='k') ```
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D2_DynamicNetworks/student/W3D2_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Neuromatch Academy: Week 3, Day 2, Tutorial 1 # Neuronal Network Dynamics: Neural Rate Models __Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva __Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom --- # Tutorial Objectives The brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. The activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). How the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain. In this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time. 
In this tutorial, we will learn how to build a firing rate model of a single population of excitatory neurons. **Steps:** - Write the equation for the firing rate dynamics of a 1D excitatory population. - Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve. - Numerically simulate the dynamics of the excitatory population and find the fixed points of the system. - Investigate the stability of the fixed points by linearizing the dynamics around them. --- # Setup ``` # Imports import matplotlib.pyplot as plt import numpy as np import scipy.optimize as opt # root-finding algorithm # @title Figure Settings import ipywidgets as widgets # interactive display %config InlineBackend.figure_format = 'retina' plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") # @title Helper functions def plot_fI(x, f): plt.figure(figsize=(6, 4)) # plot the figure plt.plot(x, f, 'k') plt.xlabel('x (a.u.)', fontsize=14) plt.ylabel('F(x)', fontsize=14) plt.show() def plot_dr_r(r, drdt, x_fps=None): plt.figure() plt.plot(r, drdt, 'k') plt.plot(r, 0. * r, 'k--') if x_fps is not None: plt.plot(x_fps, np.zeros_like(x_fps), "ko", ms=12) plt.xlabel(r'$r$') plt.ylabel(r'$\frac{dr}{dt}$', fontsize=20) plt.ylim(-0.1, 0.1) def plot_dFdt(x, dFdt): plt.figure() plt.plot(x, dFdt, 'r') plt.xlabel('x (a.u.)', fontsize=14) plt.ylabel('dF(x)', fontsize=14) plt.show() ``` --- # Section 1: Neuronal network dynamics ``` # @title Video 1: Dynamic networks from IPython.display import YouTubeVideo video = YouTubeVideo(id="p848349hPyw", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` ## Section 1.1: Dynamics of a single excitatory population Individual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. 
In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. Mathematically, we can describe the firing rate dynamic as: \begin{align} \tau \frac{dr}{dt} &= -r + F(w\cdot r + I_{\text{ext}}) \quad\qquad (1) \end{align} $r(t)$ represents the average firing rate of the excitatory population at time $t$, $\tau$ controls the timescale of the evolution of the average firing rate, $w$ denotes the strength (synaptic weight) of the recurrent input to the population, $I_{\text{ext}}$ represents the external input, and the transfer function $F(\cdot)$ (which can be related to f-I curve of individual neurons described in the next sections) represents the population activation function in response to all received inputs. To start building the model, please execute the cell below to initialize the simulation parameters. ``` # @markdown *Execute this cell to set default parameters for a single excitatory population model* def default_pars_single(**kwargs): pars = {} # Excitatory parameters pars['tau'] = 1. # Timescale of the E population [ms] pars['a'] = 1.2 # Gain of the E population pars['theta'] = 2.8 # Threshold of the E population # Connection strength pars['w'] = 0. # E to E, we first set it to 0 # External input pars['I_ext'] = 0. # simulation parameters pars['T'] = 20. # Total duration of simulation [ms] pars['dt'] = .1 # Simulation time step [ms] pars['r_init'] = 0.2 # Initial value of E # External parameters if any pars.update(kwargs) # Vector of discretized time points [ms] pars['range_t'] = np.arange(0, pars['T'], pars['dt']) return pars ``` You can now use: - `pars = default_pars_single()` to get all the parameters, and then you can execute `print(pars)` to check these parameters. 
- `pars = default_pars_single(T=T_sim, dt=time_step)` to set new simulation time and time step - To update an existing parameter dictionary, use `pars['New_para'] = value` Because `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax. ## Section 1.2: F-I curves In electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial. The transfer function $F(\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. A sigmoidal $F(\cdot)$ is parameterized by its gain $a$ and threshold $\theta$. $$ F(x;a,\theta) = \frac{1}{1+\text{e}^{-a(x-\theta)}} - \frac{1}{1+\text{e}^{a\theta}} \quad(2)$$ The argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\theta)=0$. Many other transfer functions (generally monotonic) can be also used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$. ### Exercise 1: Implement F-I curve Let's first investigate the activation functions before simulating the dynamics of the entire population. In this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\theta$ as parameters. ``` def F(x, a, theta): """ Population activation function. 
Args: x (float): the population input a (float): the gain of the function theta (float): the threshold of the function Returns: float: the population activation response F(x) for input x """ ################################################# ## TODO for students: compute f = F(x) ## # Fill out function and remove raise NotImplementedError("Student excercise: implement the f-I function") ################################################# # Define the sigmoidal transfer function f = F(x) f = ... return f pars = default_pars_single() # get default parameters x = np.arange(0, 10, .1) # set the range of input # Uncomment below to test your function # f = F(x, pars['a'], pars['theta']) # plot_fI(x, f) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_45ddc05f.py) *Example output:* <img alt='Solution hint' align='left' width=416 height=272 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_DynamicNetworks/static/W3D2_Tutorial1_Solution_45ddc05f_0.png> ### Interactive Demo: Parameter exploration of F-I curve Here's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters. How do the gain and threshold parameters affect the F-I curve? ``` # @title # @markdown Make sure you execute this cell to enable the widget! def interactive_plot_FI(a, theta): """ Population activation function. 
Expecxts: a : the gain of the function theta : the threshold of the function Returns: plot the F-I curve with give parameters """ # set the range of input x = np.arange(0, 10, .1) plt.figure() plt.plot(x, F(x, a, theta), 'k') plt.xlabel('x (a.u.)', fontsize=14) plt.ylabel('F(x)', fontsize=14) plt.show() _ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2)) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_1c0165d7.py) ## Section 1.3: Simulation scheme of E dynamics Because $F(\cdot)$ is a nonlinear function, the exact solution of Equation $1$ can not be determined via analytical methods. Therefore, numerical methods must be used to find the solution. In practice, the derivative on the left-hand side of Equation $1$ can be approximated using the Euler method on a time-grid of stepsize $\Delta t$: \begin{align} &\frac{dr}{dt} \approx \frac{r[k+1]-r[k]}{\Delta t} \end{align} where $r[k] = r(k\Delta t)$. 
Thus, $$\Delta r[k] = \frac{\Delta t}{\tau}[-r[k] + F(w\cdot r[k] + I_{\text{ext}}(k;a,\theta))]$$ Hence, Equation (1) is updated at each time step by: $$r[k+1] = r[k] + \Delta r[k]$$ ``` # @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`* def simulate_single(pars): """ Simulate an excitatory population of neurons Args: pars : Parameter dictionary Returns: rE : Activity of excitatory population (array) Example: pars = default_pars_single() r = simulate_single(pars) """ # Set parameters tau, a, theta = pars['tau'], pars['a'], pars['theta'] w = pars['w'] I_ext = pars['I_ext'] r_init = pars['r_init'] dt, range_t = pars['dt'], pars['range_t'] Lt = range_t.size # Initialize activity r = np.zeros(Lt) r[0] = r_init I_ext = I_ext * np.ones(Lt) # Update the E activity for k in range(Lt - 1): dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta)) r[k+1] = r[k] + dr return r help(simulate_single) ``` ### Interactive Demo: Parameter Exploration of single population dynamics Note that $w=0$, as in the default setting, means no recurrent input to the neuron population in Equation (1). Hence, the dynamics are entirely determined by the external input $I_{\text{ext}}$. Explore these dynamics in this interactive demo. How does $r_{\text{sim}}(t)$ change with different $I_{\text{ext}}$ values? How does it change with different $\tau$ values? Investigate the relationship between $F(I_{\text{ext}}; a, \theta)$ and the steady value of $r(t)$. Note that, $r_{\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section. ``` # @title # @markdown Make sure you execute this cell to enable the widget! # get default parameters pars = default_pars_single(T=20.) 
def Myplot_E_diffI_difftau(I_ext, tau):
  """Overlay the simulated and analytical rate dynamics for one
  (I_ext, tau) widget setting.

  Mutates the module-level `pars` dictionary, runs the Euler
  simulation, computes the closed-form solution (valid for w = 0),
  and plots both together with the steady-state level F(I_ext).
  """
  # Push the widget values into the shared parameter dictionary
  pars['I_ext'], pars['tau'] = I_ext, tau

  # Numerical solution via forward Euler
  r_sim = simulate_single(pars)

  # Analytical solution for w = 0
  steady = F(I_ext, pars['a'], pars['theta'])
  relaxation = 1. - np.exp(-pars['range_t'] / pars['tau'])
  r_ana = pars['r_init'] + (steady - pars['r_init']) * relaxation

  plt.figure()
  plt.plot(pars['range_t'], r_sim, 'b', label=r'$r_{\mathrm{sim}}$(t)',
           alpha=0.5, zorder=1)
  plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),
           label=r'$r_{\mathrm{ana}}$(t)', zorder=2)
  plt.plot(pars['range_t'], steady * np.ones(pars['range_t'].size), 'k--',
           label=r'$F(I_{\mathrm{ext}})$')
  plt.xlabel('t (ms)', fontsize=16.)
  plt.ylabel('Activity r(t)', fontsize=16.)
  plt.legend(loc='best', fontsize=14.)
  plt.show()
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_5a95a98e.py) --- # Section 2: Fixed points of the single population system ``` # @title Video 2: Fixed point from IPython.display import YouTubeVideo video = YouTubeVideo(id="Ox3ELd1UFyo", width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time, it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, in the steady states the derivative with respect to time of the activity ($r$) is zero, i.e. $\displaystyle \frac{dr}{dt}=0$. We can find that the steady state of the Equation. (1) by setting $\displaystyle{\frac{dr}{dt}=0}$ and solve for $r$: $$-r_{\text{steady}} + F(w\cdot r_{\text{steady}} + I_{\text{ext}};a,\theta) = 0, \qquad (3)$$ When it exists, the solution of Equation. (3) defines a **fixed point** of the dynamical system in Equation (1). Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later. From the Interactive Demo, one could also notice that the value of $\tau$ influences how quickly the activity will converge to the steady state from its initial value. In the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\tau$ in determining the convergence to the fixed point: $$\displaystyle{r(t) = \big{[}F(I_{\text{ext}};a,\theta) -r(t=0)\big{]} (1-\text{e}^{-\frac{t}{\tau}})} + r(t=0)$$ \\ We can now numerically calculate the fixed point with a root finding algorithm. 
## Exercise 2: Visualization of the fixed points When it is not possible to find the solution for Equation (3) analytically, a graphical approach can be taken. To that end, it is useful to plot $\displaystyle{\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. Here, let us, for example, set $w=5.0$ and $I^{\text{ext}}=0.5$. From Equation (1), you can obtain $$\frac{dr}{dt} = [-r + F(w\cdot r + I^{\text{ext}})]\,/\,\tau $$ Then, plot the $dr/dt$ as a function of $r$, and check for the presence of fixed points. ``` def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars): """Given parameters, compute dr/dt as a function of r. Args: r (1D array) : Average firing rate of the excitatory population I_ext, w, a, theta, tau (numbers): Simulation parameters to use other_pars : Other simulation parameters are unused by this function Returns drdt function for each value of r """ ######################################################################### # TODO compute drdt and disable the error raise NotImplementedError("Finish the compute_drdt function") ######################################################################### # Calculate drdt drdt = ... return drdt # Define a vector of r values and the simulation parameters r = np.linspace(0, 1, 1000) pars = default_pars_single(I_ext=0.5, w=5) # Uncomment to test your function # drdt = compute_drdt(r, **pars) # plot_dr_r(r, drdt) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_c5280901.py) *Example output:* <img alt='Solution hint' align='left' width=558 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_DynamicNetworks/static/W3D2_Tutorial1_Solution_c5280901_0.png> ## Exercise 3: Fixed point calculation We will now find the fixed points numerically. 
To do so, we need to specif initial values ($r_{\text{guess}}$) for the root-finding algorithm to start from. From the line $\displaystyle{\frac{dr}{dt}}$ plotted above in Exercise 2, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (real fixed point). The next cell defines three helper functions that we will use: - `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value - `check_fp_single(x_fp, **pars)`, verifies that the values of $r_{\rm fp}$ for which $\displaystyle{\frac{dr}{dt}} = 0$ are the true fixed points - `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the same number of fixed points, using the above two functions ``` # @markdown *Execute this cell to enable the fixed point functions* def my_fp_single(r_guess, a, theta, w, I_ext, **other_pars): """ Calculate the fixed point through drE/dt=0 Args: r_guess : Initial value used for scipy.optimize function a, theta, w, I_ext : simulation parameters Returns: x_fp : value of fixed point """ # define the right hand of E dynamics def my_WCr(x): r = x drdt = (-r + F(w * r + I_ext, a, theta)) y = np.array(drdt) return y x0 = np.array(r_guess) x_fp = opt.root(my_WCr, x0).x.item() return x_fp def check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars): """ Verify |dr/dt| < mytol Args: fp : value of fixed point a, theta, w, I_ext: simulation parameters mytol : tolerance, default as 10^{-4} Returns : Whether it is a correct fixed point: True/False """ # calculate Equation(3) y = x_fp - F(w * x_fp + I_ext, a, theta) # Here we set tolerance as 10^{-4} return np.abs(y) < mytol def my_fp_finder(pars, r_guess_vector, mytol=1e-4): """ Calculate the fixed point(s) through drE/dt=0 Args: pars : Parameter dictionary r_guess_vector : Initial values used for scipy.optimize function mytol : tolerance for checking fixed point, default as 10^{-4} Returns: x_fps : values of fixed 
points """ x_fps = [] correct_fps = [] for r_guess in r_guess_vector: x_fp = my_fp_single(r_guess, **pars) if check_fp_single(x_fp, **pars, mytol=mytol): x_fps.append(x_fp) return x_fps help(my_fp_finder) r = np.linspace(0, 1, 1000) pars = default_pars_single(I_ext=0.5, w=5) drdt = compute_drdt(r, **pars) ############################################################################# # TODO for students: # Define initial values close to the intersections of drdt and y=0 # (How many initial values? Hint: How many times do the two lines intersect?) # Calculate the fixed point with these initial values and plot them ############################################################################# r_guess_vector = [...] # Uncomment to test your values # x_fps = my_fp_finder(pars, r_guess_vector) # plot_dr_r(r, drdt, x_fps) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_0637b6bf.py) *Example output:* <img alt='Solution hint' align='left' width=558 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_DynamicNetworks/static/W3D2_Tutorial1_Solution_0637b6bf_0.png> ## Interactive Demo: fixed points as a function of recurrent and external inputs. You can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\text{ext}}$ take different values. How does the number of fixed points change? ``` # @title # @markdown Make sure you execute this cell to enable the widget! 
def plot_intersection_single(w, I_ext):
  """Plot dr/dt against r for the given coupling w and drive I_ext,
  marking every fixed point found from a small set of initial guesses."""
  params = default_pars_single(w=w, I_ext=I_ext)

  # Search for fixed points starting from three spread-out guesses
  guesses = [0, .4, .9]
  fixed_points = my_fp_finder(params, guesses)

  # Evaluate dr/dt on a fine grid of rates and hand everything to the plotter
  rates = np.linspace(0, 1., 1000)
  derivative = (-rates + F(w * rates + I_ext,
                           params['a'], params['theta'])) / params['tau']
  plot_dr_r(rates, derivative, fixed_points)
pars = default_pars_single() pars['w'] = 5.0 pars['I_ext'] = 0.5 plt.figure(figsize=(8, 5)) for ie in range(10): pars['r_init'] = 0.1 * ie # set the initial value r = simulate_single(pars) # run the simulation # plot the activity with given initial plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie, label=r'r$_{\mathrm{init}}$=%.1f' % (0.1 * ie)) plt.xlabel('t (ms)') plt.title('Two steady states?') plt.ylabel(r'$r$(t)') plt.legend(loc=[1.01, -0.06], fontsize=14) plt.show() ``` ## Interactive Demo: dynamics as a function of the initial value Let's now set $r_{\rm init}$ to a value of your choice in this demo. How does the solution change? What do you observe? ``` # @title # @markdown Make sure you execute this cell to enable the widget! pars = default_pars_single(w=5.0, I_ext=0.5) def plot_single_diffEinit(r_init): pars['r_init'] = r_init r = simulate_single(pars) plt.figure() plt.plot(pars['range_t'], r, 'b', zorder=1) plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2) plt.xlabel('t (ms)', fontsize=16) plt.ylabel(r'$r(t)$', fontsize=16) plt.ylim(0, 1.0) plt.show() _ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02)) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_4d2de6a0.py) ### Stability analysis via linearization of the dynamics Just like Equation $1$ in the case ($w=0$) discussed above, a generic linear system $$\frac{dx}{dt} = \lambda (x - b),$$ has a fixed point for $x=b$. The analytical solution of such a system can be found to be: $$x(t) = b + \big{(} x(0) - b \big{)} \text{e}^{\lambda t}.$$ Now consider a small perturbation of the activity around the fixed point: $x(0) = b+ \epsilon$, where $|\epsilon| \ll 1$. Will the perturbation $\epsilon(t)$ grow with time or will it decay to the fixed point? 
The evolution of the perturbation with time can be written, using the analytical solution for $x(t)$, as: $$\epsilon (t) = x(t) - b = \epsilon \text{e}^{\lambda t}$$ - if $\lambda < 0$, $\epsilon(t)$ decays to zero, $x(t)$ will still converge to $b$ and the fixed point is "**stable**". - if $\lambda > 0$, $\epsilon(t)$ grows with time, $x(t)$ will leave the fixed point $b$ exponentially, and the fixed point is, therefore, "**unstable**" . ### Compute the stability of Equation $1$ Similar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (1) around $r^{*}$ by $\epsilon$, i.e. $r = r^{*} + \epsilon$. We can plug in Equation (1) and obtain the equation determining the time evolution of the perturbation $\epsilon(t)$: \begin{align} \tau \frac{d\epsilon}{dt} \approx -\epsilon + w F'(w\cdot r^{*} + I_{\text{ext}};a,\theta) \epsilon \end{align} where $F'(\cdot)$ is the derivative of the transfer function $F(\cdot)$. We can rewrite the above equation as: \begin{align} \frac{d\epsilon}{dt} \approx \frac{\epsilon}{\tau }[-1 + w F'(w\cdot r^* + I_{\text{ext}};a,\theta)] \end{align} That is, as in the linear system above, the value of $$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau \qquad (4)$$ determines whether the perturbation will grow or decay to zero, i.e., $\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system. ## Exercise 4: Compute $dF$ The derivative of the sigmoid transfer function is: \begin{align} \frac{dF}{dx} & = \frac{d}{dx} (1+\exp\{-a(x-\theta)\})^{-1} \\ & = a\exp\{-a(x-\theta)\} (1+\exp\{-a(x-\theta)\})^{-2}. \qquad (5) \end{align} Let's now find the expression for the derivative $\displaystyle{\frac{dF}{dx}}$ in the following cell and plot it. ``` def dF(x, a, theta): """ Population activation function. 
Args: x : the population input a : the gain of the function theta : the threshold of the function Returns: dFdx : the population activation response F(x) for input x """ ########################################################################### # TODO for students: compute dFdx ## raise NotImplementedError("Student excercise: compute the deravitive of F") ########################################################################### # Calculate the population activation dFdx = ... return dFdx pars = default_pars_single() # get default parameters x = np.arange(0, 10, .1) # set the range of input # Uncomment below to test your function # df = dF(x, pars['a'], pars['theta']) # plot_dFdt(x, df) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_ce2e3bc5.py) *Example output:* <img alt='Solution hint' align='left' width=560 height=416 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D2_DynamicNetworks/static/W3D2_Tutorial1_Solution_ce2e3bc5_0.png> ## Exercise 5: Compute eigenvalues As discussed above, for the case with $w=5.0$ and $I_{\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\rm init}$, we could only obtain **two** steady states. In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., stability of each fixed point). How many of the fixed points are stable? 
Note that the expression of the eigenvalue at fixed point $r^*$ $$\lambda = [-1+ wF'(w\cdot r^* + I_{\text{ext}};a,\theta)]/\tau$$ ``` def eig_single(fp, tau, a, theta, w, I_ext, **other_pars): """ Args: fp : fixed point r_fp tau, a, theta, w, I_ext : Simulation parameters Returns: eig : eigevalue of the linearized system """ ##################################################################### ## TODO for students: compute eigenvalue and disable the error raise NotImplementedError("Student excercise: compute the eigenvalue") ###################################################################### # Compute the eigenvalue eig = ... return eig # Find the eigenvalues for all fixed points of Exercise 2 pars = default_pars_single(w=5, I_ext=.5) r_guess_vector = [0, .4, .9] x_fp = my_fp_finder(pars, r_guess_vector) # Uncomment below lines after completing the eig_single function. # for fp in x_fp: # eig_fp = eig_single(fp, **pars) # print(f'Fixed point1 at {fp:.3f} with Eigenvalue={eig_fp:.3f}') ``` **SAMPLE OUTPUT** ``` Fixed point1 at 0.042 with Eigenvalue=-0.583 Fixed point2 at 0.447 with Eigenvalue=0.498 Fixed point3 at 0.900 with Eigenvalue=-0.626 ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_e285f60d.py) ## Think! Throughout the tutorial, we have assumed $w> 0 $, i.e., we considered a single population of **excitatory** neurons. What do you think will be the behavior of a population of inhibitory neurons, i.e., where $w> 0$ is replaced by $w< 0$? [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial1_Solution_579bc9c9.py) --- # Bonus 2: Noisy input drives the transition between two stable states ## Ornstein-Uhlenbeck (OU) process As discussed in several previous tutorials, the OU process is usually used to generate a noisy input into the neuron. 
The OU input $\eta(t)$ follows: $$\tau_\eta \frac{d}{dt}\eta(t) = -\eta (t) + \sigma_\eta\sqrt{2\tau_\eta}\xi(t)$$ Execute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process. ``` # @title OU process `my_OU(pars, sig, myseed=False)` # @markdown Make sure you execute this cell to visualize the noise! def my_OU(pars, sig, myseed=False): """ A functions that generates Ornstein-Uhlenback process Args: pars : parameter dictionary sig : noise amplitute myseed : random seed. int or boolean Returns: I : Ornstein-Uhlenbeck input current """ # Retrieve simulation parameters dt, range_t = pars['dt'], pars['range_t'] Lt = range_t.size tau_ou = pars['tau_ou'] # [ms] # set random seed if myseed: np.random.seed(seed=myseed) else: np.random.seed() # Initialize noise = np.random.randn(Lt) I_ou = np.zeros(Lt) I_ou[0] = noise[0] * sig # generate OU for it in range(Lt - 1): I_ou[it + 1] = (I_ou[it] + dt / tau_ou * (0. - I_ou[it]) + np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1]) return I_ou pars = default_pars_single(T=100) pars['tau_ou'] = 1. # [ms] sig_ou = 0.1 I_ou = my_OU(pars, sig=sig_ou, myseed=2020) plt.figure(figsize=(10, 4)) plt.plot(pars['range_t'], I_ou, 'r') plt.xlabel('t (ms)') plt.ylabel(r'$I_{\mathrm{OU}}$') plt.show() ``` ## Example: Up-Down transition In the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms applying OU inputs. ``` # @title Simulation of an E population with OU inputs # @markdown Make sure you execute this cell to spot the Up-Down states! pars = default_pars_single(T=1000) pars['w'] = 5.0 sig_ou = 0.7 pars['tau_ou'] = 1. # [ms] pars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020) r = simulate_single(pars) plt.figure(figsize=(10, 4)) plt.plot(pars['range_t'], r, 'b', alpha=0.8) plt.xlabel('t (ms)') plt.ylabel(r'$r(t)$') plt.show() ```
github_jupyter
# Creating city models and objects In this tutorial we explore how to create new city models with using `cjio`'s API. ``` from pathlib import Path from cjio import cityjson from cjio.models import CityObject, Geometry ``` Set up paths for the tutorial. ``` package_dir = Path(__name__).resolve().parent.parent.parent schema_dir = package_dir / 'cjio' / 'schemas'/ '1.0.0' data_dir = package_dir / 'tests' / 'data' ``` ## Creating a single CityObject We are building a single CityObject of type *Building*. This building has an LoD2 geometry, thus it has Semantic Surfaces. The geometric shape of the building is a simple cube (size 10x10x10), which is sufficient for this demonstration. The idea is that we create empty containers for the CityModel, CityObjects and Geometries, then fill those up and add to the CityModel. We create an empty CityModel ``` cm = cityjson.CityJSON() print(cm) ``` An empty CityObject. Note that the ID is required. ``` co = CityObject( id='1' ) ``` We can also add attributes ``` co_attrs = { 'some_attribute': 42, 'other_attribute': 'bla bla' } co.attributes = co_attrs ``` Let's see what do we have ``` print(co) ``` Instantiate a Geometry without boundaries and semantics ``` geom = Geometry(type='Solid', lod=2) ``` We build the boundary Solid of the cube The surfaces are in this order: WallSurface, WallSurface, WallSurface, WallSurface, GroundSurface, RoofSurface ``` bdry = [ [[(0.0, 0.0, 0.0), (10.0, 0.0, 0.0), (10.0, 0.0, 10.0), (0.0, 0.0, 10.0)]], [[(10.0, 0.0, 0.0), (10.0, 10.0, 0.0), (10.0, 10.0, 10.0), (10.0, 0.0, 10.0)]], [[(10.0, 10.0, 0.0), (0.0, 10.0, 0.0), (0.0, 10.0, 10.0), (10.0, 10.0, 10.0)]], [[(0.0, 10.0, 0.0), (0.0, 0.0, 0.0), (0.0, 0.0, 10.0), (0.0, 10.0, 10.0)]], [[(0.0, 0.0, 0.0), (0.0, 10.0, 0.0), (10.0, 10.0, 0.0), (10.0, 0.0, 0.0)]], [[(10.0, 0.0, 10.0), (10.0, 10.0, 10.0), (0.0, 10.0, 10.0), (0.0, 0.0, 10.0)]] ] ``` Add the boundary to the Geometry ``` geom.boundaries.append(bdry) ``` We build the SemanticSurfaces for the 
boundary. The `surfaces` attribute must contain at least the `surface_idx` and `type` keys, optionally `attributes`. We have three semantic surface types, WallSurface, GroundSurface, RoofSurface. ``` srf = { 0: {'surface_idx': [], 'type': 'WallSurface'}, 1: {'surface_idx': [], 'type': 'GroundSurface'}, 2: {'surface_idx': [], 'type': 'RoofSurface'} } ``` We use the `surface_idx` to point to the surfaces of the boundary. Thus the index to a single boundary surface is composed as [Solid index, Shell index, Surface index]. Consequently, in case of a CompositeSolid which first Solid, outer Shell, second Surface is a WallSurface, one element in the `surface_idx` would be `[0, 0, 1]`. Then assuming that there is only a single WallSurface in the mentioned CompositeSolid, the index to the WallSurfaces is composed as `{'surface_idx': [ [0, 0, 1] ], 'type': 'WallSurface'}`. In case of a Solid boundary type the *Solid index* is omitted from the elements of `surface_idx`. In case of a MultiSurface boundary type both the *Solid index* and *Shell index* are omitted from the elements of `surface_idx`. We create the surface index accordingly and assign them to the geometry. ``` geom.surfaces[0] = {'surface_idx': [[0,0], [0,1], [0,2], [0,3]], 'type': 'WallSurface'} geom.surfaces[1] = {'surface_idx': [[0,4]], 'type': 'GroundSurface'} geom.surfaces[2] = {'surface_idx': [[0,5]], 'type': 'RoofSurface'} ``` Then we test if it works. 
``` ground = geom.get_surfaces('groundsurface') ground_boundaries = [] for g in ground.values(): ground_boundaries.append(geom.get_surface_boundaries(g)) ``` We have a list of generators ``` res = list(ground_boundaries[0]) ``` The generator creates a list of surfaces --> a MultiSurface ``` assert res[0] == bdry[4] # %% wall = geom.get_surfaces('wallsurface') wall_boundaries = [] for w in wall.values(): wall_boundaries.append(geom.get_surface_boundaries(w)) ``` We put everything together, first filling up the CityObject ``` co.geometry.append(geom) co.type = 'Building' ``` Then adding the CityObject to the CityModel. ``` cm.cityobjects[co.id] = co ``` Let's validate the citymodel before writing it to a file. However, first we need to index the geometry boundaries and create the vertex list, second we need to add the cityobject and vertices to the internal json-store of the citymodel so the `validate()` method can validate them. Note: CityJSON version 1.0.0 only accepts the Geometry `lod` as a numeric value and not a string. ``` cityobjects, vertex_lookup = cm.reference_geometry() cm.add_to_j(cityobjects,vertex_lookup) cm.update_bbox() #cm.validate(folder_schemas=schema_dir) cm ``` Finally, we write the citymodel to a CityJSON file. ``` outfile = data_dir / 'test_create.json' cityjson.save(cm, outfile) ```
github_jupyter
# Image classification of simulated AT-TPC events Welcome to this project in applied machine learning. In this project we will tackle a simple classification problem of two different classes. The classes are simulated reaction types for the Ar(p, p') experiment conducted at MSU, in this task we'll focus on the classification task and simply treat the experiment as a black box. ### This is a completed notebook with solution examples, for your implementation we suggest you implement your own solution in the `project.ipynb` notebook This project has three tasks with a recommendation for the time to spend on each task: - Preparation, Data exploration and standardization: 0.5hr - Model construction: 1hr - Hyperparameter tuning and performance validation: 1hr There is a notebook `project_solution.ipynb` included with suggestions to solutions for each task included, for reference or to easily move on to a part of the project more appealing to your interests. ## Preparation: This project uses python and the machine learning library `keras`. As well as some functionality from `numpy` and `scikit-learn`. We recommend a `Python` verson of `>3.4`. These libraries should be installed to your specific system by using the command `pip3 install --user LIBRARY_NAME` ## Task 1: Data exploration and standardization In machine learning, as in many other fields, the task of preparing data for analysis is as vital as it can be troublesome and tedious. In data-analysis the researcher can expect to spend the majority of their time merely processing data to prepare for analysis. In this projcet we will focus more on the entire pipeline of analysis, and so the data at hand has already been shaped to an image format suitable for our analysis. ## Task 1a: Loading the data The data is stored in the `.npy` format using vecotrized code to speed up the read process. 
The files pointed to in this task are downsampled images with dimensions $64 x 64$ (if the images are to big for your laptop to handle, the script included in `../scripts/downsample_images.py` can further reduce the dimension). ``` import numpy as np # we'll be using this shorthand for the NumPy library throughout dataset = np.load("../data/images/project_data.npy") n_samples = dataset.shape[0] print("Data shape: ", dataset.shape) ``` ## Task 1b: Inspecting the data The data is stored as xy projections of the real events, who take place in a 3d volume. This allows a simple exploratiuon of the data as images. In this task you should plot a few different events in a grid using `matplotlib.pyplot` ``` import matplotlib.pyplot as plt rows = 2 cols = 2 n_plots = rows*cols fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=(10, 10 )) for row in axs: for ax in row: """ one of pythons most wonderful attributes is that if an object is iterable it can be directly iterated over, like above. ax is an axis object from the 2d array of axis objects """ which = np.random.randint(0, n_samples) ax.imshow(dataset[which].reshape(64, 64)) ax.axis("off") ``` ## Task 1c: Standardizing the data An important part of the preprocessing of data is the standardization of the input. The intuition here is simply that the model should expect similar values in the input to mean the same. You should implement a standardization of the input. Perhaps the most common standardization is the centering of the mean of the distribution, and scaling by the standard deviation: $X_s = \frac{X - \mu}{\sigma}$ Note that for our data we only want to standardize the signal part of our image, we know the rest is zero and we don't want the standardization to be unduly effected. This also means we don't necessarily want a zero mean for our signal distribution. 
So for this example we stick with the scaling: $X_s = \frac{X}{\sigma}$ Another important fact is that at already at this point is it recommended to separate the data in train and test sets. The partion of the data to test on should be roughly 10-20%. And to remember to compute the standardization variables only from the training set. ### MORE OPEN ENDED !!! ``` from sklearn.model_selection import train_test_split targets = np.load("../data/targets/project_targets.npy") train_X, test_X, train_y, test_y = train_test_split(dataset, targets, test_size=0.15) nonzero_indices = np.nonzero(train_X) nonzero_elements = train_X[nonzero_indices] print("Train Mean: ", nonzero_elements.mean()) print("Train Std.: ", nonzero_elements.std()) print("-------------") print("Test Mean: ", test_X[np.nonzero(test_X)].mean()) print("Test Std.: ", test_X[np.nonzero(test_X)].std()) print("############") nonzero_scaled = nonzero_elements/nonzero_elements.std() train_X[nonzero_indices] = nonzero_scaled test_X[np.nonzero(test_X)] /= nonzero_elements.std() print("Train Mean: ", nonzero_scaled.mean()) print("Train Std.: ", nonzero_scaled.std()) print("-------------") print("Test Mean: ", test_X[np.nonzero(test_X)].mean()) print("Test Std.: ", test_X[np.nonzero(test_X)].std()) ``` #### We also want to plot up the data again to confirm that our scaling is sensible, you should reuse your code from above for this. ``` import matplotlib.pyplot as plt rows = 2 cols = 2 n_plots = rows*cols fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=(10, 10 )) for row in axs: for ax in row: """ one of pythons most wonderful attributes is that if an object is iterable it can be directly iterated over, like above. 
ax is an axis object from the 2d array of axis objects """ which = np.random.randint(0, train_X.shape[0]) ax.imshow(train_X[which].reshape(64, 64)) ax.text(5, 5, "{}".format(int(train_y[which])), bbox={'facecolor': 'white', 'pad': 10}) ax.axis("off") ``` ## 1d: Encoding the targets: For classification one ordinarily encodes the target as a n-element zero vector with one element valued at 1 indicating the target class. This is simply called one-hot encoding. You should inspect the values of the target vectors and use the imported `OneHotEncoder` to convert the targets. Note that this is not necessary for the two class case, but we do it to demonstrate a general approach. ``` from sklearn.preprocessing import OneHotEncoder onehot_train_y = OneHotEncoder(sparse=False, categories="auto").fit_transform(train_y.reshape(-1, 1)) onehot_test_y = OneHotEncoder(sparse=False, categories="auto").fit_transform(test_y.reshape(-1, 1)) print("Onehot train targets:", onehot_train_y.shape) print("Onehot test targets:",onehot_test_y.shape) ``` ## Prelude to task 2: In this task we'll be constructing a CNN model for the two class data. Before that it is useful to characterize the performance of a less complex model, for example a logistic regression model. For this task then you should construct a logistic regression model to classify the two class problem. 
``` from keras.models import Sequential, Model from keras.layers import Dense from keras.regularizers import l2 from keras.optimizers import SGD, adam flat_train_X = np.reshape(train_X, (train_X.shape[0], train_X.shape[1]*train_X.shape[2]*train_X.shape[3])) flat_test_X = np.reshape(test_X, (test_X.shape[0], train_X.shape[1]*train_X.shape[2]*train_X.shape[3])) logreg = Sequential() logreg.add(Dense(2, kernel_regularizer=l2(0.01), activation="softmax")) eta = 0.001 optimizer = SGD(eta) logreg.compile(optimizer, loss="binary_crossentropy", metrics=["accuracy",]) history = logreg.fit( x=flat_train_X, y=onehot_train_y, batch_size=100, epochs=200, validation_split=0.15, verbose=2 ) ``` The logistic regression model doesn't work, clearly. What about the data prohobits it from doing so? ## 2a: Creating a model In this task we will create a CNN with fully connected bottom-layers for classification. You should base your code on Morten's code for a model. We suggest you complete one of the following for this task: 1. Implement a class or function `cnn` that returns a compiled Keras model with an arbitrary number of convolutional and fully connected layers with optional configuration of regularization terms or layers. 2. Implement a simple hard-coded function `cnn` that returns a Keras model object. The architecture should be specified in the function. Both implementations should include multiple convolutional layers and ending with a couple fully connected layers. The output of the network should be a softmax or log-softmax layer of logits. You should experiment with where in the network you place the non-linearities and whether to use striding or pooling to reduce the input. As well as the use of padding, would you need one for the first layer? 
``` model_config = { "n_conv":2, "receptive_fields":[3, 3], "strides":[1, 1,], "n_filters":[2, 2], "conv_activation":[1, 1], "max_pool":[1, 1], "n_dense":1, "neurons":[10,], "dense_activation":[1,] } from keras.models import Sequential, Model from keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, ReLU, Input, Softmax from keras.regularizers import l2 def create_convolutional_neural_network_keras(input_shape, config, n_classes=2): """ Modified from MH Jensen's course on machine learning in physics: https://github.com/CompPhysics/MachineLearningMSU/blob/master/doc/pub/CNN/ipynb/CNN.ipynb """ model=Sequential() for i in range(config["n_conv"]): receptive_field = config["receptive_fields"][i] strides = config["strides"][i] n_filters = config["n_filters"][i] pad = "same" if i == 0 else "same" input_shape = input_shape if i==0 else None if i == 0: conv = Conv2D( n_filters, (receptive_field, receptive_field), input_shape=input_shape, padding=pad, strides=strides, kernel_regularizer=l2(0.01) ) else: conv = Conv2D( n_filters, (receptive_field, receptive_field), padding=pad, strides=strides, kernel_regularizer=l2(0.01) ) model.add(conv) pool = config["max_pool"][i] activation = config["conv_activation"][i] if activation: model.add(ReLU()) if pool: model.add(MaxPooling2D(2)) model.add(Flatten()) for i in range(config["n_dense"]): n_neurons = config["neurons"][i] model.add( Dense( n_neurons, kernel_regularizer=l2(0.01) )) activation = config["dense_activation"][i] if activation: model.add(ReLU()) model.add( Dense( n_classes, activation='softmax', kernel_regularizer=l2(0.01)) ) return model model_o = create_convolutional_neural_network_keras(train_X.shape[1:], model_config, n_classes=2) #model_o = mhj(train_X.shape[1:], 3, 2, 10, 2, 0.01) print(model_o.summary()) ``` ## 2b: Plot your model `Keras` provides a convenient class for plotting your model architecture. 
You should both do this and inspect the model summary to see how many trainable parameters you have as well as to confirm that your model is reasonably put together with no dangling edges in the graph etc. ``` from keras.utils import plot_model plot_model(model_o, to_file="convnet.png") ``` ![a plot of the model graph](./convnet.png) ## 2b: Compiling your model With the constructed model ready it can now be compiled. Compiling entails unrolling the computational graph underlying the model and attaching losses at the layers you specify. For more complex models one can attach loss functions at arbitrary layers or one could define a specific loss for your particular problem. For our case we will simply use a categorical cross-entropy, which means our network parametrizes an output of logits which we softmax to produce probabilities. In this task you should simply compile the above model with an optimizer of your choice and a categorical cross-entropy loss. ``` eta = 0.01 sgd = SGD(lr=eta, ) adam = adam(lr=eta, beta_1=0.5, ) model_o.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy']) ``` ## 2c: Running your model Here you should simply use the `.fit` method of the model to train on the training set. Select a suitable subset of train to use as validation, this should not be the test-set. Take care to note how many trainable parameters your model has. A model with $10^5$ parameters takes about a minute per epoch to run on a 7th gen i9 intel processor. If your laptop has a nvidia GPU training should be considerably faster. Hint: this model is quite easy to over-fit, you should build your network with a relatively low complexity ($10^3$ parameters). The `.fit` method returns a `history` object that you can use to plot the progress of your training. 
``` %matplotlib notebook import matplotlib.pyplot as plt history = model_o.fit( x=train_X, y=onehot_train_y, batch_size=50, epochs=40, validation_split=0.15, verbose=2 ) # copied from https://keras.io/visualization/ # Plot training & validation accuracy values fig, axs= plt.subplots(figsize=(10, 8), nrows=2) fig.suptitle('Model performance') axs[0].plot(history.history['acc'], "x-",alpha=0.8) axs[0].plot(history.history['val_acc'], "x-", alpha=0.8) axs[0].set_ylabel('Accuracy') axs[0].set_xlabel('Epoch') axs[0].legend(['Train', 'Test'], loc='upper left') # Plot training & validation loss values axs[1].plot(history.history['loss'], "o-",alpha=0.8) axs[1].plot(history.history['val_loss'], "o-", alpha=0.8) axs[1].set_ylabel('Loss') axs[1].legend(['Train', 'Test'], loc='upper left') ``` ## 3a: Performance validation As mentioned in the lectures machine learning models suffer from the problem of overfitting as scaling to an almost arbitrary complexity is simple with todays hardware. The challenge then is often that of finding the correct architecture and type of model suitable for your problem. In this task you will be familiarized with some tools to monitor and estimate the degree of overfitting. In task 1c you separated your data in training and test sets. The test set is used to estimate generalization performance. Typically we use this to fine-tune the hyperparameters. Another trick is to use a validation set during training, which we implemented in the last task for the training. In this task we will start with attaching callbacks to the fitting process. They are listed in the documentation for `Keras`here: https://keras.io/callbacks/ Pick ones you think are suitable for our problem and re-run the training from above. 
``` from keras.callbacks import EarlyStopping, ModelCheckpoint callbacks = [EarlyStopping(min_delta=0.0001, patience=4), ModelCheckpoint("../checkpoints/ckpt") history = model_o.fit( x=train_X, y=onehot_train_y, batch_size=50, epochs=150, validation_split=0.15, verbose=2, callbacks=callbacks ) ``` ## 3b: Hyperparameter tuning Hyperparameters, like the number of layers, learning rate or others can have a very big impact on the model quality. The model performance should also be statistically quantified using cross validation, bootstrapped confidence intervals for your performance metrics or other tools depending on model. In this task you should then implement a function or for loop doing either random search or a grid search over parameters and finally you should plot those results in a suitable way.
github_jupyter
### Import Package ``` import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import numpy as np import traceback import contextlib import pathlib ``` ### Load Dataset ``` mnist = tf.keras.datasets.fashion_mnist (X_train, y_train), (X_test, y_test) = mnist.load_data() print("Train Image shape:", X_train.shape, "Test Image shape:", X_test.shape) # Normalize the images X_train = X_train / 255.0 X_test = X_test / 255.0 ``` ### Conv2D Model - Base Model ``` model = keras.Sequential([ keras.layers.InputLayer(input_shape=(28, 28)), keras.layers.Reshape(target_shape=(28, 28, 1)), keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'), keras.layers.MaxPooling2D(pool_size=(2, 2)), keras.layers.Flatten(), keras.layers.Dense(10) ]) # Model summary model.summary() ``` ### Train Conv2D ``` model.compile(optimizer='adam', loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_test, y_test)) # Saving Model model.save('1_fashion_mnist_model.h5') # Evaluate the model on test set score = model.evaluate(X_test, y_test, verbose=0) # Print test accuracy print('\n', 'Test accuracy:', score[1]) ``` ### Train model with pruning ``` ! pip install -q tensorflow-model-optimization import tensorflow_model_optimization as tfmot prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude # Compute end step to finish pruning after 2 epochs. batch_size = 128 epochs = 40 validation_split = 0.1 # 10% of training set will be used for validation set. num_images = X_train.shape[0] * (1 - validation_split) end_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs # Define model for pruning. 
pruning_params = { 'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50, final_sparsity=0.80, begin_step=0, end_step=end_step) } model_for_pruning = prune_low_magnitude(model, **pruning_params) # `prune_low_magnitude` requires a recompile. model_for_pruning.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model_for_pruning.summary() callbacks = [ tfmot.sparsity.keras.UpdatePruningStep(), tfmot.sparsity.keras.PruningSummaries(log_dir='log'), ] model_for_pruning.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=validation_split, callbacks=callbacks) _, model_for_pruning_accuracy = model_for_pruning.evaluate( X_train, y_train, verbose=0) print('Pruned test accuracy:', model_for_pruning_accuracy) model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning) tf.keras.models.save_model(model_for_export, '2_fashion_mnist_model_pruning.h5', include_optimizer=False) ``` ### Q-aware Training ``` import tensorflow_model_optimization as tfmot quantize_model = tfmot.quantization.keras.quantize_model # q_aware stands for for quantization aware. q_aware_model = quantize_model(model) # `quantize_model` requires a recompile. 
q_aware_model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) q_aware_model.summary() # Train and evaluate the model against baseline train_images_subset = X_train[0:1000] # out of 60000 train_labels_subset = y_train[0:1000] q_aware_model.fit(train_images_subset, train_labels_subset, batch_size=10, epochs=50, validation_split=0.1) # Evaluate the model on test set import time start_time_infer = time.time() score = q_aware_model.evaluate(X_test, y_test, verbose=0) # Print test accuracy result = {'Time to full set infer': (time.time() - start_time_infer), 'Score' : score[1]} print(result) start_time_infer = time.time() #model = tf.keras.models.load_model('fashion_mnist_model_qaware.h5', compile = True) data = X_test[0] data = data.reshape((1, 28, 28)) data_y = y_test[0:1] score = q_aware_model.evaluate(data, data_y, verbose=0) result = {'Time to single unit infer': (time.time() - start_time_infer), 'Score' : score[1]} print(result) q_aware_model.save('3_fashion_mnist_model_qaware.h5') ``` ### Convert Model to TFLite ``` def ConvertTFLite(model_path, filename): try: # Loading Model model = tf.keras.models.load_model(model_path) # Converter converter = tf.lite.TFLiteConverter.from_keras_model(model) tflite_model = converter.convert() #Specify path tflite_models_dir = pathlib.Path("tflite_models/") tflite_models_dir.mkdir(exist_ok=True, parents=True) filename = filename+".tflite" tflite_model_file = tflite_models_dir/filename # Save Model tflite_model_file.write_bytes(tflite_model) return f'Converted to TFLite, path {tflite_model_file}' except Exception as e: return str(e) ConvertTFLite('./1_fashion_mnist_model.h5','4_fashion_mnist_model') ConvertTFLite('./2_fashion_mnist_model_pruning.h5','5_fashion_mnist_pruning_model') converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model) quantized_tflite_model = converter.convert() quantized_aware_tflite_file = '6_fashion_mnist_model_qaware.tflite' 
with open(quantized_aware_tflite_file, 'wb') as f: f.write(quantized_tflite_model) print('Saved quvantaised aware TFLite model to:', quantized_aware_tflite_file) ``` ### Integer with Float fallback quantaization ``` def Quant_int_with_float(model_name, filename): try: model = tf.keras.models.load_model(model_name) converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] tflite_model_quant = converter.convert() filename = filename+'.tflite' tflite_models_dir = pathlib.Path("tflite_models/") tflite_models_dir.mkdir(exist_ok=True, parents=True) tflite_model_quant_file = tflite_models_dir/filename tflite_model_quant_file.write_bytes(tflite_model_quant) return f'Converted - path {tflite_model_quant_file}' except Exception as e: return str(e) Quant_int_with_float('./1_fashion_mnist_model.h5', '7_fashion_mnist_Integer_float_model') Quant_int_with_float('./2_fashion_mnist_model_pruning.h5','8_fashion_mnist_pruning_Integer_float_model') converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model) converter.optimizations = [tf.lite.Optimize.DEFAULT] mnist_train, _ = tf.keras.datasets.fashion_mnist.load_data() images = tf.cast(mnist_train[0], tf.float32) / 255.0 mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1) def representative_data_gen(): for input_value in mnist_ds.take(100): yield [input_value] converter.representative_dataset = representative_data_gen quantized_tflite_model = converter.convert() tflite_models_dir = pathlib.Path("tflite_models/") tflite_models_dir.mkdir(exist_ok=True, parents=True) tflite_model_quant_file = tflite_models_dir/"9_fashion_mnist_Qaware_Integer_float_model.tflite" tflite_model_quant_file.write_bytes(quantized_tflite_model) ``` ### Float 16 Quantization ``` def Quant_float(model_name, filename): try: model = tf.keras.models.load_model(model_name) converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] 
converter.target_spec.supported_types = [tf.float16] tflite_fp16_model = converter.convert() filename = filename+'.tflite' tflite_models_fp16_dir = pathlib.Path("tflite_models/") tflite_models_fp16_dir.mkdir(exist_ok=True, parents=True) tflite_model_fp16_file = tflite_models_fp16_dir/filename tflite_model_fp16_file.write_bytes(tflite_fp16_model) return f'Converted - path {tflite_model_fp16_file}' except Exception as e: return str(e) Quant_float('./1_fashion_mnist_model.h5', '10_fashion_mnist_float16_model') Quant_float('./2_fashion_mnist_model_pruning.h5', '11_fashion_mnist_float_pruning_model') converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.target_spec.supported_types = [tf.float16] tflite_fp16_model = converter.convert() tflite_model_fp16_file = tflite_models_dir/"12_fashion_mnist_Qaware_float16_model.tflite" tflite_model_fp16_file.write_bytes(tflite_fp16_model) ``` ### Integer Only ``` def Quant_integer(model_name, filename): try: model = tf.keras.models.load_model(model_name) converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] mnist_train, _ = tf.keras.datasets.fashion_mnist.load_data() images = tf.cast(mnist_train[0], tf.float32) / 255.0 mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1) def representative_data_gen(): for input_value in mnist_ds.take(100): yield [input_value] converter.representative_dataset = representative_data_gen converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.inference_input_type = tf.int8 # or tf.uint8 converter.inference_output_type = tf.int8 # or tf.uint8 tflite_int_quant_model = converter.convert() filename = filename+'.tflite' tflite_models_dir = pathlib.Path("tflite_models/") tflite_models_dir.mkdir(exist_ok=True, parents=True) tflite_model_integeronly_file = tflite_models_dir/filename 
tflite_model_integeronly_file.write_bytes(tflite_int_quant_model) return f'Converted - path {tflite_model_integeronly_file}' except Exception as e: return str(e) Quant_integer('./1_fashion_mnist_model.h5', '13_fashion_mnist_integeronly_model') Quant_integer('./2_fashion_mnist_model_pruning.h5', '14_fashion_mnist_Integeronly_pruning_model') # Quant_integer('3_fashion_mnist_model_qaware.h5','15_fashion_mnist_qaware_integer_model') ``` ### Evalvate Model ``` import time ``` ### Keras model Evaluation ``` def evaluate_keras_model_single_unit(model_path): start_time_infer = time.time() model = tf.keras.models.load_model(model_path, compile = True) model.compile(optimizer='adam', loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) data = X_test[0] data = data.reshape((1, 28, 28)) data_y = y_test[0:1] score = model.evaluate(data, data_y, verbose=0) result = {'Time to single unit infer': (time.time() - start_time_infer), 'Score' : score[1]} return result evaluate_keras_model_single_unit('./1_fashion_mnist_model.h5') evaluate_keras_model_single_unit('./2_fashion_mnist_model_pruning.h5') def evaluate_keras_model_test_set(model_path): start_time_infer = time.time() model = tf.keras.models.load_model(model_path, compile = True) model.compile(optimizer='adam', loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) score = score = model.evaluate(X_test, y_test, verbose =0) result = {'Time to single unit infer': (time.time() - start_time_infer), 'Score' : score[1]} return result evaluate_keras_model_test_set('./1_fashion_mnist_model.h5') evaluate_keras_model_test_set('./2_fashion_mnist_model_pruning.h5') ``` ### TF Lite Model Evaluvation ``` # Evaluate the mode def evaluate_tflite_model_test_set(interpreter): start_time = time.time() input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] # Run predictions on every image in the "test" dataset. 
prediction_digits = [] for test_image in X_test: # Pre-processing: add batch dimension and convert to float32 to match with # the model's input data format. test_image = np.expand_dims(test_image, axis=0).astype(np.float32) interpreter.set_tensor(input_index, test_image) # Run inference. interpreter.invoke() # Post-processing: remove batch dimension and find the digit with highest # probability. output = interpreter.tensor(output_index) digit = np.argmax(output()[0]) prediction_digits.append(digit) # Compare prediction results with ground truth labels to calculate accuracy. accurate_count = 0 for index in range(len(prediction_digits)): if prediction_digits[index] == y_test[index]: accurate_count += 1 accuracy = accurate_count * 1.0 / len(prediction_digits) results = {'time': (time.time() - start_time), 'accuracy': accuracy} return results ``` ### TF Lite Models ``` # TF Lite tflite_model_file = 'tflite_models/4_fashion_mnist_model.tflite' interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file)) interpreter.allocate_tensors() evaluate_tflite_model_test_set(interpreter) # Purning TF Lite tflite_pruning_model_file = 'tflite_models/5_fashion_mnist_pruning_model.tflite' interpreter_pruning = tf.lite.Interpreter(model_path=str(tflite_pruning_model_file)) interpreter_pruning.allocate_tensors() evaluate_tflite_model_test_set(interpreter_pruning) # Qaware Model tflite_model_file = '6_fashion_mnist_model_qaware.tflite' interpreter_qaware = tf.lite.Interpreter(model_path=str(tflite_model_file)) interpreter_qaware.allocate_tensors() evaluate_tflite_model_test_set(interpreter_qaware) ``` ### Integer Float TF Lite models ``` # TF Lite tflite_model_file = 'tflite_models/7_fashion_mnist_Integer_float_model.tflite' interpreter_int_float = tf.lite.Interpreter(model_path=str(tflite_model_file)) interpreter_int_float.allocate_tensors() evaluate_tflite_model_test_set(interpreter_int_float) # Purning TF Lite tflite_pruning_model_file = 
'tflite_models/8_fashion_mnist_pruning_Integer_float_model.tflite' interpreter_int_float_pruning = tf.lite.Interpreter(model_path=str(tflite_pruning_model_file)) interpreter_int_float_pruning.allocate_tensors() evaluate_tflite_model_test_set(interpreter_int_float_pruning) # Q-aware TF Lite tflite_qaware_model_file = 'tflite_models/9_fashion_mnist_Qaware_Integer_float_model.tflite' interpreter_tflite_qaware = tf.lite.Interpreter(model_path=str(tflite_qaware_model_file)) interpreter_tflite_qaware.allocate_tensors() evaluate_tflite_model_test_set(interpreter_tflite_qaware) ``` ### Float Tflite ``` # TF Lite tflite_model_file = 'tflite_models/10_fashion_mnist_float16_model.tflite' interpreter_float = tf.lite.Interpreter(model_path=str(tflite_model_file)) interpreter_float.allocate_tensors() evaluate_tflite_model_test_set(interpreter_float) # Purning TF Lite tflite_pruning_model_file = 'tflite_models/11_fashion_mnist_float_pruning_model.tflite' interpreter_float_pruning = tf.lite.Interpreter(model_path=str(tflite_pruning_model_file)) interpreter_float_pruning.allocate_tensors() evaluate_tflite_model_test_set(interpreter_float_pruning) tflite_qaware_model_file = 'tflite_models/12_fashion_mnist_Qaware_float16_model.tflite' interpreter_tflite_qaware = tf.lite.Interpreter(model_path=str(tflite_qaware_model_file)) interpreter_tflite_qaware.allocate_tensors() evaluate_tflite_model_test_set(interpreter_tflite_qaware) ``` ### Integer Only TFlite ``` # TF Lite tflite_model_file = 'tflite_models/13_fashion_mnist_integeronly_model.tflite' interpreter_int = tf.lite.Interpreter(model_path=str(tflite_model_file)) interpreter_int.allocate_tensors() evaluate_tflite_model_test_set(interpreter_int) # Purning TF Lite tflite_pruning_model_file = 'tflite_models/14_fashion_mnist_Integeronly_pruning_model.tflite' interpreter_int_pruning = tf.lite.Interpreter(model_path=str(tflite_pruning_model_file)) interpreter_int_pruning.allocate_tensors() 
evaluate_tflite_model_test_set(interpreter_int_pruning) # tflite_qaware_model_file = 'tflite_models/15_fashion_mnist_Qaware_integer_model.tflite' # interpreter_tflite_qaware = tf.lite.Interpreter(model_path=str(tflite_qaware_model_file)) # interpreter_tflite_qaware.allocate_tensors() # evaluate_tflite_model_test_set(interpreter_tflite_qaware) ``` ### Find unit inference time ``` # Evaluate the mode def evaluate_tflite_model_single_unit(interpreter): start_time = time.time() input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] test_image = np.expand_dims(X_test[0], axis=0).astype(np.float32) interpreter.set_tensor(input_index, test_image) # Run inference. interpreter.invoke() # Post-processing: remove batch dimension and find the digit with highest # probability. output = interpreter.tensor(output_index) results = {'time': (time.time() - start_time)} return results # TF Lite evaluate_tflite_model_single_unit(interpreter) evaluate_tflite_model_single_unit(interpreter_pruning) evaluate_tflite_model_single_unit(interpreter_int_float) evaluate_tflite_model_single_unit(interpreter_qaware) evaluate_tflite_model_single_unit(interpreter_int_float_pruning) evaluate_tflite_model_single_unit(interpreter_float) evaluate_tflite_model_single_unit(interpreter_float_pruning) evaluate_tflite_model_single_unit(interpreter_int) evaluate_tflite_model_single_unit(interpreter_float_pruning) ```
github_jupyter
<a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/bnn_hmc_gaussian.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # (SG)HMC for inferring params of a 2d Gaussian Based on https://github.com/google-research/google-research/blob/master/bnn_hmc/notebooks/mcmc_gaussian_test.ipynb ``` import jax print(jax.devices()) !git clone https://github.com/google-research/google-research.git %cd /content/google-research !ls bnn_hmc !pip install optax ``` # Setup ``` from jax.config import config import jax from jax import numpy as jnp import numpy as onp import numpy as np import os import sys import time import tqdm import optax import functools from matplotlib import pyplot as plt from bnn_hmc.utils import losses from bnn_hmc.utils import train_utils from bnn_hmc.utils import tree_utils %matplotlib inline %load_ext autoreload %autoreload 2 ``` # Data and model ``` mu = jnp.zeros([2,]) # sigma = jnp.array([[1., .5], [.5, 1.]]) sigma = jnp.array([[1.e-4, 0], [0., 1.]]) sigma_l = jnp.linalg.cholesky(sigma) sigma_inv = jnp.linalg.inv(sigma) sigma_det = jnp.linalg.det(sigma) onp.random.seed(0) samples = onp.random.multivariate_normal(onp.asarray(mu), onp.asarray(sigma), size=1000) plt.scatter(samples[:, 0], samples[:, 1], alpha=0.3) plt.grid() def log_density_fn(params): assert params.shape == mu.shape, "Shape error" diff = params - mu k = mu.size log_density = -jnp.log(2 * jnp.pi) * k / 2 log_density -= jnp.log(sigma_det) / 2 log_density -= diff.T @ sigma_inv @ diff / 2 return log_density def log_likelihood_fn(_, params, *args, **kwargs): return log_density_fn(params), jnp.array(jnp.nan) def log_prior_fn(_): return 0. def log_prior_diff_fn(*args): return 0. 
fake_net_apply = None fake_data = jnp.array([[jnp.nan,],]), jnp.array([[jnp.nan,],]) fake_net_state = jnp.array([jnp.nan,]) ``` # HMC ``` step_size = 1e-1 trajectory_len = jnp.pi / 2 max_num_leapfrog_steps = int(trajectory_len // step_size + 1) print("Leapfrog steps per iteration:", max_num_leapfrog_steps) update, get_log_prob_and_grad = train_utils.make_hmc_update( fake_net_apply, log_likelihood_fn, log_prior_fn, log_prior_diff_fn, max_num_leapfrog_steps, 1., 0.) # Initial log-prob and grad values # params = jnp.ones_like(mu)[None, :] params = jnp.ones_like(mu) log_prob, state_grad, log_likelihood, net_state = ( get_log_prob_and_grad(fake_data, params, fake_net_state)) %%time num_iterations = 500 all_samples = [] key = jax.random.PRNGKey(0) for iteration in tqdm.tqdm(range(num_iterations)): (params, net_state, log_likelihood, state_grad, step_size, key, accept_prob, accepted) = ( update(fake_data, params, net_state, log_likelihood, state_grad, key, step_size, trajectory_len, True)) if accepted: all_samples.append(onp.asarray(params).copy()) # print("It: {} \t Accept P: {} \t Accepted {} \t Log-likelihood: {}".format( # iteration, accept_prob, accepted, log_likelihood)) len(all_samples) log_prob, state_grad, log_likelihood, net_state all_samples_cat = onp.stack(all_samples) plt.scatter(all_samples_cat[:, 0], all_samples_cat[:, 1], alpha=0.3) plt.grid() ``` # Blackjax ``` !pip install blackjax import jax import jax.numpy as jnp import jax.scipy.stats as stats import matplotlib.pyplot as plt import numpy as np import blackjax.hmc as hmc import blackjax.nuts as nuts import blackjax.stan_warmup as stan_warmup print(jax.devices()) potential = lambda x: -log_density_fn(**x) num_integration_steps = 30 kernel_generator = lambda step_size, inverse_mass_matrix: hmc.kernel( potential, step_size, inverse_mass_matrix, num_integration_steps ) rng_key = jax.random.PRNGKey(0) initial_position = {"params": np.zeros(2)} initial_state = hmc.new_state(initial_position, potential) 
print(initial_state) %%time nsteps = 500 final_state, (step_size, inverse_mass_matrix), info = stan_warmup.run( rng_key, kernel_generator, initial_state, nsteps, ) %%time kernel = nuts.kernel(potential, step_size, inverse_mass_matrix) kernel = jax.jit(kernel) def inference_loop(rng_key, kernel, initial_state, num_samples): def one_step(state, rng_key): state, _ = kernel(rng_key, state) return state, state keys = jax.random.split(rng_key, num_samples) _, states = jax.lax.scan(one_step, initial_state, keys) return states %%time nsamples = 500 states = inference_loop(rng_key, kernel, initial_state, nsamples) samples = states.position["params"].block_until_ready() print(samples.shape) plt.scatter(samples[:, 0], samples[:, 1], alpha=0.3) plt.grid() ```
github_jupyter
# CIFAR10 전이학습 기반 분류기 이 노트북은 사전 훈련된 심층-CNN 중에서 VGG16으로 전이학습의 개념을 확용한 분류기를 구축하는 단계를 개략적으로 설명한다. ``` %matplotlib inline # Pandas and Numpy for data structures and util fucntions import scipy as sp import numpy as np import pandas as pd from numpy.random import rand pd.options.display.max_colwidth = 600 # Scikit 임포트 from sklearn import preprocessing from sklearn.metrics import roc_curve, auc, precision_recall_curve from sklearn.model_selection import train_test_split import cnn_utils as utils # Matplot 임포트 import matplotlib.pyplot as plt params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 5), 'axes.labelsize': 'x-large', 'axes.titlesize':'x-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} plt.rcParams.update(params) # 판다스는 데이터 프레임을 테이블로 보여준다. from IPython.display import display, HTML import warnings warnings.filterwarnings('ignore') import tensorflow as tf from tensorflow.keras import callbacks from tensorflow.keras import optimizers from tensorflow.keras.datasets import cifar10 from tensorflow.keras import Model from tensorflow.keras.applications import vgg16 as vgg from tensorflow.keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D,BatchNormalization from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.python.keras.utils import np_utils ``` ## 데이터 세트 로딩과 준비 ``` BATCH_SIZE = 32 EPOCHS = 40 NUM_CLASSES = 10 LEARNING_RATE = 1e-4 MOMENTUM = 0.9 (X_train, y_train), (X_test, y_test) = cifar10.load_data() ``` Split training dataset in train and validation sets ``` X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15, stratify=np.array(y_train), random_state=42) ``` Transform target variable/labels into one hot encoded form ``` Y_train = np_utils.to_categorical(y_train, NUM_CLASSES) Y_val = np_utils.to_categorical(y_val, NUM_CLASSES) Y_test = np_utils.to_categorical(y_test, NUM_CLASSES) ``` ### 전처리 VGG16을 특성 추출기로 사용할 것이기 때문에, 이미지의 최소 크기는 48x48이어야 한다. 
```scipy```로 이미지 크기를 필요한 차원으로 재조정 한다. ``` X_train = np.array([sp.misc.imresize(x, (48, 48)) for x in X_train]) X_val = np.array([sp.misc.imresize(x, (48, 48)) for x in X_val]) X_test = np.array([sp.misc.imresize(x, (48, 48)) for x in X_test]) ``` ## 모델 준비 * 최상위층 없이 VGG16 로딩 * 커스텀 분류기 준비 * 모델의 맨 위에 새로운 층 쌓기 ``` base_model = vgg.VGG16(weights='imagenet', include_top=False, input_shape=(48, 48, 3)) ``` 목표는 분류층만 훈련시키는 것이기 때문에 훈련할 수 있는 파라미터 세팅을 False로 해서 나머지 층을 동결했다. 이렇게 하면 덜 강력한 기반 구조에서도 기존 아키텍처를 활용할 수 있고 학습된 가중치를 한 도메인에서 다른 도메인으로 전이할 수 있다. ``` # VGG16 모델의 세 번째 블록에서 마지막 층 추출 last = base_model.get_layer('block3_pool').output # 상위 층에 분류층 추가 x = GlobalAveragePooling2D()(last) x= BatchNormalization()(x) x = Dense(256, activation='relu')(x) x = Dense(256, activation='relu')(x) x = Dropout(0.6)(x) pred = Dense(NUM_CLASSES, activation='softmax')(x) model = Model(base_model.input, pred) ``` 우리의 목표는 커스컴 분류기를 훈련시키는 것이기 때문에 VGG16 층은 동결한다. ``` for layer in base_model.layers: layer.trainable = False model.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(lr=LEARNING_RATE), metrics=['accuracy']) model.summary() ``` ## 데이터 늘리기 소규모 데이터 세트의 한계를 극복하고 모델을 일반화할 수 있도록 ```케라스``` 유틸리티로 데이터 세트를 늘려준다. ``` # 데이터 늘리기 구성의 준비 train_datagen = ImageDataGenerator( rescale=1. / 255, horizontal_flip=False) train_datagen.fit(X_train) train_generator = train_datagen.flow(X_train, Y_train, batch_size=BATCH_SIZE) val_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=False) val_datagen.fit(X_val) val_generator = val_datagen.flow(X_val, Y_val, batch_size=BATCH_SIZE) ``` ## 모델 훈련 이제 모델을 몇 번의 에포크로 훈련시키고 그 성능을 측정해 보자. 다음 코드로 모델에 새로 추가된 층을 훈련시키기 위한 fit_generator() 함수를 호출한다. 
``` train_steps_per_epoch = X_train.shape[0] // BATCH_SIZE val_steps_per_epoch = X_val.shape[0] // BATCH_SIZE history = model.fit_generator(train_generator, steps_per_epoch=train_steps_per_epoch, validation_data=val_generator, validation_steps=val_steps_per_epoch, epochs=EPOCHS, verbose=1) ``` ## 모델 성능 분석 ``` f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5)) t = f.suptitle('Deep Neural Net Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) epochs = list(range(1,EPOCHS+1)) ax1.plot(epochs, history.history['acc'], label='Train Accuracy') ax1.plot(epochs, history.history['val_acc'], label='Validation Accuracy') ax1.set_xticks(epochs) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') ax1.set_title('Accuracy') l1 = ax1.legend(loc="best") ax2.plot(epochs, history.history['loss'], label='Train Loss') ax2.plot(epochs, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(epochs) ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') l2 = ax2.legend(loc="best") predictions = model.predict(X_test/255.) test_labels = list(y_test.squeeze()) predictions = list(predictions.argmax(axis=1)) get_metrics(true_labels=y_test, predicted_labels=predictions) ``` ## 예측 시각화 ``` label_dict = {0:'airplane', 1:'automobile', 2:'bird', 3:'cat', 4:'deer', 5:'dog', 6:'frog', 7:'horse', 8:'ship', 9:'truck'} utils.plot_predictions(model=model,dataset=X_test/255., dataset_labels=Y_test, label_dict=label_dict, batch_size=16, grid_height=4, grid_width=4) ```
github_jupyter
# Table of Contents <p><div class="lev1 toc-item"><a href="#Setting-up-your-machine-Learning-Application" data-toc-modified-id="Setting-up-your-machine-Learning-Application-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Setting up your machine Learning Application</a></div><div class="lev2 toc-item"><a href="#Train-/-Dev-/-Test-sets" data-toc-modified-id="Train-/-Dev-/-Test-sets-11"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Train / Dev / Test sets</a></div><div class="lev2 toc-item"><a href="#Bias-/-Variance" data-toc-modified-id="Bias-/-Variance-12"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Bias / Variance</a></div><div class="lev2 toc-item"><a href="#Basic-Recipe-for-Machine-Learning" data-toc-modified-id="Basic-Recipe-for-Machine-Learning-13"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Basic Recipe for Machine Learning</a></div><div class="lev1 toc-item"><a href="#Regularizing-your-neural-network" data-toc-modified-id="Regularizing-your-neural-network-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Regularizing your neural network</a></div><div class="lev2 toc-item"><a href="#Regularization" data-toc-modified-id="Regularization-21"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Regularization</a></div><div class="lev2 toc-item"><a href="#Why-regularization-reduces-overfitting?" 
data-toc-modified-id="Why-regularization-reduces-overfitting?-22"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Why regularization reduces overfitting?</a></div><div class="lev2 toc-item"><a href="#Dropout-Regularization" data-toc-modified-id="Dropout-Regularization-23"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Dropout Regularization</a></div><div class="lev2 toc-item"><a href="#Understanding-Dropout" data-toc-modified-id="Understanding-Dropout-24"><span class="toc-item-num">2.4&nbsp;&nbsp;</span>Understanding Dropout</a></div><div class="lev2 toc-item"><a href="#Other-regularization-methods" data-toc-modified-id="Other-regularization-methods-25"><span class="toc-item-num">2.5&nbsp;&nbsp;</span>Other regularization methods</a></div><div class="lev1 toc-item"><a href="#Setting-up-your-optimization-problem" data-toc-modified-id="Setting-up-your-optimization-problem-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Setting up your optimization problem</a></div><div class="lev2 toc-item"><a href="#Normalizing-inputs" data-toc-modified-id="Normalizing-inputs-31"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Normalizing inputs</a></div><div class="lev2 toc-item"><a href="#Vanishing-/-Exploding-gradients" data-toc-modified-id="Vanishing-/-Exploding-gradients-32"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Vanishing / Exploding gradients</a></div><div class="lev2 toc-item"><a href="#Weight-Initialization-for-Deep-Networks" data-toc-modified-id="Weight-Initialization-for-Deep-Networks-33"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Weight Initialization for Deep Networks</a></div><div class="lev2 toc-item"><a href="#Numerical-approximation-of-gradients" data-toc-modified-id="Numerical-approximation-of-gradients-34"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Numerical approximation of gradients</a></div><div class="lev2 toc-item"><a href="#Gradient-Checking" data-toc-modified-id="Gradient-Checking-35"><span 
class="toc-item-num">3.5&nbsp;&nbsp;</span>Gradient Checking</a></div><div class="lev2 toc-item"><a href="#Gradient-Checking-Implementation-Notes" data-toc-modified-id="Gradient-Checking-Implementation-Notes-36"><span class="toc-item-num">3.6&nbsp;&nbsp;</span>Gradient Checking Implementation Notes</a></div> # Setting up your machine Learning Application ## Train / Dev / Test sets ![](https://i.imgur.com/zhNfPDO.png) ![](https://i.imgur.com/M2sJUEW.png) ![](https://i.imgur.com/3ZE2tkh.png) ## Bias / Variance ![](https://i.imgur.com/zUwoWUf.png) ## Basic Recipe for Machine Learning ![](https://i.imgur.com/9JBOjZn.png) # Regularizing your neural network ## Regularization ![](https://i.imgur.com/oQeoTQB.png) ![](https://i.imgur.com/KBOdujA.png) ## Why regularization reduces overfitting? ![](https://i.imgur.com/86HoIQn.png) ![](https://i.imgur.com/cXX9dOd.png) ## Dropout Regularization ![](https://i.imgur.com/dNacOCr.png) ![](https://i.imgur.com/KD7pcKH.png) ![](https://i.imgur.com/QuZ5UNB.png) ## Understanding Dropout ![](https://i.imgur.com/hW8BZwj.png) ## Other regularization methods ![](https://i.imgur.com/BVmNSMM.png) ![](https://i.imgur.com/SntfgkV.png) # Setting up your optimization problem ## Normalizing inputs ![](https://i.imgur.com/a2ZdeSg.png) ![](https://i.imgur.com/Ph78qBk.png) ## Vanishing / Exploding gradients ![](https://i.imgur.com/GzjU43b.png) ## Weight Initialization for Deep Networks ![](https://i.imgur.com/uWl9XI9.png) ## Numerical approximation of gradients ![](https://i.imgur.com/Z1DBfT1.png) ## Gradient Checking ![](https://i.imgur.com/MLWOkP2.png) ![](https://i.imgur.com/4ndm620.png) ## Gradient Checking Implementation Notes ![](https://i.imgur.com/dwjrd88g.png)
github_jupyter
[source](../api/alibi_detect.ad.model_distillation.rst) # Model distillation ## Overview [Model distillation](https://arxiv.org/abs/1503.02531) is a technique that is used to transfer knowledge from a large network to a smaller network. Typically, it consists of training a second model with a simplified architecture on soft targets (the output distributions or the logits) obtained from the original model. Here, we apply model distillation to obtain harmfulness scores, by comparing the output distributions of the original model with the output distributions of the distilled model, in order to detect adversarial data, malicious data drift or data corruption. We use the following definition of harmful and harmless data points: * Harmful data points are defined as inputs for which the model's predictions on the uncorrupted data are correct while the model's predictions on the corrupted data are wrong. * Harmless data points are defined as inputs for which the model's predictions on the uncorrupted data are correct and the model's predictions on the corrupted data remain correct. Analogously to the [adversarial AE detector](https://arxiv.org/abs/2002.09364), which is also part of the library, the model distillation detector picks up drift that reduces the performance of the classification model. The detector can be used as follows: * Given an input $x,$ an adversarial score $S(x)$ is computed. $S(x)$ equals the value loss function employed for distillation calculated between the original model's output and the distilled model's output on $x$. * If $S(x)$ is above a threshold (explicitly defined or inferred from training data), the instance is flagged as adversarial. ## Usage ### Initialize Parameters: * `threshold`: threshold value above which the instance is flagged as an adversarial instance. * `distilled_model`: `tf.keras.Sequential` instance containing the model used for distillation. 
Example: ```python distilled_model = tf.keras.Sequential( [ tf.keras.InputLayer(input_shape=(input_dim,)), tf.keras.layers.Dense(output_dim, activation=tf.nn.softmax) ] ) ``` * `model`: the classifier as a `tf.keras.Model`. Example: ```python inputs = tf.keras.Input(shape=(input_dim,)) hidden = tf.keras.layers.Dense(hidden_dim)(inputs) outputs = tf.keras.layers.Dense(output_dim, activation=tf.nn.softmax)(hidden) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` * `loss_type`: type of loss used for distillation. Supported losses: 'kld', 'xent'. * `temperature`: Temperature used for model prediction scaling. Temperature <1 sharpens the prediction probability distribution which can be beneficial for prediction distributions with high entropy. * `data_type`: can specify data type added to metadata. E.g. *'tabular'* or *'image'*. Initialized detector example: ```python from alibi_detect.ad import ModelDistillation ad = ModelDistillation( distilled_model=distilled_model, model=model, temperature=0.5 ) ``` ### Fit We then need to train the detector. The following parameters can be specified: * `X`: training batch as a numpy array. * `loss_fn`: loss function used for training. Defaults to the custom model distillation loss. * `optimizer`: optimizer used for training. Defaults to [Adam](https://arxiv.org/abs/1412.6980) with learning rate 1e-3. * `epochs`: number of training epochs. * `batch_size`: batch size used during training. * `verbose`: boolean whether to print training progress. * `log_metric`: additional metrics whose progress will be displayed if verbose equals True. * `preprocess_fn`: optional data preprocessing function applied per batch during training. ```python ad.fit(X_train, epochs=50) ``` The threshold for the adversarial / harmfulness score can be set via ```infer_threshold```. We need to pass a batch of instances $X$ and specify what percentage of those we consider to be normal via `threshold_perc`. 
Even if we only have normal instances in the batch, it might be best to set the threshold value a bit lower (e.g. $95$%) since the model could have misclassified training instances. ```python ad.infer_threshold(X_train, threshold_perc=95, batch_size=64) ``` ### Detect We detect adversarial / harmful instances by simply calling `predict` on a batch of instances `X`. We can also return the instance level score by setting `return_instance_score` to True. The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys: * `is_adversarial`: boolean whether instances are above the threshold and therefore adversarial instances. The array is of shape *(batch size,)*. * `instance_score`: contains instance level scores if `return_instance_score` equals True. ```python preds_detect = ad.predict(X, batch_size=64, return_instance_score=True) ``` ## Examples ### Image [Harmful drift detection through model distillation on CIFAR10](../examples/cd_distillation_cifar10.nblink)
github_jupyter
## 각 형태소 분석기 비교 (KKMA, KOMORAN, MECAB, TWITTER) ##### 긍정리뷰 3개 vs. 부정리뷰 3개를 기준으로 ------------ #### 긍정리뷰 "3달정도 사용해오고 있는데 가성비부터 최고예요. 운동도 하고 교통비도 아끼고 대만족입니다." "QR코드 이용해서 대여하니 빠르고 편해요. 처음 이용해봤는데 좋았네요^^" "너무너무 좋아요. 결제도 쉽고 대여소 찾는 것도 쉽고 정기권 끊어서 타고있어요. 이거 덕분에 자전거 많이 타고있어요!! 감사합니다~" #### 부정리뷰 "소셜로그인은 자동로그인이 안되나요? 너무 불편해요ㅠ" "회원가입도 안되고 로그인도 잘 안되요." "제발 앱좀 고쳐주세요..매번 카드정보입력하게할거면 뭐하러 회원정보수집하고 뭐하러 로그인시키나요.. 탈때마다 불편해죽겠어요. " "매번 카드정보입력하게할거면 뭐하러 회원정보수집하고 뭐하러 로그인시키나요.. 탈때마다 불편해죽겠어요. " ``` # 각 형태소 분석기 import from konlpy.tag import * twitter = Okt() mecab = Mecab() han = Hannanum() kkma = Kkma() komo = Komoran() b_text1 = '소셜로그인은 자동로그인이 안되나요? 너무 불편해요ㅠ' b_text2 = "회원가입도 안되고 로그인도 잘 안되요." b_text3 = "제발 앱좀 고쳐주세요..매번 카드정보입력하게할거면 뭐하러 회원정보수집하고 뭐하러 로그인시키나요.. 탈때마다 불편해죽겠어요. " b_text_extra = "매번 카드정보입력하게할거면 뭐하러 회원정보수집하고 뭐하러 로그인시키나요.. 탈때마다 불편해죽겠어요. " g_text1 = "3달정도 사용해오고 있는데 가성비부터 최고예요. 운동도 하고 교통비도 아끼고 대만족입니다." g_text2 = "QR코드이용해서 대여하니빠르고 편해요 ㅎㅎㅎ. 처음 이용해봤는데 좋았네요^^" g_text3 = "너무너무 좋아요. 결제도 쉽고 대여소 찾는 것도 쉽고 정기권 끊어서 타고있어요. 이거 덕분에 자전거 많이 타고있어요!! 
감사합니다~" def posComparsion(text): kkma_text = kkma.pos(text) han_text = han.pos(text) komo_text = komo.pos(text) mecab_text = mecab.pos(text) twitter_text =twitter.pos(text) return print('KKMA POS\n', kkma_text, '\n\nHannanum POS\n', han_text, '\n\nKomoran POS\n', komo_text, '\n\nMecab POS\n', mecab_text, '\n\nTwitter POS\n' ,twitter_text) def makeSlash(pos_text): pos_slash = [] for compo in pos_text: word =compo[0]+'/' pos = compo[1] words = (word, pos) pos_slash.append(words) return pos_slash def posComparsionText(text): kkma_text = kkma.pos(text) han_text = han.pos(text) komo_text = komo.pos(text) mecab_text = mecab.pos(text) twitter_text =twitter.pos(text, stem=True) kkma_slash = makeSlash(kkma_text) han_slash =makeSlash(han_text) komo_slash = makeSlash(komo_text) mecab_slash = makeSlash(mecab_text) twitter_slash =makeSlash(twitter_text) return print('KKMA POS\n', kkma_slash, '\n\nHannanum POS\n', han_slash, '\n\nKomoran POS\n', komo_slash, '\n\nMecab POS\n', mecab_slash, '\n\nTwitter POS\n' ,twitter_slash) ``` ### 긍정리뷰 #### "3달정도 사용해오고 있는데 가성비부터 최고예요. 운동도 하고 교통비도 아끼고 대만족입니다." ``` posComparsionText(g_text1) ``` </br> #### "QR코드이용해서 대여하니빠르고 편해요 ㅎㅎㅎ. 처음 이용해봤는데 좋았네요^^" ``` posComparsionText(g_text2) ``` </br> #### "너무너무 좋아요. 결제도 쉽고 대여소 찾는 것도 쉽고 정기권 끊어서 타고있어요. 이거 덕분에 자전거 많이 타고있어요!! 감사합니다~" ``` posComparsion(g_text3) ``` </br> ### 부정리뷰 #### "소셜로그인은 자동로그인이 안되나요? 너무 불편해요ㅠ" ``` posComparsion(b_text1) ``` </br> #### "회원가입도 안되고 로그인도 잘 안되요." ``` posComparsion(b_text2) ``` </br> #### "제발 앱좀 고쳐주세요..매번 카드정보입력하게할거면 뭐하러 회원정보수집하고 뭐하러 로그인시키나요.. 탈때마다 불편해죽겠어요. " ``` posComparsion(b_text3) posComparsionText(b_text_extra) twitter.pos("3달정도 사용해오고 있는데 가성비부터 최고예요. 운동도 하고 교통비도 아끼고 대만족입니다." , stem=True) ```
github_jupyter
<a href="https://colab.research.google.com/github/liscolme/EscapeEarth/blob/main/Interns/Elise/BLS_Function_Test.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` from google.colab import drive drive.mount('/content/gdrive') ################################### !pip install lightkurve import lightkurve as lk import numpy as np import pandas as pd import astropy.units as u from astropy.timeseries import BoxLeastSquares !ls /content/gdrive/MyDrive/EscapeEarthData #/content/gdrive/My \Drive/EscapeEarthData/Sec14_cleaned/{ticid}/lc.fits #/content/gdrive/My \Drive/EscapeEarthData/Sec15_cleaned/{ticid}/lc.fits #/content/gdrive/My \Drive/EscapeEarthData/all_targets_S014_v1.csv #timestamps t = np.random.uniform(0, 20, 2000) #observations y = np.ones_like(t) - 0.1*((t%3)<0.2) + 0.01*np.random.randn(len(t)) model = BoxLeastSquares(t * u.day, y, dy=0.01) periodogram = model.autopower(0.2) import matplotlib.pyplot as plt plt.plot(periodogram.period, periodogram.power) model = BoxLeastSquares(t * u.day, y, dy=0.01) periodogram = model.autopower(0.2, objective="snr") plt.plot(periodogram.period, periodogram.power) # This objective will generally produce a periodogram that is qualitatively similar to the log likelihood spectrum # but it has been used to improve the reliability of transit search in the presence of correlated noise. ``` **Period Grid** ``` # It is possible to provide a specific period grid as follows: model = BoxLeastSquares(t * u.day, y, dy=0.01) periods = np.linspace(2.5, 3.5, 1000) * u.day periodogram = model.power(periods, 0.2) plt.plot(periodogram.period, periodogram.power) # However, if the period grid is too coarse, the correct period might be missed. 
model = BoxLeastSquares(t * u.day, y, dy=0.01) periods = np.linspace(0.5, 10.5, 15) * u.day periodogram = model.power(periods, 0.2) plt.plot(periodogram.period, periodogram.power) ``` **Peak Statistics** ``` # The compute_stats method can be used to calculate several statistics of a candidate transit. model = BoxLeastSquares(t * u.day, y, dy=0.01) periodogram = model.autopower(0.2) max_power = np.argmax(periodogram.power) stats = model.compute_stats(periodogram.period[max_power], periodogram.duration[max_power], periodogram.transit_time[max_power]) ``` **Function** ``` def BLS(periodgrid,durationgrid,lightcurve): ''' Purppose ------------------ A Box Least Squares function to print out the compute stats of the periodogram. Parameters ------------------- Period grid - describes how often a transit is happening Duration Grid - describes the width of a transit Lightcurve - lightkurve class object Return ------------------ Calculate the peak using the method compute_stats. ''' # assinging parameters to variables. period = periodgrid lc = lightcurve # t = timestamps t = durationgrid * u.day # y = observations y = lc * u.dimensionless_unscaled # indicate quantities that do not have a physical dimension # assigned to a BLS object model = BoxLeastSquares(t , y , dy=lc.flux_err) # dy is the uncertainty periodogram = model.autopower(period,durationgrid) #calculates the maximum statstics within the transit for period, duration, and transit time max_power = np.argmax(periodogram.power) stats = model.compute_stats(periodogram.period[max_power], periodogram.duration[max_power], periodogram.transit_time[max_power]) # stats is the one peak, periodgram is the arrays return stats, periodogram def periods(N=1000): period=np.logspace(-0.523, 1.43, N, endpoint=True) return period def duration_grid(N=10): duration=np.linspace(.01, 0.298, N) return duration pd = periods(N=1000) dg = duration_grid(N=10) ```
github_jupyter
```
import math
import numpy as np
from sympy import *

t, s, k = symbols('t, s, k')

# Riemann sum using left endpoints (left-rectangle rule)
def _Riemman_izq(Func, limA, limB, numI):
    """
    Left Riemann sum: approximate the integral of Func over [limA, limB]
    using rectangles whose heights are taken at the left endpoints.

    ## Parameters:
        Func (function) : function of a single variable to integrate.
        limA (float) : lower limit of integration.
        limB (float) : upper limit of integration.
        numI (int) : number of rectangles (subintervals).

    ## Returns:
        Area (float) : approximate area found.
    """
    Δx = (limB - limA) / numI
    Alturas = 0
    for i in range(numI):
        Alturas += Func(limA + i * Δx)
    Area = Δx * Alturas
    return Area

# Riemann sum using right endpoints (right-rectangle rule)
def _Riemman_der(Func, limA, limB, numI):
    """
    Right Riemann sum: approximate the integral of Func over [limA, limB]
    using rectangles whose heights are taken at the right endpoints.

    ## Parameters:
        Func (function) : function of a single variable to integrate.
        limA (float) : lower limit of integration.
        limB (float) : upper limit of integration.
        numI (int) : number of rectangles (subintervals).

    ## Returns:
        Area (float) : approximate area found.
    """
    Δx = (limB - limA) / numI
    Alturas = 0
    for i in range(numI):
        Alturas += Func(limB - i * Δx)
    Area = Δx * Alturas
    return Area

# Riemann sum using midpoints (midpoint rule)
def _Riemman_med(Func, limA, limB, numI):
    """
    Midpoint Riemann sum: approximate the integral of Func over [limA, limB]
    using rectangles whose heights are taken at the midpoint of each
    subinterval.

    ## Parameters:
        Func (function) : function of a single variable to integrate.
        limA (float) : lower limit of integration.
        limB (float) : upper limit of integration.
        numI (int) : number of rectangles (subintervals).

    ## Returns:
        Area (float) : approximate area found.
    """
    Δx = (limB - limA) / numI
    Alturas = 0
    for i in range(numI):
        Alturas += Func(limA + (2*i + 1)/2 * Δx)
    Area = Δx * Alturas
    return Area

# 1
A = [(1, 3), (0, 3)]
b = [14, 21]
x = np.matmul(np.linalg.inv(A), np.transpose(b))
x1 = x[0]
x2 = x[1]
ft = 2*t
Ft = Integral(ft, t)
Ft_ab = Integral(ft, (t, x1, x2))
# Evaluate the integral
Rt = Ft.doit()
Rt_ab = Ft_ab.doit()
display(Eq(Ft, Rt))
display(Ft_ab, Rt_ab)

# 2
A = [(1, 2), (-3, 4)]
b = [14, 18]
M11 = 4
detA = np.linalg.det(A)
Zt = Integral(s, (s, t, M11))
funtion_Zt = Zt.doit()
Zt1 = detA * funtion_Zt.subs(t, 1)
display(Zt, funtion_Zt, Zt1)
# math.floor(Z11)

# 3
# Relative percent error between the approximation Va and the true value Vr.
Vr = math.e
Va = 2.72
er = 100 * abs(Va - Vr)/Vr
print(f"{er:7.1e}")

# 5
F1 = lambda x: x**2
_Riemman_izq(F1, 0, 2, 3)

# 6
F2 = lambda x: (x**3)**2
Va = _Riemman_der(F2, 0, 4, 5)
print(4250 - Va)
print(4500 - Va)

# 7
F3 = lambda k: math.e**k
vr = Integral(math.e**k, (k, 0, 3)).doit()
va = _Riemman_med(F3, 0, 3, 2)
er = 100 * abs(vr-va)/va
print(vr, va, er)
```
github_jupyter
``` file_1 = """Stock Close Beta Cap Apple 188.72 0.2 895.667B Tesla 278.62 0.5 48.338B""" file_2 = """Employee Wage Hired Promotion Linda 3000 2017 Yes Bob 2000 2016 No Joshua 800 2019 Yes""" ``` ### My solution Other approaches are possible ``` def parser(stringa): """ Parse string and returns dict of lists: keys are first line, lists are columns. """ # lines will be a list of lists # each sub list contains the words of a single line lines = list() for line in stringa.splitlines(): lines.append(line.split()) keys = lines[0] # the first line is the key lines = lines[1:] # now lines does not include the first line result = dict() count = 0 for key in keys: values = [line[count] for line in lines] result[key] = values count += 1 return result parser(file_1) parser(file_2) ``` ### Test We want to verify carefully that everything works as intended ``` def feel_bored_1_test(function): """ Verify that function returns result1 and result2. """ result_1 = {'Stock': ['Apple', 'Tesla'], 'Close': ['188.72', '278.62'], 'Beta': ['0.2', '0.5'], 'Cap': ['895.667B', '48.338B']} result_2 = {'Employee': ['Linda', 'Bob', 'Joshua'], 'Wage': ['3000', '2000', '800'], 'Hired': ['2017', '2016', '2019'], 'Promotion': ['Yes', 'No', 'Yes']} results = list() if function(file1) == result1: print("Test 1 passed") results.append(True) else: print("Test 1 not passed") results.append(False) if function(file2) == result2: print("Test 2 passed") results.append(True) else: print("Test 2 not passed") results.append(False) return results ``` We can follow DRY (Don't Repeat Yourself) with a for loop to improve the testing function ``` def feel_bored_1_test(function): result_1 = {'Stock': ['Apple', 'Tesla'], 'Close': ['188.72', '278.62'], 'Beta': ['0.2', '0.5'], 'Cap': ['895.667B', '48.338B']} result_2 = {'Employee': ['Linda', 'Bob', 'Joshua'], 'Wage': ['3000', '2000', '800'], 'Hired': ['2017', '2016', '2019'], 'Promotion': ['Yes', 'No', 'Yes']} input_to_output = {file_1: result_1, file_2: result_2} 
results = list() count = 1 for key, value in input_to_output.items(): if function(key) == value: results.append(True) print(f"Test {count} passed") else: results.append(False) print(f"Test {count} not passed") count += 1 return results feel_bored_1_test(parser) ``` ### Improve code ``` def fast_parser(stringa): """ Parse string and returns dict of lists: keys are first line, lists are columns. """ lines = [line.split() for line in stringa.splitlines()] # list of lists keys = lines.pop(0) # remove first line and assign to keys result = { key: [line[index] for line in lines] for index, key in enumerate(keys) } return result ``` ### Everything appears to work as intended ``` feel_bored_1_test(fast_parser) ``` ### Efficiency does not matter, but it's still interesting to measure We can see that the difference is insignificant for small inputs ``` %%timeit parser(file_1) %%timeit fast_parser(file_1) ``` ### With bigger inputs, parsing efficiency becomes relevant <br> **Key Takeaway:** <br> Do not waste time on optimizing code if you don't need it <br> <br> **Premature optimization is the root of all evil** ``` big_input = (file_1 + '\n') * 100 print(big_input) %%timeit parser(big_input) %%timeit fast_parser(big_input) ```
github_jupyter
``` import numpy as np import pandas as pd import xarray as xr from glob import glob import pymongo import pdb from datetime import datetime, timedelta from sqlalchemy import create_engine import time import psycopg2 import os from io import StringIO from scipy import sparse from scipy.sparse.linalg import svds import dask.dataframe as dd engine = create_engine('postgresql://postgres:postgres@localhost:5432/atmos') verdziFileNames = '/home/tyler/slicer/**/*.csv' df = dd.read_csv(verdziFileNames) filenames = glob(verdziFileNames) filenames[0] def create_idx(lat, lon, layer, delta): row = (180 - lat) / delta col = (360 - lon) / delta idx = (1 + row)*col*layer return idx def row_idx(lat, delta=1): return (90 - lat) / delta def col_idx(lon, delta=1): return (180 - lon) / delta def vector_idx(row): return (row['rowIdx'] + 1) * (row['colIdx'] + 1) * row['pres'] def get_layer(row, delta=1): delta = 1 nrow = (180 - row['lat']) / delta ncol = (360 - row['lon']) / delta layer = int(row['index'] / ((1 + nrow)*ncol)) return layer vdf = pd.DataFrame() for idx, file in enumerate(filenames): df = pd.read_csv(file, index_col=0) time = file.split('_')[-1].strip('.csv') value = 'T' + time df = df.rename(index=str, columns={value:'temp'}) df['time'] = int(time) df = df.reset_index() df['index'] = df['index'].astype(int) df['pres'] = df.apply(get_layer, axis=1) df = df[['time', 'temp', 'lat', 'lon', 'pres', 'index']] vdf = pd.concat([vdf, df], axis=0, sort=False) #get depth from index if idx % 100 == 0: print('on idx: {}'.format(idx)) print('finished') vdf = vdf.dropna() vdf.to_csv('verdzi.csv') vdf = pd.read_csv('verdzi.csv', index_col=0) vdf = vdf.sort_values(['time', 'lat', 'lon'], ascending=True) vdf['rowIdx'] = vdf['lat'].apply(lambda x: row_idx(x)) vdf['colIdx'] = vdf['lon'].apply(lambda x: col_idx(x)) for time, colDf in vdf.groupby('time'): colDf['idx'] = colDf.apply(lambda row: vector_idx(row), axis=1) data = colDf['temp'].values * np.cos(np.deg2rad(colDf['lat'])) row = 
colDf['idx'].astype(int).values col = np.zeros(len(row)) colM = sparse.csc_matrix((data, (row, col)), shape=(180*360*25, 1)) if time == 0: M = colM else: M = sparse.hstack([M, colM]) if time % 100 == 0: print('on time: {}'.format(time)) print('finished') sparse.save_npz('verdzi.npz', M) u, s, vt = svds(M, k=30) u.sum() colDf['temp'].shape vdf.to_csv('verdzi.csv') M = sparse.csc_matrix([0], shape=(180*360*25, 1)) nCol = M.shape[1] u.shape ```
github_jupyter
<a href="https://colab.research.google.com/github/satyajitghana/PadhAI-Course/blob/master/11_VectorizedGDAlgorithms.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import numpy as np import matplotlib.pyplot as plt import matplotlib.colors import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, mean_squared_error, log_loss from tqdm import tqdm_notebook import seaborn as sns import imageio import time from IPython.display import HTML sns.set() from sklearn.preprocessing import OneHotEncoder from sklearn.datasets import make_blobs my_cmap = 'inferno' np.random.seed(0) ``` # Generate Data ``` data, labels = make_blobs(n_samples=1000, centers=4, n_features=2, random_state=0) print(data.shape, labels.shape) plt.scatter(data[:,0], data[:,1], c=labels, cmap=my_cmap) plt.show() labels_orig = labels labels = np.mod(labels_orig, 2) plt.scatter(data[:,0], data[:,1], c=labels, cmap=my_cmap) plt.show() ``` # MultiClass Classification ``` X_train, X_val, Y_train, Y_val = train_test_split(data, labels_orig, stratify=labels_orig, random_state=0) print(X_train.shape, X_val.shape, labels_orig.shape) enc = OneHotEncoder() # 0 -> (1, 0, 0, 0), 1 -> (0, 1, 0, 0), 2 -> (0, 0, 1, 0), 3 -> (0, 0, 0, 1) y_OH_train = enc.fit_transform(np.expand_dims(Y_train,1)).toarray() y_OH_val = enc.fit_transform(np.expand_dims(Y_val,1)).toarray() print(y_OH_train.shape, y_OH_val.shape) W1 = np.random.randn(2,2) W2 = np.random.randn(2,4) print(W1) print(W2) ``` # FF Class ``` class FFNetwork: def __init__(self, W1, W2): self.params={} self.params["W1"]=W1.copy() self.params["W2"]=W2.copy() self.params["B1"]=np.zeros((1,2)) self.params["B2"]=np.zeros((1,4)) self.num_layers=2 self.gradients={} self.update_params={} self.prev_update_params={} for i in range(1,self.num_layers+1): self.update_params["v_w"+str(i)]=0 self.update_params["v_b"+str(i)]=0 
self.update_params["m_b"+str(i)]=0 self.update_params["m_w"+str(i)]=0 self.prev_update_params["v_w"+str(i)]=0 self.prev_update_params["v_b"+str(i)]=0 def forward_activation(self, X): return 1.0/(1.0 + np.exp(-X)) def grad_activation(self, X): return X*(1-X) def softmax(self, X): exps = np.exp(X) return exps / np.sum(exps, axis=1).reshape(-1,1) def forward_pass(self, X, params = None): if params is None: params = self.params self.A1 = np.matmul(X, params["W1"]) + params["B1"] # (N, 2) * (2, 2) -> (N, 2) self.H1 = self.forward_activation(self.A1) # (N, 2) self.A2 = np.matmul(self.H1, params["W2"]) + params["B2"] # (N, 2) * (2, 4) -> (N, 4) self.H2 = self.softmax(self.A2) # (N, 4) return self.H2 def grad(self, X, Y, params = None): if params is None: params = self.params self.forward_pass(X, params) m = X.shape[0] self.gradients["dA2"] = self.H2 - Y # (N, 4) - (N, 4) -> (N, 4) self.gradients["dW2"] = np.matmul(self.H1.T, self.gradients["dA2"]) # (2, N) * (N, 4) -> (2, 4) self.gradients["dB2"] = np.sum(self.gradients["dA2"], axis=0).reshape(1, -1) # (N, 4) -> (1, 4) self.gradients["dH1"] = np.matmul(self.gradients["dA2"], params["W2"].T) # (N, 4) * (4, 2) -> (N, 2) self.gradients["dA1"] = np.multiply(self.gradients["dH1"], self.grad_activation(self.H1)) # (N, 2) .* (N, 2) -> (N, 2) self.gradients["dW1"] = np.matmul(X.T, self.gradients["dA1"]) # (2, N) * (N, 2) -> (2, 2) self.gradients["dB1"] = np.sum(self.gradients["dA1"], axis=0).reshape(1, -1) # (N, 2) -> (1, 2) def fit(self, X, Y, epochs=1, algo= "GD", display_loss=False, eta=1, mini_batch_size=100, eps=1e-8, beta=0.9, beta1=0.9, beta2=0.9, gamma=0.9 ): if display_loss: loss = {} for num_epoch in tqdm_notebook(range(epochs), total=epochs, unit="epoch"): m = X.shape[0] if algo == "GD": self.grad(X, Y) for i in range(1,self.num_layers+1): self.params["W"+str(i)] -= eta * (self.gradients["dW"+str(i)]/m) self.params["B"+str(i)] -= eta * (self.gradients["dB"+str(i)]/m) elif algo == "MiniBatch": for k in 
range(0,m,mini_batch_size): self.grad(X[k:k+mini_batch_size], Y[k:k+mini_batch_size]) for i in range(1,self.num_layers+1): self.params["W"+str(i)] -= eta * (self.gradients["dW"+str(i)]/mini_batch_size) self.params["B"+str(i)] -= eta * (self.gradients["dB"+str(i)]/mini_batch_size) elif algo == "Momentum": self.grad(X, Y) for i in range(1,self.num_layers+1): self.update_params["v_w"+str(i)] = gamma *self.update_params["v_w"+str(i)] + eta * (self.gradients["dW"+str(i)]/m) self.update_params["v_b"+str(i)] = gamma *self.update_params["v_b"+str(i)] + eta * (self.gradients["dB"+str(i)]/m) self.params["W"+str(i)] -= self.update_params["v_w"+str(i)] self.params["B"+str(i)] -= self.update_params["v_b"+str(i)] elif algo == "NAG": temp_params = {} for i in range(1,self.num_layers+1): self.update_params["v_w"+str(i)]=gamma*self.prev_update_params["v_w"+str(i)] self.update_params["v_b"+str(i)]=gamma*self.prev_update_params["v_b"+str(i)] temp_params["W"+str(i)]=self.params["W"+str(i)]-self.update_params["v_w"+str(i)] temp_params["B"+str(i)]=self.params["B"+str(i)]-self.update_params["v_b"+str(i)] self.grad(X,Y,temp_params) for i in range(1,self.num_layers+1): self.update_params["v_w"+str(i)] = gamma *self.update_params["v_w"+str(i)] + eta * (self.gradients["dW"+str(i)]/m) self.update_params["v_b"+str(i)] = gamma *self.update_params["v_b"+str(i)] + eta * (self.gradients["dB"+str(i)]/m) self.params["W"+str(i)] -= eta * (self.update_params["v_w"+str(i)]) self.params["B"+str(i)] -= eta * (self.update_params["v_b"+str(i)]) self.prev_update_params=self.update_params elif algo == "AdaGrad": self.grad(X, Y) for i in range(1,self.num_layers+1): self.update_params["v_w"+str(i)] += (self.gradients["dW"+str(i)]/m)**2 self.update_params["v_b"+str(i)] += (self.gradients["dB"+str(i)]/m)**2 self.params["W"+str(i)] -= (eta/(np.sqrt(self.update_params["v_w"+str(i)])+eps)) * (self.gradients["dW"+str(i)]/m) self.params["B"+str(i)] -= (eta/(np.sqrt(self.update_params["v_b"+str(i)])+eps)) * 
(self.gradients["dB"+str(i)]/m) elif algo == "RMSProp": self.grad(X, Y) for i in range(1,self.num_layers+1): self.update_params["v_w"+str(i)] = beta*self.update_params["v_w"+str(i)] +(1-beta)*((self.gradients["dW"+str(i)]/m)**2) self.update_params["v_b"+str(i)] = beta*self.update_params["v_b"+str(i)] +(1-beta)*((self.gradients["dB"+str(i)]/m)**2) self.params["W"+str(i)] -= (eta/(np.sqrt(self.update_params["v_w"+str(i)]+eps)))*(self.gradients["dW"+str(i)]/m) self.params["B"+str(i)] -= (eta/(np.sqrt(self.update_params["v_b"+str(i)]+eps)))*(self.gradients["dB"+str(i)]/m) elif algo == "Adam": self.grad(X, Y) num_updates=0 for i in range(1,self.num_layers+1): num_updates+=1 self.update_params["m_w"+str(i)]=beta1*self.update_params["m_w"+str(i)]+(1-beta1)*(self.gradients["dW"+str(i)]/m) self.update_params["v_w"+str(i)]=beta2*self.update_params["v_w"+str(i)]+(1-beta2)*((self.gradients["dW"+str(i)]/m)**2) m_w_hat=self.update_params["m_w"+str(i)]/(1-np.power(beta1,num_updates)) v_w_hat=self.update_params["v_w"+str(i)]/(1-np.power(beta2,num_updates)) self.params["W"+str(i)] -=(eta/np.sqrt(v_w_hat+eps))*m_w_hat self.update_params["m_b"+str(i)]=beta1*self.update_params["m_b"+str(i)]+(1-beta1)*(self.gradients["dB"+str(i)]/m) self.update_params["v_b"+str(i)]=beta2*self.update_params["v_b"+str(i)]+(1-beta2)*((self.gradients["dB"+str(i)]/m)**2) m_b_hat=self.update_params["m_b"+str(i)]/(1-np.power(beta1,num_updates)) v_b_hat=self.update_params["v_b"+str(i)]/(1-np.power(beta2,num_updates)) self.params["B"+str(i)] -=(eta/np.sqrt(v_b_hat+eps))*m_b_hat if display_loss: Y_pred = self.predict(X) loss[num_epoch] = log_loss(np.argmax(Y, axis=1), Y_pred) if display_loss: plt.plot(list(loss.values()), '-o', markersize=5) plt.xlabel('Epochs') plt.ylabel('Log Loss') plt.show() def predict(self, X): Y_pred = self.forward_pass(X) return np.array(Y_pred).squeeze() def print_accuracy(): Y_pred_train = model.predict(X_train) Y_pred_train = np.argmax(Y_pred_train,1) Y_pred_val = model.predict(X_val) 
Y_pred_val = np.argmax(Y_pred_val,1) accuracy_train = accuracy_score(Y_pred_train, Y_train) accuracy_val = accuracy_score(Y_pred_val, Y_val) print("Training accuracy", round(accuracy_train, 4)) print("Validation accuracy", round(accuracy_val, 4)) if False: plt.scatter(X_train[:,0], X_train[:,1], c=Y_pred_train, cmap=my_cmap, s=15*(np.abs(np.sign(Y_pred_train-Y_train))+.1)) plt.show() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="GD", display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="MiniBatch", mini_batch_size=128, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="MiniBatch", mini_batch_size=8, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="Momentum", gamma=0.5, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="Momentum", gamma=0.9, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="Momentum", gamma=0.99, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="NAG", gamma=0.99, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="NAG", gamma=0.5, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="NAG", gamma=0.9, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="AdaGrad", display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=.1, algo="AdaGrad", display_loss=True) print_accuracy() %%time model = FFNetwork(W1, 
W2) model.fit(X_train, y_OH_train, epochs=100, eta=.1, algo="RMSProp", beta=0.9, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=.9, algo="RMSProp", beta=0.9, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=.9, algo="Adam", beta=0.9, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=100, eta=.1, algo="Adam", beta=0.9, display_loss=True) print_accuracy() ``` # Good Configuration for each Algorithm ``` %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=10000, eta=0.5, algo="GD", display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=1000, eta=0.5, algo="Momentum", gamma=0.9, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=1000, eta=0.5, algo="NAG", gamma=0.9, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=500, eta=1, algo="AdaGrad", display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=2000, eta=.01, algo="RMSProp", beta=0.9, display_loss=True) print_accuracy() %%time model = FFNetwork(W1, W2) model.fit(X_train, y_OH_train, epochs=200, eta=.1, algo="Adam", beta=0.9, display_loss=True) print_accuracy() ```
github_jupyter
<a href="https://colab.research.google.com/github/mottaquikarim/PYTH2/blob/master/src/Topics/nb/basic_data_types.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Basic Data Types Let's discuss data types, variables, and naming. A data type is a unit of information that can be stored and retrieved using a program language. **Variables** store different types of data. You can display the value stored in a variable on the screen using the `print()` statement. Because variables are so ubiquitous, there are some **rules for naming variables** that help avoid confusion. - **Rule:** snake_case with the underscore character - **Rule:** Can’t use keywords as var names, e.g. True, None, function, class, object - **Rule:** Can’t start with a numeric value, e.g. can't name something "1var", but you CAN do function1 - **Best Practice:** You COULD use CamelCase to name a variable, but this is typically reserved for naming a different Python object called a "class". #### Creating & Reading a Variable ``` first_prime = 2 print(first_prime) # expect to see 2 ``` #### Comments Real quick, let's see how we create comments so that you can write notes to yourself in your code - **very** useful! ``` # I'm a comment! # Here is a multi-line comment: """ 'Cause if you liked it, then you should have put a ring on it If you liked it, then you should have put a ring on it Don't be mad once you see that he want it If you liked it, then you should have put a ring on it """ ``` These are the most basic data types, which compose the more complex data types we'll learn about later: * Strings: alphanumeric characters read as text * **Important takeaway!** If you use a `'`, say in a contraction, the string might misinterpret it as the ending `'` for the string. To avoid this, add `\` right before that `'` like this: 'I\'ll`. 
* Integers: whole numbers * Floats: decimals * Booleans: represents True or False values; used for evaluating conditional statements * Nonetype: represents variables not yet defined or essentially blank values (shown as None, used as placeholder to avoid error) ``` # Strings cat = 'Layla' # or "Layla" because single & double quotes are both valid cat_description = 'Layla\'s a mischievous, but very cute kitty!' # Note the use of \' here. # Integers age = 8 # Floats weight = 10.5 # Booleans vaccinated = True good_with_other_cats = False # Nonetype good_with_other_dogs = None ``` ### String Formatting Strings have some special qualities that make dealing with them easier. For instance, you can store several separate snippets of text within a single string. One way to do this is adding `\n` to signify that you want to create a new line. However, there's another way to do this that makes it much more readable! To do this, you can use triple quotes i.e. `'''` or `"""`. You can use single and double quotes within the string freely, so no need to worry about that detail! Let's say you're storing song lyrics, so you want to have a line break between each line of the song. ``` 'Cause if you liked it, then you should have put a ring on it\nIf you liked it, then you should have put a ring on it\nDon\'t be mad once you see that he want it\nIf you liked it, then you should have put a ring on it' ''' 'Cause if you liked it, then you should have put a ring on it If you liked it, then you should have put a ring on it Don't be mad once you see that he want it If you liked it, then you should have put a ring on it ''' ``` If you want to insert variables within a string, you can do this: ``` cat = 'Layla' print(cat, 'is a mischievous, but very cute kitty!') # Note that the ',' automatically adds a space character. ``` But that can be annoying if you're inserting multiple variables. 
To simplify this, you dynamically add variables directly within a string like this: ``` cat = 'Layla' # or "Layla" because single & double quotes are both valid age = 8 weight = 10.5 print(f'{cat} is {age} years old.\n{cat} weighs {weight} pounds.') print(f''' {cat} is {age} years old. {cat} weighs {weight} pounds. ''') """ Both of these print... Layla is 8 years old. Layla weighs 10.5 pounds. """ ``` **Pro Tip**: You can use several versions of the the `.strip()` function to remove leading and trailing spaces from strings. * `lstrip()`: remove all leading spaces * `rstrip()`: remove all trailing spaces * `strip()`: remove all leading AND trailing spaces ``` a = ' hi mom!' print(f'LEADING SPACES: \n{a} \n{a.strip()}\n\n') b = 'hi mom! ' print(f'TRAILING SPACES: \n{b} \n{b.strip()}\n\n') c = ' hi mom! ' # print(c, '\n', c.strip(), '\n\n') print(f'LEADING & TRAILING SPACES: \n{c} \n{c.strip()}') ``` ## Typecasting **Typecasting** is a way to convert an instance of one data type to another. Numbers are the most flexible for converting, while strings are very inflexible. Booleans get a bit more complicated, so we'll look at those last! First you need to know what data type your variable is in the first place! Check what data type is stored in a variable using the `type()` statement or the `isinstance()` statement. ``` # Example 1 - type() a = 1 print(type(a)) # <class 'int'> b = '2.5' print(type(b)) # <class 'str'> # Example 2 - isinstance() c = -1 print(f'Is {c} a boolean?', isinstance(c, bool)) # False d = False print(f'Is {d} a boolean?', isinstance(d, bool)) # True ``` ### Convert Data Types ``` """INTEGERS""" int_to_float = float(10) # 10.0, <class 'float'> int_to_string = str(10) # 10, <class 'str'> """FLOATS""" float_to_int = int(2.5) # 2, <class 'float'> print(float_to_int) # Notice it does NOT ROUND! float_to_string = str(2.5) # '2.5', <class 'str'> ``` #### Converting Strings ``` string_to_int = int('mango') # ERROR! 
string_to_float = str('strawberry') # ERROR! ``` #### Converting Booleans As you'll see below, you can convert any number to a True boolean, BUT a True boolean will only ever become 1 or 1.0. ``` """EVALS TO TRUE""" int_to_boolean = bool(10) # True, <class 'bool'> int_to_boolean = bool(0) # False float_to_boolean = print(bool(-1)) # True, <class 'bool'> string_to_boolean = bool('peach') # True, <class 'bool'> """EVALS TO FALSE""" int_to_boolean = bool(0) # False, <class 'bool'> float_to_boolean = bool(0.0) # False, <class 'bool'> string_to_boolean = print(bool(' ')) # False, <class 'bool'> ``` Notice that the **ONLY** way a string converted into a boolean will be False is if it's empty. Spaces count as characters even though they wouldn't display anything if printed. ## Simple Integer, Float, & String Operators **Operators** are shortcuts for manipulating values stored in variables. ### Integer/Float Operators We can operate on integers/floats in the following ways: * Addition * Subtraction * Multiplication * Division * Modulus (This one divides and returns **only the remainder**.) ``` orig_num = 10 # Addition num1 = orig_num + 5 # 15 # Subtraction num2 = orig_num - 5 # 5 # Multiplication num3 = orig_num * 5 # 50 # Division num4 = orig_num / 5 # 2 # Modulus num5 = orig_num % 5 # 0 num6 = orig_num % 3 # 1 # if num1 % 3 == 0, then it's even # ``` ### String Operators * We can "add" strings * We CANNOT add strings to non strings ``` a = 'this string' b = 'that string' print(a + b) # 'this stringthat string' print(a + ' and ' + b) # 'this string and that string' print(a, 'and', b) # 'this string and that string' """ERROR!!! 
print('this will not work' + 4) doesn't work because you can't add a number to a string""" # quant = 4 # product = 'bananas' # how_much = quant + product # '4 bananas' ``` ## Class Practice PSETs [Basic Data Types](https://colab.research.google.com/github/mottaquikarim/PYTH2/blob/master/src/PSETS/nb/basic_data_inclass_psets.ipynb) ## Additional Resources * [A Repl.it Summarizing Print Statements](https://repl.it/@brandiw/Python-01-Variables-4?lite=true) * [Python For Beginners](http://www.pythonforbeginners.com/basics/python-variables) * [Python Programming Tutorial: Variables](https://www.youtube.com/watch?v=vKqVnr0BEJQ) * [Variables in Python](https://www.guru99.com/variables-in-python.html) * [Operators Cheatsheet](http://python-reference.readthedocs.io/en/latest/docs/operators/) * [Python Style Guide: Naming](https://www.python.org/dev/peps/pep-0008/#descriptive-naming-styles) * [Python-Strings](https://www.tutorialspoint.com/python/python_strings.htm) * [String Concatenation and Formatting](http://www.pythonforbeginners.com/concatenation/string-concatenation-and-formatting-in-python) * [String Concatenation and Formatting - Video](https://www.youtube.com/watch?v=jA5LW3bR0Us)
github_jupyter
# CT-LTI: Multiple Sample Performance Evaluation Table This table is found in the appendix section A.4. and summarizes the performance comparison between NODEC and OC in relative terms of error and energy. Without extensive hyperparameter optimization we see that NODEC is competitive to OC for all graphs and intial-target state settings. Furthermore, please make sure that the required data folder is available at the paths used by the script. You may generate the required data by running the python script ```nodec_experiments/ct_lti/gen_parameters.py```. Please also make sure that a trainingproceedure has produced results in the corresponding paths used below. Running ```nodec_experiments/ct_lti/multi_sample/train.ipynb``` with default paths is expected to generate at the requiered location. As neural network intialization is stochastic, please make sure that appropriate seeds are used or expect some variance to paper results. ``` %load_ext autoreload %autoreload 2 import numpy as np import pandas as pd ``` ## Gather data from files Below we gather the data from files generated by the ```train_and_eval.ipynb``` file. Please run this first if the data files are not present! 
``` data_folder = '../../../../data/results/ct_lti/multi_sample/' graphs = ['lattice', 'ba', 'tree'] graph_name = {'lattice' : 'Square Lattice', 'ba' : 'Barabasi Albert', 'tree' : 'Random Tree'} resulting_rows = [] for graph in graphs: graph_folder = data_folder + graph + '/' interactions = [50, 500, 5000] for interaction in interactions: mse_diffs = [] energy_diffs = [] for i in range(100): nnres = pd.read_csv(graph_folder+'nn_sample_'+str(i)+'_train_'+str(interaction)+'/epoch_metadata.csv') ocres = pd.read_csv(graph_folder+'oc_sample'+str(i)+'_ninter_'+str(interaction)+'/epoch_metadata.csv') nn_en = nnres['total_energy'].item() oc_en = ocres['total_energy'].item() nn_fl = nnres['final_loss'].item() oc_fl = ocres['final_loss'].item() mse_diffs.append((nn_fl-oc_fl)/oc_fl) energy_diffs.append((nn_en-oc_en)/oc_en) row = {'Graph' : graph_name[graph], 'Interaction Interval': 5.0/interaction, 'Median Energy' : round(np.quantile(energy_diffs, 0.5), 2), 'IQR Energy' : round(np.quantile(energy_diffs, 0.75)-np.quantile(energy_diffs,0.25), 2), 'Median MSE' : round(np.quantile(mse_diffs, 0.5), 2), 'IQR MSE' : round(np.quantile(mse_diffs, 0.75)-np.quantile(mse_diffs, 0.25), 2), 'Numerical Instabilities' : round((np.array(mse_diffs) > 10).mean(), 2) } resulting_rows.append(row) ``` ## Resulting Table ``` df = pd.DataFrame(resulting_rows).groupby(['Graph', 'Interaction Interval']).first() styler = df.style.apply(lambda x: ["background: lightblue" if v <= 0.1 and i in [0,2] else "" for i,v in enumerate(x)], axis = 1) styler ```
github_jupyter
## PLINK GWAS Regression Tutorial These commands walk through running the GWAS regressions from Marees et al. 2018 using PLINK. As in all PLINK tutorials, the comments and code from the original tutorial are included with R steps commented out (and replaced by python where necessary) and to disambiguate between comments from the original authors and me, the ```#*#``` character is used. ``` import os.path as osp import pandas as pd import numpy as np import plotnine as pn from IPython.display import display, Image %run ../init/benchmark.py register_timeop_magic(get_ipython(), 'plink') prev_dir = osp.expanduser('~/data/gwas/tutorial/2_PS_GWAS') data_dir = osp.expanduser('~/data/gwas/tutorial/3_AA_GWAS') data_dir ``` Copy necessary data from directory for second step in tutorial (population stratification) to directory for this step: ``` %%timeop -o ps0 %%bash -s "$prev_dir" "$data_dir" set -e echo "$1 $2" cp $1/HapMap_3_r3_13.* $2/ cp $1/covar_mds.txt $2/ ``` ### Step 1: Association Analysis via Hypothesis Testing Run chi-square tests for each variant testing whether or not independence between the frequency of cases and controls is unlikely for dominant and recessive alleles (as well as several others, all as separate tests). See [PLINK#assoc](https://www.cog-genomics.org/plink/1.9/assoc) for more details. ``` %%timeop -o aa1 %%bash -s "$data_dir" set -e; cd $1 # assoc plink --bfile HapMap_3_r3_13 --assoc --out assoc_results # Note, the --assoc option does not allow to correct covariates such as principal components (PC's)/ MDS components, which makes it # less suited for association analyses. printf '=%.0s' {1..80}; echo # HR echo "head assoc_results.assoc" head assoc_results.assoc ``` ### Step 2: Association Analysis va Regression Use the PLINK [--logistic](https://www.cog-genomics.org/plink/1.9/assoc#linear) command to run regressions adjusted for population stratification covariates. 
``` %%timeop -o aa2 %%bash -s "$data_dir" set -e; cd $1 # logistic # We will be using 10 principal components as covariates in this logistic analysis. We use the MDS components calculated from the previous tutorial: covar_mds.txt. plink --bfile HapMap_3_r3_13 --covar covar_mds.txt --logistic --hide-covar --out logistic_results # Note, we use the option -ñhide-covar to only show the additive results of the SNPs in the output file. # Remove NA values, those might give problems generating plots in later steps. awk '!/'NA'/' logistic_results.assoc.logistic > logistic_results.assoc_2.logistic printf '=%.0s' {1..80}; echo # HR echo "head logistic_results.assoc_2.logistic" head logistic_results.assoc_2.logistic ``` ### Step 3: Visualization ``` def get_data(path): return ( pd.read_csv(path, sep='\s+') .sort_values(['CHR', 'BP']) .reset_index(drop=True) .rename_axis('POS', axis='index') .reset_index() ) get_data(osp.join(data_dir, 'assoc_results.assoc')).head() get_data(osp.join(data_dir, 'logistic_results.assoc_2.logistic')).head() def manhattan_plot(df, limit=20000): return ( pn.ggplot( df .sort_values('P') .reset_index(drop=True) .head(limit) .assign(LOGP=lambda df: -np.log10(df['P'])) .assign(CHR=lambda df: df['CHR'].astype(str)) , pn.aes(x='POS', y='LOGP', fill='CHR', color='CHR') ) + pn.geom_point() + pn.geom_hline(yintercept=5) + pn.theme_bw() + pn.theme(figure_size=(16, 4)) ) def qq_plot(df, limit=20000): return ( pn.ggplot( df .sort_values('P') .assign(OBS=lambda df: -np.log10(df['P'])) .assign(EXP=lambda df: -np.log10(np.arange(1, len(df) + 1) / float(len(df)))) .head(limit), pn.aes(x='EXP', y='OBS') ) + pn.geom_point() + pn.geom_abline() + pn.theme_bw() ) display(manhattan_plot(get_data(osp.join(data_dir, 'logistic_results.assoc_2.logistic'))) + pn.ggtitle('Manhattan Plot (Logistic Regression)')) display(manhattan_plot(get_data(osp.join(data_dir, 'assoc_results.assoc'))) + pn.ggtitle('Manhattan Plot (Assocation Tests)')) 
display(Image('figures/manhattan-logistic.jpeg')) display(Image('figures/manhattan-assoc.jpeg')) display(qq_plot(get_data(osp.join(data_dir, 'logistic_results.assoc_2.logistic'))) + pn.ggtitle('QQ Plot (Logistic Regression)')) display(qq_plot(get_data(osp.join(data_dir, 'assoc_results.assoc'))) + pn.ggtitle('QQ Plot (Assocation Tests)')) display(Image('figures/QQ-Plot_logistic.jpeg')) display(Image('figures/QQ-Plot_assoc.jpeg')) ```
github_jupyter
### quero usar matplotlib para ilustrar permutações A primeira coisa é fazer circulos numerados ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False) plt.axes(aspect="equal") fig = plt.gcf() fig.gca().add_artist(circle1) plt.axis("off") circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False) circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False) circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False) circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False) circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False) plt.axes(aspect="equal") fig1 = plt.gcf() plt.axis("off") fig1.gca().add_artist(circle1) fig1.gca().add_artist(circle2) fig1.gca().add_artist(circle3) fig1.gca().add_artist(circle4) fig1.gca().add_artist(circle5) circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False) circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False) circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False) circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False) circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False) circled1=plt.Circle((1,0),.1,color='r', alpha=0.2, clip_on=False) circled2=plt.Circle((1,0.2),.1,color='y', alpha=0.2, clip_on=False) circled3=plt.Circle((1,0.4),.1,color='b', alpha=0.2, clip_on=False) circled4=plt.Circle((1,0.6),.1,color='g', alpha=0.2, clip_on=False) circled5=plt.Circle((1,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False) plt.axes(aspect="equal") fig1 = plt.gcf() plt.axis("off") fig1.gca().add_artist(circle1) fig1.gca().add_artist(circle2) fig1.gca().add_artist(circle3) fig1.gca().add_artist(circle4) fig1.gca().add_artist(circle5) fig1.gca().add_artist(circled1) fig1.gca().add_artist(circled2) fig1.gca().add_artist(circled3) fig1.gca().add_artist(circled4) fig1.gca().add_artist(circled5) circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, 
clip_on=False) circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False) circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False) circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False) circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False) circled1=plt.Circle((1,0),.1,color='r', alpha=0.2, clip_on=False) circled2=plt.Circle((1,0.2),.1,color='y', alpha=0.2, clip_on=False) circled3=plt.Circle((1,0.4),.1,color='b', alpha=0.2, clip_on=False) circled4=plt.Circle((1,0.6),.1,color='g', alpha=0.2, clip_on=False) circled5=plt.Circle((1,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False) plt.axes(aspect="equal") fig1 = plt.gcf() plt.axis("off") fig1.gca().add_artist(circle1) fig1.gca().add_artist(circle2) fig1.gca().add_artist(circle3) fig1.gca().add_artist(circle4) fig1.gca().add_artist(circle5) fig1.gca().add_artist(circled1) fig1.gca().add_artist(circled2) fig1.gca().add_artist(circled3) fig1.gca().add_artist(circled4) fig1.gca().add_artist(circled5) # as arestas fig1.gca().plot([0.15,0.85],[0,0.8], color="red", alpha=0.6 ) fig1.gca().text(0.,0.,r'$5$', fontsize=20,verticalalignment='center', horizontalalignment='center') fig1.gca().text(1,0,r'$5$', fontsize=20, verticalalignment='center', horizontalalignment='center') fig1.gca().text(1,0.8,r'$1$', fontsize=20, verticalalignment='center', horizontalalignment='center') fig1.gca().text(0,0.8,r'$1$', fontsize=20, verticalalignment='center', horizontalalignment='center') fig1.gca().plot([0.15,0.85],[0.8,0.4], color=(.2,.6,.7), alpha=0.6 ) # agora faremos as funções. 
primeiro a cor de um inteiro def cor(n): ''' Dado um inteiro n designa uma cor''' return (n/(n+1), 1- n/(n+1), 1-(n+2)/(n+5)) #teste circle1=plt.Circle((0,0),.1,color=cor(1), alpha=0.2, clip_on=False) circle2=plt.Circle((0,0.2),.1,color=cor(3), alpha=0.2, clip_on=False) plt.axes(aspect="equal") fig1 = plt.gcf() plt.axis("off") fig1.gca().add_artist(circle1) fig1.gca().add_artist(circle2) def circulo(x,n): '''Define um circulo de centro (x,0.2*n) de raio 0.1 e cor n''' return plt.Circle((x,0.2*n), .1, color=cor(n), alpha=0.3, clip_on=False ) #teste plt.axes(aspect="equal") fig1 = plt.gcf() plt.axis("off") fig1.gca().add_artist(circulo(0,3)) fig1.gca().add_artist(circulo(0,4)) # função pilha de circulos def pilha_de_circulos(x,n): '''Faz uma pilha de n circulos sobre a abcissa x''' for k in range(n): fig1.gca().add_artist(circulo(x,k)) fig1.gca().text(x,0.2*k,r'$'+str(k+1)+'$', fontsize=20,verticalalignment='center', horizontalalignment='center') return # teste desta função: plt.axes(aspect="equal") fig1 = plt.gcf() plt.axis("off") pilha_de_circulos(0,3) pilha_de_circulos(1,3) pilha_de_circulos(2,3) # agora a função mapa_permu def mapa_permu(x,p): ''' desenha a permutação p (uma lista) na posição x''' l=len(p) x1= x+.15 x2= x+.85 for y in range(l): fig1.gca().plot([x1,x2],[0.2*y,0.2*(p[y]-1)], color=cor(y), alpha=0.6 ) return # teste plt.axes(aspect="equal") fig1 = plt.gcf() plt.axis("off") pilha_de_circulos(0,3) pilha_de_circulos(1,3) pilha_de_circulos(2,3) mapa_permu(0,[2,1,3]) mapa_permu(1.0, [3,1,2]) plt.axes(aspect="equal") fig1 = plt.gcf() plt.axis("off") pilha_de_circulos(0,5) pilha_de_circulos(1,5) mapa_permu(0,[3,2,1,5,4]) def pgrafico(x,p): '''Faz o grafico da permutação p começando em x''' n=len(p) fig1= plt.gcf() plt.axis("off") pilha_de_circulos(x,n) pilha_de_circulos(x+1,n) return mapa_permu(x,p) #teste plt.axes(aspect="equal") fig1= plt.gcf() plt.axis("off") pgrafico(0,[3,1,2]) ```
github_jupyter
# Fitting to existing data ``` # Base Data Science snippet import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import time from tqdm import tqdm_notebook %matplotlib inline %load_ext autoreload %autoreload 2 ``` Inspiration - https://www.lewuathe.com/covid-19-dynamics-with-sir-model.html ``` import sys sys.path.append("../") from covid.dataset import fetch_daily_case from covid.models import SIR from covid.models.states import CompartmentStates ``` # Fitting French data to SIR model ``` cases = fetch_daily_case(return_data=True) cases.head() ``` ## Getting cases for all France ### Fetching French data and prepare it ``` cases_fr = ( cases.query("granularite =='pays'") .query("source_nom=='Ministère des Solidarités et de la Santé'") [["date","cas_confirmes","deces","gueris"]] .drop_duplicates(subset = ["date"]) .fillna(0.0) .assign(date = lambda x : pd.to_datetime(x["date"])) .set_index("date") ) start,end = cases_fr.index[0],cases_fr.index[-1] date_range = pd.date_range(start,end,freq="D") cases_fr = cases_fr.reindex(date_range).fillna(method="ffill") cases_fr.plot(figsize = (15,4)) plt.show() ``` ### Recomputing compartments ``` cases_fr["I"] = cases_fr["cas_confirmes"] - (cases_fr["deces"] + cases_fr["gueris"]) cases_fr["R"] = (cases_fr["deces"] + cases_fr["gueris"]) pop_fr = 66.99*1e6 cases_fr["S"] = pop_fr - cases_fr["I"] - cases_fr["R"] cases_fr[["S","I","R"]].plot(figsize = (15,4)); cases_fr[["I","R"]].plot(figsize = (15,4)); ``` ### Smoothing curves ``` from scipy.signal import savgol_filter import statsmodels.api as sm def smooth(y,p = 1600): cycle, trend = sm.tsa.filters.hpfilter(y, p) return trend pd.Series(savgol_filter(cases_fr["I"], 51, 2)).plot(figsize = (15,4)) pd.Series(savgol_filter(cases_fr["R"], 51, 2)).plot() plt.show() pd.Series(smooth(cases_fr["I"],6.25)).plot(figsize = (15,4),label = "Is") pd.Series(cases_fr["I"]).plot(label = "I") pd.Series(smooth(cases_fr["R"],6.25)).plot(label = "Rs") 
pd.Series(cases_fr["R"]).plot(label = "R") plt.legend() plt.show() pd.Series(smooth(cases_fr["I"],1600)).plot(figsize = (15,4),label = "Is") pd.Series(cases_fr["I"]).plot(label = "I") pd.Series(smooth(cases_fr["R"],1600)).plot(label = "Rs") pd.Series(cases_fr["R"]).plot(label = "R") plt.legend() plt.show() ``` ## Preparing SIR model ``` from covid.models import SIR # Parameters N = pop_fr beta = 5/4 gamma = 1/4 start_date = cases_fr.index[0] sir = SIR(N,beta,gamma) states = sir.solve((N-1,1,0),start_date = start_date) states.head() states.show(plotly = False) ``` So of course parameters are not correct, in this version of the SIR model 30+ million persons get infected.<br> Even because of test biases, estimates are more around 5 to 12% not 40%. <br> Moreover, starting from first cases a peak was to be expected in mid-February, and in France lockdown started on the 10th of March. ``` states["I"].plot(figsize = (15,4)) cases_fr["I"].plot() plt.show() states["I"].plot(figsize = (15,4),label = "I_pred") cases_fr["I"].plot(secondary_y = True,label = "I_true") plt.legend() plt.show() ``` ## Some intuition about parameter sensibility ``` from ipywidgets import interact @interact(beta = 5/4,gamma = 1/4) def show_sir(beta,gamma): # Create SIR model N = pop_fr start_date = cases_fr.index[0] sir = SIR(N,beta,gamma) states = sir.solve((N-1,1,0),start_date = start_date) # Plot result states["I"].plot(figsize = (15,2),label = "I_pred") cases_fr["I"].plot(secondary_y = True,label = "I_true") plt.legend() plt.show() states["I"].plot(figsize = (15,2),label = "I_pred") cases_fr["I"].plot(label = "I_true") plt.legend() plt.show() ``` ## Fitting parameters with hyperopt ### First attempt ##### References - https://towardsdatascience.com/hyperparameter-optimization-in-python-part-2-hyperopt-5f661db91324 - http://hyperopt.github.io/hyperopt/ ##### Space ``` from hyperopt import hp space = { "beta":hp.uniform('beta',0.1,5), "gamma":hp.uniform('gamma',1/15,1/3), } ``` ##### Loss function 
between prediction and true ``` def loss_pred(states,true,cols = None): if cols is None: cols = states.columns.tolist() loss = 0 for col in cols: loss = np.linalg.norm(states.loc[true.index,col].values - true[col].values) return loss loss_pred(states,cases_fr,cols = ["I","R"]) loss_pred(cases_fr,cases_fr,cols = ["I","R"]) ``` ##### Final loss function ``` def objective(params): sir = SIR(N,params["beta"],params["gamma"]) states = sir.solve((N-1,1,0),start_date = start_date) return loss_pred(states,cases_fr,cols = ["I","R"]) ``` ##### Hyperopt optimization ``` from hyperopt import fmin, tpe, Trials trials = Trials() best = fmin( fn=objective, space=space, trials=trials, algo=tpe.suggest, max_evals=1000) print(best) ``` ##### Visualizing results ``` sir = SIR(N,best["beta"],best["gamma"]) states = sir.solve((N-1,1,0),start_date = start_date) states["I"].plot(figsize = (15,2),label = "pred") cases_fr["I"].plot(label = "true") plt.legend() plt.show() states["R"].plot(figsize = (15,4),label = "pred") cases_fr["R"].plot(label = "true") plt.legend() plt.show() ```
github_jupyter
<img src="https://raw.githubusercontent.com/brazil-data-cube/code-gallery/master/img/logo-bdc.png" align="right" width="64"/> # <span style="color:#336699">Introduction to the SpatioTemporal Asset Catalog (STAC)</span> <hr style="border:2px solid #0077b9;"> <div style="text-align: left;"> <a href="https://nbviewer.jupyter.org/github/brazil-data-cube/code-gallery/blob/master/jupyter/Python/stac/stac-introduction.ipynb"><img src="https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.svg" align="center"/></a> </div> <br/> <div style="text-align: center;font-size: 90%;"> Matheus Zaglia<sup><a href="https://orcid.org/0000-0001-6181-2158"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Rennan Marujo<sup><a href="https://orcid.org/0000-0002-0082-9498"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Gilberto R. Queiroz<sup><a href="https://orcid.org/0000-0001-7534-0219"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup> <br/><br/> Earth Observation and Geoinformatics Division, National Institute for Space Research (INPE) <br/> Avenida dos Astronautas, 1758, Jardim da Granja, São José dos Campos, SP 12227-010, Brazil <br/><br/> Contact: <a href="mailto:brazildatacube@inpe.br">brazildatacube@inpe.br</a> <br/><br/> Last Update: March 12, 2021 </div> <br/> <div style="text-align: justify; margin-left: 25%; margin-right: 25%;"> <b>Abstract.</b> This Jupyter Notebook gives an overview on how to use the STAC service to discover and access the data products from the <em>Brazil Data Cube</em>. </div> <br/> <div style="text-align: justify; margin-left: 25%; margin-right: 25%;font-size: 75%; border-style: solid; border-color: #0077b9; border-width: 1px; padding: 5px;"> <b>This Jupyter Notebook is a supplement to the following paper:</b> <div style="margin-left: 10px; margin-right: 10px"> Zaglia, M.; Vinhas, L.; Queiroz, G. R.; Simões, R. 
<a href="http://urlib.net/rep/8JMKD3MGPDW34R/3UFEFD8" target="_blank">Catalogação de Metadados do Cubo de Dados do Brasil com o SpatioTemporal Asset Catalog</a>. In: Proceedings XX GEOINFO, November 11-13, 2019, São José dos Campos, SP, Brazil. p 280-285. </div> </div> # Introduction <hr style="border:1px solid #0077b9;"> The [**S**patio**T**emporal **A**sset **C**atalog (STAC)](https://stacspec.org/) is a specification created through the colaboration of several organizations intended to increase satellite image search interoperability. The diagram depicted in the picture contains the most important concepts behind the STAC data model: <center> <img src="https://raw.githubusercontent.com/brazil-data-cube/code-gallery/master/img/stac/stac-model.png" width="480" /> <br/> <b>Figure 1</b> - STAC model. </center> The description of the concepts below are adapted from the [STAC Specification](https://github.com/radiantearth/stac-spec): - **Item**: a `STAC Item` is the atomic unit of metadata in STAC, providing links to the actual `assets` (including thumbnails) that they represent. It is a `GeoJSON Feature` with additional fields for things like time, links to related entities and mainly to the assets. According to the specification, this is the atomic unit that describes the data to be discovered in a `STAC Catalog` or `Collection`. - **Asset**: a `spatiotemporal asset` is any file that represents information about the earth captured in a certain space and time. - **Catalog**: provides a structure to link various `STAC Items` together or even to other `STAC Catalogs` or `Collections`. - **Collection:** is a specialization of the `Catalog` that allows additional information about a spatio-temporal collection of data. # STAC Client API <hr style="border:1px solid #0077b9;"> For running the examples in this Jupyter Notebook you will need to install the [STAC client for Python](https://github.com/brazil-data-cube/stac.py). 
To install it from PyPI using `pip`, use the following command: ``` #!pip install stac.py ``` In order to access the funcionalities of the client API, you should import the `stac` package, as follows: ``` import stac ``` After that, you can check the installed `stac` package version: ``` stac.__version__ ``` Then, create a `STAC` object attached to the Brazil Data Cube' STAC service: ``` service = stac.STAC('https://brazildatacube.dpi.inpe.br/stac/', access_token='change-me') ``` # Listing the Available Data Products <hr style="border:1px solid #0077b9;"> In the Jupyter environment, the `STAC` object will list the available image and data cube collections from the service: ``` service ``` or, access the `collections` property: ``` service.collections ``` # Retrieving the Metadata of a Data Product <hr style="border:1px solid #0077b9;"> The `collection` method returns information about a given image or data cube collection identified by its name. In this example we are retrieving information about the datacube collection `CB4_64_16D_STK-1`: ``` collection = service.collection('CB4_64_16D_STK-1') collection ``` # Retrieving Collection Items <hr style="border:1px solid #0077b9;"> The `get_items` method returns the items of a given collection: ``` collection.get_items() ``` The `get_items` method also supports filtering rules through the specification of a rectangle (`bbox`) or a date and time (`datatime`) criterias: ``` items = collection.get_items( filter={ 'bbox':'-46.62597656250001,-13.19716452328198,-45.03570556640626,-12.297068292853805', 'datetime':'2018-08-01/2019-07-31', 'limit':10 } ) items ``` From the item collection retrieved with the `get_item` method, it is possible to traverse the list of items: ``` for item in items: print(item.id) ``` or, it is possible to use the index operador (`[]`) with the ``features`` property in order to retrieve a specific item from the collection: ``` item = items.features[0] item.id ``` # Assets <hr style="border:1px solid 
#0077b9;"> The assets with the links to the images, thumbnails or specific metadata files, can be accessed through the property `assets` (from a given item): ``` assets = item.assets ``` Then, from the assets it is possible to traverse or access individual elements: ``` for k in assets.keys(): print(k) ``` The metadata related to the CBERS-4/AWFI blue band is available under the dictionary key `BAND13`: ``` blue_asset = assets['BAND13'] blue_asset ``` To iterate over the item's assets, use the following pattern: ``` for asset in assets.values(): print(asset) ``` # Using RasterIO and NumPy <hr style="border:1px solid #0077b9;"> The `rasterio` library can be used to read image files from the Brazil Data Cube's service on-the-fly and then to create `NumPy` arrays. The `read` method of an `Item` can be used to perform the reading and array creation: ``` nir = item.read('BAND16') ``` <div style="text-align: justify; margin-left: 15%; margin-right: 15%; border-style: solid; border-color: #0077b9; border-width: 1px; padding: 5px;"> <b>Note:</b> If there are errors because of your pyproj version, you can run the code below as specified in <a href="https://rasterio.readthedocs.io/en/latest/faq.html#why-can-t-rasterio-find-proj-db-rasterio-from-pypi-versions-1-2-0" target="_blank">rasterio documentation</a> and try again: import os del os.environ['PROJ_LIB'] </div> ``` nir ``` The next code cell imports the `Window` class from the `rasterio` library in order to retrieve a subset of an image and then create an array: ``` from rasterio.windows import Window ``` We can specify a subset of the image file (window or chunk) to be read. 
Let's read a 500 x 500 pixel window starting at pixel (0, 0), for the spectral bands `red`, `green` and `blue`: ``` red = item.read('BAND15', window=Window(0, 0, 500, 500)) # Window(col_off, row_off, width, height) green = item.read('BAND14', window=Window(0, 0, 500, 500)) blue = item.read('BAND13', window=Window(0, 0, 500, 500)) blue ``` # Using Matplotlib to Visualize Images <hr style="border:1px solid #0077b9;"> The `Matplotlib` can be used to plot the arrays read in the last section: ``` %matplotlib inline from matplotlib import pyplot as plt fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(12, 4)) ax1.imshow(red, cmap='gray') ax2.imshow(green, cmap='gray') ax3.imshow(blue, cmap='gray') ``` Using `Numpy` we can stack the previous arrays and use `Matplotlib` to plot a color image, but first we need to normalize their values: ``` import numpy def normalize(array): """Normalizes numpy arrays into scale 0.0 - 1.0""" array_min, array_max = array.min(), array.max() return ((array - array_min)/(array_max - array_min)) rgb = numpy.dstack((normalize(red), normalize(green), normalize(blue))) plt.imshow(rgb) ``` # Retrieving Image Files <hr style="border:1px solid #0077b9;"> The file related to an asset can be retrieved through the `download` method. 
The cell code below shows how to download the image file associated with the asset into a folder named `img`: ``` blue_asset.download('img') ``` In order to download all files related to an item, use the `Item.download` method: ``` item.download('images') ``` Note that the URL for a given asset can be retrieved by the property `href`: ``` blue_asset.href ``` # References <hr style="border:1px solid #0077b9;"> - [Spatio Temporal Asset Catalog Specification](https://stacspec.org/) - [Brazil Data Cube Python Client Library for STAC Service - GitHub Repository](https://github.com/brazil-data-cube/stac.py) # See also the following Jupyter Notebooks <hr style="border:1px solid #0077b9;"> * [NDVI calculation on images obtained through STAC](./stac-ndvi-calculation.ipynb) * [Thresholding images obtained through STAC](./stac-image-threshold.ipynb) * [Calculating Image Difference on images obtained through STAC](./stac-image-difference.ipynb)
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import plotly.offline as py import plotly.graph_objs as go import plotly.tools as tls import seaborn as sns import io from google.colab import files uploaded = files.upload() data = uploaded['LAND TEMPERATURES FILE FROM KAGGLE IMPORT'] inputData = pd.read_csv(io.BytesIO(data)) #Process and Clean Data - from Kaggle DataSet countriesToClear = ['CHOOSE COUNTRIES'] countriesToBeReplaced = ['CHOOSE COUNTRIES'] countriedReplacedWith = ['CHOOSE REPLACEMENTS'] cleanedCountries = inputData[~inputData['Country'].isin(countriesToClear)] cleanedCountries = cleanedCountries.replace(countriesToBeReplaced ,countriedReplacedWith) #Average Country Temperatures countries = np.unique(cleanedCountries['Country']) meanTemperatures = [] for country in countries: meanTempsByCountry = cleanedCountries[cleanedCountries['Country'] == country]['AverageTemperature'].mean() meanTemperatures.append(meanTempsByCountry) meanTempBar, countryBar = (list(x) for x in zip(*sorted(zip(meanTemperatures, countries), reverse = True))) #Plot Graphs of Hottest and Coldest sns.set(font_scale=1) a,b = plt.subplots(figsize=(5, 10)) sns.barplot(meanTempBar[:25], countryBar[:25], palette = sns.color_palette('coolwarm', len(countries))[::1]) b.set(xlabel='Average temperature', title='Warmest 25 Countries') sns.set(font_scale=1) a,b = plt.subplots(figsize=(5, 10)) sns.barplot(meanTempBar[-25:], countryBar[-25:], palette = sns.color_palette('coolwarm', len(countries))[::1]) b.set(xlabel='Average temperature', title='Coldest 25 Countries') # Combine Data Into Zipped List countriesToPlot = countryBar yAxis = np.arange(len(countriesToPlot)) tempsToPlot = meanTempBar countries_zipped = list(zip(countryBar,meanTempBar)) # Plot All Countries Mean Temperatures plt.bar(yAxis,tempsToPlot, align='center', alpha=0.5) plt.ylabel('Temperature / degrees celcius') plt.xlabel('Countries / country number') plt.title('Average Global 
Temperatures 1750-2013') plt.show() # Cycle Through Each Country counter = 0 # for country in countries: if counter < 10: temperature = meanTempsByCountry = cleanedCountries[cleanedCountries['Country'] == country]['AverageTemperature'].tail(165).iloc[::12] date = meanTempsByCountry = cleanedCountries[cleanedCountries['Country'] == country]['dt'].tail(165).iloc[::12] plot(date,temperature) # Plot Temperatures of Each Country Over A Year xlabel('Time / Date ') ylabel('Temperature / Degrees Celcius') title(country) plt.xticks(rotation=90) grid(True) show() ```
github_jupyter
``` import bare ``` #### Plot detected interest points over images ``` image_file_name = 'image.tif' ip_file_name = 'image.vwip' ip_csv_file_name = bare.core.write_ip_to_csv(ip_file_name) bare.plot.ip_plot(image_file_name, ip_csv_file_name) ``` !['example_plot_IP'](example_plots/interest_points/v2_sub8_interest_points.png) #### Plot match points found between two images ``` image1_file_name = 'image1.tif' image2_file_name = 'image2.tif' match_file_name = 'image1__image2-clean.match' match_csv_file_name = bare.core.write_mp_to_csv(match_file_name) bare.plot.mp_plot(image1_file_name, image2_file_name, match_csv_file_name,out_dir_abs='qc_plots/match_points') ``` !['example_plot_mp'](example_plots/match_points/v2_sub8__v3_sub8_match_points.png) #### Plot dxdy after bundle adjustment ``` ba_dir = 'bundle_adjust_output_directory/' bare.plot.plot_dxdy(ba_dir) ``` !['example_plot_dxdy'](example_plots/dxdy/v3_sub8__v4_sub8_dxdy_plot.png) #### Plot tsai camera xyz positions before and after bundle adjustment ``` ba_dir = 'bundle_adjust_output_directory/' img_dir = 'input_image_directory/' input_cam_dir = 'input_cameras_directory/' bare.plot.plot_tsai_camera_positions_before_and_after(ba_dir, input_cam_dir) ``` !['example_plot_xy'](example_plots/camera_positions/xy_camera_positions_before_and_after_bundle_adjust.png) !['example_plot_z'](example_plots/camera_positions/z_camera_positions_before_and_after_bundle_adjust.png) #### Plot residuals after bundle adjustment The figure on the right shows the effect of outliers. ``` ba_dir = 'bundle_adjust_output_directory/' bare.plot.plot_residuals(ba_dir) ``` !['example_plot_6'](example_plots/residuals/ba_match_residuals_before_and_after.jpg) #### Plot WV3 footprint and scanner positions during acquisition. 
``` image_file_name = 'wv_image.tif' camera_file = 'wv_image.xml' reference_dem = 'reference_dem.tif' bare.plot.plot_footprint(image_file_name, camera_file, reference_dem) ``` !['example_plot_wv_foot_cam'](example_plots/camera_footprint/104001004F176F00.r100_footprint.png) #### Plot WV3 footprint. ``` image_file_name = 'wv_image.tif' camera_file = 'wv_image.xml' reference_dem = 'reference_dem.tif' bare.plot.plot_footprint(image_file_name, camera_file, reference_dem, cam_on=False) ``` !['example_plot_wv_foot'](example_plots/camera_footprint/104001004F176F00.r100_footprint_cam_off.png) #### Plot tsai camera footprint and position. ``` image_file_name = 'image.tif' camera_file = 'image.tsai' reference_dem = 'reference_dem.tif' bare.plot.plot_footprint(image_file_name, camera_file, reference_dem) ``` !['example_plot_tsai_foot_cam'](example_plots/camera_footprint/v3_sub8_footprint.png) #### Batch plot tsai camera footprints. ``` img_dir = 'input_image_directory/' cam_dir = 'input_tsai_camera_directory/' reference_dem = 'reference_dem.tif' bare.batch.plot_footprints(cam_dir, img_dir, reference_dem) ``` !['example_plot_multiple_footprints'](example_plots/camera_footprint/footprints.png) #### Run all ``` ba_dir = 'bundle_adjust_output_directory/' img_dir = 'input_image_directory/' input_cam_dir = 'input_cameras_directory/' bare.batch.plot_all_qc_products(ba_dir, img_dir, input_cam_dir, img_extension='8.tif') ```
github_jupyter
``` %matplotlib inline import numpy as np import pandas as pd import scipy import sklearn import spacy import matplotlib.pyplot as plt import seaborn as sns import re from nltk.corpus import state_union, stopwords from collections import Counter from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings("ignore") import nltk nltk.download() state_union.fileids() #Let's analyze Eisenhower and Kennedy eisenhower = state_union.raw('1953-Eisenhower.txt') kennedy = state_union.raw('1962-Kennedy.txt') # Utility function for standard text cleaning. def text_cleaner(text): # Visual inspection identifies a form of punctuation spaCy does not # recognize: the double dash '--'. Better get rid of it now! text = re.sub(r'--',' ',text) text = re.sub("[\[].*?[\]]", "", text) text = ' '.join(text.split()) return text eisenhower = text_cleaner(eisenhower) kennedy = text_cleaner(kennedy) #SpaCy nlp = spacy.load('en') eisenhower_doc = nlp(eisenhower) kennedy_doc = nlp(kennedy) # Group into sentences. eisenhower_sents = [[sent, 'Eisenhower'] for sent in eisenhower_doc.sents] kennedy_sents = [[sent, "Kennedy"] for sent in kennedy_doc.sents] # Combine the sentences from the two novels into one data frame. sentences = pd.DataFrame(eisenhower_sents + kennedy_sents) sentences.head() # how long are their speeches? print('Eisenhower speech length:', len(eisenhower_doc)) print('Kennedy speech length:', len(kennedy_doc)) # check excerpts for any other cleaning needed print(eisenhower_doc[:100]) print(kennedy_doc[:100]) ``` ## Bag of Words ``` # Utility function to create a list of the 2000 most common words. def bag_of_words(text): # Filter out punctuation and stop words. allwords = [token.lemma_ for token in text if not token.is_punct and not token.is_stop] # Return the most common words. return [item[0] for item in Counter(allwords).most_common(2000)] # Creates a data frame with features for each word in our common word set. 
# Each value is the count of the times the word appears in each sentence. def bow_features(sentences, common_words): # Scaffold the data frame and initialize counts to zero. df = pd.DataFrame(columns=common_words) df['text_sentence'] = sentences[0] df['text_source'] = sentences[1] df.loc[:, common_words] = 0 # Process each row, counting the occurrence of words in each sentence. for i, sentence in enumerate(df['text_sentence']): # Convert the sentence to lemmas, then filter out punctuation, # stop words, and uncommon words. words = [token.lemma_ for token in sentence if ( not token.is_punct and not token.is_stop and token.lemma_ in common_words )] # Populate the row with word counts. for word in words: df.loc[i, word] += 1 # This counter is just to make sure the kernel didn't hang. if i % 500 == 0: print("Processing row {}".format(i)) return df # Set up the bags. eisenhowerwords = bag_of_words(eisenhower_doc) kennedywords = bag_of_words(kennedy_doc) # Combine bags to create a set of unique words. 
common_words = set(eisenhowerwords + kennedywords) # Create bow features bow = bow_features(sentences, common_words) bow.head() ``` ## TF-IDF ``` #sentences eisenhower = state_union.sents('1953-Eisenhower.txt') kennedy = state_union.sents('1962-Kennedy.txt') # lists eisenhower_list = [" ".join(sent) for sent in eisenhower] kennedy_list = [" ".join(sent) for sent in kennedy] together = eisenhower_list + kennedy_list from sklearn.feature_extraction.text import TfidfVectorizer #X_train, X_test = train_test_split(together, test_size=0.4, random_state=0) vectorizer = TfidfVectorizer(max_df=0.5, # drop words that occur in more than half the paragraphs min_df=2, # only use words that appear at least twice stop_words='english', lowercase=True, #convert everything to lower case (since Alice in Wonderland has the HABIT of CAPITALIZING WORDS for EMPHASIS) use_idf=True,#we definitely want to use inverse document frequencies in our weighting norm=u'l2', #Applies a correction factor so that longer paragraphs and shorter paragraphs get treated equally smooth_idf=True #Adds 1 to all document frequencies, as if an extra document existed that used every word once. Prevents divide-by-zero errors ) #Applying the vectorizer together_tfidf=vectorizer.fit_transform(together) print("Number of features: %d" % together_tfidf.get_shape()[1]) tfidf = vectorizer.fit_transform(together).tocsr() ``` ### These two texts, even though just a few years apart, are not highly correlated. There could be many reasons for this, but perhaps it's a shift in party in the White House? Or, different events at the time. 
## Supervised Learning Models #### Logistic Regression ``` #Imports from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score, cross_val_predict from sklearn import metrics # Set X, y and train, test, split y = bow['text_source'] X = np.array(bow.drop(['text_sentence','text_source'], 1)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0) # Logistic Regression Model with BoW lrb = LogisticRegression() model = lrb.fit(X_train, y_train) pred = lrb.predict(X_test) print(X_train.shape, y_train.shape) print('BoW Training set score:', lrb.score(X_train, y_train)) print('BoW Test set score:', lrb.score(X_test, y_test)) print('BoW Predictions:', pred[0:5]) #5 fold Cross Validation scores = cross_val_score(model, X, y, cv=5) print('Cross-validated scores:', scores) print('Avg. Score ', np.mean(cross_val_score(lrb, X, y, cv=5))) # Tfidf X_tfidf = tfidf y_tfidf = ['Eisenhower']*len(eisenhower_list) + ['Kennedy']*len(kennedy_list) X2_train, X2_test, y2_train, y2_test = train_test_split(X_tfidf, y_tfidf, test_size=0.4, random_state=0) # Logistic Regression Model with TFIDF lrt = LogisticRegression() model = lrt.fit(X2_train, y2_train) pred = lrt.predict(X2_test) print('\nTFIDF Training set score:', lrt.score(X2_train, y2_train)) print('TFIDF Test set score:', lrt.score(X2_test, y2_test)) print('Predictions:', pred[0:5]) #5 fold Cross Validation scores = cross_val_score(model, X_tfidf, y_tfidf, cv=5) print('Cross-validated scores:', scores) print('Avg. 
Score ', np.mean(cross_val_score(lrt, X_tfidf, y_tfidf, cv=5))) ``` #### Random Forest ``` #Import from sklearn.ensemble import RandomForestClassifier #Random Forest Model with BoW rfcb = RandomForestClassifier() model = rfcb.fit(X_train, y_train) pred = rfcb.predict(X_test) print('Training set score:', rfcb.score(X_train, y_train)) print('Test set score:', rfcb.score(X_test, y_test)) print('Predictions:', pred[0:5]) #5 fold cross validation scores = cross_val_score(model, X, y, cv=5) print('Cross-validated scores:', scores) print('Avg. Score ', np.mean(cross_val_score(rfcb, X, y, cv=5))) # Random Forest Model with TFIDF rfct = RandomForestClassifier() model = rfct.fit(X2_train, y2_train) pred = rfct.predict(X2_test) print('\nTFIDF Training set score:', rfct.score(X2_train, y2_train)) print('TFIDF Test set score:', rfct.score(X2_test, y2_test)) print('Predictions:', pred[0:5]) #5 fold Cross Validation scores = cross_val_score(model, X_tfidf, y_tfidf, cv=5) print('Cross-validated scores:', scores) print('Avg. Score ', np.mean(cross_val_score(rfct, X_tfidf, y_tfidf, cv=5))) ``` #### XGBoost Classifier ``` #import from xgboost import XGBClassifier #Our XGBoost Classifier clfb = XGBClassifier() model= clfb.fit(X_train, y_train) print('Training set score:', clfb.score(X_train, y_train)) print('Test set score:', clfb.score(X_test, y_test)) #5 fold cross validation scores = cross_val_score(model, X, y, cv=5) print('Cross-validated scores:', scores) print('Avg. Score ', np.mean(cross_val_score(clfb, X, y, cv=5))) # Random Forest Model with TFIDF clft = XGBClassifier() model = clft.fit(X2_train, y2_train) pred = clft.predict(X2_test) print('\nTFIDF Training set score:', clft.score(X2_train, y2_train)) print('TFIDF Test set score:', clft.score(X2_test, y2_test)) print('Predictions:', pred[0:5]) #5 fold Cross Validation scores = cross_val_score(model, X_tfidf, y_tfidf, cv=5) print('Cross-validated scores:', scores) print('Avg. 
Score ', np.mean(cross_val_score(clft, X_tfidf, y_tfidf, cv=5))) # We'll make 500 iterations, use 2-deep trees, and set our loss function. params = {'n_estimators': 500, 'max_depth': 2, 'loss': 'deviance'} # Initialize and fit the model. clfb = ensemble.GradientBoostingClassifier(**params) model= clfb.fit(X_train, y_train) print('Training set score:', clfb.score(X_train, y_train)) print('Test set score:', clfb.score(X_test, y_test)) #5 fold cross validation scores = cross_val_score(model, X, y, cv=5) print('Cross-validated scores:', scores) print('Avg. Score ', np.mean(cross_val_score(clfb, X, y, cv=5))) # Random Forest Model with TFIDF clft = ensemble.GradientBoostingClassifier(**params) model = clft.fit(X2_train, y2_train) pred = clft.predict(X2_test) print('\nTFIDF Training set score:', clft.score(X2_train, y2_train)) print('TFIDF Test set score:', clft.score(X2_test, y2_test)) print('Predictions:', pred[0:5]) #5 fold Cross Validation scores = cross_val_score(model, X_tfidf, y_tfidf, cv=5) print('Cross-validated scores:', scores) print('Avg. Score ', np.mean(scores)) ``` ### Increase Accuracy by 5% on Random Forest ``` # Utility function to create a list of the 3000 most common words and add in punctuation. def bag_of_words(text): # Filter out punctuation and stop words. allwords = [token.lemma_ for token in text if not token.is_stop] # Return the most common words. return [item[0] for item in Counter(allwords).most_common(4000)] # Creates a data frame with features for each word in our common word set. # Each value is the count of the times the word appears in each sentence. def bow_features(sentences, common_words): # Scaffold the data frame and initialize counts to zero. df = pd.DataFrame(columns=common_words) df['text_sentence'] = sentences[0] df['text_source'] = sentences[1] df.loc[:, common_words] = 0 # Process each row, counting the occurrence of words in each sentence. 
for i, sentence in enumerate(df['text_sentence']): # Convert the sentence to lemmas, then filter out punctuation, # stop words, and uncommon words. words = [token.lemma_ for token in sentence if ( not token.is_punct and not token.is_stop and token.lemma_ in common_words )] # Populate the row with word counts. for word in words: df.loc[i, word] += 1 # This counter is just to make sure the kernel didn't hang. if i % 500 == 0: print("Processing row {}".format(i)) return df # Set up the bags. eisenhowerwords = bag_of_words(eisenhower_doc) kennedywords = bag_of_words(kennedy_doc) # Combine bags to create a set of unique words. common_words = set(eisenhowerwords + kennedywords) # Create bow features bow_inc = bow_features(sentences, common_words) bow.head() from sklearn.model_selection import GridSearchCV # Set X, y and train, test, split y2 = bow_inc['text_source'] X2 = np.array(bow_inc.drop(['text_sentence','text_source'], 1)) X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.4, random_state=0) # Logistic Regression Model with GridSearchCV on BoW param_grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000] } lrb2 = GridSearchCV(LogisticRegression(penalty='l2', random_state=42, dual=True, class_weight=None), param_grid) model2 = lrb2.fit(X2_train, y2_train) pred = lrb2.predict(X2_test) print(X2_train.shape, y2_train.shape) print('BoW Training set score:', lrb2.score(X2_train, y2_train)) print('BoW Test set score:', lrb2.score(X2_test, y2_test)) print('BoW Predictions:', pred[0:5]) #10 fold Cross Validation scores = cross_val_score(model2, X2, y2, cv=10) print('Cross-validated scores:', scores) print('Avg. Score ', np.mean(scores)) ``` ### Adding Grid Search, tuning parameters and Cross Validating with 10 folds was the best solution to increase by 5%.
github_jupyter
<a href="https://colab.research.google.com/github/GivanTsai/Bert-cookbook/blob/main/BERT_Fine_Tuning_Sentence_Classification_v4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # BERT Fine-Tuning Tutorial with PyTorch By Chris McCormick and Nick Ryan *Revised on March 20, 2020 - Switched to `tokenizer.encode_plus` and added validation loss. See [Revision History](https://colab.research.google.com/drive/1pTuQhug6Dhl9XalKB0zUGf4FIdYFlpcX#scrollTo=IKzLS9ohzGVu) at the end for details.* In this tutorial I'll show you how to use BERT with the huggingface PyTorch library to quickly and efficiently fine-tune a model to get near state of the art performance in sentence classification. More broadly, I describe the practical application of transfer learning in NLP to create high performance models with minimal effort on a range of NLP tasks. This post is presented in two forms--as a blog post [here](http://mccormickml.com/2019/07/22/BERT-fine-tuning/) and as a Colab Notebook [here](https://colab.research.google.com/drive/1pTuQhug6Dhl9XalKB0zUGf4FIdYFlpcX). The content is identical in both, but: * The blog post includes a comments section for discussion. * The Colab Notebook will allow you to run the code and inspect it as you read through. I've also published a video walkthrough of this post on my YouTube channel! [Part 1](https://youtu.be/x66kkDnbzi4) and [Part 2](https://youtu.be/Hnvb9b7a_Ps). # Contents See "Table of contents" in the sidebar to the left. # Introduction ## History 2018 was a breakthrough year in NLP. Transfer learning, particularly models like Allen AI's ELMO, OpenAI's Open-GPT, and Google's BERT allowed researchers to smash multiple benchmarks with minimal task-specific fine-tuning and provided the rest of the NLP community with pretrained models that could easily (with less data and less compute time) be fine-tuned and implemented to produce state of the art results. 
Unfortunately, for many starting out in NLP and even for some experienced practitioners, the theory and practical application of these powerful models is still not well understood. ## What is BERT? BERT (Bidirectional Encoder Representations from Transformers), released in late 2018, is the model we will use in this tutorial to provide readers with a better understanding of and practical guidance for using transfer learning models in NLP. BERT is a method of pretraining language representations that was used to create models that NLP practitioners can then download and use for free. You can either use these models to extract high quality language features from your text data, or you can fine-tune these models on a specific task (classification, entity recognition, question answering, etc.) with your own data to produce state of the art predictions. This post will explain how you can modify and fine-tune BERT to create a powerful NLP model that quickly gives you state of the art results. ## Advantages of Fine-Tuning In this tutorial, we will use BERT to train a text classifier. Specifically, we will take the pre-trained BERT model, add an untrained layer of neurons on the end, and train the new model for our classification task. Why do this rather than train a specific deep learning model (a CNN, BiLSTM, etc.) that is well suited for the specific NLP task you need? 1. **Quicker Development** * First, the pre-trained BERT model weights already encode a lot of information about our language. As a result, it takes much less time to train our fine-tuned model - it is as if we have already trained the bottom layers of our network extensively and only need to gently tune them while using their output as features for our classification task. In fact, the authors recommend only 2-4 epochs of training for fine-tuning BERT on a specific NLP task (compared to the hundreds of GPU hours needed to train the original BERT model or an LSTM from scratch!). 2. 
**Less Data** * In addition and perhaps just as important, because of the pre-trained weights this method allows us to fine-tune our task on a much smaller dataset than would be required in a model that is built from scratch. A major drawback of NLP models built from scratch is that we often need a prohibitively large dataset in order to train our network to reasonable accuracy, meaning a lot of time and energy had to be put into dataset creation. By fine-tuning BERT, we are now able to get away with training a model to good performance on a much smaller amount of training data. 3. **Better Results** * Finally, this simple fine-tuning procedure (typically adding one fully-connected layer on top of BERT and training for a few epochs) was shown to achieve state of the art results with minimal task-specific adjustments for a wide variety of tasks: classification, language inference, semantic similarity, question answering, etc. Rather than implementing custom and sometimes-obscure architetures shown to work well on a specific task, simply fine-tuning BERT is shown to be a better (or at least equal) alternative. ### A Shift in NLP This shift to transfer learning parallels the same shift that took place in computer vision a few years ago. Creating a good deep learning network for computer vision tasks can take millions of parameters and be very expensive to train. Researchers discovered that deep networks learn hierarchical feature representations (simple features like edges at the lowest layers with gradually more complex features at higher layers). Rather than training a new network from scratch each time, the lower layers of a trained network with generalized image features could be copied and transfered for use in another network with a different task. 
It soon became common practice to download a pre-trained deep network and quickly retrain it for the new task or add additional layers on top - vastly preferable to the expensive process of training a network from scratch. For many, the introduction of deep pre-trained language models in 2018 (ELMO, BERT, ULMFIT, Open-GPT, etc.) signals the same shift to transfer learning in NLP that computer vision saw. Let's get started! [![BERT eBook Display Ad](https://drive.google.com/uc?export=view&id=1d6L584QYqpREpRIwAZ55Wsq8AUs5qSk1)](https://bit.ly/30JzuBH) # 1. Setup ## 1.1. Using Colab GPU for Training Google Colab offers free GPUs and TPUs! Since we'll be training a large neural network it's best to take advantage of this (in this case we'll attach a GPU), otherwise training will take a very long time. A GPU can be added by going to the menu and selecting: `Edit 🡒 Notebook Settings 🡒 Hardware accelerator 🡒 (GPU)` Then run the following cell to confirm that the GPU is detected. ``` import tensorflow as tf # Get the GPU device name. device_name = tf.test.gpu_device_name() # The device name should look like the following: if device_name == '/device:GPU:0': print('Found GPU at: {}'.format(device_name)) else: raise SystemError('GPU device not found') ``` In order for torch to use the GPU, we need to identify and specify the GPU as the device. Later, in our training loop, we will load data onto the device. ``` import torch # If there's a GPU available... if torch.cuda.is_available(): # Tell PyTorch to use the GPU. device = torch.device("cuda") print('There are %d GPU(s) available.' % torch.cuda.device_count()) print('We will use the GPU:', torch.cuda.get_device_name(0)) # If not... else: print('No GPU available, using the CPU instead.') device = torch.device("cpu") ``` ## 1.2. 
Installing the Hugging Face Library Next, let's install the [transformers](https://github.com/huggingface/transformers) package from Hugging Face which will give us a pytorch interface for working with BERT. (This library contains interfaces for other pretrained language models like OpenAI's GPT and GPT-2.) We've selected the pytorch interface because it strikes a nice balance between the high-level APIs (which are easy to use but don't provide insight into how things work) and tensorflow code (which contains lots of details but often sidetracks us into lessons about tensorflow, when the purpose here is BERT!). At the moment, the Hugging Face library seems to be the most widely accepted and powerful pytorch interface for working with BERT. In addition to supporting a variety of different pre-trained transformer models, the library also includes pre-built modifications of these models suited to your specific task. For example, in this tutorial we will use `BertForSequenceClassification`. The library also includes task-specific classes for token classification, question answering, next sentence prediciton, etc. Using these pre-built classes simplifies the process of modifying BERT for your purposes. ``` !pip install transformers ``` The code in this notebook is actually a simplified version of the [run_glue.py](https://github.com/huggingface/transformers/blob/master/examples/run_glue.py) example script from huggingface. `run_glue.py` is a helpful utility which allows you to pick which GLUE benchmark task you want to run on, and which pre-trained model you want to use (you can see the list of possible models [here](https://github.com/huggingface/transformers/blob/e6cff60b4cbc1158fbd6e4a1c3afda8dc224f566/examples/run_glue.py#L69)). It also supports using either the CPU, a single GPU, or multiple GPUs. It even supports using 16-bit precision if you want further speed up. Unfortunately, all of this configurability comes at the cost of *readability*. 
In this Notebook, we've simplified the code greatly and added plenty of comments to make it clear what's going on. # 2. Loading CoLA Dataset We'll use [The Corpus of Linguistic Acceptability (CoLA)](https://nyu-mll.github.io/CoLA/) dataset for single sentence classification. It's a set of sentences labeled as grammatically correct or incorrect. It was first published in May of 2018, and is one of the tests included in the "GLUE Benchmark" on which models like BERT are competing. ## 2.1. Download & Extract We'll use the `wget` package to download the dataset to the Colab instance's file system. ``` !pip install wget ``` The dataset is hosted on GitHub in this repo: https://nyu-mll.github.io/CoLA/ ``` import wget import os print('Downloading dataset...') # The URL for the dataset zip file. url = 'https://nyu-mll.github.io/CoLA/cola_public_1.1.zip' # Download the file (if we haven't already) if not os.path.exists('./cola_public_1.1.zip'): wget.download(url, './cola_public_1.1.zip') ``` Unzip the dataset to the file system. You can browse the file system of the Colab instance in the sidebar on the left. ``` # Unzip the dataset (if we haven't already) if not os.path.exists('./cola_public/'): !unzip cola_public_1.1.zip ``` ## 2.2. Parse We can see from the file names that both `tokenized` and `raw` versions of the data are available. We can't use the pre-tokenized version because, in order to apply the pre-trained BERT, we *must* use the tokenizer provided by the model. This is because (1) the model has a specific, fixed vocabulary and (2) the BERT tokenizer has a particular way of handling out-of-vocabulary words. We'll use pandas to parse the "in-domain" training set and look at a few of its properties and data points. ``` import pandas as pd # Load the dataset into a pandas dataframe. df = pd.read_csv("./cola_public/raw/in_domain_train.tsv", delimiter='\t', header=None, names=['sentence_source', 'label', 'label_notes', 'sentence']) # Report the number of sentences. 
The two properties we actually care about are the `sentence` and its `label`, which is referred to as the "acceptability judgment" (0=unacceptable, 1=acceptable). Here are five sentences which are labeled as not grammatically acceptable.
We are required to give it a number of pieces of information which seem redundant, or like they could easily be inferred from the data without us explicitly providing it.
(from the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf)) You might think to try some pooling strategy over the final embeddings, but this isn't necessary. Because BERT is trained to only use this [CLS] token for classification, we know that the model has been motivated to encode everything it needs for the classification step into that single 768-value embedding vector. It's already done the pooling for us! ### Sentence Length & Attention Mask The sentences in our dataset obviously have varying lengths, so how does BERT handle this? BERT has two constraints: 1. All sentences must be padded or truncated to a single, fixed length. 2. The maximum sentence length is 512 tokens. Padding is done with a special `[PAD]` token, which is at index 0 in the BERT vocabulary. The below illustration demonstrates padding out to a "MAX_LEN" of 8 tokens. <img src="https://drive.google.com/uc?export=view&id=1cb5xeqLu_5vPOgs3eRnail2Y00Fl2pCo" width="600"> The "Attention Mask" is simply an array of 1s and 0s indicating which tokens are padding and which aren't (seems kind of redundant, doesn't it?!). This mask tells the "Self-Attention" mechanism in BERT not to incorporate these PAD tokens into its interpretation of the sentence. The maximum length does impact training and evaluation speed, however. For example, with a Tesla K80: `MAX_LEN = 128 --> Training epochs take ~5:28 each` `MAX_LEN = 64 --> Training epochs take ~2:57 each` ## 3.3. Tokenize Dataset The transformers library provides a helpful `encode` function which will handle most of the parsing and data prep steps for us. Before we are ready to encode our text, though, we need to decide on a **maximum sentence length** for padding / truncating to. The below cell will perform one tokenization pass of the dataset in order to measure the maximum sentence length. ``` max_len = 0 # For every sentence... for sent in sentences: # Tokenize the text and add `[CLS]` and `[SEP]` tokens. 
# Tokenize all of the sentences and map the tokens to their word IDs.
input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) labels = torch.tensor(labels) # Print sentence 0, now as a list of IDs. print('Original: ', sentences[0]) print('Token IDs:', input_ids[0]) ``` ## 3.4. Training & Validation Split Divide up our training set to use 90% for training and 10% for validation. ``` from torch.utils.data import TensorDataset, random_split # Combine the training inputs into a TensorDataset. dataset = TensorDataset(input_ids, attention_masks, labels) # Create a 90-10 train-validation split. # Calculate the number of samples to include in each set. train_size = int(0.9 * len(dataset)) val_size = len(dataset) - train_size # Divide the dataset by randomly selecting samples. train_dataset, val_dataset = random_split(dataset, [train_size, val_size]) print('{:>5,} training samples'.format(train_size)) print('{:>5,} validation samples'.format(val_size)) ``` We'll also create an iterator for our dataset using the torch DataLoader class. This helps save on memory during training because, unlike a for loop, with an iterator the entire dataset does not need to be loaded into memory. ``` from torch.utils.data import DataLoader, RandomSampler, SequentialSampler # The DataLoader needs to know our batch size for training, so we specify it # here. For fine-tuning BERT on a specific task, the authors recommend a batch # size of 16 or 32. batch_size = 32 # Create the DataLoaders for our training and validation sets. # We'll take training samples in random order. train_dataloader = DataLoader( train_dataset, # The training samples. sampler = RandomSampler(train_dataset), # Select batches randomly batch_size = batch_size # Trains with this batch size. ) # For validation the order doesn't matter, so we'll just read them sequentially. validation_dataloader = DataLoader( val_dataset, # The validation samples. sampler = SequentialSampler(val_dataset), # Pull out batches sequentially. 
For this task, we first want to modify the pre-trained BERT model to give outputs for classification, and then we want to continue training the model on our dataset until the entire model, end-to-end, is well-suited for our task. Thankfully, the huggingface pytorch implementation includes a set of interfaces designed for a variety of NLP tasks. Though these interfaces are all built on top of a trained BERT model, each has different top layers and output types designed to accommodate their specific NLP task.
The documentation for `from_pretrained` can be found [here](https://huggingface.co/transformers/v2.2.0/main_classes/model.html#transformers.PreTrainedModel.from_pretrained), with the additional parameters defined [here](https://huggingface.co/transformers/v2.2.0/main_classes/configuration.html#transformers.PretrainedConfig). ``` from transformers import BertForSequenceClassification, AdamW, BertConfig # Load BertForSequenceClassification, the pretrained BERT model with a single # linear classification layer on top. model = BertForSequenceClassification.from_pretrained( "bert-base-uncased", # Use the 12-layer BERT model, with an uncased vocab. num_labels = 2, # The number of output labels--2 for binary classification. # You can increase this for multi-class tasks. output_attentions = False, # Whether the model returns attentions weights. output_hidden_states = False, # Whether the model returns all hidden-states. ) # Tell pytorch to run this model on the GPU. model.cuda() ``` Just for curiosity's sake, we can browse all of the model's parameters by name here. In the below cell, I've printed out the names and dimensions of the weights for: 1. The embedding layer. 2. The first of the twelve transformers. 3. The output layer. ``` # Get all of the model's parameters as a list of tuples. params = list(model.named_parameters()) print('The BERT model has {:} different named parameters.\n'.format(len(params))) print('==== Embedding Layer ====\n') for p in params[0:5]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) print('\n==== First Transformer ====\n') for p in params[5:21]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) print('\n==== Output Layer ====\n') for p in params[-4:]: print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size())))) ``` ## 4.2. Optimizer & Learning Rate Scheduler Now that we have our model loaded we need to grab the training hyperparameters from within the stored model. 
There's a lot going on, but fundamentally for each pass in our loop we have a training phase and a validation phase.
**Evaluation:**
Don't be misled--the call to
# # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) # Always clear any previously calculated gradients before performing a # backward pass. PyTorch doesn't do this automatically because # accumulating the gradients is "convenient while training RNNs". # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch) model.zero_grad() # Perform a forward pass (evaluate the model on this training batch). # In PyTorch, calling `model` will in turn call the model's `forward` # function and pass down the arguments. The `forward` function is # documented here: # https://huggingface.co/transformers/model_doc/bert.html#bertforsequenceclassification # The results are returned in a results object, documented here: # https://huggingface.co/transformers/main_classes/output.html#transformers.modeling_outputs.SequenceClassifierOutput # Specifically, we'll get the loss (because we provided labels) and the # "logits"--the model outputs prior to activation. result = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels, return_dict=True) loss = result.loss logits = result.logits # Accumulate the training loss over all of the batches so that we can # calculate the average loss at the end. `loss` is a Tensor containing a # single value; the `.item()` function just returns the Python value # from the tensor. total_train_loss += loss.item() # Perform a backward pass to calculate the gradients. loss.backward() # Clip the norm of the gradients to 1.0. # This is to help prevent the "exploding gradients" problem. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # Update parameters and take a step using the computed gradient. # The optimizer dictates the "update rule"--how the parameters are # modified based on their gradients, the learning rate, etc. 
optimizer.step() # Update the learning rate. scheduler.step() # Calculate the average loss over all of the batches. avg_train_loss = total_train_loss / len(train_dataloader) # Measure how long this epoch took. training_time = format_time(time.time() - t0) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epcoh took: {:}".format(training_time)) # ======================================== # Validation # ======================================== # After the completion of each training epoch, measure our performance on # our validation set. print("") print("Running Validation...") t0 = time.time() # Put the model in evaluation mode--the dropout layers behave differently # during evaluation. model.eval() # Tracking variables total_eval_accuracy = 0 total_eval_loss = 0 nb_eval_steps = 0 # Evaluate data for one epoch for batch in validation_dataloader: # Unpack this training batch from our dataloader. # # As we unpack the batch, we'll also copy each tensor to the GPU using # the `to` method. # # `batch` contains three pytorch tensors: # [0]: input ids # [1]: attention masks # [2]: labels b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) # Tell pytorch not to bother with constructing the compute graph during # the forward pass, since this is only needed for backprop (training). with torch.no_grad(): # Forward pass, calculate logit predictions. # token_type_ids is the same as the "segment ids", which # differentiates sentence 1 and 2 in 2-sentence tasks. result = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels, return_dict=True) # Get the loss and "logits" output by the model. The "logits" are the # output values prior to applying an activation function like the # softmax. loss = result.loss logits = result.logits # Accumulate the validation loss. 
Notice that, while the training loss is going down with each epoch, the validation loss is increasing!
# Tokenize all of the sentences and map the tokens to their word IDs.
input_ids = [] attention_masks = [] # For every sentence... for sent in sentences: # `encode_plus` will: # (1) Tokenize the sentence. # (2) Prepend the `[CLS]` token to the start. # (3) Append the `[SEP]` token to the end. # (4) Map tokens to their IDs. # (5) Pad or truncate the sentence to `max_length` # (6) Create attention masks for [PAD] tokens. encoded_dict = tokenizer.encode_plus( sent, # Sentence to encode. add_special_tokens = True, # Add '[CLS]' and '[SEP]' max_length = 64, # Pad & truncate all sentences. pad_to_max_length = True, return_attention_mask = True, # Construct attn. masks. return_tensors = 'pt', # Return pytorch tensors. ) # Add the encoded sentence to the list. input_ids.append(encoded_dict['input_ids']) # And its attention mask (simply differentiates padding from non-padding). attention_masks.append(encoded_dict['attention_mask']) # Convert the lists into tensors. input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) labels = torch.tensor(labels) # Set the batch size. batch_size = 32 # Create the DataLoader. prediction_data = TensorDataset(input_ids, attention_masks, labels) prediction_sampler = SequentialSampler(prediction_data) prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size) ``` ## 5.2. Evaluate on Test Set With the test set prepared, we can apply our fine-tuned model to generate predictions on the test set. 
``` # Prediction on test set print('Predicting labels for {:,} test sentences...'.format(len(input_ids))) # Put model in evaluation mode model.eval() # Tracking variables predictions , true_labels = [], [] # Predict for batch in prediction_dataloader: # Add batch to GPU batch = tuple(t.to(device) for t in batch) # Unpack the inputs from our dataloader b_input_ids, b_input_mask, b_labels = batch # Telling the model not to compute or store gradients, saving memory and # speeding up prediction with torch.no_grad(): # Forward pass, calculate logit predictions. result = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, return_dict=True) logits = result.logits # Move logits and labels to CPU logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() # Store predictions and true labels predictions.append(logits) true_labels.append(label_ids) print(' DONE.') ``` Accuracy on the CoLA benchmark is measured using the "[Matthews correlation coefficient](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html)" (MCC). We use MCC here because the classes are imbalanced: ``` print('Positive samples: %d of %d (%.2f%%)' % (df.label.sum(), len(df.label), (df.label.sum() / len(df.label) * 100.0))) from sklearn.metrics import matthews_corrcoef matthews_set = [] # Evaluate each test batch using Matthew's correlation coefficient print('Calculating Matthews Corr. Coef. for each batch...') # For each input batch... for i in range(len(true_labels)): # The predictions for this batch are a 2-column ndarray (one column for "0" # and one column for "1"). Pick the label with the highest value and turn this # in to a list of 0s and 1s. pred_labels_i = np.argmax(predictions[i], axis=1).flatten() # Calculate and store the coef for this batch. 
matthews = matthews_corrcoef(true_labels[i], pred_labels_i) matthews_set.append(matthews) ``` The final score will be based on the entire test set, but let's take a look at the scores on the individual batches to get a sense of the variability in the metric between batches. Each batch has 32 sentences in it, except the last batch which has only (516 % 32) = 4 test sentences in it. ``` # Create a barplot showing the MCC score for each batch of test samples. ax = sns.barplot(x=list(range(len(matthews_set))), y=matthews_set, ci=None) plt.title('MCC Score per Batch') plt.ylabel('MCC Score (-1 to +1)') plt.xlabel('Batch #') plt.show() ``` Now we'll combine the results for all of the batches and calculate our final MCC score. ``` # Combine the results across all batches. flat_predictions = np.concatenate(predictions, axis=0) # For each sample, pick the label (0 or 1) with the higher score. flat_predictions = np.argmax(flat_predictions, axis=1).flatten() # Combine the correct labels for each batch into a single list. flat_true_labels = np.concatenate(true_labels, axis=0) # Calculate the MCC mcc = matthews_corrcoef(flat_true_labels, flat_predictions) print('Total MCC: %.3f' % mcc) ``` Cool! In about half an hour and without doing any hyperparameter tuning (adjusting the learning rate, epochs, batch size, ADAM properties, etc.) we are able to get a good score. > *Note: To maximize the score, we should remove the "validation set" (which we used to help determine how many epochs to train for) and train on the entire training set.* The library documents the expected accuracy for this benchmark [here](https://huggingface.co/transformers/examples.html#glue) as `49.23`. You can also look at the official leaderboard [here](https://gluebenchmark.com/leaderboard/submission/zlssuBTm5XRs0aSKbFYGVIVdvbj1/-LhijX9VVmvJcvzKymxy). Note that (due to the small dataset size?) the accuracy can vary significantly between runs. 
# Conclusion This post demonstrates that with a pre-trained BERT model you can quickly and effectively create a high quality model with minimal effort and training time using the pytorch interface, regardless of the specific NLP task you are interested in. # Appendix ## A1. Saving & Loading Fine-Tuned Model This first cell (taken from `run_glue.py` [here](https://github.com/huggingface/transformers/blob/35ff345fc9df9e777b27903f11fa213e4052595b/examples/run_glue.py#L495)) writes the model and tokenizer out to disk. ``` import os # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() output_dir = './model_save/' # Create output directory if needed if not os.path.exists(output_dir): os.makedirs(output_dir) print("Saving model to %s" % output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model # torch.save(args, os.path.join(output_dir, 'training_args.bin')) ``` Let's check out the file sizes, out of curiosity. ``` !ls -l --block-size=K ./model_save/ ``` The largest file is the model weights, at around 418 megabytes. ``` !ls -l --block-size=M ./model_save/pytorch_model.bin ``` To save your model across Colab Notebook sessions, download it to your local machine, or ideally copy it to your Google Drive. ``` # Mount Google Drive to this Notebook instance. from google.colab import drive drive.mount('/content/drive') # Copy the model files to a directory in your Google Drive. !cp -r ./model_save/ "./drive/Shared drives/ChrisMcCormick.AI/Blog Posts/BERT Fine-Tuning/" ``` The following functions will load the model back from disk. 
``` # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(output_dir) tokenizer = tokenizer_class.from_pretrained(output_dir) # Copy the model to the GPU. model.to(device) ``` ## A.2. Weight Decay The huggingface example includes the following code block for enabling weight decay, but the default decay rate is "0.0", so I moved this to the appendix. This block essentially tells the optimizer to not apply weight decay to the bias terms (e.g., $ b $ in the equation $ y = Wx + b $ ). Weight decay is a form of regularization--after calculating the gradients, we multiply them by, e.g., 0.99. ``` # This code is taken from: # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L102 # Don't apply weight decay to any parameters whose names include these tokens. # (Here, the BERT doesn't have `gamma` or `beta` parameters, only `bias` terms) no_decay = ['bias', 'LayerNorm.weight'] # Separate the `weight` parameters from the `bias` parameters. # - For the `weight` parameters, this specifies a 'weight_decay_rate' of 0.01. # - For the `bias` parameters, the 'weight_decay_rate' is 0.0. optimizer_grouped_parameters = [ # Filter for all parameters which *don't* include 'bias', 'gamma', 'beta'. {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.1}, # Filter for parameters which *do* include those. {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0} ] # Note - `optimizer_grouped_parameters` only includes the parameter values, not # the names. ``` # Revision History **Version 4** - *Feb 2nd, 2020* - (current) * Updated all calls to `model` (fine-tuning and evaluation) to use the [`SequenceClassifierOutput`](https://huggingface.co/transformers/main_classes/output.html#transformers.modeling_outputs.SequenceClassifierOutput) class. 
* Moved illustration images to Google Drive--Colab appears to no longer support images at external URLs. **Version 3** - *Mar 18th, 2020* * Simplified the tokenization and input formatting (for both training and test) by leveraging the `tokenizer.encode_plus` function. `encode_plus` handles padding *and* creates the attention masks for us. * Improved explanation of attention masks. * Switched to using `torch.utils.data.random_split` for creating the training-validation split. * Added a summary table of the training statistics (validation loss, time per epoch, etc.). * Added validation loss to the learning curve plot, so we can see if we're overfitting. * Thank you to [Stas Bekman](https://ca.linkedin.com/in/stasbekman) for contributing this! * Displayed the per-batch MCC as a bar plot. **Version 2** - *Dec 20th, 2019* - [link](https://colab.research.google.com/drive/1Y4o3jh3ZH70tl6mCd76vz_IxX23biCPP) * huggingface renamed their library to `transformers`. * Updated the notebook to use the `transformers` library. **Version 1** - *July 22nd, 2019* * Initial version. ## Further Work * It might make more sense to use the MCC score for “validation accuracy”, but I’ve left it out so as not to have to explain it earlier in the Notebook. * Seeding -- I’m not convinced that setting the seed values at the beginning of the training loop is actually creating reproducible results… * The MCC score seems to vary substantially across different runs. It would be interesting to run this example a number of times and show the variance.
github_jupyter
# Example: Polynomial Curve Fitting

Observe a real-valued input variable $x$ $\rightarrow$ predict a real-valued target variable $t$

* $\textbf{x} \equiv (x_1, \cdots, x_i, \cdots, x_N)^T, \quad x_i \in [0, 1]$
* $\textbf{t} \equiv (t_1, \cdots, t_i, \cdots, t_N)^T, \quad t_i = \sin(2\pi x_i) + N(\mu, \sigma^2)$

```
import numpy as np
import matplotlib.pylab as plt

# making data
seed = 62
np.random.seed(seed)

N = 10
x = np.random.rand(N)
t = np.sin(2*np.pi*x) + np.random.randn(N) * 0.1

x_sin = np.linspace(0, 1)
t_sin = np.sin(2*np.pi*x_sin)

plt.plot(x_sin, t_sin, c='green')
plt.scatter(x, t)
plt.xlabel('x', fontsize=16)
plt.ylabel('t', rotation=0, fontsize=16)
plt.show()
```

* Goal: exploit this training set in order to make predictions of the value $\hat{t}$ of the target variable for some new value $\hat{x}$ of the input variable.
* Use some theories:
    * Probability theory: provides a framework for expressing such uncertainty in a precise and quantitative manner
    * Decision theory: allows us to exploit this probabilistic representation in order to make predictions that are optimal according to appropriate criteria
* For the moment, let's use a polynomial function, where $M$ is the order of the polynomial.
$y(x, \mathbf{w})$ is a linear function of coefficients ($\mathbf{w}$) $$y(x, \mathbf{w}) = w_0 + w_1 x + w_2 x^2 + \cdots + w_M x^M = \sum_{j=0}^{M} w_j x^j$$ ``` def vandermonde_matrix(x, m): """we will introduce vandermonde_matrix, when we find solution of polynomial regression""" return np.array([x**i for i in range(m+1)]).T def polynomial_function(x, w, m): assert w.size == m+1, "coefficients number must same as M+1" V = vandermonde_matrix(x, m) # shape (x.size, M+1) return np.dot(V, w) np.random.seed(seed) M = 3 w = np.random.randn(M+1) t_hat = polynomial_function(x, w, M) print(t_hat.round(3)) ``` * The values of the coefficients will be determined by fitting the polynomial to the training data, this can be done by minimizing an error function, which measure the misfit between the function $y(x, \mathbf{w})$ and training data points. $$E(\mathbf{w}) = \dfrac{1}{2} \sum_{n=1}^{N} (y(x_n, \mathbf{w}) - t_n)^2$$ ``` def error_function(pred, target): return (1/2)*((pred-target)**2).sum() error_value = error_function(t_hat, t) error_value ``` * Because error function is quadratic function of $\mathbf{w}$, its derivatives with respect to the coefficients will be linear in the elements of $\mathbf{w}$, so the minimization of the error function has a unique solution. * The remain problem is choosing the order $M$, this is called **model comparison or model selection**. * Then how to choose optimal $M$? 
* use test data with 100 data points
* evaluate the residual value of the error

```
np.random.seed(seed)

N_test = 100
x_test = np.random.rand(N_test)
t_test = np.sin(2*np.pi*x_test) + np.random.randn(N_test) * 0.1

plt.plot(x_sin, t_sin, c='green')
plt.scatter(x_test, t_test, c='red')
plt.xlabel('x', fontsize=16)
plt.ylabel('t', rotation=0, fontsize=16)
plt.show()

def root_mean_square_error(error, n_samples):
    return np.sqrt(2*error/n_samples)

# M=3
error = error_function(polynomial_function(x_test, w, M), t_test)
rms = root_mean_square_error(error, N_test)
rms
```

### Using the normal equation to find the solution

First define the $V$ matrix (of size $(N, M+1)$), named the **Vandermonde matrix**, which looks like below. $M$ is the degree of the polynomial function.

$$V = \begin{bmatrix} 1 & x_1 & x_1^2 & \cdots & x_1^M \\ 1 & x_2 & x_2^2 & \cdots & x_2^M \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 1 & x_N & x_N^2 & \cdots & x_N^M \end{bmatrix}$$

```
def vandermonde_matrix(x, m):
    """vandermonde matrix"""
    return np.array([x**i for i in range(m+1)]).T

M = 3
V = vandermonde_matrix(x, M)
print(V.round(3))
```

So, we can define the polynomial as $y=V\cdot w$, where $w$ is a column vector called the **coefficients**, $w = [w_0, w_1, \cdots , w_M]^T$

$$y = \begin{bmatrix} y_1 \\ y_2 \\ \vdots \\ y_N \end{bmatrix} = \begin{bmatrix} w_0 + w_1x_1 + w_2x_1^2 + \cdots + w_Mx_1^M \\ w_0 + w_1x_2 + w_2x_2^2 + \cdots + w_Mx_2^M \\ \vdots \\ w_0 + w_1x_N + w_2x_N^2 + \cdots + w_Mx_N^M \end{bmatrix}$$

We already defined the error function, $E(\mathbf{w}) = \dfrac{1}{2} \sum_{n=1}^{N} (y(x_n, \mathbf{w}) - t_n)^2 = \dfrac{1}{2} \Vert y - V \cdot w \Vert^2$, which can be solved by minimization, $\hat{w} = \underset{w}{\arg \min} E(w)$.

Define the residual $r = y - V \cdot w$; then the error function becomes $E(\mathbf{w}) = \dfrac{1}{2} r^2$.

Because the error function is quadratic, the minimization of the error function has a unique solution.
Then we can get derivatives, and when it becomes to $0$, error function has minimum value. $$\begin{aligned} \dfrac{\partial E}{\partial w} &= \begin{bmatrix} \dfrac{\partial E}{\partial w_0} \\ \dfrac{\partial E}{\partial w_1} \\ \vdots \\ \dfrac{\partial E}{\partial w_M} \end{bmatrix} \\ &= \begin{bmatrix} \dfrac{\partial E}{\partial r_1}\dfrac{\partial r_1}{\partial w_0} + \dfrac{\partial E}{\partial r_2}\dfrac{\partial r_2}{\partial w_0} + \cdots +\dfrac{\partial E}{\partial r_N}\dfrac{\partial r_N}{\partial w_0} \\ \dfrac{\partial E}{\partial r_1}\dfrac{\partial r_1}{\partial w_1} + \dfrac{\partial E}{\partial r_2}\dfrac{\partial r_2}{\partial w_1} + \cdots +\dfrac{\partial E}{\partial r_N}\dfrac{\partial r_N}{\partial w_1} \\ \vdots \\ \dfrac{\partial E}{\partial r_1}\dfrac{\partial r_1}{\partial w_M} + \dfrac{\partial E}{\partial r_2}\dfrac{\partial r_2}{\partial w_M} + \cdots +\dfrac{\partial E}{\partial r_N}\dfrac{\partial r_N}{\partial w_M} \end{bmatrix} \\ &= \begin{bmatrix} \dfrac{\partial r_1}{\partial w_0} & \dfrac{\partial r_2}{\partial w_0} & \cdots & \dfrac{\partial r_N}{\partial w_0} \\ \dfrac{\partial r_1}{\partial w_1} & \dfrac{\partial r_2}{\partial w_1} & \cdots & \dfrac{\partial r_N}{\partial w_1} \\ \vdots & \vdots & \ddots & \vdots \\ \dfrac{\partial r_1}{\partial w_M} & \dfrac{\partial r_2}{\partial w_M} & \cdots & \dfrac{\partial r_N}{\partial w_M} \end{bmatrix} \cdot \begin{bmatrix} \dfrac{\partial E}{\partial r_1} \\ \dfrac{\partial E}{\partial r_2} \\ \vdots \\ \dfrac{\partial E}{\partial r_N} \end{bmatrix} \\ &= \dfrac{\partial r}{\partial w} \cdot \dfrac{\partial E}{\partial r} \\ &= V^T \cdot (y - V\cdot w) = 0 \end{aligned}$$ So, we can find solution of coefficient $w$. 
$$w = (V^TV)^{-1}V^Ty$$ ``` def poly_solution(x, t, m): V = vandermonde_matrix(x, m) return np.linalg.inv(np.dot(V.T, V)).dot(V.T).dot(t) print(f"Solution of coefficients are {poly_solution(x, t, M).round(3)}") # confirm we are right from numpy.polynomial import polynomial as P print(P.polyfit(x, t, M).round(3)) ``` Let's find optimal degree of polynomial now! ``` def get_rms_error(t_hat, t, n_sample, m): error = error_function(t_hat, t) rms = root_mean_square_error(error, n_sample) return rms all_w = [] all_rms_train = [] all_rms_test = [] for m in range(10): optimal_w = poly_solution(x, t, m) t_hat = polynomial_function(x, optimal_w, m) t_hat_test = polynomial_function(x_test, optimal_w, m) rms_train = get_rms_error(t_hat, t, N, m) # N=10 rms_test = get_rms_error(t_hat_test, t_test, N_test, m) # N_test = 100 print(f"M={m} | rms_train: {rms_train:.4f} rms_test: {rms_test:.4f}") # Plot predicted line plt.plot(x_sin, t_sin, c="green", label="sin function") plt.plot(x_sin, polynomial_function(x_sin, optimal_w, m), c="red", label=f"model M={m}") plt.scatter(x, t) plt.xlim((0, 1)) plt.ylim((-1.25, 1.25)) plt.xlabel('x', fontsize=16) plt.ylabel('t', rotation=0, fontsize=16) plt.legend() plt.show() all_w.append(optimal_w) all_rms_train.append(rms_train) all_rms_test.append(rms_test) plt.scatter(np.arange(10), all_rms_train, facecolors='none', edgecolors='b') plt.plot(np.arange(10), all_rms_train, c='b', label='Training') plt.scatter(np.arange(len(all_rms_test)), all_rms_test, facecolors='none', edgecolors='r') plt.plot(np.arange(len(all_rms_test)), all_rms_test, c='r', label='Test') plt.legend() plt.xlim((-0.1, 10)) plt.ylim((-0.1, 1.2)) plt.ylabel("root-mean-squared Error", fontsize=16) plt.xlabel("M", fontsize=16) plt.show() np.set_printoptions(precision=3) for i in [0, 1, 3, 9]: print(f"coefficients at M={i} is {all_w[i].round(3)}") ``` ### Test for Different size of datas ``` np.random.seed(seed) N1 = 15 N2 = 100 x1, x2 = np.random.rand(N1), np.random.rand(N2) t1 = 
np.sin(2*np.pi*x1) + np.random.randn(N1) * 0.1 t2 = np.sin(2*np.pi*x2) + np.random.randn(N2) * 0.1 optimal_w1 = poly_solution(x1, t1, m=9) optimal_w2 = poly_solution(x2, t2, m=9) # Plot predicted line fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6)) def plot(x, t, x_sin, t_sin, optimal_w, m, ax): ax.plot(x_sin, t_sin, c="green", label="sin function") ax.plot(x_sin, polynomial_function(x_sin, optimal_w, m), c="red", label=f"model N={len(x)}") ax.scatter(x, t) ax.set_xlim((0, 1)) ax.set_ylim((-1.25, 1.25)) ax.set_xlabel('x', fontsize=16) ax.set_ylabel('t', rotation=0, fontsize=16) ax.legend() plot(x1, t1, x_sin, t_sin, optimal_w1, m=9, ax=ax1) plot(x2, t2, x_sin, t_sin, optimal_w2, m=9, ax=ax2) plt.show() ``` ## Regularization $$ E(\mathbf{w}) = \dfrac{1}{2} \Vert y - V \cdot w \Vert^2 + \frac{\lambda}{2} \Vert w \Vert^2 \qquad \cdots (4) $$ where, $\Vert \mathbf{w} \Vert^2 \equiv \mathbf{w}^T\mathbf{w}=w_0^2 + w_1^2 + \cdots w_M^2$ easy to get solution for this $$ \begin{aligned} \frac{\partial E(w)}{\partial w} &= V^Ty-V^TV\cdot w+\lambda w = 0 \\ w &= (V^TV- \lambda I_{(M+1)})^{-1}V^Ty \end{aligned} $$ when regularizer $\lambda \uparrow$, means that more regularization ``` def ridge_solution(x, t, m, alpha=0): V = vandermonde_matrix(x, m) return np.linalg.inv(np.dot(V.T, V) - alpha * np.eye(m+1)).dot(V.T).dot(t) M=9 optimal_w1 = ridge_solution(x, t, m=M, alpha=1e-8) optimal_w2 = ridge_solution(x, t, m=M, alpha=1.0) # Plot predicted line fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6)) def plot_ridge(x, t, x_sin, t_sin, optimal_w, m, text, ax): ax.plot(x_sin, t_sin, c="green", label="sin function") ax.plot(x_sin, polynomial_function(x_sin, optimal_w, m), c="red", label=f"model M={m}") ax.scatter(x, t) ax.set_xlim((0, 1)) ax.set_ylim((-1.25, 1.25)) ax.set_xlabel('x', fontsize=16) ax.set_ylabel('t', rotation=0, fontsize=16) ax.legend() ax.annotate(text, (0.6, 0.5), fontsize=14) plot_ridge(x, t, x_sin, t_sin, optimal_w1, m=M, text='lambda = 1e-8', ax=ax1) 
plot_ridge(x, t, x_sin, t_sin, optimal_w2, m=M, text='lambda = 1.0', ax=ax2) plt.show() print(f"coefficients at lambda=1e-8 is {optimal_w1.round(3)}") print(f"coefficients at lambda=1.0 is {optimal_w2.round(3)}") ``` ## see ridge effect ``` all_w = [] all_rms_train = [] all_rms_test = [] M = 9 for alpha in np.exp(np.arange(-28, -15)): optimal_w = ridge_solution(x, t, m=M, alpha=alpha) t_hat = polynomial_function(x, optimal_w, m=M) t_hat_test = polynomial_function(x_test, optimal_w, m=M) rms_train = get_rms_error(t_hat, t, N, m=M) rms_test = get_rms_error(t_hat_test, t_test, N_test, m=M) # N_test = 100 print(f"lambda={alpha} | rms_train: {rms_train:.4f} rms_test: {rms_test:.4f}") # Plot predicted line # plt.plot(x_sin, t_sin, c="green", label="sin function") # plt.plot(x_sin, polynomial_function(x_sin, optimal_w, m), c="red", label=f"model M={m}") # plt.scatter(x, t) # plt.xlim((0, 1)) # plt.ylim((-1.25, 1.25)) # plt.xlabel('x', fontsize=16) # plt.ylabel('t', rotation=0, fontsize=16) # plt.legend() # plt.show() all_w.append(optimal_w) all_rms_train.append(rms_train) all_rms_test.append(rms_test) plt.scatter(np.arange(len(all_rms_train)), all_rms_train, facecolors='none', edgecolors='b') plt.plot(np.arange(len(all_rms_train)), all_rms_train, c='b', label='Training') plt.scatter(np.arange(len(all_rms_test)), all_rms_test, facecolors='none', edgecolors='r') plt.plot(np.arange(len(all_rms_test)), all_rms_test, c='r', label='Test') plt.legend() plt.xticks(np.arange(len(all_rms_test)), np.arange(-28, -15)) plt.ylabel("root-mean-squared Error", fontsize=16) plt.xlabel("np.log(lambda)", fontsize=16) plt.show() ```
github_jupyter
## Initialization ``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import math import scipy.io from scipy.special import expit from math import * from scipy import optimize sns.set_style('whitegrid') %matplotlib inline ``` ## Loading Data ``` mat = scipy.io.loadmat('ex4data1.mat') X = mat['X'] y = mat['y'] X = np.insert(X,0,1,axis= 1) m,n = X.shape input_layer_size = 400 hidden_layer_size = 25 num_labels = 10 _lambda = 1 ``` ## Function Section ``` #functions Sections def magic_display(matrix = None): if matrix is None: # selecting 100 random rows of the X rand_indces = np.random.permutation(m)[0:100] X_dis = X[rand_indces] else: X_dis = matrix if( len(X_dis.shape) > 1 ): m_test,n_test = X_dis.shape axis_bound = 1 else: m_test = 1 n_test = X_dis.shape[0] axis_bound = 0 # each number width , height in plot example_width = int(round(sqrt(n_test))) example_height = int(round( n_test / example_width )) # number of numbers to show in plot display_rows = floor(sqrt(m_test)) display_cols = ceil(m_test / display_rows ) # padding between numbers pad = 2 # intilazation array for holding previos 100 random numbers display_array = np.ones(( pad + display_rows * ( example_height + pad ), pad + display_cols * ( example_width + pad ) )) count = 0; for i in range(display_rows): for j in range(display_cols): if( count >= m_test ): break # max_val of each row in X_dis max_val = np.max( X_dis[count : count+1], axis= axis_bound) # Starting x,y point of numbers shape in array ex_x_range = pad + ( i ) * ( example_height + pad ) ex_y_range = pad + ( j ) * ( example_width + pad ) if(m_test > 1): ex_arr = X_dis[ count : count + 1 , 1:].reshape(example_height , example_width) else: ex_arr = X_dis[1:].reshape(example_height , example_width) # Setting values display_array[ ex_x_range : ex_x_range + example_height, ex_y_range : ex_y_range + example_width ] = np.divide(ex_arr , max_val) count += 1 # Plotting 100 random data 
plt.figure(figsize=(12,8)) # Get rod of grid plt.grid(False) plt.imshow(display_array) def hyp(matrix): return expit(matrix) def neural_cost_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, _lam): # initialization some varibles if(len(X.shape) > 1): axis_bound = 1 else: axis_bound = 0 # reshaping from one dimensional to 2d dimensional parameter vector end_indx_theta1 = hidden_layer_size * ( input_layer_size + 1 ) Theta1 = np.reshape( nn_params[0 : end_indx_theta1 ], ( hidden_layer_size, input_layer_size + 1 )) # reshaping from one dimensional to 2d dimensional parameter vector Theta2 = np.reshape( nn_params[end_indx_theta1 : ], ( num_labels, hidden_layer_size + 1 )) # Copmuting hidden level activation z_2 = np.dot(X, Theta1.T ) hidden_activation = hyp( z_2 ) hidden_activation = np.insert( hidden_activation, 0, 1, axis=axis_bound ) # Copmuting output level activation z_3 = np.dot(hidden_activation, Theta2.T) out_activation = hyp(z_3) # finding hypotesis matrix h = out_activation # Computing Log(sigmoid(x)) for all of the hypotesis elements h1 = np.log(h) # Computing Log( 1 - simgoid(x)) for all of the hypotesis elements h2 = np.log(1 - h) # Creating new matrix for y new_y0 = ( y - 1 ).copy() new_y1 = np.zeros(out_activation.shape) new_y1[np.arange(0,out_activation.shape[0]),new_y0.T] = 1 # Computing Regularization Part Varibles Theta1_pow2 = Theta1 * Theta1 Theta2_pow2 = Theta2 * Theta2 #Computing Cost of the hypotesis J = ( -1 / m ) * sum(sum( new_y1 * h1 + (1 - new_y1) * h2)) + \ ( _lam / ( 2 * m )) * ( sum(sum( Theta1_pow2 )) + sum(sum( Theta2_pow2 )) ) return J def neural_gradient_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, _lam): # initialization some varibles if(len(X.shape) > 1): axis_bound = 1 else: axis_bound = 0 # Number of training examples m = X.shape[0] # reshaping from one dimensional to 2d dimensional parameter vector end_indx_theta1 = hidden_layer_size * ( input_layer_size + 1 ) Theta1 = 
np.reshape( nn_params[0 : end_indx_theta1 ], ( hidden_layer_size, input_layer_size + 1 )) # reshaping from one dimensional to 2d dimensional parameter vector Theta2 = np.reshape( nn_params[end_indx_theta1 : ], ( num_labels, hidden_layer_size + 1 )) # Defining Delta's Delta1 = np.zeros(Theta1.shape) Delta2 = np.zeros(Theta2.shape) # Defining Theta_grad Matrixs Theta1_grad = np.zeros(Theta1.shape) Theta2_grad = np.zeros(Theta2.shape) for i in range(m): X_input = X[i : i + 1,:] # Copmuting hidden level activation z_2 = np.dot( X_input, Theta1.T ) hidden_activation = hyp( z_2 ) hidden_activation = np.insert( hidden_activation, 0, 1, axis=axis_bound ) # Copmuting output level activation z_3 = np.dot( hidden_activation, Theta2.T ) out_activation = hyp( z_3 ) # finding hypotesis matrix h = out_activation # Creating new matrix for y new_y0 = ( y - 1 ).copy() new_y1 = np.zeros(out_activation.shape[1]) new_y1[new_y0[i]] = 1 # Computing erros out_error = h - new_y1 z_2 = np.insert(z_2, 0, 1, axis=1) hidden_error = np.dot( out_error , Theta2 ).T * sigmoid_gradient(z_2).T hidden_error = hidden_error[ 1: ] # Computing Delta Delta1 = Delta1 + hidden_error * X_input Delta2 = Delta2 + out_error.T * hidden_activation Theta1_grad[:, 0:1 ] = ( 1 / m ) * ( Delta1[:, 0:1 ] ) Theta1_grad[:, 1: ] = ( 1 / m ) * ( Delta1[:, 1: ] ) + ( _lam / m ) * Theta1[:, 1: ] Theta2_grad[:, 0:1 ] = ( 1 / m ) * ( Delta2[:, 0:1 ] ) Theta2_grad[:, 1: ] = ( 1 / m ) * ( Delta2[:, 1: ] ) + ( _lam / m ) * Theta2[:, 1: ] # Converting Weigths to 1 Dimensional Matrix's Theta1_grad_flat = np.array(Theta1_grad.flat) Theta2_grad_flat = np.array(Theta2_grad.flat) return np.concatenate((Theta1_grad_flat, Theta2_grad_flat)) * 1e-3 def sigmoid_gradient(matrix): return hyp(matrix) * ( 1 - hyp(matrix) ) def checking_gradient(_lambda): if(_lambda == None): _lambda = 0 input_layer_size = 3 hidden_layer_size = 5 num_labels = 3 m = 5 Theta1 = debug_initialaize_weights(hidden_layer_size, input_layer_size) Theta2 = 
debug_initialaize_weights(num_labels, hidden_layer_size) X = debug_initialaize_weights(m, input_layer_size - 1) y = 1 + np.mod(np.arange(0,m), num_labels) # initialization some varibles if(len(X.shape) > 1): axis_bound = 1 else: axis_bound = 0 # Inserting 1's column to matrix X = np.insert( X, 0, 1, axis= axis_bound) Theta1_flat = np.array(Theta1.flat) Theta2_flat = np.array(Theta2.flat) Theta = np.concatenate((Theta1_flat, Theta2_flat)) grad = neural_gradient_function(Theta, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda) numerical_grad = numerical_gradinet_function(Theta, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda) print(np.linalg.norm(numerical_grad - grad) / np.linalg.norm(numerical_grad + grad)) def numerical_gradinet_function(Theta, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda): new_grad = np.zeros(Theta.size) p = np.zeros(Theta.size) e = 1e-4 for i in range(Theta.size): p[i] = e j1 = neural_cost_function(Theta + p, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda) j2 = neural_cost_function(Theta - p, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda) new_grad[i] = (j1 - j2) / ( 2 * e ) p[i] = 0 return new_grad def debug_initialaize_weights(output_layer, input_layer): matrix = np.zeros((output_layer, input_layer + 1)) return np.sin(np.arange(1,matrix.size + 1)).reshape(matrix.shape) / 10 checking_gradient(3) ``` ## Visualizing Data ``` magic_display() ``` ## Feedforward Propagation Algorithm ``` # Loading Weights weights = scipy.io.loadmat('ex4weights.mat') Theta1 = weights['Theta1'] Theta2 = weights['Theta2'] Theta1.shape Theta2.shape # Converting Weigths to 1 Dimensional Matrix's Theta1_flat = np.array(Theta1.flat) Theta2_flat = np.array(Theta2.flat) # Creating New 1d Matrix for holding all of the weights Theta = np.concatenate((Theta1_flat, Theta2_flat)) neural_cost_function(Theta, input_layer_size, hidden_layer_size, num_labels, X, y, 3) ```
github_jupyter
# Analyse wavefields This notebook checks the velocity models and FD simulations output by `generate_velocity_models.py` and `generate_forward_simulations.py` are sensible. ``` import glob import matplotlib.pyplot as plt import matplotlib.animation as animation import numpy as np import scipy as sp import sys sys.path.insert(0, '../shared_modules/') import plot_utils %matplotlib inline ``` ## Load example velocity model and FD simulation ``` # PARAMETERS VEL_RUN = "marmousi" SIM_RUN = "marmousi_2ms" VEL_DIR = "velocity/" + VEL_RUN + "/" OUT_SIM_DIR = "gather/" + SIM_RUN + "/" isim=(20,1) wavefields = np.load(OUT_SIM_DIR + "wavefields_%.8i_%.8i.npy"%(isim[0],isim[1])) wavefields = wavefields[::4] gather = np.load(OUT_SIM_DIR + "gather_%.8i_%.8i.npy"%(isim[0],isim[1])) velocity = np.load(VEL_DIR + "velocity_%.8i.npy"%(isim[0])) source_is = np.load(OUT_SIM_DIR + "source_is.npy") receiver_is = np.load(OUT_SIM_DIR + "receiver_is.npy") DELTAT = 0.002 source_i = source_is[isim[0],isim[1]] print(velocity.shape, velocity[0,0]) print(wavefields.shape, np.max(wavefields)) print(gather.shape) print(receiver_is.shape, source_is.shape) #print(receiver_is) #print(source_is) print(source_i) ``` ## Create wavefield animation ``` %matplotlib notebook # define initial plots fig = plt.figure(figsize=(13.5,6)) plt.subplot(1,2,2) plt.imshow(velocity.T, cmap="viridis") cb = plt.colorbar() cb.ax.set_ylabel('P-wave velocity (m/s)') plt.subplot(1,2,1) plt.imshow(velocity.T, alpha=0.4, cmap="gray_r") im = plt.imshow(wavefields[0].T, aspect=1, cmap=plot_utils.rgb, alpha=0.4, vmin = -2, vmax=2) cb = plt.colorbar() cb.ax.set_ylabel('P-wave amplitude') plt.scatter(receiver_is[:,0], receiver_is[:,1]) plt.scatter(source_i[0], source_i[1]) # define animation update function def update(i): # set the data in the im object plt.title("t = %i"%(i)) im.set_data(wavefields[i].T) return [im]# tells the animator which parts of the plot to update # start animation # important: keep the instance to maintain 
timer ani = animation.FuncAnimation(fig, update, frames=range(0,wavefields.shape[0],10), interval=100, blit=False) plt.subplots_adjust(left=0.0, right=1., bottom=0.05, top=0.95, hspace=0.0, wspace=0.0) plt.show() ani._stop() ``` ## Check wavefields and gather match ``` # check wavefields and gather match gather_test = wavefields[:,receiver_is[:,0], receiver_is[:,1]].T print(gather.shape, gather_test.shape) print(np.allclose(gather, gather_test)) # plot gather %matplotlib inline print(gather.mean(), 5*gather.std()) gathern = gather/(1) t = np.arange(gather.shape[1], dtype=np.float32) t_gain = (t**2.5) t_gain = t_gain/np.median(t_gain) plt.figure(figsize=(12,8)) plt.imshow((gathern*t_gain).T, aspect=0.1, cmap="Greys", vmin=-1, vmax=1) plt.colorbar() plt.figure(figsize=(20,10)) plt.plot(t.flatten(),(gathern*t_gain)[10,:]) plt.scatter(t.flatten(),np.zeros(gather.shape[1]), s=0.1) ``` ## Plot average frequency spectrum of gather ``` # plot average frequency spectrum of gather s = np.abs(np.fft.fft(gather, axis=1)) s = np.sum(s, axis=0) f = np.fft.fftfreq(s.shape[0], DELTAT) plt.figure(figsize=(10,5)) plt.plot(f[np.argsort(f)], s[np.argsort(f)]) plt.xlim(0, 250) plt.show() print(f[np.argmax(s)])# dominant frequency plt.plot(t,t_gain) ```
github_jupyter
# Week 3: Transfer Learning Welcome to this assignment! This week, you are going to use a technique called `Transfer Learning` in which you utilize an already trained network to help you solve a similar problem to the one it was originally trained to solve. Let's get started! ``` import os import zipfile import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import Model from tensorflow.keras.optimizers import RMSprop from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.preprocessing.image import img_to_array, load_img ``` ## Dataset For this assignment, you will use the `Horse or Human dataset`, which contains images of horses and humans. Download the `training` and `validation` sets by running the cell below: ``` # Get the Horse or Human training dataset !wget -q -P /content/ https://storage.googleapis.com/tensorflow-1-public/course2/week3/horse-or-human.zip # Get the Horse or Human validation dataset !wget -q -P /content/ https://storage.googleapis.com/tensorflow-1-public/course2/week3/validation-horse-or-human.zip test_local_zip = './horse-or-human.zip' zip_ref = zipfile.ZipFile(test_local_zip, 'r') zip_ref.extractall('/tmp/training') val_local_zip = './validation-horse-or-human.zip' zip_ref = zipfile.ZipFile(val_local_zip, 'r') zip_ref.extractall('/tmp/validation') zip_ref.close() ``` This dataset already has an structure that is compatible with Keras' `flow_from_directory` so you don't need to move the images into subdirectories as you did in the previous assignments. 
However, it is still a good idea to save the paths of the images so you can use them later on: ``` # Define the training and validation base directories train_dir = '/tmp/training' validation_dir = '/tmp/validation' # Directory with training horse pictures train_horses_dir = os.path.join(train_dir, 'horses') # Directory with training humans pictures train_humans_dir = os.path.join(train_dir, 'humans') # Directory with validation horse pictures validation_horses_dir = os.path.join(validation_dir, 'horses') # Directory with validation human pictures validation_humans_dir = os.path.join(validation_dir, 'humans') # Check the number of images for each class and set print(f"There are {len(os.listdir(train_horses_dir))} images of horses for training.\n") print(f"There are {len(os.listdir(train_humans_dir))} images of humans for training.\n") print(f"There are {len(os.listdir(validation_horses_dir))} images of horses for validation.\n") print(f"There are {len(os.listdir(validation_humans_dir))} images of humans for validation.\n") ``` Now take a look at a sample image of each one of the classes: ``` print("Sample horse image:") plt.imshow(load_img(f"{os.path.join(train_horses_dir, os.listdir(train_horses_dir)[0])}")) plt.show() print("\nSample human image:") plt.imshow(load_img(f"{os.path.join(train_humans_dir, os.listdir(train_humans_dir)[0])}")) plt.show() ``` `matplotlib` makes it easy to see that these images have a resolution of 300x300 and are colored, but you can double check this by using the code below: ``` # Load the first example of a horse sample_image = load_img(f"{os.path.join(train_horses_dir, os.listdir(train_horses_dir)[0])}") # Convert the image into its numpy array representation sample_array = img_to_array(sample_image) print(f"Each image has shape: {sample_array.shape}") ``` As expected, the sample image has a resolution of 300x300 and the last dimension is used for each one of the RGB channels to represent color. 
## Training and Validation Generators Now that you know the images you are dealing with, it is time for you to code the generators that will fed these images to your Network. For this, complete the `train_val_generators` function below: **Important Note:** The images have a resolution of 300x300 but the `flow_from_directory` method you will use allows you to set a target resolution. In this case, **set a `target_size` of (150, 150)**. This will heavily lower the number of trainable parameters in your final network, yielding much quicker training times without compromising the accuracy! ``` # GRADED FUNCTION: train_val_generators def train_val_generators(TRAINING_DIR, VALIDATION_DIR): ### START CODE HERE # Instantiate the ImageDataGenerator class # Don't forget to normalize pixel values and set arguments to augment the images train_datagen = None # Pass in the appropriate arguments to the flow_from_directory method train_generator = train_datagen.flow_from_directory(directory=None, batch_size=32, class_mode=None, target_size=(None, None)) # Instantiate the ImageDataGenerator class (don't forget to set the rescale argument) # Remember that validation data should not be augmented validation_datagen = None # Pass in the appropriate arguments to the flow_from_directory method validation_generator = validation_datagen.flow_from_directory(directory=None, batch_size=32, class_mode=None, target_size=(None, None)) ### END CODE HERE return train_generator, validation_generator # Test your generators train_generator, validation_generator = train_val_generators(train_dir, validation_dir) ``` **Expected Output:** ``` Found 1027 images belonging to 2 classes. Found 256 images belonging to 2 classes. 
``` ## Transfer learning - Create the pre-trained model Download the `inception V3` weights into the `/tmp/` directory: ``` # Download the inception v3 weights !wget --no-check-certificate \ https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \ -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 ``` Now load the `InceptionV3` model and save the path to the weights you just downloaded: ``` # Import the inception model from tensorflow.keras.applications.inception_v3 import InceptionV3 # Create an instance of the inception model from the local pre-trained weights local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5' ``` Complete the `create_pre_trained_model` function below. You should specify the correct `input_shape` for the model (remember that you set a new resolution for the images instead of the native 300x300) and make all of the layers non-trainable: ``` # GRADED FUNCTION: create_pre_trained_model def create_pre_trained_model(local_weights_file): ### START CODE HERE pre_trained_model = InceptionV3(input_shape = (None, None, None), include_top = False, weights = None) pre_trained_model.load_weights(local_weights_file) # Make all the layers in the pre-trained model non-trainable for None in None: None = None ### END CODE HERE return pre_trained_model ``` Check that everything went well by comparing the last few rows of the model summary to the expected output: ``` pre_trained_model = create_pre_trained_model(local_weights_file) # Print the model summary pre_trained_model.summary() ``` **Expected Output:** ``` batch_normalization_v1_281 (Bat (None, 3, 3, 192) 576 conv2d_281[0][0] __________________________________________________________________________________________________ activation_273 (Activation) (None, 3, 3, 320) 0 batch_normalization_v1_273[0][0] __________________________________________________________________________________________________ mixed9_1 
(Concatenate) (None, 3, 3, 768) 0 activation_275[0][0] activation_276[0][0] __________________________________________________________________________________________________ concatenate_5 (Concatenate) (None, 3, 3, 768) 0 activation_279[0][0] activation_280[0][0] __________________________________________________________________________________________________ activation_281 (Activation) (None, 3, 3, 192) 0 batch_normalization_v1_281[0][0] __________________________________________________________________________________________________ mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_273[0][0] mixed9_1[0][0] concatenate_5[0][0] activation_281[0][0] ================================================================================================== Total params: 21,802,784 Trainable params: 0 Non-trainable params: 21,802,784 ``` To check that all the layers in the model were set to be non-trainable, you can also run the cell below: ``` total_params = pre_trained_model.count_params() num_trainable_params = sum([w.shape.num_elements() for w in pre_trained_model.trainable_weights]) print(f"There are {total_params:,} total parameters in this model.") print(f"There are {num_trainable_params:,} trainable parameters in this model.") ``` **Expected Output:** ``` There are 21,802,784 total parameters in this model. There are 0 trainable parameters in this model. 
``` ## Creating callbacks for later You have already worked with callbacks in the first course of this specialization so the callback to stop training once an accuracy of 99.9% is reached, is provided for you: ``` # Define a Callback class that stops training once accuracy reaches 99.9% class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('accuracy')>0.999): print("\nReached 99.9% accuracy so cancelling training!") self.model.stop_training = True ``` ## Pipelining the pre-trained model with your own Now that the pre-trained model is ready, you need to "glue" it to your own model to solve the task at hand. For this you will need the last output of the pre-trained model, since this will be the input for your own. Complete the `output_of_last_layer` function below. **Note:** For grading purposes use the `mixed7` layer as the last layer of the pre-trained model. However, after submitting feel free to come back here and play around with this. ``` # GRADED FUNCTION: output_of_last_layer def output_of_last_layer(pre_trained_model): ### START CODE HERE last_desired_layer = None print('last layer output shape: ', last_desired_layer.output_shape) last_output = None print('last layer output: ', last_output) ### END CODE HERE return last_output ``` Check that everything works as expected: ``` last_output = output_of_last_layer(pre_trained_model) ``` **Expected Output (if `mixed7` layer was used):** ``` last layer output shape: (None, 7, 7, 768) last layer output: KerasTensor(type_spec=TensorSpec(shape=(None, 7, 7, 768), dtype=tf.float32, name=None), name='mixed7/concat:0', description="created by layer 'mixed7'") ``` Now you will create the final model by adding some additional layers on top of the pre-trained model. Complete the `create_final_model` function below. You will need to use Tensorflow's [Functional API](https://www.tensorflow.org/guide/keras/functional) for this since the pretrained model has been created using it. 
Let's double check this first: ``` # Print the type of the pre-trained model print(f"The pretrained model has type: {type(pre_trained_model)}") ``` To create the final model, you will use Keras' Model class by defining the appropriate inputs and outputs as described in the first way to instantiate a Model in the [docs](https://www.tensorflow.org/api_docs/python/tf/keras/Model). Note that you can get the input from any existing model by using its `input` attribute and by using the Funcional API you can use the last layer directly as output when creating the final model. ``` # GRADED FUNCTION: create_final_model def create_final_model(pre_trained_model, last_output): # Flatten the output layer to 1 dimension x = layers.Flatten()(last_output) ### START CODE HERE # Add a fully connected layer with 1024 hidden units and ReLU activation x = None # Add a dropout rate of 0.2 x = None # Add a final sigmoid layer for classification x = None # Create the complete model by using the Model class model = Model(inputs=None, outputs=None) # Compile the model model.compile(optimizer = RMSprop(learning_rate=0.0001), loss = None, metrics = [None]) ### END CODE HERE return model # Save your model in a variable model = create_final_model(pre_trained_model, last_output) # Inspect parameters total_params = model.count_params() num_trainable_params = sum([w.shape.num_elements() for w in model.trainable_weights]) print(f"There are {total_params:,} total parameters in this model.") print(f"There are {num_trainable_params:,} trainable parameters in this model.") ``` **Expected Output:** ``` There are 47,512,481 total parameters in this model. There are 38,537,217 trainable parameters in this model. ``` Wow, that is a lot of parameters! After submitting your assignment later, try re-running this notebook but use the original resolution of 300x300, you will be surprised to see how many more parameters are for that case. 
Now train the model: ``` # Run this and see how many epochs it should take before the callback # fires, and stops training at 99.9% accuracy # (It should take a few epochs) callbacks = myCallback() history = model.fit(train_generator, validation_data = validation_generator, epochs = 100, verbose = 2, callbacks=callbacks) ``` The training should have stopped after less than 10 epochs and it should have reached an accuracy over 99.9% (firing the callback). This happened so quickly because of the pre-trained model you used, which already contained information to classify humans from horses. Really cool! Now take a quick look at the training and validation accuracies for each epoch of training: ``` # Plot the training and validation accuracies for each epoch acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend(loc=0) plt.figure() plt.show() ``` You will need to submit this notebook for grading. To download it, click on the `File` tab in the upper left corner of the screen, then click on `Download` -> `Download .ipynb`. You can name it anything you want as long as it is a valid `.ipynb` (jupyter notebook) file. **Congratulations on finishing this week's assignment!** You have successfully implemented a convolutional neural network that leverages a pre-trained network to help you solve the problem of classifying humans from horses. **Keep it up!**
github_jupyter
# Introduction to Taxi ETL Job This is the Taxi ETL job to generate the input datasets for the Taxi XGBoost job. ## Prerequisites ### 1. Download data All data can be found at https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page ### 2. Download needed jars * [cudf-21.12.2-cuda11.jar](https://repo1.maven.org/maven2/ai/rapids/cudf/21.12.2/) * [rapids-4-spark_2.12-21.12.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/21.12.0/rapids-4-spark_2.12-21.12.0.jar) ### 3. Start Spark Standalone Before running the script, please set up Spark standalone mode ### 4. Add ENV ``` $ export SPARK_JARS=cudf-21.12.2-cuda11.jar,rapids-4-spark_2.12-21.12.0.jar ``` ### 5. Start Jupyter Notebook with spylon-kernel or toree ``` $ jupyter notebook --allow-root --notebook-dir=${your-dir} --config=${your-configs} ``` ## Import Libs ``` import org.apache.spark.sql.SparkSession import org.apache.spark.sql.DataFrame import org.apache.spark.sql.functions._ import org.apache.spark.sql.types.DataTypes.{DoubleType, IntegerType, StringType} import org.apache.spark.sql.types.{FloatType, StructField, StructType} ``` ## Script Settings ### 1. 
File Path Settings * Define input file path ``` val dataRoot = sys.env.getOrElse("DATA_ROOT", "/data") val rawPath = dataRoot + "/taxi/taxi-etl-input-small.csv" val outPath = dataRoot + "/taxi/output" ``` ## Function and Object Define ### Define the constants * Define input file schema ``` val rawSchema = StructType(Seq( StructField("vendor_id", StringType), StructField("pickup_datetime", StringType), StructField("dropoff_datetime", StringType), StructField("passenger_count", IntegerType), StructField("trip_distance", DoubleType), StructField("pickup_longitude", DoubleType), StructField("pickup_latitude", DoubleType), StructField("rate_code", StringType), StructField("store_and_fwd_flag", StringType), StructField("dropoff_longitude", DoubleType), StructField("dropoff_latitude", DoubleType), StructField("payment_type", StringType), StructField("fare_amount", DoubleType), StructField("surcharge", DoubleType), StructField("mta_tax", DoubleType), StructField("tip_amount", DoubleType), StructField("tolls_amount", DoubleType), StructField("total_amount", DoubleType) )) def dataRatios: (Int, Int, Int) = { val ratios = (80, 20) (ratios._1, ratios._2, 100 - ratios._1 - ratios._2) } val (trainRatio, evalRatio, trainEvalRatio) = dataRatios ``` * Build the spark session and dataframe ``` // Build the spark session and data reader as usual val sparkSession = SparkSession.builder.appName("taxi-etl").getOrCreate val df = sparkSession.read.option("header", true).schema(rawSchema).csv(rawPath) ``` * Define some ETL functions ``` def dropUseless(dataFrame: DataFrame): DataFrame = { dataFrame.drop( "dropoff_datetime", "payment_type", "surcharge", "mta_tax", "tip_amount", "tolls_amount", "total_amount") } def encodeCategories(dataFrame: DataFrame): DataFrame = { val categories = Seq("vendor_id", "rate_code", "store_and_fwd_flag") (categories.foldLeft(dataFrame) { case (df, category) => df.withColumn(category, hash(col(category))) }).withColumnRenamed("store_and_fwd_flag", 
"store_and_fwd") } def fillNa(dataFrame: DataFrame): DataFrame = { dataFrame.na.fill(-1) } def removeInvalid(dataFrame: DataFrame): DataFrame = { val conditions = Seq( Seq("fare_amount", 0, 500), Seq("passenger_count", 0, 6), Seq("pickup_longitude", -75, -73), Seq("dropoff_longitude", -75, -73), Seq("pickup_latitude", 40, 42), Seq("dropoff_latitude", 40, 42)) conditions .map { case Seq(column, min, max) => "%s > %d and %s < %d".format(column, min, column, max) } .foldLeft(dataFrame) { _.filter(_) } } def convertDatetime(dataFrame: DataFrame): DataFrame = { val datetime = col("pickup_datetime") dataFrame .withColumn("pickup_datetime", to_timestamp(datetime)) .withColumn("year", year(datetime)) .withColumn("month", month(datetime)) .withColumn("day", dayofmonth(datetime)) .withColumn("day_of_week", dayofweek(datetime)) .withColumn( "is_weekend", col("day_of_week").isin(1, 7).cast(IntegerType)) // 1: Sunday, 7: Saturday .withColumn("hour", hour(datetime)) .drop(datetime.toString) } def addHDistance(dataFrame: DataFrame): DataFrame = { val P = math.Pi / 180 val lat1 = col("pickup_latitude") val lon1 = col("pickup_longitude") val lat2 = col("dropoff_latitude") val lon2 = col("dropoff_longitude") val internalValue = (lit(0.5) - cos((lat2 - lat1) * P) / 2 + cos(lat1 * P) * cos(lat2 * P) * (lit(1) - cos((lon2 - lon1) * P)) / 2) val hDistance = lit(12734) * asin(sqrt(internalValue)) dataFrame.withColumn("h_distance", hDistance) } // def preProcess(dataFrame: DataFrame): DataFrame = { // val processes = Seq[DataFrame => DataFrame]( // dropUseless, // encodeCategories, // fillNa, // removeInvalid, // convertDatetime, // addHDistance // ) // processes // .foldLeft(dataFrame) { case (df, process) => process(df) } // } ``` * Define main ETL function ``` def preProcess(dataFrame: DataFrame, splits: Array[Int]): Array[DataFrame] = { val processes = Seq[DataFrame => DataFrame]( dropUseless, encodeCategories, fillNa, removeInvalid, convertDatetime, addHDistance ) processes 
.foldLeft(dataFrame) { case (df, process) => process(df) } .randomSplit(splits.map(_.toDouble)) } val dataset = preProcess(df, Array(trainRatio, trainEvalRatio, evalRatio)) ``` ## Run ETL Process and Save the Result ``` val t0 = System.currentTimeMillis for ((name, index) <- Seq("train", "eval", "trans").zipWithIndex) { dataset(index).write.mode("overwrite").parquet(outPath + "/parquet/" + name) dataset(index).write.mode("overwrite").csv(outPath + "/csv/" + name) } val t1 = System.currentTimeMillis println("Elapsed time : " + ((t1 - t0).toFloat / 1000) + "s") sparkSession.stop() ```
github_jupyter
``` # default_exp cli #hide from nbdev.showdoc import * #export from dash_oop_components.core import * #export import os import webbrowser from pathlib import Path import click ``` # dashapp CLI > a simple way of launching dashboards directly from the commandline With `dash_oop_components` you can easily dump the configuration for a `dash` dashboard to a configuration `.yaml` file. Along with the library a `dashapp` command line tool (CLI) gets installed to make it easy to directly launch a dashboard from the commandline. This is useful for when: - you quickly want to launch a dashboard without starting python or jupyter, or finding the correct gunicorn config - you want to instruct others how to easily launch your dashboard without messing around with python or gunicorn ## `dashapp` command line tool You first need to store your dash app to a config yaml file using e.g ```python db = DashApp(dashboard_component, port=8000, querystrings=True, bootstrap=True) db.to_yaml("dashboard.yaml") ``` You can then run the app directly from the commandline and have it opened in a browser: ```sh $ dashapp dashboard.yaml ``` To try loading the figure factory from pickle, add the `--try-pickles` flag: ```sh $ dashapp --try-pickles dashboard.yaml ``` You can also store a `DashComponent` and run it from the command-line, by saving it to yaml: ```python dashboard_component = CovidDashboard(plot_factory) dashboard_component.to_yaml("dashboard_component.yaml") ``` And running it: ```sh $ dashapp dashboard_component.yaml ``` To include the bootstrap css and store parameters in url querystring and set port to 9000: ```sh $ dashapp dashboard_component.yaml --querystrings --bootstrap --port 9000 ``` If you follow the naming convention of storing the yaml to `dashboard.yaml`, or `dashboard_component.yaml`, you can omit the argument and simply run: ```sh $ dashapp ``` ``` Options: -nb, --no-browser Launch a dashboard, but do not launch a browser. 
-tp, --try-pickles if DashFigureFactory parameter config has filepath defined, try to load it from pickle. -fp, --force-pickles if DashFigureFactory parameter config has filepath defined, load it from pickle or raise exception. -q, --querystrings Store state in url querystring -b, --bootstrap include default bootstrap css -p, --port INTEGER specific port to run dashboard on --help Show this message and exit. ``` ``` #export @click.command() @click.argument("dashboard_yaml", nargs=1, required=False) @click.option("--no-browser", "-nb", "no_browser", is_flag=True, help="Launch a dashboard, but do not launch a browser.") @click.option("--try-pickles", "-tp", "try_pickles", is_flag=True, help="if DashFigureFactory parameter config has filepath defined, try to load it from pickle.") @click.option("--force-pickles", "-fp", "force_pickles", is_flag=True, help="if DashFigureFactory parameter config has filepath defined, load it from pickle or raise exception.") @click.option("--querystrings", "-q", "querystrings", is_flag=True, help="Store state in url querystring") @click.option("--bootstrap", "-b", "bootstrap", is_flag=True, help="include default bootstrap css") @click.option("--port", "-p", "port", default=None, type=click.INT, help="specific port to run dashboard on") def dashapp(dashboard_yaml, no_browser, try_pickles, force_pickles, querystrings, bootstrap, port): """ dashapp is a CLI tool from the dash_oop_components library, used to launch a dash app from the commandline. 
You first need to store your dash app to a config yaml file using e.g \b db = DashApp(dashboard_component, port=8000) db.to_yaml("dashboard.yaml") \b You can then run the app directly from the commandline with and open it in a browser: $ dashapp dashboard.yaml or try the load figure factory from pickle: $ dashapp --try-pickles dashboard.yaml You can also store and run a DashComponent, by saving it to yaml: \b dashboard_component = CovidDashboard(plot_factory) dashboard_component.to_yaml("dashboard_component.yaml") And running it: $ dashapp dashboard_component.yaml To include the bootstrap css and store parameters in url querystring: $ dashapp dashboard_component.yaml --querystrings --bootstrap If you follow the naming convention of storing the yaml to `dashboard.yaml`, or `dashboard_component.yaml`, you can omit the argument and simply run: $ dashapp """ if dashboard_yaml is None: if (Path().cwd() / "dashboard.yaml").exists(): dashboard_yaml = Path().cwd() / "dashboard.yaml" elif (Path().cwd() / "dashboard_component.yaml").exists(): dashboard_yaml = Path().cwd() / "dashboard_component.yaml" else: click.echo("No argument given and could find neither a " "default filename dashboard.yaml or dashboard_component.yaml." "Try `dashapp --help` for options. Aborting.") return if not str(dashboard_yaml).endswith(".yaml"): click.echo("you need to pass a .yaml file to start a dashboard! 
Aborting.") return kwargs = {} if try_pickles: kwargs["try_pickles"] = True if force_pickles: kwargs["force_pickles"] = True dashboard_component = DashComponentBase.from_yaml(dashboard_yaml, **kwargs) if isinstance(dashboard_component, DashApp): db = dashboard_component elif isinstance(dashboard_component, DashComponent): db_kwargs = {} if querystrings: db_kwargs["querystrings"] = True if bootstrap: db_kwargs["bootstrap"] = True db = DashApp(dashboard_component, **db_kwargs) if port is None: port = db.port if port is None: port = 8050 import socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(2) result = sock.connect_ex(('127.0.0.1',port)) if result == 0: click.echo(f"dashapp ===> Port {port} already in use! Please override with e.g. --port {port+1}") return if not no_browser and not os.environ.get("WERKZEUG_RUN_MAIN"): click.echo(f"explainerdashboard ===> launching browser at {f'http://localhost:{port}/'}") webbrowser.open_new(f"http://localhost:{port}/") click.echo(f"dashapp ===> Starting dashboard:") db.run(port) ```
github_jupyter
# Bag of Tricks Experiment Analyze the effects of our different "tricks". 1. Sample matches off mask 2. Scale by hard negatives 3. L2 pixel loss on matches We will compare standard network, networks missing one trick only, and a network without any tricks (i.e same as Tanner Schmidt) ``` import dense_correspondence_manipulation.utils.utils as utils utils.add_dense_correspondence_to_python_path() from dense_correspondence.training.training import * import sys import logging # utils.set_default_cuda_visible_devices() utils.set_cuda_visible_devices([0]) # use this to manually set CUDA_VISIBLE_DEVICES from dense_correspondence.training.training import DenseCorrespondenceTraining from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset logging.basicConfig(level=logging.INFO) dataset_config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 'dataset', 'composite', "caterpillar_baymax_starbot_all_front_single_only.yaml") train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 'training', 'training.yaml') logging_dir = "code/data_volume/pdc/trained_models/trick_analysis" num_iterations = 3500 num_image_pairs = 100 debug = False TRAIN = True EVALUATE = True # num_image_pairs = 10 # num_iterations = 10 d = 3 network_dict = dict() ``` ## Standard ``` dataset_config = utils.getDictFromYamlFilename(dataset_config_filename) dataset = SpartanDataset(config=dataset_config) train_config = utils.getDictFromYamlFilename(train_config_file) name = "standard_%d" %(d) print "training %s" %(name) train_config = utils.getDictFromYamlFilename(train_config_file) train = DenseCorrespondenceTraining(dataset=dataset, config=train_config) train._config["training"]["logging_dir"] = logging_dir train._config["training"]["logging_dir_name"] = name train._config["training"]["num_iterations"] = num_iterations train._config["dense_correspondence_network"]["descriptor_dimension"] = 
d if TRAIN: train.run() print "finished training descriptor of dimension %d" %(d) # now do evaluation print "running evaluation on network %s" %(name) model_folder = os.path.join(logging_dir, name) model_folder = utils.convert_to_absolute_path(model_folder) network_dict[name] = model_folder if EVALUATE: DCE = DenseCorrespondenceEvaluation DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs) print "finished running evaluation on network %s" %(name) ``` ## With L2 on masked non_matches ``` dataset_config = utils.getDictFromYamlFilename(dataset_config_filename) dataset = SpartanDataset(config=dataset_config) train_config = utils.getDictFromYamlFilename(train_config_file) name = "l2_masked_%d" %(d) print "training %s" %(name) train_config = utils.getDictFromYamlFilename(train_config_file) train = DenseCorrespondenceTraining(dataset=dataset, config=train_config) train._config["training"]["logging_dir"] = logging_dir train._config["training"]["logging_dir_name"] = name train._config["training"]["num_iterations"] = num_iterations train._config["dense_correspondence_network"]["descriptor_dimension"] = d train._config["loss_function"]["use_l2_pixel_loss_on_masked_non_matches"] = True if TRAIN: train.run() print "finished training descriptor of dimension %d" %(d) # now do evaluation print "running evaluation on network %s" %(name) model_folder = os.path.join(logging_dir, name) model_folder = utils.convert_to_absolute_path(model_folder) network_dict[name] = model_folder if EVALUATE: DCE = DenseCorrespondenceEvaluation DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs) print "finished running evaluation on network %s" %(name) ``` ## Dont scale by hard negatives ``` dataset_config = utils.getDictFromYamlFilename(dataset_config_filename) dataset = SpartanDataset(config=dataset_config) train_config = utils.getDictFromYamlFilename(train_config_file) name = "dont_scale_hard_negatives_%d" %(d) print "training %s" %(name) train_config 
= utils.getDictFromYamlFilename(train_config_file) train = DenseCorrespondenceTraining(dataset=dataset, config=train_config) train._config["training"]["logging_dir"] = logging_dir train._config["training"]["logging_dir_name"] = name train._config["training"]["num_iterations"] = num_iterations train._config["dense_correspondence_network"]["descriptor_dimension"] = d train._config["loss_function"]["scale_by_hard_negatives"] = False if TRAIN: train.run() print "finished training descriptor of dimension %d" %(d) # now do evaluation print "running evaluation on network %s" %(name) model_folder = os.path.join(logging_dir, name) model_folder = utils.convert_to_absolute_path(model_folder) network_dict[name] = model_folder if EVALUATE: DCE = DenseCorrespondenceEvaluation DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs) print "finished running evaluation on network %s" %(name) ``` ## Dont sample off mask ``` dataset_config = utils.getDictFromYamlFilename(dataset_config_filename) dataset = SpartanDataset(config=dataset_config) train_config = utils.getDictFromYamlFilename(train_config_file) name = "dont_sample_from_mask_%d" %(d) print "training %s" %(name) train_config = utils.getDictFromYamlFilename(train_config_file) train = DenseCorrespondenceTraining(dataset=dataset, config=train_config) train._config["training"]["logging_dir"] = logging_dir train._config["training"]["logging_dir_name"] = name train._config["training"]["num_iterations"] = num_iterations train._config["dense_correspondence_network"]["descriptor_dimension"] = d train._config["training"]["sample_matches_only_off_mask"] = False train._config["training"]["use_image_b_mask_inv"] = False train._config["training"]["fraction_masked_non_matches"] = 0.01 train._config["training"]["fraction_background_non_matches"] = 0.99 if TRAIN: train.run() print "finished training descriptor of dimension %d" %(d) # now do evaluation print "running evaluation on network %s" %(name) model_folder = 
os.path.join(logging_dir, name) model_folder = utils.convert_to_absolute_path(model_folder) network_dict[name] = model_folder if EVALUATE: DCE = DenseCorrespondenceEvaluation DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs) print "finished running evaluation on network %s" %(name) ``` ## No tricks ``` dataset_config = utils.getDictFromYamlFilename(dataset_config_filename) dataset = SpartanDataset(config=dataset_config) train_config = utils.getDictFromYamlFilename(train_config_file) name = "no_tricks_%d" %(d) print "training %s" %(name) train = DenseCorrespondenceTraining(dataset=dataset, config=train_config) train_config = utils.getDictFromYamlFilename(train_config_file) train._config["training"]["logging_dir"] = logging_dir train._config["training"]["logging_dir_name"] = name train._config["training"]["num_iterations"] = num_iterations train._config["dense_correspondence_network"]["descriptor_dimension"] = d train._config["loss_function"]["scale_by_hard_negatives"] = False train._config["loss_function"]["use_l2_pixel_loss_on_masked_non_matches"] = False train._config["training"]["sample_matches_only_off_mask"] = False train._config["training"]["use_image_b_mask_inv"] = False train._config["training"]["fraction_masked_non_matches"] = 0.01 train._config["training"]["fraction_background_non_matches"] = 0.99 if TRAIN: train.run() print "finished training descriptor of dimension %d" %(d) # now do evaluation print "running evaluation on network %s" %(name) model_folder = os.path.join(logging_dir, name) model_folder = utils.convert_to_absolute_path(model_folder) network_dict[name] = model_folder if EVALUATE: DCE = DenseCorrespondenceEvaluation DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs) print "finished running evaluation on network %s" %(name) ``` ## L2 and dont scale hard negatives ``` dataset_config = utils.getDictFromYamlFilename(dataset_config_filename) dataset = SpartanDataset(config=dataset_config) 
train_config = utils.getDictFromYamlFilename(train_config_file) name = "l2_dont_scale_hard_negatives_run_2_%d" %(d) print "training %s" %(name) train_config = utils.getDictFromYamlFilename(train_config_file) train = DenseCorrespondenceTraining(dataset=dataset, config=train_config) train._config["training"]["logging_dir"] = logging_dir train._config["training"]["logging_dir_name"] = name train._config["training"]["num_iterations"] = num_iterations train._config["dense_correspondence_network"]["descriptor_dimension"] = d train._config["loss_function"]["scale_by_hard_negatives"] = False train._config["loss_function"]["use_l2_pixel_loss_on_masked_non_matches"] = True if TRAIN: train.run() print "finished training descriptor of dimension %d" %(d) # now do evaluation print "running evaluation on network %s" %(name) model_folder = os.path.join(logging_dir, name) model_folder = utils.convert_to_absolute_path(model_folder) network_dict[name] = model_folder if EVALUATE: DCE = DenseCorrespondenceEvaluation DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs) print "finished running evaluation on network %s" %(name) ```
github_jupyter
# Calculate Shapley values Shapley values as used in coalition game theory were introduced by Lloyd Shapley in 1953. [Scott Lundberg](http://scottlundberg.com/) applied Shapley values for calculating feature importance in [2017](http://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf). If you want to read the paper, I recommend reading: Abstract, 1 Introduction, 2 Additive Feature Attribution Methods, (skip 2.1, 2.2, 2.3), and 2.4 Classic Shapley Value Estimation. Lundberg calls this feature importance method "SHAP", which stands for SHapley Additive exPlanations. Here’s the formula for calculating Shapley values: $ \phi_{i} = \sum_{S \subseteq M \setminus i} \frac{|S|! (|M| - |S| -1 )!}{|M|!} [f(S \cup i) - f(S)]$ A key part of this is the difference between the model’s prediction with the feature $i$, and the model’s prediction without feature $i$. $S$ refers to a subset of features that doesn’t include the feature for which we're calculating $\phi_i$. $S \cup i$ is the subset that includes features in $S$ plus feature $i$. $S \subseteq M \setminus i$ in the $\Sigma$ symbol is saying, all sets $S$ that are subsets of the full set of features $M$, excluding feature $i$. ##### Options for your learning journey * If you’re okay with just using this formula, you can skip ahead to the coding section below. * If you would like an explanation for what this formula is doing, please continue reading here. ## Optional (explanation of this formula) The part of the formula with the factorials calculates the number of ways to generate the collection of features, where order matters. $\frac{|S|! (|M| - |S| -1 )!}{|M|!}$ #### Adding features to a Coalition The following concepts come from coalition game theory, so when we say "coalition", think of it as a team, where members of the team are added, one after another, in a particular order. 
Let’s imagine that we’re creating a coalition of features, by adding one feature at a time to the coalition, and including all $|M|$ features. Let’s say we have 3 features total. Here are all the possible ways that we can create this “coalition” of features. <ol> <li>$x_0,x_1,x_2$</li> <li>$x_0,x_2,x_1$</li> <li>$x_1,x_0,x_2$</li> <li>$x_1,x_2,x_0$</li> <li>$x_2,x_0,x_1$</li> <li>$x_2,x_1,x_0$</li> </ol> Notice that for $|M| = 3$ features, there are $3! = 3 \times 2 \times 1 = 6$ possible ways to create the coalition. #### marginal contribution of a feature For each of the 6 ways to create a coalition, let's see how to calculate the marginal contribution of feature $x_2$. <ol> <li>Model’s prediction when it includes features 0,1,2, minus the model’s prediction when it includes only features 0 and 1. $x_0,x_1,x_2$: $f(x_0,x_1,x_2) - f(x_0,x_1)$ <li>Model’s prediction when it includes features 0 and 2, minus the prediction when using only feature 0. Notice that feature 1 is added after feature 2, so it’s not included in the model. $x_0,x_2,x_1$: $f(x_0,x_2) - f(x_0)$</li> <li>Model's prediction including all three features, minus when the model is only given features 1 and 0. $x_1,x_0,x_2$: $f(x_1,x_0,x_2) - f(x_1,x_0)$</li> <li>Model's prediction when given features 1 and 2, minus when the model is only given feature 1. $x_1,x_2,x_0$: $f(x_1,x_2) - f(x_1)$</li> <li>Model’s prediction if it only uses feature 2, minus the model’s prediction if it has no features. When there are no features, the model’s prediction would be the average of the labels in the training data. $x_2,x_0,x_1$: $f(x_2) - f( )$ </li> <li>Model's prediction (same as the previous one) $x_2,x_1,x_0$: $f(x_2) - f( )$ </li> Notice that some of these marginal contribution calculations look the same. For example the first and third sequences, $f(x_0,x_1,x_2) - f(x_0,x_1)$ would get the same result as $f(x_1,x_0,x_2) - f(x_1,x_0)$. Same with the fifth and sixth. 
So we can use factorials to help us calculate the number of permutations that result in the same marginal contribution. #### break into 2 parts To get to the formula that we saw above, we can break up the sequence into two sections: the sequence of features before adding feature $i$; and the sequence of features that are added after feature $i$. For the set of features that are added before feature $i$, we’ll call this set $S$. For the set of features that are added after feature $i$ is added, we’ll call this $Q$. So, given the six sequences, and that feature $i$ is $x_2$ in this example, here’s what set $S$ and $Q$ are for each sequence: <ol> <li>$x_0,x_1,x_2$: $S$ = {0,1}, $Q$ = {}</li> <li>$x_0,x_2,x_1$: $S$ = {0}, $Q$ = {1} </li> <li>$x_1,x_0,x_2$: $S$ = {1,0}, $Q$ = {} </li> <li>$x_1,x_2,x_0$: $S$ = {1}, $Q$ = {0} </li> <li>$x_2,x_0,x_1$: $S$ = {}, $Q$ = {0,1} </li> <li>$x_2,x_1,x_0$: $S$ = {}, $Q$ = {1,0} </li> </ol> So for the first and third sequences, these have the same set S = {0,1} and same set $Q$ = {}. Another way to calculate that there are two of these sequences is to take $|S|! \times |Q|! = 2! \times 0! = 2$. Similarly, the fifth and sixth sequences have the same set S = {} and Q = {0,1}. Another way to calculate that there are two of these sequences is to take $|S|! \times |Q|! = 0! \times 2! = 2$. #### And now, the original formula To use the notation of the original formula, note that $|Q| = |M| - |S| - 1$. Recall that to calculate that there are 6 total sequences, we can use $|M|! = 3! = 3 \times 2 \times 1 = 6$. We’ll divide $|S|! \times (|M| - |S| - 1)!$ by $|M|!$ to get the proportion assigned to each marginal contribution. This is the weight that will be applied to each marginal contribution, and the weights sum to 1. So that’s how we get the formula: $\frac{|S|! 
(|M| - |S| -1 )!}{|M|!} [f(S \cup i) - f(S)]$ for each set $S \subseteq M \setminus i$ We can sum up the weighted marginal contributions for all sets $S$, and this represents the importance of feature $i$. You’ll get to practice this in code! ``` import sys !{sys.executable} -m pip install numpy==1.14.5 !{sys.executable} -m pip install scikit-learn==0.19.1 !{sys.executable} -m pip install graphviz==0.9 !{sys.executable} -m pip install shap==0.25.2 import sklearn import shap import numpy as np import graphviz from math import factorial ``` ## Generate input data and fit a tree model We'll create data where features 0 and 1 form the "AND" operator, and feature 2 does not contribute to the prediction (because it's always zero). ``` # AND case (features 0 and 1) N = 100 M = 3 X = np.zeros((N,M)) X.shape y = np.zeros(N) X[:1 * N//4, 1] = 1 X[:N//2, 0] = 1 X[N//2:3 * N//4, 1] = 1 y[:1 * N//4] = 1 # fit model model = sklearn.tree.DecisionTreeRegressor(random_state=0) model.fit(X, y) # draw model dot_data = sklearn.tree.export_graphviz(model, out_file=None, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph ``` ### Calculate Shap values We'll try to calculate the local feature importance of feature 0. We have 3 features, $x_0, x_1, x_2$. For feature $x_0$, determine what the model predicts with or without $x_0$. 
# Helper functions for the Shapley-value calculation.
# We evaluate the model on coalitions (subsets) of features, so we need
# (a) a way to pull out a subset of feature columns, and
# (b) the permutation weight |S|! * (|M| - |S| - 1)! / |M|! from the formula.

def get_subset(X, feature_l):
    """
    Return a 2D array containing only the requested feature columns.

    Parameters
    ----------
    X : np.ndarray of shape (n_samples, n_features)
        The full feature matrix.
    feature_l : list of int
        Column indices to keep, in the order they should appear.

    Returns
    -------
    np.ndarray of shape (n_samples, len(feature_l))
        The selected columns. An empty ``feature_l`` (the empty
        coalition, used in case D) yields an (n_samples, 0) array
        instead of the ValueError ``np.concatenate`` would raise
        on an empty list of arrays.
    """
    if not feature_l:
        # Empty coalition: no features selected.
        return np.zeros((X.shape[0], 0))
    # Fancy indexing keeps the requested columns in the given order
    # (equivalent to reshaping each column and concatenating).
    return X[:, list(feature_l)]

def calc_weight(size_S, num_features):
    """
    Weight of one marginal contribution in the Shapley formula.

    Computes |S|! * (|M| - |S| - 1)! / |M|!, i.e. the fraction of the
    |M|! feature orderings in which exactly the features in S precede
    feature i. The weights over all subsets S sum to 1.

    Parameters
    ----------
    size_S : int
        Number of features in the subset S (which excludes feature i).
    num_features : int
        Total number of features |M|.
    """
    return factorial(size_S) * factorial(num_features - size_S - 1) / factorial(num_features)
\times (3-2-1)!}{3!} = \frac{2 \times 1}{6} = \frac{1}{3}$ ``` calc_weight(size_S=2,num_features=3) ``` ## case A Calculate the prediction of a model that uses features 0 and 1 Calculate the prediction of a model that uses feature 1 Calculate the difference (the marginal contribution of feature 0) $f(x_0,x_1) - f(x_1)$ #### Calculate $f(x_0,x_1)$ ``` # S_union_i S_union_i = get_subset(X,[0,1]) # fit model f_S_union_i = sklearn.tree.DecisionTreeRegressor() f_S_union_i.fit(S_union_i, y) ``` Remember, for the sample input for which we'll calculate feature importance, we chose values of 1 for all features. ``` # This will throw an error try: f_S_union_i.predict(np.array([1,1])) except Exception as e: print(e) ``` The error message says: >Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample. So we'll reshape the data so that it represents a sample (a row), which means it has 1 row and 1 or more columns. ``` # feature 0 and feature 1 are both 1 in the sample input sample_input = np.array([1,1]).reshape(1,-1) sample_input ``` The prediction of the model when it has features 0 and 1 is: ``` pred_S_union_i = f_S_union_i.predict(sample_input) pred_S_union_i ``` When feature 0 and feature 1 are both 1, the prediction of the model is 1 #### Calculate $f(x_1)$ ``` # S S = get_subset(X,[1]) f_S = sklearn.tree.DecisionTreeRegressor() f_S.fit(S, y) ``` The sample input for feature 1 is 1. ``` sample_input = np.array([1]).reshape(1,-1) ``` The model's prediction when it is only training on feature 1 is: ``` pred_S = f_S.predict(sample_input) pred_S ``` When feature 1 is 1, then the prediction of this model is 0.5. If you look at the data in X, this makes sense, because when feature 1 is 1, half of the time, the label in y is 0, and half the time, the label in y is 1. 
So on average, the prediction is 0.5 #### Calculate difference ``` diff_A = pred_S_union_i - pred_S diff_A ``` #### Calculate the weight Calculate the weight assigned to the marginal contribution. In this case, if this marginal contribution occurs 1 out of the 6 possible permutations of the 3 features, then its weight is 1/6 ``` size_S = S.shape[1] # should be 1 weight_A = calc_weight(size_S, M) weight_A # should be 1/6 ``` ## Quiz: Case B Calculate the prediction of a model that uses features 0 and 2 Calculate the prediction of a model that uses feature 2 Calculate the difference $f(x_0,x_2) - f(x_2)$ #### Calculate $f(x_0,x_2)$ ``` # TODO S_union_i = get_subset(X,[0,2]) f_S_union_i = sklearn.tree.DecisionTreeRegressor() f_S_union_i.fit(S_union_i, y) sample_input = np.array([1,1]).reshape(1,-1) pred_S_union_i = f_S_union_i.predict(sample_input) pred_S_union_i ``` Since we're using features 0 and 2, and feature 2 doesn't help with predicting the output, then the model really just depends on feature 0. When feature 0 is 1, half of the labels are 0, and half of the labels are 1. So the average prediction is 0.5 #### Calculate $f(x_2)$ ``` # TODO S = get_subset(X,[2]) f_S = sklearn.tree.DecisionTreeRegressor() f_S.fit(S, y) sample_input = np.array([1]).reshape(1,-1) pred_S = f_S.predict(sample_input) pred_S ``` Since feature 2 doesn't help with predicting the labels in y, and feature 2 is 0 for all 100 training observations, then the prediction of the model is the average of all 100 training labels. 1/4 of the labels are 1, and the rest are 0. 
So that prediction is 0.25 #### Calculate the difference in predictions ``` # TODO diff_B = pred_S_union_i - pred_S diff_B ``` #### Calculate the weight ``` # TODO size_S = S.shape[1] # is 1 weight_B = calc_weight(size_S, M) weight_B # should be 1/6 ``` # Quiz: Case C Calculate the prediction of a model that uses features 0,1 and 2 Calculate the prediction of a model that uses feature 1 and 2 Calculate the difference $f(x_0,x_1,x_2) - f(x_1,x_2)$ #### Calculate $f(x_0,x_1,x_2) $ ``` # TODO S_union_i = get_subset(X,[0,1,2]) f_S_union_i = sklearn.tree.DecisionTreeRegressor() f_S_union_i.fit(S_union_i, y) sample_input = np.array([1,1,1]).reshape(1,-1) pred_S_union_i = f_S_union_i.predict(sample_input) pred_S_union_i ``` When we use all three features, the model is able to predict that if feature 0 and feature 1 are both 1, then the label is 1. #### Calculate $f(x_1,x_2)$ ``` # TODO S = get_subset(X,[1,2]) f_S = sklearn.tree.DecisionTreeRegressor() f_S.fit(S, y) sample_input = np.array([1,1]).reshape(1,-1) pred_S = f_S.predict(sample_input) pred_S ``` When the model is trained on features 1 and 2, then its training data tells it that half of the time, when feature 1 is 1, the label is 0; and half the time, the label is 1. So the average prediction of the model is 0.5 #### Calculate difference in predictions ``` # TODO diff_C = pred_S_union_i - pred_S diff_C ``` #### Calculate weights ``` # TODO size_S = S.shape[1] weight_C = calc_weight(size_S,M) # should be 2 / 6 = 1/3 weight_C ``` ## Quiz: case D: remember to include the empty set! The empty set is also a set. We'll compare how the model does when it has no features, and see how that compares to when it gets feature 0 as input. Calculate the prediction of a model that uses features 0. Calculate the prediction of a model that uses no features. 
# Case D: marginal contribution of feature 0 relative to the empty set,
# i.e. the difference f(x_0) - f().

# --- Calculate f(x_0): model trained on feature 0 only ---
S_union_i = get_subset(X, [0])
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)

sample_input = np.array([1]).reshape(1, -1)
pred_S_union_i = f_S_union_i.predict(sample_input)
pred_S_union_i  # with just feature 0 as input, the model predicts 0.5

# --- Calculate f(): no features at all ---
# There is no model to fit; the best constant prediction is the
# average of the training labels, which is 0.25.
pred_S = np.mean(y)
pred_S

# --- Difference in predictions (marginal contribution of feature 0) ---
diff_D = pred_S_union_i - pred_S
diff_D

# --- Weight: |S| = 0, so 0! * (3-0-1)! / 3! = 2/6 = 1/3 ---
size_S = 0
weight_D = calc_weight(size_S, M)
weight_D  # weight is 1/3

# --- Shapley value for feature 0 ---
# phi_0 = sum over all subsets S excluding feature 0 of
#         weight_S * (f(S union {0}) - f(S)),
# i.e. the weighted sum of the four marginal contributions from
# cases A, B, C and D above.  (The original cell left this as an
# unfilled `shap_0 = # ...` placeholder, which is a syntax error.)
shap_0 = (weight_A * diff_A
          + weight_B * diff_B
          + weight_C * diff_C
          + weight_D * diff_D)
shap_0

# --- Verify with the shap library (by Scott Lundberg) ---
sample_values = np.array([1,1,1])
shap_values = shap.TreeExplainer(model).shap_values(sample_values)
print(f"Shapley value for feature 0 that we calculated: {shap_0}")
print(f"Shapley value for feature 0 is {shap_values[0]}")
print(f"Shapley value for feature 1 is {shap_values[1]}")
print(f"Shapley value for feature 2 is {shap_values[2]}")
The Shapley value for feature 1 is also given the same importance as feature 0. * Given that the training data is simulating an AND operation, do you think these values make sense? * Do you think features 0 and 1 are equally important, or is one more important than the other? * Does the importance of feature 2 make sense as well? * How does this compare to the feature importance that's built into scikit-learn? ## Answer ## Note This method is general enough that it works for any model, not just trees. There is an optimized way to calculate this when the complex model being explained is a tree-based model. We'll look at that next. ## Solution [Solution notebook](calculate_shap_solution.ipynb)
github_jupyter
# Allosteric pathways with current flow analysis on protein-cofactor networks *This tutorial shows how to build and analyze networks that include protein residues and cofactors (e.g. lipids or small molecules).* ***Note***: To build and analyze a residue interaction network of the isolated protein only, just skip the steps in Section 2b and 3a, and inputs named *interactor_atom_inds_file.npy* or *additional_interactor_**. ## Citing this work The code and developments here are described in two papers. <br> **[1]** P.W. Kang, A.M. Westerlund, J. Shi, K. MacFarland White, A.K. Dou, A.H. Cui, J.R. Silva, L. Delemotte and J. Cui. <br> *Calmodulin acts as a state-dependent switch to control a cardiac potassium channel opening*. 2020<br><br> **[2]** A.M. Westerlund, O. Fleetwood, S. Perez-Conesa and L. Delemotte. <br> *Network analysis reveals how lipids and other cofactors influence membrane protein allostery*. 2020 [1] is an applications-oriented paper describing how to analyze **residue interaction networks** of **isolated proteins**. <br> [2] is a methods-oriented paper of how to build and analyze **residue interaction networks** that include **proteins and cofactors**. ## Short background A residue interaction network is typically obtained from the element-wise product of two matrices: <br> &emsp; 1) Contact map. <br> &emsp; 2) Correlation (of node fluctuations) map. For protein residue interaction networks, the node fluctuations correspond to protein residue fluctuations around an equilibrium position [1]. The method used to build contact and correlation maps which include cofactor nodes is described in details in [2]. ### Contact map The contact map here is defined using a truncated Gaussian kernel $K$ to smooth the contacts. 
For a frame with given a distance $d$ between two nodes $$ K(d) = \begin{cases} 1 & \text{if } d \le c \\ \exp (-\frac{d^2}{2\sigma^2}) / \exp (-\frac{c^2}{2\sigma^2}) & \text{otherwise} \end{cases} $$ By default, $c=0.45$ nm and $\sigma=0.138$ nm. <br> The cutoff, $c=0.45$, ensures a contact if $d \le 4.5$ Å. The standard deviation, $\sigma=0.138$, is chosen such that $K(d=0.8 \text{ nm}) = 10^{-5}$. <br><br> The final contact map is averaged over frames. ### Correlation map The correlation of node (protein residues in the case of isolated proteins) fluctuations is calculated using mutual information. $$ M_{ij} = H_i + H_j - H_{ij}, $$ where $$ H_i = -\int\limits_X \rho(x)\ln \rho(x). $$ $\rho_i(x)$ is the density of distances from the node equilibrium position. This is estimated with Gaussian mixture models and the Bayesian information criterion model selection. ### Including cofactors in the network Cofactors, such as lipids and small molecules, are treated slighlty differently than protein residues. The details are described in [2]. Practically, cofactors are processesed and added to the network in separate steps than the protein residues. The network nodes that represent cofactors are called *interactor nodes*. The following is needed to add cofactors in the network: 1. **Trajectory (and .pdb file) with protein and cofactors**: If the trajectory is centered on the protein, make sure that the other molecules are not split across simulation box boundaries. In gromacs, for example, this may be avoided in *gmx trjconv* by using the option *-pbc res*. <br> 2. **Definition of interactors**: A cofactor may be described by one or several *interactors*. An interactor could e.g. be the lipid head group. We therefore have to specify which cofactor atoms form an interactor. More details are found in Section 2b. <br> 3. **Contact map and fluctuations**: The practical details are outlined in Sections 2b and 3a. 
### Current flow analysis The networks are analyzed using a current flow analysis [3,4] framework. The code supports both current flow betweenness and current flow closeness analysis. In short, the current flow computes the net diffusion along edges between network nodes. The net throughput of a node is given by the sum over edges. Current flow betweenness is useful for identifying allosteric pathways [5,1]. Specifically, it shows how important each residue is for transmitting allosteric pathways from a source (allosteric site) to a sink (functional site). Current flow closeness centrality [3], instead indicates signaling efficiency within the network (using a "distance" measured in current flow). To perform current flow analysis, you need a contact map and a similarity map (e.g. mutual information or Pearson correlation). These are computed in Section 2-3. The practical details are described in Section 4. ## Additional references [3] U. Brandes and D. Fleischer, Springer, Berlin, Heidelberg, 2005 <br> [4] M. E. J. Newman, Social Networks, 2005 <br> [5] W.M. Botello-Smith and Y. Luo, J. Chem. Theory Comput., 2019 ## 1. Setup ``` import allopath import numpy as np # Set the trajectory that should be analyzed. structure=['input_data/my_system.pdb'] trajs=['input_data/system_traj1.dcd','input_data/system_traj2.dcd'] # Specify how many cores to run the calculations on. n_cores=4 # Set the output directories (out_dir is where the main data will be saved, # while out_dir_MI will contain the MI matrix data, see below on how they are used). out_dir='Results_data/' out_dir_MI='Results_data/MI_data/' file_label='my_system' # Simulation label which will be appended to filenames of all written files (optional) dt=1 # Trajectory stride (default=1) ``` ## 2. Semi-binary contact maps ------------------------------------------ ### 2a. Protein residue contact map To compute the protein (only including protein residue-residue interactions) contact map we will use _ContactMap_. 
***allopath.ContactMap***(**self,** *topology_file*, \**kwargs) where *kwargs* is a dictionary with the keyword arguments (https://docs.python.org/2/glossary.html). This means that to contruct a _ContactMap_ object we have to give at least the topology_file (_structure_) as input (but in principle we want the average over a trajectory): > CM = allopath.ContactMap(structure) We now create a dictionary, *kwargs*, to define the named/keyword arguments that should not assume default values, such as the trajectory, ie. you may include all named input arguments that you want to modify and remove those that you wish to keep at default value. List of input keyword parameters: * **trajectory_files**: Input trajectory files (.xtc, .dcd, etc) * **trajectory_file_directory**:Input directory with trajectory files (.xtc, .dcd, etc.). This will load all trajectory files in the specified directory (this is complementary to *trajectory_files*). * **file_label**: "File end name": label of the system that will be appended to the end of the produced files. * **out_directory**: The directory where data should be written. * **dt**: Trajectory stride. * **query**: Atom-selection used on the trajectory, e.g. "protein and !(type H)" or "protein and name CA". * **n_cores**: Number of jobs to run with joblib. * **cutoff**: Cutoff value, $c$, in the truncated Gaussian kernel. For distances < cutoff, the contact will be set to one (default $c=0.45$ nm, see "Background: contact map" for definition). * **std_dev**: Standard deviation value, $\sigma$, in the truncated Gaussian kernel. (default $\sigma=0.138$ nm => 1e-5 contact at 0.8 nm, see "Background: contact map" for definition) * **per_frame**: Whether or not to compute contact map per frame instead of averaging over the trajectory (default=False). * **start_frame**: Defines which frame to start calculations from. Used in combination with *per_frame*=True. * **end_frame**: Defines which frame to end calculations at. 
Used in combination with *per_frame*=True. * **ref_cmap_file**: File with reference cmap (e.g. average over all frames). Is used to make computations sparse/speed up calculation. Used in combination with *per_frame*=True. The default values are: <br> {'trajectory_files': '', <br> 'trajectory_file_directory': '', <br> 'dt': 1, <br> 'n_cores': 4, <br> 'out_directory': '', <br> 'file_label': '', <br> 'cutoff': 0.45, <br> 'query': 'protein and !(type H)', <br> 'start_frame': 0, <br> 'end_frame': -1, <br> 'ref_cmap_file': '', <br> 'per_frame': False, <br> 'std_dev': 0.138} <br> Note that the trajectory files can either be given by explicitly naming them and inputting as *trajectory_files* (as we do with _trajs_, see below), or by simply inputting a directory containing all the '.xtc' or '.dcd' files that should be analyzed (*trajectory_file_directory*). ``` # Set inputs kwargs={ 'trajectory_files': trajs, 'file_label': file_label, 'out_directory': out_dir, 'dt': dt, 'n_cores': n_cores } # Compute contact map and write to file CM = allopath.ContactMap(structure, **kwargs) CM.run() ``` ### 2b. Interactor node - protein residue contact map The contact map of interactor-interactor and interactor-protein residue node contacts wille be computed using *CofactorInteractors*. ***allopath.CofactorInteractors***(**self,** *topology_file*, \**kwargs) The *CofactorInteractors* is used to both compute the interactions that include cofactors, and the cofactor fluctuations. The cofactor fluctuations will be used as input to the MI calculations. List of input keyword parameters to create a contact map: * **trajectory_files**: Input trajectory files (.xtc, .dcd, etc) * **trajectory_file_directory**:Input directory with trajectory files (.xtc, .dcd, etc.). This will load all trajectory files in the specified directory. * **file_label**: "File end name": label of the system that will be appended to the end of the produced files. 
* **out_directory**: The directory where data should be written. * **dt**: Trajectory stride. * **cofactor_domain_selection**: A file containing cofactor-interactor selections. Each row should list the atoms that make up an interactor. Example of a domain selection file content: <br><br> *resname POPC and name N C11 C12 C13 C14 C15 P O13 O14 O12 O11 C1 C2 <br> resname POPC and name O21 C21 C22 O22 C23 C24 C25 C26 C27 C28 C29 C210 C211 C212 C213 C214 C215 C216 C217 C218 <br> resname POPC and name C3 O31 C31 C32 O32 C33 C34 C35 C36 C37 C38 C39 C310 C311 C312 C313 C314 C315 C316* <br><br> * **cutoff**: Cutoff value, $c$, for binary residue-lipid contacts. For distances < cutoff, the contact will be set to one (default=0.45 nm). * **std_dev**: Standard deviation value, $\sigma$, on the semi-binary Gaussian-kernel. (default=0.138 nm => 1e-5 contact at 0.8 nm) The default values are: <br> {'trajectory_files': '', <br> 'trajectory_file_directory': '', <br> 'dt': 1, <br> 'out_directory': '', <br> 'file_label': '', <br> 'cofactor_domain_selection': '', <br> 'cofactor_interactor_inds': '', <br> 'cofactor_interactor_coords':, '', <br> 'compute_cofactor_interactor_fluctuations': False, <br> 'cofactor_interactor_atom_inds': '', <br> 'cutoff': 0.45, <br> 'std_dev': 0.138} <br> ``` # Set inputs cofactor_domain_selection_file='input_data/cofactor_domain_selection.txt' kwargs={ 'trajectory_files': trajs, 'file_label': file_label, 'out_directory': out_dir, 'dt': dt, 'cofactor_domain_selection': cofactor_domain_selection_file } # Compute contact map and write to file CI = allopath.CofactorInteractors(structure, **kwargs) CI.run() ``` ## 3. Mutual information ----------------------------------- To compute mutual information (MI) between nodes we use *MutualInformation* and *CofactorInteractors*. The MI is done in **four** steps. <br> **(a)** Computing the interactor node fluctuations using *CofactorInteractors*. 
These will be given as input to *MutualInformation*.<br> **(b)** Computing the off-diagonal elements in the MI matrix using *MutualInformation*. Because this is computationally demanding, we can 1) use the contact map as input to ignore non-contacting residues and 2) split the matrix into blocks that can be processed in parallel (although we will do it in sequence in this tutorial). > We will divide the matrix into 4 blocks along the column and 4 blocks along the rows. As we include the diagonal blocks but use symmetry on off-diagonal blocks, we get *n_matrix_block_cols=4 and *n_blocks*= n_matrix_block_cols(n_matrix_block_cols-1)/2 + n_matrix_block_cols = 10 number of blocks. The input argument *i_block* should be between 1 and *n_blocks*, denoting which block should be constructed. <br> **(c)** Computing the diagonal elements in the MI matrix using *MutualInformation*. This requires *do_diagonal*=True as input. *Note: This is only needed if you normalize the mutual information in allopath.CurrentFlow.* (Section 4)<br> **(d)** Building the full off-diagonal matrix based on blocks.<br><br> *Note:* The calculations in **(b)** and **(c)** are time consuming, but they are structured so that they can be launched in parallel. **(d)** cannot be done until the calculations in **(b)** have finished. ### 3a. Computing interactor node fluctuations The interactor fluctuations will be computed using *CofactorInteractors*. ***allopath.CofactorInteractors***(**self,** *topology_file*, \**kwargs) As mentioned, *CofactorInteractors* is used to both compute the interactions that include cofactors, and the cofactor fluctuations. To compute interactor fluctuations, we need to set **compute_cofactor_interactor_fluctuations=True** in *kwargs*. List of input keyword parameters to compute interactor fluctuations: * **trajectory_files**: Input trajectory files (.xtc, .dcd, etc) * **trajectory_file_directory**:Input directory with trajectory files (.xtc, .dcd, etc.). 
This will load all trajectory files in the specified directory. * **file_label**: "File end name": label of the system that will be appended to the end of the produced files. * **out_directory**: The directory where data should be written. * **dt**: Trajectory stride. * **cofactor_interactor_inds**: (generated when computing the interactor node contact map). * **cofactor_interactor_coords**:(generated when computing the interactor node contact map). * **compute_interactor_node_fluctuations**: Whether or not to compute the fluctuations. Default is False. Set to True. The default values are: <br> {'trajectory_files': '', <br> 'trajectory_file_directory': '', <br> 'dt': 1, <br> 'out_directory': '', <br> 'file_label': '', <br> 'cofactor_domain_selection': '', <br> 'cofactor_interactor_inds': '', <br> 'cofactor_interactor_coords':, '', <br> 'compute_cofactor_interactor_fluctuations': False, <br> 'cofactor_interactor_atom_inds': '', <br> 'cutoff': 0.45, <br> 'std_dev': 0.138} <br> ``` # Set inputs cofactor_interactor_inds = out_dir+'cofactor_interactor_indices_'+file_label+'.npy' cofactor_interactor_coords = out_dir+'cofactor_interactor_coords_'+file_label+'.npy' kwargs={ 'trajectory_files': trajs, 'file_label': file_label, 'out_directory': out_dir, 'dt': dt, 'cofactor_interactor_inds': cofactor_interactor_inds, 'cofactor_interactor_coords': cofactor_interactor_coords, 'compute_interactor_node_fluctuations': True } # Compute interactor node fluctuations and write to file CI = allopath.CofactorInteractors(structure, **kwargs) CI.run() ``` ### 3b. Computing off-diagonal elements The MI matrix is obtained with *MutualInformation*. ***allopath.MutualInformation*** (**self,** *topology_file*, \**kwargs) Similarly to *ContactMap* and *CofactorInteractors* it is in principle enough to input the structure. 
> MI = allopath.MutualInformation(structure) List of input keyword parameters: * **trajectory_files**: Input trajectory files (.xtc, .dcd, etc) * **trajectory_file_directory**:Input directory with trajectory files (.xtc, .dcd, etc.). This will load all trajectory files in the specified directory. * **file_label**: "File end name": label of the system that will be appended to the end of the produced files. * **out_directory**: The directory where data should be written. * **dt**: Trajectory stride. * **n_cores**: Number of jobs to run with joblib. * **n_matrix_block_cols**: Number of blocks of the column of the MI matrix. Example: 4 blocks => 10 parts (upper triangle + diagonal). See part (a) above. * **i_block**: The matrix block for which MI should be calculated. See part (a) above. * **n_split_sets**: Number of sampled sets with the same size as the original data set to use for more accurate estimate of entropy. Can also be used to check unceratinty of the MI matrix. * **additional_interactor_protein_contacts**: The interactor contact map (computed in Section 2b). * **additional_interactor_fluctuations**: The interactor fluctuations (computed in Section 3a). * **n_components_range:** Array with the lower and upper limit of GMM components used to estimate densities. * **do_diagonal**: Whether or not to compute diagonal of residue-residue mutual information (default=False). The default values are: <br> {'trajectory_files': '', <br> 'trajectory_file_directory': '', <br> 'dt': 1, <br> 'out_directory': '', <br> 'file_label': '', <br> 'n_cores': -1, <br> 'contact_map_file': '', <br> 'i_block': 0, <br> 'n_matrix_block_cols': 1 <br> 'n_split_sets': 0, <br> 'additional_interactor_protein_contacts': '', <br> 'additional_interactor_fluctuations': '', <br> 'n_components_range': [1,4], <br> 'do_diagonal': False } <br> To compute the off-diagonal elements, we use the default *do_diagonal*=False and split the matrix into 10 blocks. 
We also do 10 bootstrap samplings to obtain a better entropy estimate. ``` n_blocks = 10 n_cols = 4 n_bootstraps = 10 contact_map = out_dir+'distance_matrix_semi_bin_'+file_label+'.txt' additional_interactor_fluctuations = out_dir+'interactor_centroid_fluctuations_'+file_label+'.npy' additional_interactor_protein_contacts = out_dir+'cofactor_protein_residue_semi_binary_cmap_'+file_label+'.npy' n_components_range = [1,4] for i_block in range(1,n_blocks+1): # Set inputs kwargs={ 'trajectory_files': trajs, 'dt': dt, 'contact_map_file': contact_map, 'additional_interactor_fluctuations': additional_interactor_fluctuations, 'additional_interactor_protein_contacts': additional_interactor_protein_contacts, 'i_block': i_block, 'n_matrix_block_cols': n_cols, 'n_split_sets': n_bootstraps, 'n_components_range': n_components_range, 'file_label': file_label, 'out_directory': out_dir_MI, 'n_cores': n_cores, } # Compute mutual information matrix MI = allopath.MutualInformation(structure, **kwargs) MI.run() ``` ### 3c. Computing diagonal elements To estimate the diagonal elements, we use the same inputs as above except setting *do_diagonal*=True. Moreover, the matrix is not divided into blocks since the diagonal is much faster to compute. ***Note:*** *This step is only needed if you choose to normalize the mutual information in allopath.CurrentFlow (Section 4).* ``` # Set inputs kwargs={ 'trajectory_files': trajs, 'dt': dt, 'additional_interactor_fluctuations': additional_interactor_fluctuations, 'n_split_sets': n_bootstraps, 'file_label': file_label, 'out_directory': out_dir_MI, 'n_components_range': n_components_range, 'n_cores': n_cores, 'do_diagonal': True } # Compute diagonal of the MI matrix MI = allopath.MutualInformation(structure, **kwargs) MI.run() ``` ### 3d. Building matrix from blocks Next, the full MI matrix is built. ***allopath.from_matrix.build_matrix*** (*base_file_name*, *n_blocks*, file_label='', out_directory='') We use the same parameters as above. 
List of input parameters: * **base_file_name**: the base name of each file to be processed. This is given by *base_file_name*=*path_to_data*+'res_res_MI_part_' . * **n_blocks**: Total number of generated matrix blocks. * **file_label**: "File end name": label of the system that will be appended to the end of the produced files (default is ''). * **out_directory**: The directory where data should be written to (default is ''). The input *base_file_name* is named after the files in "Results_data/MI_data/". ``` base_file_name=out_dir+'MI_data/res_res_MI_part_' # Set inputs kwargs={ 'file_label': file_label, 'out_directory': out_dir+'MI_data/' } # Build matrix allopath.from_matrix_blocks.build_matrix(base_file_name, n_blocks, **kwargs) ``` ## 4. Current flow analysis ----------------------------------- Current flow analysis is done with *CurrentFlow*. ***allopath.CurrentFlow*** (**self,** *similarity_map_filename*, *contact_map_filenames*, *sources_filename*, *sinks_filename*, \**kwargs) To run current flow analysis in its simplest form, the files containing the similarity map (ie. our MI matrix), the contact map and the source and sink indices are needed. > allopath.CurrentFlow(similarity_map_filename, contact_map_filename, sources_filename, sinks_filename) Explanation of input (positional) parameters: * **similarity_map_filename**: File containing the similarity map (ie. the mutual information matrix). * **contact_map_filenames**: File containing the contact map(s). If multiple are given, one current flow profile per contact map will be computed (*Note: multiple network calculations are only supported for isolated-protein networks*). * **sources_filename**: File containing the residue indices of the sources. * **sinks_filename**: File containing the residue indices of the sinks. Explanation of input keyword parameters: * **similarity_map_diagonal_filename**: File containing the diagonal elements of the mutual information matrix. 
* **additional_interactor_protein_contacts**: The interactor contact map (computed in Section 2b). * **out_directory**: The directory where data should be written. * **file_label**: "File end name": label of the system that will be appended to the end of the produced files. * **n_chains**: The number of (homomeric) chains/subunits in the main-protein (e.g. a tetrameric ion channel => n_chains = 4). * **n_cores**: Number of jobs to run with joblib. * **cheap_write**: If set to True, fewer files will be written. * **start_frame**: Used if multiple contact maps are supplied. *start_frame* is the index of the first frame to analyze. * **normalize_similarity_map**: Whether or not to normalize the similarity map with symmetric uncertainty (*Note: applies to mutual information maps; Witten & Frank, 2005*) * **auxiliary_protein_indices**: Residue indices of auxiliary subunits. This is used when symmetrizing current flow over subunits (chains). The auxiliary subunits will also be averaged over chains, ie. one auxiliary subunit per chain is assumed. If there is no auxiliary subunit, just ignore this input to the current flow script. * **compute_current_flow_closeness**: Whether or not to compute current flow closeness instead of current flow betweenness. 
The default values are: <br> {'out_directory': '', <br> 'file_label': '', <br> 'similarity_map_diagonal_filename': '', <br> 'n_chains': 1, <br> 'n_cores': 1, <br> 'cheap_write': False, <br> 'start_frame': 0, <br> 'normalize_similarity_map': False, <br> 'auxiliary_protein_indices': '', <br> 'additional_interactor_protein_contacts': '', <br> 'compute_current_flow_closeness': False } <br> ``` similarity_map = out_dir+'MI_data/res_res_MI_compressed_'+file_label+'.npy' similarity_map_diagonal = out_dir+'MI_data/diagonal_MI_'+file_label+'.npy' contact_maps = [out_dir+'distance_matrix_semi_bin_'+file_label+'.txt'] additional_interactor_protein_contacts = out_dir+'cofactor_protein_residue_semi_binary_cmap_'+file_label+'.npy' n_chains=4 source_inds='input_data/inds_sources.txt' sink_inds='input_data/inds_sinks.txt' aux_inds='input_data/auxiliary_prot_inds.txt' compute_current_flow_closeness = False # False (ie. default) => will compute current flow betweenness. # Set this to True to compute current flow closeness centrality between each # source and all sinks instead. kwargs={ 'file_label': file_label, 'out_directory': out_dir, 'n_chains': n_chains, 'n_cores': n_cores, 'similarity_map_diagonal_filename': similarity_map_diagonal, 'normalize_similarity_map': False, 'auxiliary_protein_indices': aux_inds, 'additional_interactor_protein_contacts': additional_interactor_protein_contacts, 'compute_current_flow_closeness': compute_current_flow_closeness } CF = allopath.CurrentFlow(similarity_map, contact_maps, source_inds, sink_inds, **kwargs) CF.run() ``` ## 5. Project current flow on structure ---------------------------------------------------- As a last step, we project the current flow onto the structure (PDB file) with *make_pdb*. The current flow of each residue will be mapped to the beta-column in the PDB. This can be visualized in VMD by setting the "Coloring method" to "Beta" in "Graphical Representations". 
> ***allopath.make_pdb.project_current_flow***(*pdb_file*, *current_flow_file*, \**kwargs) Explanation of input (positional arguments) parameters: * **pdb_file**: The .pdb file corresponding to the first trajectory frame. *Note: .gro does not work.* * **current_flow_file**: File containing the current flow. This is created by *CurrentFlow*, Section 4. **Note:** For homomultimers (using *n_chains > 1* in *CurrentFlow*), the file is *out_dir+'average_current_flow_'+file_label+'.npy'*. For *n_chains = 1*, the file is *out_dir+'current_flow_betweenness_'+file_label+'.npy'*. Explanation of input keyword arguments: * **out_directory**: The directory where pdb should be written. * **file_label**: "File end name": label of the system that will be appended to the end of the produced pdb. * **max_min_normalize**: Whether or not to scale the current flow between 0 and 1. * **interactor_atom_inds_file**: The atom indices used to define the interactors (generated in Section 2b). The default values are: <br> {'out_directory': '', <br> 'file_label': '', <br> 'max_min_normalize': False,<br> 'interactor_atom_inds_file': None } ``` out_file = out_dir+'PDBs/current_flow_'+file_label+'.pdb' current_flow = out_dir+'average_current_flow_'+file_label+'.npy' interactor_atom_inds_file = out_dir+'cofactor_interactor_atom_indices_'+file_label+'.npy' kwargs={ 'out_directory': out_dir+'PDBs/', 'file_label': file_label, 'interactor_atom_inds_file': interactor_atom_inds_file } # Create PDB with current flow values on the beta column allopath.make_pdb.project_current_flow(structure[0], current_flow, **kwargs) ```
github_jupyter
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); </script> # Start-to-Finish Example: Validating Shifted Kerr-Schild initial data against ETK version: ## Author: Patrick Nelson **Notebook Status:** <font color='green'><b>Validated</b></font> **Validation Notes:** This module validates all expressions used to set up initial data in * [Tutorial-ADM_Initial_Data-ShiftedKerrSchild](../Tutorial-ADM_Initial_Data-ShiftedKerrSchild.ipynb) against the C-code implementation of these expressions found in the original (trusted) [`GiRaFFEfood` Einstein Toolkit thorn](link), and confirms roundoff-level agreement. ### NRPy+ Source Code for this module: * [BSSN/ShiftedKerrSchild.py](../../edit/BSSN/ShiftedKerrSchild.py) [\[**tutorial**\]](../Tutorial-ADM_Initial_Data-ShiftedKerrSchild.ipynb) Generates Exact Wald initial data ## Introduction: This notebook validates the initial data routines that set up the Shifted Kerr-Schild initial spacetime data against the ETK implementation of the same equations. When this notebook is run, the significant digits of agreement between the old ETK and new NRPy+ versions of the algorithm will be evaluated. If the agreement falls below a thresold, the point, quantity, and level of agreement are reported [here](#compile_run). <a id='toc'></a> # Table of Contents $$\label{toc}$$ This notebook is organized as follows 1. [Step 1](#setup): Set up core functions and parameters for unit testing the initial data algorithms 1. [Step 1.a](#spacetime) Generate the spacetime metric 1. [Step 1.b](#download) Download original ETK files 1. [Step 1.c](#free_params) Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` 1. [Step 1.d](#interface) Create dummy files for the CCTK version of the code 1. 
[Step 2](#mainc): `ShiftedKerrSchild_unit_test.c`: The Main C Code 1. [Step 2.a](#compile_run): Compile and run the code to validate the output 1. [Step 3](#drift_notes): Output this notebook to $\LaTeX$-formatted PDF file 1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file <a id='setup'></a> # Step 1: Set up core functions and parameters for unit testing the initial data algorithms" \[Back to [top](#toc)\] $$\label{setup}$$ We'll start by appending the relevant paths to `sys.path` so that we can access sympy modules in other places. Then, we'll import NRPy+ core functionality and set up a directory in which to carry out our test. We will also declare the gridfunctions that are needed for this portion of the code. ``` import os, sys # Standard Python modules for multiplatform OS-level functions # First, we'll add the parent directory to the list of directories Python will check for modules. nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) nrpy_dir_path = os.path.join("..","..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) from outputC import outCfunction, lhrh # NRPy+: Core C code output module import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends import finite_difference as fin # NRPy+: Finite difference C code generation module import NRPy_param_funcs as par # NRPy+: Parameter interface import grid as gri # NRPy+: Functions having to do with numerical grids import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) 
support import reference_metric as rfm # NRPy+: Reference metric support import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface out_dir = "Validation/" cmd.mkdir(out_dir) thismodule = "Start_to_Finish_UnitTest-GiRaFFEfood_NRPy" # Register the gridfunctions we need for this function gammaDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","gammaDD","sym01") betaU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL","betaU") alpha = gri.register_gridfunctions("AUXEVOL","alpha") ``` <a id='spacetime'></a> ## Step 1.a: Generate the spacetime metric \[Back to [top](#toc)\] $$\label{spacetime}$$ While many of the initial data we will use assume a flat background spacetime, some will require a specific metric. We will set those up as needed here. ``` # Exact Wald is more complicated. We'll need the Shifted Kerr Schild metric in Cartesian coordinates. import BSSN.ShiftedKerrSchild as sks sks.ShiftedKerrSchild(True) import reference_metric as rfm par.set_parval_from_str("reference_metric::CoordSystem","Cartesian") rfm.reference_metric() # Use the Jacobian matrix to transform the vectors to Cartesian coordinates. 
# Jacobian d(xxSph^k)/d(xx^i) of the spherical coordinates with respect to
# the Cartesian reference-metric coordinates, built symbolically with SymPy.
drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff(rfm.xxSph[0],rfm.xx[1]), sp.diff(rfm.xxSph[0],rfm.xx[2])],
                                       [sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])],
                                       [sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]])
# Inverse Jacobian d(xx^i)/d(xxSph^j), needed to transform upper-index (contravariant) quantities.
dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv()

# Covariant rank-2 transformation of the spatial metric:
# gamma_ij (Cartesian) = (dxxSph^k/dxx^i)(dxxSph^l/dxx^j) gammaSph_kl,
# with the symbols r and th replaced by their expressions in Cartesian coordinates.
gammaDD = ixp.zerorank2()
for i in range(3):
    for j in range(3):
        for k in range(3):
            for l in range(3):
                gammaDD[i][j] += drrefmetric__dx_0UDmatrix[(k,i)]*drrefmetric__dx_0UDmatrix[(l,j)]*sks.gammaSphDD[k][l].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1])

# Contravariant vector transformation of the shift:
# beta^i (Cartesian) = (dxx^i/dxxSph^j) betaSph^j, with the same r,th substitution.
betaU = ixp.zerorank1()
for i in range(3):
    for j in range(3):
        betaU[i] += dx__drrefmetric_0UDmatrix[(i,j)]*sks.betaSphU[j].subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1])

# We only need to set alpha and betaU in C for the original Exact Wald
# Name and description of the C function that the next cell's outCfunction call generates.
name = "Shifted_Kerr_Schild_initial_metric"
desc = "Generate a spinning black hole with Shifted Kerr Schild metric."
values_to_print = [ lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD00"),rhs=gammaDD[0][0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD01"),rhs=gammaDD[0][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD02"),rhs=gammaDD[0][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD11"),rhs=gammaDD[1][1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD12"),rhs=gammaDD[1][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","gammaDD22"),rhs=gammaDD[2][2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU0"),rhs=betaU[0]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU1"),rhs=betaU[1]), lhrh(lhs=gri.gfaccess("auxevol_gfs","betaU2"),rhs=betaU[2]), lhrh(lhs=gri.gfaccess("auxevol_gfs","alpha"),rhs=sks.alphaSph.subs(sks.r,rfm.xxSph[0]).subs(sks.th,rfm.xxSph[1])) ] outCfunction( outfile = os.path.join(out_dir,name+".h"), desc=desc, name=name, params ="const paramstruct *params,REAL *xx[3],REAL *auxevol_gfs", body = fin.FD_outputC("returnstring",values_to_print,params="outCverbose=False").replace("IDX4","IDX4S"), loopopts ="AllPoints,Read_xxs") ``` <a id='download'></a> ## Step 1.b: Download original ETK files \[Back to [top](#toc)\] $$\label{download}$$ Here, we download the relevant portion of the original `GiRaFFE` code from Bitbucket. 
``` # First download the original GiRaFFE source code import urllib original_file_url = [ "https://bitbucket.org/zach_etienne/wvuthorns/raw/0a82c822748baf754c153db484d8bd2d0b7e39cb/ShiftedKerrSchild/src/ShiftedKerrSchild.c", ] original_file_name = [ "ShiftedKerrSchild.c", ] for i in range(len(original_file_url)): original_file_path = os.path.join(out_dir,original_file_name[i]) # Then download the original GiRaFFE source code # We try it here in a couple of ways in an attempt to keep # the code more portable try: original_file_code = urllib.request.urlopen(original_file_url[i]).read().decode('utf-8') except: original_file_code = urllib.urlopen(original_file_url[i]).read().decode('utf-8') # Write down the file the original GiRaFFE source code with open(original_file_path,"w") as file: file.write(original_file_code) ``` <a id='free_params'></a> ## Step 1.c: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](#toc)\] $$\label{free_params}$$ Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`. Then we output `free_parameters.h`, which sets some basic grid parameters as well as the speed limit parameter we need for this function. ``` # Step 3.d # Step 3.d.ii: Set free_parameters.h with open(os.path.join(out_dir,"free_parameters.h"),"w") as file: file.write(""" // Set free-parameter values. const int NGHOSTS = 3; // Set free-parameter values for the initial data. // Override parameter defaults with values based on command line arguments and NGHOSTS. 
const int Nx0x1x2 = 5; params.Nxx0 = Nx0x1x2; params.Nxx1 = Nx0x1x2; params.Nxx2 = Nx0x1x2; params.Nxx_plus_2NGHOSTS0 = params.Nxx0 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS1 = params.Nxx1 + 2*NGHOSTS; params.Nxx_plus_2NGHOSTS2 = params.Nxx2 + 2*NGHOSTS; // Step 0d: Set up space and time coordinates // Step 0d.i: Declare \Delta x^i=dxx{0,1,2} and invdxx{0,1,2}, as well as xxmin[3] and xxmax[3]: const REAL xxmin[3] = {-1.5,-1.5,-1.5}; const REAL xxmax[3] = { 1.5, 1.5, 1.5}; params.dxx0 = (xxmax[0] - xxmin[0]) / ((REAL)params.Nxx0); params.dxx1 = (xxmax[1] - xxmin[1]) / ((REAL)params.Nxx1); params.dxx2 = (xxmax[2] - xxmin[2]) / ((REAL)params.Nxx2); params.invdx0 = 1.0 / params.dxx0; params.invdx1 = 1.0 / params.dxx1; params.invdx2 = 1.0 / params.dxx2; params.r0 = 0.4; params.a = 0.0; \n""") # Generates declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h par.generate_Cparameters_Ccodes(os.path.join(out_dir)) ``` <a id='interface'></a> ## Step 1.d: Create dummy files for the CCTK version of the code \[Back to [top](#toc)\] $$\label{interface}$$ The original `GiRaFFE` code depends on some functionalities of the CCTK. Since we only care about this one small function, we can get around this by creating some nearly-empty, non-functional files that can be included to satisfy the pre-processor without changing functionality. We will later replace what little functionality we need with some basic global variables and macros. 
``` #include "cctk.h" #include "cctk_Arguments.h" #include "cctk_Parameters.h" with open(os.path.join(out_dir,"cctk.h"),"w") as file: file.write("""//""") with open(os.path.join(out_dir,"cctk_Arguments.h"),"w") as file: file.write("""#define DECLARE_CCTK_ARGUMENTS // #define CCTK_ARGUMENTS void """) with open(os.path.join(out_dir,"cctk_Parameters.h"),"w") as file: file.write("""#define DECLARE_CCTK_PARAMETERS // """) ``` <a id='mainc'></a> # Step 2: `ShiftedKerrSchild_unit_test.C`: The Main C Code \[Back to [top](#toc)\] $$\label{mainc}$$ Now that we have our vector potential and analytic magnetic field to compare against, we will start writing our unit test. We'll also import common C functionality, define `REAL`, the number of ghost zones, and the faces, and set the standard macros for NRPy+ style memory access. ``` %%writefile $out_dir/ShiftedKerrSchild_unit_test.C // These are common packages that we are likely to need. #include "stdio.h" #include "stdlib.h" #include "math.h" #include "stdint.h" // Needed for Windows GCC 6.x compatibility #define REAL double #include "declare_Cparameters_struct.h" // Standard NRPy+ memory access: #define IDX4S(g,i,j,k) \ ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) ) // Standard formula to calculate significant digits of agreement: #define SDA(a,b) 1.0-log10(2.0*fabs(a-b)/(fabs(a)+fabs(b))) // Memory access definitions for NRPy+ #define GAMMADD00GF 0 #define GAMMADD01GF 1 #define GAMMADD02GF 2 #define GAMMADD11GF 3 #define GAMMADD12GF 4 #define GAMMADD22GF 5 #define BETAU0GF 6 #define BETAU1GF 7 #define BETAU2GF 8 #define ALPHAGF 9 #define KDD00GF 10 #define KDD01GF 11 #define KDD02GF 12 #define KDD11GF 13 #define KDD12GF 14 #define KDD22GF 15 #define NUM_AUXEVOL_GFS 16 // Include the functions that we want to test: #include "Shifted_Kerr_Schild_initial_metric.h" // Define CCTK macros #define CCTK_REAL double #define CCTK_INT int #define CCTK_VPARAMWARN(...) 
// #define CCTK_EQUALS(a,b) 1 struct cGH{}; const cGH* cctkGH; // More definitions to interface with ETK code: const int cctk_lsh[3] = {11,11,11}; const int grid_size = cctk_lsh[0]*cctk_lsh[1]*cctk_lsh[2]; import os gfs_list = ['x','y','z','r','SKSgrr','SKSgrth','SKSgrph','SKSgthth','SKSgthph','SKSgphph','SKSbetar','SKSbetath','SKSbetaph'] with open(os.path.join(out_dir,"ShiftedKerrSchild_unit_test.C"), 'a') as file: for gf in gfs_list: file.write("CCTK_REAL *"+gf+" = (CCTK_REAL *)malloc(sizeof(CCTK_REAL)*grid_size);\n") %%writefile -a $out_dir/ShiftedKerrSchild_unit_test.C CCTK_REAL *alp; CCTK_REAL *betax; CCTK_REAL *betay; CCTK_REAL *betaz; CCTK_REAL *gxx; CCTK_REAL *gxy; CCTK_REAL *gxz; CCTK_REAL *gyy; CCTK_REAL *gyz; CCTK_REAL *gzz; CCTK_REAL *kxx; CCTK_REAL *kxy; CCTK_REAL *kxz; CCTK_REAL *kyy; CCTK_REAL *kyz; CCTK_REAL *kzz; CCTK_REAL KerrSchild_radial_shift; CCTK_REAL BH_mass; CCTK_REAL BH_spin; // Dummy ETK function: #define CCTK_GFINDEX3D(cctkGH,i,j,k) (i) + cctk_lsh[0] * ( (j) + cctk_lsh[1] * (k) ) #include "ShiftedKerrSchild.c" int main() { paramstruct params; #include "set_Cparameters_default.h" // Step 0c: Set free parameters, overwriting Cparameters defaults // by hand or with command-line input, as desired. 
#include "free_parameters.h" #include "set_Cparameters-nopointer.h" // Set CCTK parameters to match NRPy+ parameters KerrSchild_radial_shift = r0; BH_mass = M; BH_spin = a; // Step 0d.ii: Set up uniform coordinate grids REAL *xx[3]; xx[0] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS0); xx[1] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS1); xx[2] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS2); for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) xx[0][j] = xxmin[0] + (j-NGHOSTS)*dxx0; for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) xx[1][j] = xxmin[1] + (j-NGHOSTS)*dxx1; for(int j=0;j<Nxx_plus_2NGHOSTS2;j++) xx[2][j] = xxmin[2] + (j-NGHOSTS)*dxx2; for(int k=0;k<Nxx_plus_2NGHOSTS2;k++) for(int j=0;j<Nxx_plus_2NGHOSTS1;j++) for(int i=0;i<Nxx_plus_2NGHOSTS0;i++) { int index = CCTK_GFINDEX3D(cctkGH,i,j,k); x[index] = xx[0][i]; y[index] = xx[1][j]; z[index] = xx[2][k]; r[index] = sqrt(x[index]*x[index] + y[index]*y[index] + z[index]*z[index]); } //for(int j=0;j<Nxx_plus_2NGHOSTS0;j++) printf("x[%d] = %.5e\n",j,xx[0][j]); // This is the array to which we'll write the NRPy+ variables. 
REAL *auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0); REAL *auxevol_ETK_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS2 * Nxx_plus_2NGHOSTS1 * Nxx_plus_2NGHOSTS0); // Memory access for metric gridfunctions for Exact Wald: gxx = auxevol_ETK_gfs + (grid_size*GAMMADD00GF); gxy = auxevol_ETK_gfs + (grid_size*GAMMADD01GF); gxz = auxevol_ETK_gfs + (grid_size*GAMMADD02GF); gyy = auxevol_ETK_gfs + (grid_size*GAMMADD11GF); gyz = auxevol_ETK_gfs + (grid_size*GAMMADD12GF); gzz = auxevol_ETK_gfs + (grid_size*GAMMADD22GF); alp = auxevol_ETK_gfs + (grid_size*ALPHAGF); betax = auxevol_ETK_gfs + (grid_size*BETAU0GF); betay = auxevol_ETK_gfs + (grid_size*BETAU1GF); betaz = auxevol_ETK_gfs + (grid_size*BETAU2GF); kxx = auxevol_ETK_gfs + (grid_size*KDD00GF); kxy = auxevol_ETK_gfs + (grid_size*KDD01GF); kxz = auxevol_ETK_gfs + (grid_size*KDD02GF); kyy = auxevol_ETK_gfs + (grid_size*KDD11GF); kyz = auxevol_ETK_gfs + (grid_size*KDD12GF); kzz = auxevol_ETK_gfs + (grid_size*KDD22GF); Shifted_Kerr_Schild_initial_metric(&params,xx,auxevol_gfs); ShiftedKS_ID(); int all_agree = 1; for(int i0=0;i0<Nxx_plus_2NGHOSTS0;i0++){ for(int i1=0;i1<Nxx_plus_2NGHOSTS1;i1++){ for(int i2=0;i2<Nxx_plus_2NGHOSTS2;i2++){ if(SDA(auxevol_gfs[IDX4S(BETAU0GF, i0,i1,i2)],betax[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)])<10.0){ printf("Quantity betaU0 only agrees with the original GiRaFFE to %.2f digits at i0,i1,i2=%d,%d,%d!\n", SDA(auxevol_gfs[IDX4S(BETAU0GF, i0,i1,i2)],betax[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]),i0,i1,i2); all_agree=0; } if(SDA(auxevol_gfs[IDX4S(BETAU1GF, i0,i1,i2)],betay[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)])<10.0){ printf("Quantity betaU1 only agrees with the original GiRaFFE to %.2f digits at i0,i1,i2=%d,%d,%d!\n", SDA(auxevol_gfs[IDX4S(BETAU1GF, i0,i1,i2)],betay[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]),i0,i1,i2); all_agree=0; } if(SDA(auxevol_gfs[IDX4S(BETAU2GF, 
i0,i1,i2)],betaz[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)])<10.0){ printf("Quantity betaU2 only agrees with the original GiRaFFE to %.2f digits at i0,i1,i2=%d,%d,%d!\n", SDA(auxevol_gfs[IDX4S(BETAU2GF, i0,i1,i2)],betaz[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]),i0,i1,i2); all_agree=0; } if(SDA(auxevol_gfs[IDX4S(GAMMADD00GF, i0,i1,i2)],gxx[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)])<10.0){ printf("Quantity betaU0 only agrees with the original GiRaFFE to %.2f digits at i0,i1,i2=%d,%d,%d!\n", SDA(auxevol_gfs[IDX4S(GAMMADD00GF, i0,i1,i2)],gxx[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]),i0,i1,i2); all_agree=0; } if(SDA(auxevol_gfs[IDX4S(GAMMADD11GF, i0,i1,i2)],gyy[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)])<10.0){ printf("Quantity betaU1 only agrees with the original GiRaFFE to %.2f digits at i0,i1,i2=%d,%d,%d!\n", SDA(auxevol_gfs[IDX4S(GAMMADD11GF, i0,i1,i2)],gyy[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]),i0,i1,i2); all_agree=0; } if(SDA(auxevol_gfs[IDX4S(GAMMADD22GF, i0,i1,i2)],gzz[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)])<10.0){ printf("Quantity betaU2 only agrees with the original GiRaFFE to %.2f digits at i0,i1,i2=%d,%d,%d!\n", SDA(auxevol_gfs[IDX4S(GAMMADD22GF, i0,i1,i2)],gzz[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]),i0,i1,i2); all_agree=0; } //printf("NRPy: %.15e,%.15e,%.15e\n",auxevol_gfs[IDX4S(BETAU0GF, i0,i1,i2)],auxevol_gfs[IDX4S(BETAU1GF, i0,i1,i2)],auxevol_gfs[IDX4S(BETAU2GF, i0,i1,i2)]); //printf("CCTK: %.15e,%.15e,%.15e\n",betax[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)],betay[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)],betaz[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]); //printf("NRPy: %.15e,%.15e,%.15e\n",auxevol_gfs[IDX4S(GAMMADD01GF, i0,i1,i2)],auxevol_gfs[IDX4S(GAMMADD02GF, i0,i1,i2)],auxevol_gfs[IDX4S(GAMMADD12GF, i0,i1,i2)]); //printf("CCTK: %.15e,%.15e,%.15e\n",gxy[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)],gxz[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)],gyz[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]); } } } if(all_agree) printf("All quantities agree at all points!\n"); with open(os.path.join(out_dir,"ShiftedKerrSchild_unit_test.C"), 'a') as file: for gf in gfs_list: file.write(" 
free("+gf+");\n") %%writefile -a $out_dir/ShiftedKerrSchild_unit_test.C free(auxevol_gfs); free(auxevol_ETK_gfs); } ``` <a id='compile_run'></a> ## Step 2.a: Compile and run the code to validate the output \[Back to [top](#toc)\] $$\label{compile_run}$$ Finally, we can compile and run the code we have written. Once run, this code will output the level of agreement between the two codes and some information to help interpret those numbers. ``` import time print("Now compiling, should take ~2 seconds...\n") start = time.time() # cmd.C_compile(os.path.join(out_dir,"ShiftedKerrSchild_unit_test.c"), os.path.join(out_dir,"ShiftedKerrSchild_unit_test")) !g++ -Ofast -fopenmp -march=native -funroll-loops Validation/ShiftedKerrSchild_unit_test.C -o Validation/ShiftedKerrSchild_unit_test -lstdc++ end = time.time() print("Finished in "+str(end-start)+" seconds.\n\n") results_file = "out_ShiftedKerrSchild_test.txt" # os.chdir(out_dir) os.chdir(out_dir) # cmd.Execute(os.path.join("GiRaFFEfood_NRPy_unit_test")) cmd.Execute("ShiftedKerrSchild_unit_test",file_to_redirect_stdout=results_file) os.chdir(os.path.join("../")) ``` Here, we add some emergency brakes so that if the output from the test isn't good, we throw an error to stop the notebook dead in its tracks. This way, our automatic testing infrastructure can let us know if something goes wrong. We will also print the output from the test for convenience's sake. ``` with open(os.path.join(out_dir,results_file),"r") as file: output = file.readline() print(output) if output!="All quantities agree at all points!\n": # If this isn't the first line of this file, something went wrong! sys.exit(1) ``` <a id='latex_pdf_output'></a> # Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] $$\label{latex_pdf_output}$$ The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. 
After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-Start_to_Finish_UnitTest-GiRaFFEfood_NRPy.pdf](Tutorial-Start_to_Finish_UnitTest-GiRaFFEfood_NRPy.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) ``` import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish_UnitTest-GiRaFFEfood_NRPy") ```
github_jupyter
# Aerospace and Defense Portfolio Risk and Returns ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import math import warnings warnings.filterwarnings("ignore") # fix_yahoo_finance is used to fetch data import yfinance as yf yf.pdr_override() # input # Aerospace and Defense symbols = ['LMT','NOC','RTN'] start = '2019-01-01' end = '2020-04-24' df = pd.DataFrame() for s in symbols: df[s] = yf.download(s,start,end)['Adj Close'] from datetime import datetime from dateutil import relativedelta d1 = datetime.strptime(start, "%Y-%m-%d") d2 = datetime.strptime(end, "%Y-%m-%d") delta = relativedelta.relativedelta(d2,d1) print('How many years of investing?') print('%s years' % delta.years) number_of_years = delta.years days = (df.index[-1] - df.index[0]).days days df.head() df.tail() plt.figure(figsize=(12,8)) plt.plot(df) plt.title('Aerospace and Defense Stocks Closing Price') plt.legend(labels=df.columns) # Normalize the data normalize = (df - df.min())/ (df.max() - df.min()) plt.figure(figsize=(18,12)) plt.plot(normalize) plt.title('Aerospace and Defense Stocks Normalize') plt.legend(labels=normalize.columns) stock_rets = df.pct_change().dropna() plt.figure(figsize=(12,8)) plt.plot(stock_rets) plt.title('Aerospace and Defense Stocks Returns') plt.legend(labels=stock_rets.columns) plt.figure(figsize=(12,8)) plt.plot(stock_rets.cumsum()) plt.title('Aerospace and Defense Stocks Returns Cumulative Sum') plt.legend(labels=stock_rets.columns) sns.set(style='ticks') ax = sns.pairplot(stock_rets, diag_kind='hist') nplot = len(stock_rets.columns) for i in range(nplot) : for j in range(nplot) : ax.axes[i, j].locator_params(axis='x', nbins=6, tight=True) ax = sns.PairGrid(stock_rets) ax.map_upper(plt.scatter, color='purple') ax.map_lower(sns.kdeplot, color='blue') ax.map_diag(plt.hist, bins=30) for i in range(nplot) : for j in range(nplot) : ax.axes[i, j].locator_params(axis='x', nbins=6, tight=True) plt.figure(figsize=(7,7)) corr = 
stock_rets.corr() # plot the heatmap sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, cmap="Reds") # Box plot stock_rets.plot(kind='box',figsize=(12,8)) rets = stock_rets.dropna() plt.figure(figsize=(12,8)) plt.scatter(rets.mean(), rets.std(),alpha = 0.5) plt.title('Stocks Risk & Returns') plt.xlabel('Expected returns') plt.ylabel('Risk') plt.grid(which='major') for label, x, y in zip(rets.columns, rets.mean(), rets.std()): plt.annotate( label, xy = (x, y), xytext = (50, 50), textcoords = 'offset points', ha = 'right', va = 'bottom', arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=-0.3')) rets = stock_rets.dropna() area = np.pi*20.0 sns.set(style='darkgrid') plt.figure(figsize=(12,8)) plt.scatter(rets.mean(), rets.std(), s=area) plt.xlabel("Expected Return", fontsize=15) plt.ylabel("Risk", fontsize=15) plt.title("Return vs. Risk for Stocks", fontsize=20) for label, x, y in zip(rets.columns, rets.mean(), rets.std()) : plt.annotate(label, xy=(x,y), xytext=(50, 0), textcoords='offset points', arrowprops=dict(arrowstyle='-', connectionstyle='bar,angle=180,fraction=-0.2'), bbox=dict(boxstyle="round", fc="w")) rest_rets = rets.corr() pair_value = rest_rets.abs().unstack() pair_value.sort_values(ascending = False) # Normalized Returns Data Normalized_Value = ((rets[:] - rets[:].min()) /(rets[:].max() - rets[:].min())) Normalized_Value.head() Normalized_Value.corr() normalized_rets = Normalized_Value.corr() normalized_pair_value = normalized_rets.abs().unstack() normalized_pair_value.sort_values(ascending = False) print("Stock returns: ") print(rets.mean()) print('-' * 50) print("Stock risks:") print(rets.std()) table = pd.DataFrame() table['Returns'] = rets.mean() table['Risk'] = rets.std() table.sort_values(by='Returns') table.sort_values(by='Risk') rf = 0.01 table['Sharpe Ratio'] = (table['Returns'] - rf) / table['Risk'] table table['Max Returns'] = rets.max() table['Min Returns'] = rets.min() table['Median Returns'] = rets.median() 
# Last row of the daily percent-change frame, transposed so each ticker is a row.
total_return = stock_rets[-1:].transpose()
# NOTE(review): despite the column name, this is 100x the FINAL day's percent
# change, not a cumulative return over the whole period -- confirm intent.
table['Total Return'] = 100 * total_return
table
# Geometric mean daily return implied by compounding to total_return over `days`
# (where `days` is the calendar-day span of the index, computed earlier).
table['Average Return Days'] = (1 + total_return)**(1 / days) - 1
table
# Compound Annual Growth Rate from the first to the last adjusted close.
# NOTE(review): annualizes with 252 trading days while `days` counts calendar
# days -- the two conventions are mixed; verify which is intended.
initial_value = df.iloc[0]
ending_value = df.iloc[-1]
table['CAGR'] = ((ending_value / initial_value) ** (252.0 / days)) -1
table
table.sort_values(by='Average Return Days')
```
github_jupyter
``` import numpy as np import pandas as pd import scipy print(f"SciPy version: {scipy.__version__}") from collections import OrderedDict import scipy.sparse as sp import time import random from constants import (DATA_OCT, DATA_NOV, EXPORT_DIR, UX_CONSTANTS, SEED, NEW_USER_ID, NEW_PRODUCT_ID, T, USECOLS, EVENT_THRESHOLD, ALL_DATA_PATH, TRAIN_DATA_PATH, VAL_DATA_PATH, TEST_DATA_PATH, VAL_THRESHOLD, TEST_THRESHOLD) random.seed(SEED) ux_constants = pd.Series(pd.read_csv(UX_CONSTANTS, index_col=0, squeeze=True, header=None), dtype='float32') VIEW = ux_constants['view_to_purchase'] CART = ux_constants['cart_to_purchase'] REMOVE = ux_constants['remove_to_purchase'] PURCHASE = ux_constants['purchase_to_purchase'] def event_to_ux(event): event_weights = { 'view': VIEW, 'cart': CART, 'remove_from_cart': REMOVE, 'purchase': PURCHASE, } return event_weights.get(event, 0) df = pd.concat([pd.read_csv(DATA_OCT, engine='c', sep=',',usecols=USECOLS) ,pd.read_csv(DATA_NOV, engine='c', sep=',',usecols=USECOLS)]) df["event_type"] = df["event_type"].astype("category") df.info() start_time = time.time() # we start the timer after loading the dataframe start_dim = df.shape start_dim print(f"We start with {len(df.user_id.unique()):,} unique users.") ``` # Data Reduction ``` drop_visitors = set(df.user_id.value_counts()[df.user_id.value_counts()<EVENT_THRESHOLD].index) print(f"We will {T.R}drop {len(drop_visitors):,} ({len(drop_visitors)*100/len(df.user_id.unique()):.2f}%) users,{T.E} "+ f"for not meeting the minimum {T.R}{EVENT_THRESHOLD}{T.E} event requirement.") df = df[~df.user_id.isin(drop_visitors)] df.reset_index(inplace=True,drop=True) print(f"This way we have reduced the number of total events by {T.G}{100-len(df)*100/start_dim[0]:.2f}%{T.E}.") new_user_id = pd.DataFrame() new_user_id['user_id']=df.user_id.unique() print(f"We will have {T.B}{len(new_user_id):,} unique users.{T.E}") new_user_id.to_csv(NEW_USER_ID, index = True, header=True) uid_lookup = 
pd.Series(index=new_user_id.user_id,data=new_user_id.index) uid_lookup = uid_lookup.to_dict(OrderedDict) del new_user_id new_product_id = pd.DataFrame() new_product_id['product_id']=df.product_id.unique() print(f"We will have {T.B}{len(new_product_id):,} unique features{T.E} (products for e-commerce).") new_product_id.to_csv(NEW_PRODUCT_ID, index = True, header=True) pid_lookup = pd.Series(index=new_product_id.product_id,data=new_product_id.index) pid_lookup = pid_lookup.to_dict(OrderedDict) del new_product_id ``` # Feature engineering ``` number_of_users = df['user_id'].unique().shape[0] number_of_features = df['product_id'].unique().shape[0] def user_experience_matrix(df): last_index = df.shape[0]-1 # Use np.float32 for torch.cuda.FloatTensor.or np.float16 for torch.cuda.HalfTensor (float64 not recommended) uxm = sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32) print(f" Event | User | Product | Event | Previous | {T.b}New UX{T.E}") for row in df.itertuples(): uid = uid_lookup[row.user_id] pid = pid_lookup[row.product_id] prev_ux = uxm[uid,pid] ux = np.tanh(prev_ux+event_to_ux(row.event_type)) # ux = prev_ux + 1 # test case calculating the number of events between the user-product pair uxm[uid,pid] = ux if (row.Index % 500000 == 0) or (row.Index == last_index): print(f"{row.Index:8} | "+ f"{uid:6} | "+ f"{pid:7} | "+ f"{row.event_type[:4]} | "+ f"{prev_ux:8.5f} | "+ f"{T.b}{ux:8.5f}{T.E}") return uxm uxm = user_experience_matrix(df) print(f"Elapsed time: {time.time()-start_time:.2f} seconds") # we stop the timer before the train-test-validaiton split ``` # Train - test - validation split ``` def save_to_npz(X,path): X = X.tocoo() sp.save_npz(path,X) print(f"{T.G}Sparse matrix saved to: {path}{T.E}") print(f"Train: {VAL_THRESHOLD*100:.2f}% \nValidation: {(1-TEST_THRESHOLD)*100:.2f}% \nTest: {(1-TEST_THRESHOLD)*100:.2f}%") NNZ = uxm.nnz print(f"Number of stored values: {NNZ:,}") uxm_train = sp.dok_matrix.copy(uxm) uxm_val = 
sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32) uxm_test = sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32) rows,cols = uxm_train.nonzero() for row,col in zip(rows,cols): rnd = random.random() if rnd > TEST_THRESHOLD: uxm_test[row,col] = uxm_train[row,col] uxm_train[row,col] = 0 elif rnd > VAL_THRESHOLD: uxm_val[row,col] = uxm_train[row,col] uxm_train[row,col] = 0 print(f"Number of train data values: {uxm_train.nnz:,} ({uxm_train.nnz*100/NNZ:.2f}%)") print(f"Number of validation data values: {uxm_val.nnz:,} ({uxm_val.nnz*100/NNZ:.2f}%)") print(f"Number of test data values: {uxm_test.nnz:,} ({uxm_test.nnz*100/NNZ:.2f}%)") errormessage = '''All datapoints should be in either the train, the test of the validation datasets. The reason might be a change in how .nnz of a DOK matrix (scipy.sparse.dok_matrix) is calculated. In version 1.4.1 SciPy setting the value to zero explicitly (X[i,j]=0) is not counted by .nnz''' assert NNZ - uxm_train.nnz - uxm_val.nnz - uxm_test.nnz == 0, errormessage save_to_npz(uxm,ALL_DATA_PATH) save_to_npz(uxm_train,TRAIN_DATA_PATH) save_to_npz(uxm_val,VAL_DATA_PATH) save_to_npz(uxm_test,TEST_DATA_PATH) ```
github_jupyter
# **Working memory training**: Module allegiance matrix calculation **Last edited:** 04-10-2018 Step 0: Loading libraries -------------------------------- ``` import sys sys.path.append("..") import os %matplotlib inline import scipy.io as sio import numpy as np from nilearn import plotting import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from scipy import stats from fctools import networks, figures #---- matplotlib settings import matplotlib.pyplot as plt plt.style.use('seaborn-white') plt.rcParams['font.family'] = 'Helvetica' ``` Step 1: Getting modules names and color pallete ---------------------------------------- ``` labels = pd.read_csv(f'../support/modules.txt', sep = " ", header = None) power_colors_new = {'AU':'#d182c6', 'CER':'#9fc5e8', 'CO':'#7d009d', 'DA':'#75df33', 'DM':'#ed1126', 'FP':'#f6e838', 'MEM':'#bebab5', 'SAL':'#2a2a2a', 'SOM':'#6ccadf', 'SUB':'#980000', 'UNC':'#f58c00', 'VA':'#00a074', 'VIS':'#5131ac',} modules = sorted(labels[0].values) network_pal = (sns.color_palette(power_colors_new.values())) sns.palplot(sns.color_palette(power_colors_new.values())) network_lut = dict(zip(map(str, np.unique(modules)), network_pal)) network_colors = pd.Series(modules).map(network_lut) network_colors = np.asarray(network_colors) n_roi = len(labels) n_net = len(np.unique(modules)) ``` Step 2: Loading module assignment matrices ------------------------------------------------------------------------------- ``` top_dir = '/home/finc/Dropbox/Projects/LearningBrain/' mat = sio.loadmat(f'{top_dir}data/neuroimaging/03-modularity/dynamic/02-module_assignment/power_modules.mat') idx = np.argsort(labels[0]) module_assignment = mat['modules'] module_assignment = module_assignment[:, :, :, idx, :] ``` Step 3: calculating allegiance matrices ------------------------------------------- ``` # Calculating allegiance matrices (mean over optimizations) n_sub = module_assignment.shape[0] n_ses = module_assignment.shape[1] n_opt = 
module_assignment.shape[2] n_nod = module_assignment.shape[3] P = np.zeros((n_sub, n_ses, n_nod, n_nod)) for i in range(n_sub): print(f'Subject {i+1}') for j in range(n_ses): P[i,j,:,:] = networks.allegiance_matrix_opti(module_assignment[i,j,:,:,:]) np.save(f'{top_dir}data/neuroimaging/03-modularity/dynamic/03-allegiance_matrices/allegiance_matrix_power_opt_mean.npy', P) # Calculating allegiance matrices for each window (mean over optimizations) n_sub = len(module_assignment.shape[0]) n_ses = len(module_assignment.shape[1]) n_nod = len(module_assignment.shape[3]) n_win = len(module_assignment.shape[4]) W = np.zeros((n_sub, n_ses, n_win, n_nod, n_nod)) for i in range(n_sub): print(f'Subject {i+1}') W[i,j,:,:,:] = networks.all_window_allegiance_mean(module_assignment[i, j, :, :, :]) np.save(f'{top_dir}data/neuroimaging/03-modularity/dynamic/03-allegiance_matrices/window_allegiance_matrix_power_dualnback.npy', W) ```
github_jupyter
--- _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._ --- ## Applied Machine Learning, Module 1: A simple classification task ### Import required modules and load data file ``` %matplotlib notebook import numpy as np import matplotlib.pyplot as plt import pandas as pd from sklearn.model_selection import train_test_split fruits = pd.read_table('fruit_data_with_colors.txt') fruits.head() # create a mapping from fruit label value to fruit name to make results easier to interpret lookup_fruit_name = dict(zip(fruits.fruit_label.unique(), fruits.fruit_name.unique())) lookup_fruit_name ``` The file contains the mass, height, and width of a selection of oranges, lemons and apples. The heights were measured along the core of the fruit. The widths were the widest width perpendicular to the height. 
### Examining the data ``` # plotting a scatter matrix from matplotlib import cm X = fruits[['height', 'width', 'mass', 'color_score']] y = fruits['fruit_label'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) cmap = cm.get_cmap('gnuplot') scatter = pd.scatter_matrix(X_train, c= y_train, marker = 'o', s=40, hist_kwds={'bins':15}, figsize=(9,9), cmap=cmap) # plotting a 3D scatter plot from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = fig.add_subplot(111, projection = '3d') ax.scatter(X_train['width'], X_train['height'], X_train['color_score'], c = y_train, marker = 'o', s=100) ax.set_xlabel('width') ax.set_ylabel('height') ax.set_zlabel('color_score') plt.show() ``` ### Create train-test split ``` # For this example, we use the mass, width, and height features of each fruit instance X = fruits[['mass', 'width', 'height']] y = fruits['fruit_label'] # default is 75% / 25% train-test split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) ``` ### Create classifier object ``` from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors = 5) ``` ### Train the classifier (fit the estimator) using the training data ``` knn.fit(X_train, y_train) ``` ### Estimate the accuracy of the classifier on future data, using the test data ``` knn.score(X_test, y_test) ``` ### Use the trained k-NN classifier model to classify new, previously unseen objects ``` # first example: a small fruit with mass 20g, width 4.3 cm, height 5.5 cm fruit_prediction = knn.predict([[20, 4.3, 5.5]]) lookup_fruit_name[fruit_prediction[0]] # second example: a larger, elongated fruit with mass 100g, width 6.3 cm, height 8.5 cm fruit_prediction = knn.predict([[100, 6.3, 8.5]]) lookup_fruit_name[fruit_prediction[0]] ``` ### Plot the decision boundaries of the k-NN classifier ``` from adspy_shared_utilities import plot_fruit_knn plot_fruit_knn(X_train, y_train, 5, 'uniform') # we choose 5 nearest neighbors ``` ### How 
sensitive is k-NN classification accuracy to the choice of the 'k' parameter? ``` k_range = range(1,20) scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors = k) knn.fit(X_train, y_train) scores.append(knn.score(X_test, y_test)) plt.figure() plt.xlabel('k') plt.ylabel('accuracy') plt.scatter(k_range, scores) plt.xticks([0,5,10,15,20]); ``` ### How sensitive is k-NN classification accuracy to the train/test split proportion? ``` t = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2] knn = KNeighborsClassifier(n_neighbors = 5) plt.figure() for s in t: scores = [] for i in range(1,1000): X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1-s) knn.fit(X_train, y_train) scores.append(knn.score(X_test, y_test)) plt.plot(s, np.mean(scores), 'bo') plt.xlabel('Training set proportion (%)') plt.ylabel('accuracy'); ```
github_jupyter
## Dependencies ``` import json, warnings, shutil, glob from jigsaw_utility_scripts import * from scripts_step_lr_schedulers import * from transformers import TFXLMRobertaModel, XLMRobertaConfig from tensorflow.keras.models import Model from tensorflow.keras import optimizers, metrics, losses, layers SEED = 0 seed_everything(SEED) warnings.filterwarnings("ignore") pd.set_option('max_colwidth', 120) pd.set_option('display.float_format', lambda x: '%.4f' % x) ``` ## TPU configuration ``` strategy, tpu = set_up_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync) AUTO = tf.data.experimental.AUTOTUNE ``` # Load data ``` database_base_path = '/kaggle/input/jigsaw-data-split-roberta-192-ratio-2-clean-polish/' k_fold = pd.read_csv(database_base_path + '5-fold.csv') valid_df = pd.read_csv("/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv", usecols=['comment_text', 'toxic', 'lang']) print('Train samples: %d' % len(k_fold)) display(k_fold.head()) print('Validation samples: %d' % len(valid_df)) display(valid_df.head()) base_data_path = 'fold_1/' fold_n = 1 # Unzip files !tar -xf /kaggle/input/jigsaw-data-split-roberta-192-ratio-2-clean-polish/fold_1.tar.gz ``` # Model parameters ``` base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/' config = { "MAX_LEN": 192, "BATCH_SIZE": 128, "EPOCHS": 3, "LEARNING_RATE": 1e-5, "ES_PATIENCE": None, "base_model_path": base_path + 'tf-xlm-roberta-large-tf_model.h5', "config_path": base_path + 'xlm-roberta-large-config.json' } with open('config.json', 'w') as json_file: json.dump(json.loads(json.dumps(config)), json_file) config ``` ## Learning rate schedule ``` lr_min = 1e-7 lr_start = 0 lr_max = config['LEARNING_RATE'] step_size = len(k_fold[k_fold[f'fold_{fold_n}'] == 'train']) // config['BATCH_SIZE'] total_steps = config['EPOCHS'] * step_size hold_max_steps = 0 warmup_steps = step_size * 1 decay = .9997 rng = [i for i in range(0, total_steps, config['BATCH_SIZE'])] y = 
[exponential_schedule_with_warmup(tf.cast(x, tf.float32), warmup_steps, hold_max_steps, lr_start, lr_max, lr_min, decay) for x in rng] sns.set(style="whitegrid") fig, ax = plt.subplots(figsize=(20, 6)) plt.plot(rng, y) print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) ``` # Model ``` module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False) def model_fn(MAX_LEN): input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids') attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask') base_model = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config) last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask}) cls_token = last_hidden_state[:, 0, :] output = layers.Dense(1, activation='sigmoid', name='output')(cls_token) model = Model(inputs=[input_ids, attention_mask], outputs=output) return model ``` # Train ``` # Load data x_train = np.load(base_data_path + 'x_train.npy') y_train = np.load(base_data_path + 'y_train_int.npy').reshape(x_train.shape[1], 1).astype(np.float32) x_valid = np.load(base_data_path + 'x_valid.npy') y_valid = np.load(base_data_path + 'y_valid_int.npy').reshape(x_valid.shape[1], 1).astype(np.float32) x_valid_ml = np.load(database_base_path + 'x_valid.npy') y_valid_ml = np.load(database_base_path + 'y_valid.npy').reshape(x_valid_ml.shape[1], 1).astype(np.float32) #################### ADD TAIL #################### x_train_tail = np.load(base_data_path + 'x_train_tail.npy') y_train_tail = np.load(base_data_path + 'y_train_int_tail.npy').reshape(x_train_tail.shape[1], 1).astype(np.float32) x_train = np.hstack([x_train, x_train_tail]) y_train = np.vstack([y_train, y_train_tail]) step_size = x_train.shape[1] // config['BATCH_SIZE'] valid_step_size = x_valid_ml.shape[1] // config['BATCH_SIZE'] valid_2_step_size = x_valid.shape[1] // config['BATCH_SIZE'] # Build TF 
datasets train_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO, seed=SEED)) valid_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED)) valid_2_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid, y_valid, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED)) train_data_iter = iter(train_dist_ds) valid_data_iter = iter(valid_dist_ds) valid_2_data_iter = iter(valid_2_dist_ds) # Step functions @tf.function def train_step(data_iter): def train_step_fn(x, y): with tf.GradientTape() as tape: probabilities = model(x, training=True) loss = loss_fn(y, probabilities) grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) train_auc.update_state(y, probabilities) train_loss.update_state(loss) for _ in tf.range(step_size): strategy.experimental_run_v2(train_step_fn, next(data_iter)) @tf.function def valid_step(data_iter): def valid_step_fn(x, y): probabilities = model(x, training=False) loss = loss_fn(y, probabilities) valid_auc.update_state(y, probabilities) valid_loss.update_state(loss) for _ in tf.range(valid_step_size): strategy.experimental_run_v2(valid_step_fn, next(data_iter)) @tf.function def valid_2_step(data_iter): def valid_step_fn(x, y): probabilities = model(x, training=False) loss = loss_fn(y, probabilities) valid_2_auc.update_state(y, probabilities) valid_2_loss.update_state(loss) for _ in tf.range(valid_2_step_size): strategy.experimental_run_v2(valid_step_fn, next(data_iter)) # Train model with strategy.scope(): model = model_fn(config['MAX_LEN']) lr = lambda: exponential_schedule_with_warmup(tf.cast(optimizer.iterations, tf.float32), warmup_steps=warmup_steps, lr_start=lr_start, lr_max=lr_max, decay=decay) optimizer = optimizers.Adam(learning_rate=lr) loss_fn = losses.binary_crossentropy train_auc = 
metrics.AUC() valid_auc = metrics.AUC() valid_2_auc = metrics.AUC() train_loss = metrics.Sum() valid_loss = metrics.Sum() valid_2_loss = metrics.Sum() metrics_dict = {'loss': train_loss, 'auc': train_auc, 'val_loss': valid_loss, 'val_auc': valid_auc, 'val_2_loss': valid_2_loss, 'val_2_auc': valid_2_auc} history = custom_fit_2(model, metrics_dict, train_step, valid_step, valid_2_step, train_data_iter, valid_data_iter, valid_2_data_iter, step_size, valid_step_size, valid_2_step_size, config['BATCH_SIZE'], config['EPOCHS'], config['ES_PATIENCE'], save_last=False) # model.save_weights('model.h5') # Make predictions # x_train = np.load(base_data_path + 'x_train.npy') # x_valid = np.load(base_data_path + 'x_valid.npy') x_valid_ml_eval = np.load(database_base_path + 'x_valid.npy') # train_preds = model.predict(get_test_dataset(x_train, config['BATCH_SIZE'], AUTO)) # valid_preds = model.predict(get_test_dataset(x_valid, config['BATCH_SIZE'], AUTO)) valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO)) # k_fold.loc[k_fold[f'fold_{fold_n}'] == 'train', f'pred_{fold_n}'] = np.round(train_preds) # k_fold.loc[k_fold[f'fold_{fold_n}'] == 'validation', f'pred_{fold_n}'] = np.round(valid_preds) valid_df[f'pred_{fold_n}'] = valid_ml_preds # Fine-tune on validation set #################### ADD TAIL #################### x_valid_ml_tail = np.hstack([x_valid_ml, np.load(database_base_path + 'x_valid_tail.npy')]) y_valid_ml_tail = np.vstack([y_valid_ml, y_valid_ml]) valid_step_size_tail = x_valid_ml_tail.shape[1] // config['BATCH_SIZE'] # Build TF datasets train_ml_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_valid_ml_tail, y_valid_ml_tail, config['BATCH_SIZE'], AUTO, seed=SEED)) train_ml_data_iter = iter(train_ml_dist_ds) # Fine-tune on validation set history_ml = custom_fit_2(model, metrics_dict, train_step, valid_step, valid_2_step, train_ml_data_iter, valid_data_iter, valid_2_data_iter, valid_step_size_tail, 
valid_step_size, valid_2_step_size, config['BATCH_SIZE'], 2, config['ES_PATIENCE'], save_last=False) # Join history for key in history_ml.keys(): history[key] += history_ml[key] model.save_weights('model.h5') # Make predictions valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO)) valid_df[f'pred_ml_{fold_n}'] = valid_ml_preds ### Delete data dir shutil.rmtree(base_data_path) ``` ## Model loss graph ``` plot_metrics_2(history) ``` # Model evaluation ``` # display(evaluate_model_single_fold(k_fold, fold_n, label_col='toxic_int').style.applymap(color_map)) ``` # Confusion matrix ``` # train_set = k_fold[k_fold[f'fold_{fold_n}'] == 'train'] # validation_set = k_fold[k_fold[f'fold_{fold_n}'] == 'validation'] # plot_confusion_matrix(train_set['toxic_int'], train_set[f'pred_{fold_n}'], # validation_set['toxic_int'], validation_set[f'pred_{fold_n}']) ``` # Model evaluation by language ``` display(evaluate_model_single_fold_lang(valid_df, fold_n).style.applymap(color_map)) # ML fine-tunned preds display(evaluate_model_single_fold_lang(valid_df, fold_n, pred_col='pred_ml').style.applymap(color_map)) ``` # Visualize predictions ``` print('English validation set') display(k_fold[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(10)) print('Multilingual validation set') display(valid_df[['comment_text', 'toxic'] + [c for c in valid_df.columns if c.startswith('pred')]].head(10)) ``` # Test set predictions ``` x_test = np.load(database_base_path + 'x_test.npy') test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE'], AUTO)) submission = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv') submission['toxic'] = test_preds submission.to_csv('submission.csv', index=False) display(submission.describe()) display(submission.head(10)) ```
github_jupyter
# Data pre-processing steps 1. Remove columns that contain "Call" data 2. Transpose the dataframe so that each row is a patient and each column is a gene 3. Remove gene description and set the gene accession numbers as the column headers 4. Merge the data (expression values) with the class labels (patient numbers) ``` import itertools import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy testfile='../input/data_set_ALL_AML_independent.csv' trainfile='../input/data_set_ALL_AML_train.csv' patient_cancer='../input/actual.csv' train = pd.read_csv(trainfile) test = pd.read_csv(testfile) patient_cancer = pd.read_csv(patient_cancer) train.head() # Remove "call" columns from training a test dataframes train_keepers = [col for col in train.columns if "call" not in col] test_keepers = [col for col in test.columns if "call" not in col] train = train[train_keepers] test = test[test_keepers] train.head() # Transpose the columns and rows so that genes become features and rows become observations train = train.T test = test.T train.head() # Clean up the column names for training data train.columns = train.iloc[1] train = train.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric) # Clean up the column names for training data test.columns = test.iloc[1] test = test.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric) train.head() ``` ### Combine the data (gene expression) with class labels (patient numbers) ``` # Reset the index. 
The indexes of two dataframes need to be the same before you combine them train = train.reset_index(drop=True) # Subset the first 38 patient's cancer types pc_train = patient_cancer[patient_cancer.patient <= 38].reset_index(drop=True) # Combine dataframes for first 38 patients: Patient number + cancer type + gene expression values train = pd.concat([pc_train,train], axis=1) # Handle the test data for patients 38 through 72 # Clean up the index test = test.reset_index(drop=True) # Subset the last patient's cancer types to test pc_test = patient_cancer[patient_cancer.patient > 38].reset_index(drop=True) # Combine dataframes for last patients: Patient number + cancer type + gene expression values test = pd.concat([pc_test,test], axis=1) ``` # EDA --- There's a bunch of data, so to speed things up, only using a small sample of the training data for the EDA. ``` sample = train.iloc[:,2:].sample(n=100, axis=1) sample["cancer"] = train.cancer sample.describe().round() from sklearn import preprocessing ``` ### Distribution of the random sample before standardizing --- ``` sample = sample.drop("cancer", axis=1) sample.plot(kind="hist", legend=None, bins=20, color='k') sample.plot(kind="kde", legend=None); ``` ### Distribution of the random sample after standardizing --- ``` sample_scaled = pd.DataFrame(preprocessing.scale(sample)) sample_scaled.plot(kind="hist", normed=True, legend=None, bins=10, color='k') sample_scaled.plot(kind="kde", legend=None); ``` # Process the full set --- ``` # StandardScaler to remove mean and scale to unit variance from sklearn.preprocessing import StandardScaler scaler = StandardScaler().fit(train.iloc[:,2:]) scaled_train = scaler.transform(train.iloc[:,2:]) scaled_test = scaler.transform(test.iloc[:,2:]) x_train = train.iloc[:,2:] y_train = train.iloc[:,1] x_test = test.iloc[:,2:] y_test = test.iloc[:,1] ``` # Classifiers --- ``` # Grid Search for tuning parameters from sklearn.model_selection import GridSearchCV # RandomizedSearch for tuning 
(possibly faster than GridSearch) from sklearn.model_selection import RandomizedSearchCV # Bayessian optimization supposedly faster than GridSearch from bayes_opt import BayesianOptimization # Metrics from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, log_loss ## Models from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier ``` # Helper functions ``` # CHERCHEZ FOR PARAMETERS def cherchez(estimator, param_grid, search): """ This is a helper function for tuning hyperparameters using teh two search methods. Methods must be GridSearchCV or RandomizedSearchCV. Inputs: estimator: Logistic regression, SVM, KNN, etc param_grid: Range of parameters to search search: Grid search or Randomized search Output: Returns the estimator instance, clf """ try: if search == "grid": clf = GridSearchCV( estimator=estimator, param_grid=param_grid, scoring=None, n_jobs=-1, cv=10, verbose=0, return_train_score=True ) elif search == "random": clf = RandomizedSearchCV( estimator=estimator, param_distributions=param_grid, n_iter=10, n_jobs=-1, cv=10, verbose=0, random_state=1, return_train_score=True ) except: print('Search argument has to be "grid" or "random"') sys.exit(0) # Fit the model clf.fit(X=scaled_train, y=y_train) return clf # Function for plotting the confusion matrices def plot_confusion_matrix(cm, title="Confusion Matrix"): """ Plots the confusion matrix. 
Modified verison from http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html Inputs: cm: confusion matrix title: Title of plot """ classes=["AML", "ALL"] plt.imshow(cm, interpolation='nearest', cmap=plt.cm.bone) plt.title(title) tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=0) plt.yticks(tick_marks, classes) plt.ylabel('Actual') plt.xlabel('Predicted') thresh = cm.mean() for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j]), horizontalalignment="center", color="white" if cm[i, j] < thresh else "black") ``` # Models being tested 1. Logisitc Regresison - Using Grid search and Randomized search for tuning hyperparameters 2. C-Support Vector Classification (SVM) - Using Grid search and Randomized search for tuning hyperparameters 3. K-Nearest Neighbors Classifier - Using Grid search and Randomized search for tuning hyperparameters 4. Decision Tree Classifier - Using only Grid search ``` # Logistic Regression # Paramaters logreg_params = {} logreg_params["C"] = [0.01, 0.1, 10, 100] logreg_params["fit_intercept"] = [True, False] logreg_params["warm_start"] = [True,False] logreg_params["random_state"] = [1] lr_dist = {} lr_dist["C"] = scipy.stats.expon(scale=.01) lr_dist["fit_intercept"] = [True, False] lr_dist["warm_start"] = [True,False] lr_dist["random_state"] = [1] logregression_grid = cherchez(LogisticRegression(), logreg_params, search="grid") acc = accuracy_score(y_true=y_test, y_pred=logregression_grid.predict(scaled_test)) cfmatrix_grid = confusion_matrix(y_true=y_test, y_pred=logregression_grid.predict(scaled_test)) print("**Grid search results**") print("Best training accuracy:\t", logregression_grid.best_score_) print("Test accuracy:\t", acc) logregression_random = cherchez(LogisticRegression(), lr_dist, search="random") acc = accuracy_score(y_true=y_test, y_pred=logregression_random.predict(scaled_test)) cfmatrix_rand = 
confusion_matrix(y_true=y_test, y_pred=logregression_random.predict(scaled_test)) print("**Random search results**") print("Best training accuracy:\t", logregression_random.best_score_) print("Test accuracy:\t", acc) plt.subplots(1,2) plt.subplots_adjust(left=-0.5, bottom=None, right=None, top=None, wspace=0.5, hspace=None) plot_confusion_matrix(cfmatrix_rand, title="Random Search Confusion Matrix") plt.subplot(121) plot_confusion_matrix(cfmatrix_grid, title="Grid Search Confusion Matrix") # SVM svm_param = { "C": [.01, .1, 1, 5, 10, 100], "gamma": [0, .01, .1, 1, 5, 10, 100], "kernel": ["rbf"], "random_state": [1] } svm_dist = { "C": scipy.stats.expon(scale=.01), "gamma": scipy.stats.expon(scale=.01), "kernel": ["rbf"], "random_state": [1] } svm_grid = cherchez(SVC(), svm_param, "grid") acc = accuracy_score(y_true=y_test, y_pred=svm_grid.predict(scaled_test)) cfmatrix_grid = confusion_matrix(y_true=y_test, y_pred=svm_grid.predict(scaled_test)) print("**Grid search results**") print("Best training accuracy:\t", svm_grid.best_score_) print("Test accuracy:\t", acc) svm_random = cherchez(SVC(), svm_dist, "random") acc = accuracy_score(y_true=y_test, y_pred=svm_random.predict(scaled_test)) cfmatrix_rand = confusion_matrix(y_true=y_test, y_pred=svm_random.predict(scaled_test)) print("**Random search results**") print("Best training accuracy:\t", svm_random.best_score_) print("Test accuracy:\t", acc) plt.subplots(1,2) plt.subplots_adjust(left=-0.5, bottom=None, right=None, top=None, wspace=0.5, hspace=None) plot_confusion_matrix(cfmatrix_rand, title="Random Search Confusion Matrix") plt.subplot(121) plot_confusion_matrix(cfmatrix_grid, title="Grid Search Confusion Matrix") # KNN knn_param = { "n_neighbors": [i for i in range(1,30,5)], "weights": ["uniform", "distance"], "algorithm": ["ball_tree", "kd_tree", "brute"], "leaf_size": [1, 10, 30], "p": [1,2] } knn_dist = { "n_neighbors": scipy.stats.randint(1,33), "weights": ["uniform", "distance"], "algorithm": ["ball_tree", 
"kd_tree", "brute"], "leaf_size": scipy.stats.randint(1,1000), "p": [1,2] } knn_grid = cherchez(KNeighborsClassifier(), knn_param, "grid") acc = accuracy_score(y_true=y_test, y_pred=knn_grid.predict(scaled_test)) cfmatrix_grid = confusion_matrix(y_true=y_test, y_pred=svm_grid.predict(scaled_test)) print("**Grid search results**") print("Best training accuracy:\t", knn_grid.best_score_) print("Test accuracy:\t", acc) knn_random = cherchez(KNeighborsClassifier(), knn_dist, "random") acc = accuracy_score(y_true=y_test, y_pred=knn_random.predict(scaled_test)) cfmatrix_rand = confusion_matrix(y_true=y_test, y_pred=knn_random.predict(scaled_test)) print("**Random search results**") print("Best training accuracy:\t", knn_random.best_score_) print("Test accuracy:\t", acc) plt.subplots(1,2) plt.subplots_adjust(left=-0.5, bottom=None, right=None, top=None, wspace=0.5, hspace=None) plot_confusion_matrix(cfmatrix_rand, title="Random Search Confusion Matrix") plt.subplot(121) plot_confusion_matrix(cfmatrix_grid, title="Grid Search Confusion Matrix") # Decision tree classifier dtc_param = { "max_depth": [None], "min_samples_split": [2], "min_samples_leaf": [1], "min_weight_fraction_leaf": [0.], "max_features": [None], "random_state": [4], "max_leaf_nodes": [None], # None = infinity or int "presort": [True, False] } dtc_grid = cherchez(DecisionTreeClassifier(), dtc_param, "grid") acc = accuracy_score(y_true=y_test, y_pred=dtc_grid.predict(scaled_test)) cfmatrix_grid = confusion_matrix(y_true=y_test, y_pred=dtc_grid.predict(scaled_test)) print("**Grid search results**") print("Best training accuracy:\t", dtc_grid.best_score_) print("Test accuracy:\t", acc) plot_confusion_matrix(cfmatrix_grid, title="Decision Tree Confusion Matrix") ```
github_jupyter
# 1A.e - Correction de l'interrogation écrite du 14 novembre 2014 coût algorithmique, calcul de séries mathématiques ``` from jyquickhelper import add_notebook_menu add_notebook_menu() ``` ## Enoncé 1 ### Q1 Le code suivant produit une erreur. Corrigez le programme. ``` nbs = [ 1, 5, 4, 7 ] # for n in nbs: # s += n # ``` L'objectif de ce petit programme est de calculer la somme des éléments de la liste ``nbs``. L'exception est déclenché la variable ``s`` n'est jamais créé. Il manque l'instruction ``s=0``. ``` nbs = [ 1, 5, 4, 7 ] s = 0 for n in nbs: s += n s ``` ### Q2 Que vaut ``nbs`` dans le programme suivant : ``` def f(x) : return x%2 nbs = { i:f(i) for i in range(0,5) } nbs ``` ### Q3 On considère le programme suivant, il affiche ``None``, pourquoi ? ``` def ma_fonction(x1,y1,x2,y2): d = (x1-x2)**2 +(y1-y2)**2 print(d) d = ma_fonction(0,0,1,1) print(d) ``` Le ``2`` correspond au premier ``print(d)``, le ``None`` correspond au second. Pour s'en convaincre, il suffit d'ajouter quelques caractères supplémentaires : ``` def ma_fonction(x1,y1,x2,y2): d = (x1-x2)**2 +(y1-y2)**2 print("A",d) d = ma_fonction(0,0,1,1) print("B",d) ``` Donc la variable ``d`` en dehors de la fonction vaut ``None``, cela veut que le résultat de la fonction ``ma_fonction`` est ``None``. Il peut être ``None`` soit parce que la fonction contient explicitiement l'instruction ``return None`` soit parce qu'aucune instruction ``return`` n'ext exécutée. C'est le cas ici puisqu'il n'y a qu'une instruction ``print``. On remplace ``print`` par ``return``. ``` def ma_fonction(x1,y1,x2,y2): d = (x1-x2)**2 +(y1-y2)**2 return d d = ma_fonction(0,0,1,1) print(d) ``` ### Q4 Que vaut ``n`` en fonction de ``N`` ? ``` n = 0 N = 100 for i in range(0,N): for k in range(0,i): n += N n ``` Pour être plus précis, 495000 = $\frac{N^2(N-1)}{2}$. ### Q5 Une des lignes suivantes provoque une erreur, laquelle ? 
``` a = 3 # b = "6" # a+b # a*b # ``` Lorsqu'on multiplie une chaîne de caractères par un entier, cela revient à la répliquer : ``3*"6" = "666"``. L'addition est impossible car on ne peut pas additionner un nombre avec une chaîne de caractères. ## Enoncé 2 ### Q1 Le code suivant produit une erreur. Proposez une correction. ``` nbs = ( 1, 5, 4, 7 ) # nbs[0] = 0 # ``` Le type [tuple](https://docs.python.org/3.4/tutorial/datastructures.html#tuples-and-sequences) sont [immutable](http://fr.wikipedia.org/wiki/Objet_immuable). On ne peut pas le modifier. Mais les listes peuvent l'être. ``` nbs = [ 1, 5, 4, 7 ] nbs[0] = 0 nbs ``` ### Q2 Que vaut ``c`` ? ``` d = {4: 'quatre'} c = d.get('4', None) print(c) ``` La méthode [get](https://docs.python.org/3.4/library/stdtypes.html#dict.get) retourne la valeur associée à une clé ou une autre valeur (ici ``None``) si elle ne s'y trouve pas. La raison pour laquelle le résultat est ``None`` ici est que '4' != 4. La clé '4' ne fait pas partie du dictionnaire. ### Q3 Que vaut ``x`` ? ``` N = 8 s = 0 while N > 0 : for i in range(N): s += 1 N //= 2 x = (s+1)//2 x ``` A chaque passage dans la boucle ``for``, on ajoute ``N`` à ``s``. A chaque passage dans la boucle ``while``, on divise ``N`` par 2. Donc, après la boucle ``while``, $s = N + N/2 + N/4 + N/8 + ...$. On répète cela jusqu'à ce que $N / 2^k$ soit plus grand que 0. Or, les divisions sont entières (symbole ``//``), ``1//2`` vaut 0. La condition devient jusqu'à ce que $N / 2^k <1$. Pour le reste, c'est une suite géométrique. Si on pose $N=2^k$, on calcule donc la somme : $$s = 2^k + 2 ^{k-1} + ... + 1 = \sum_{i=1}^{k} 2^i = \frac{2^{k+1}-1}{2-1} = 2^{k+1}-1$$ Et comme : $$x = \frac{s+1}{2} = 2^k = N$$ ### Q4 Que vaut ``c`` ? ``` l = ['a', 'b', 'c'] c = l[1] c ``` ### Q5 Par quoi faut-il remplacer les ``???`` pour avoir l'erreur ci-dessous ? 
``` def fonction(N): li = None # on évite la variable l pour ne pas la confondre avec 1 for i in range(N): if li is None: li = [ ] li.append(i) return li ma_liste = fonction(0) ma_liste.append(-1) ``` Cette erreur se produit car ``ma_liste`` vaut ``None``. Si la fonction ``fonction`` retourne ``None``, c'est que l'instruction ``l = [ ]`` n'est jamais exécutée, donc que la condition ``if l is None`` n'est jamais vérifiée. On ne passe donc jamais dans la boucle ``for`` et ceci arrive si ``N`` est négatif ou nul. ## Enoncé 3 ### Q1 Que se passe-t-il ? ``` l = [ 0, 1,2,3] for i in range(len(l)): print(i) del l[i] # ``` L'erreur est due au fait que la boucle parcourt la liste en même temps qu'elle supprime des éléments. Le résultat est souvent une erreur. On vérifie en affichant ``i`` et ``l``. ``` l = [ 0, 1,2,3] for i in range(len(l)): print("i=",i,"l=",l) del l[i] # ``` ### Q2 Que vaut ``a`` ? ``` a = 2 for i in range(1,5): a += a a ``` La variable ``a`` double à chaque fois qu'on passe dans la boucle. On y passe **4** fois et on part de ``a=2``. Donc : $2*2*2*2*2=2^5=32$. ### Q3 Que vaut ``y`` ? ``` x = 2.67 y = int ( x * 2 ) / 2 y ``` La fonction revient à arrondir au demi inférieur, donc $2.5$. ### Q4 Combien d'étoiles le programme suivant affiche ? ``` import random def moyenne(l): s = 0 for x in l : print("*") s += x return s / len(l) def variance(l): return sum ( [ (x - moyenne(l))**2 for x in l ] ) / len(l) l = [ random.random() for i in range(0,100) ] print(variance(l)**0.5) ``` C'est un peu long à afficher, modifions le programme pour compter les étoiles plutôt que de les afficher. ``` star = 0 def moyenne(l): global star s = 0 for x in l : star += 1 s += x return s / len(l) def variance(l): return sum ( [ (x - moyenne(l))**2 for x in l ] ) / len(l) l = [ random.random() for i in range(0,100) ] print(variance(l)**0.5) print("star=",star) ``` Si $n$ est la longueur de la liste ``l``, le coût de la fonction ``moyenne`` est $O(n)$. 
Le coût de la fonction ``variance`` est $n$ fois le coût de la fonction ``moyenne``, soit $O(n^2)$. Celle-ci pourrait être beaucoup plus efficace en écrivant : ``` star = 0 def moyenne(l): global star s = 0 for x in l : star += 1 s += x return s / len(l) def variance(l): m = moyenne(l) # on mémorise le résultat return sum ( [ (x - m)**2 for x in l ] ) / len(l) l = [ random.random() for i in range(0,100) ] print(variance(l)**0.5) print("star=",star) ``` ### Q5 Que vaut ``x`` ? ``` import random x = random.randint(0,100) while x != 50: x = random.randint(0,100) x ``` ``x`` vaut nécessairement 50 puisque c'est la seule valeur qui permette de sortir de la boucle.
github_jupyter
``` import time import glob import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn.metrics import confusion_matrix, accuracy_score, classification_report from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC %matplotlib inline plt.style.use('seaborn-dark-palette') import warnings warnings.filterwarnings('ignore') file = glob.iglob('*.csv') df = pd.read_csv(*file) print(f'The dimension of the data is - {df.shape}') df.head() df.tail() X = df.iloc[:, :-1].values Y = df.iloc[:, -1].values X Y print("Size of X: {}".format(X.shape)) print("Size of Y: {}".format(Y.shape)) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=0, shuffle=True) print("Size of X_train: {}".format(X_train.shape)) print("Size of X_test: {}".format(X_test.shape)) print("Size of Y_train: {}".format(Y_train.shape)) print("Size of Y_test: {}".format(Y_test.shape)) sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) classifier = SVC(kernel = 'rbf') classifier.fit(X_train, Y_train) y_pred = classifier.predict(X_test) y_pred cm = confusion_matrix(Y_test, y_pred) cm acc = accuracy_score(Y_test, y_pred) print(f"The accuracy in percentage - {acc*100}%") report = classification_report(Y_test, y_pred) print(report) acc = cross_val_score(estimator = classifier, X = X_train, y = Y_train, n_jobs = -1, verbose = 0, cv = 10) print(f"Accuracy Score: {acc.mean()*100:.3f}%") print(f"Standard Deviation: {acc.std()*100:.2f} %") start = time.time() parameters = [{'C': [0.25, 0.5, 0.75, 1], 'kernel': ['linear']}, {'C': [0.25, 0.5, 0.75, 1], 'kernel': ['rbf'], 'gamma': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]}] grid_search = GridSearchCV(estimator = classifier, param_grid = parameters, scoring = 
'accuracy', n_jobs = -1, cv = 10, verbose = 1 ) grid_search.fit(X_train, Y_train) best_accuracy = grid_search.best_score_ best_parameters = grid_search.best_params_ print(f"Accuracy Score: {best_accuracy*100:.3f}%") print(f"Best Parameters: {best_parameters}") end = time.time() print(f"Total Time Taken {end - start}") # Training Set figure = plt.figure(figsize = (10,10)) x_set, y_set = X_train, Y_train X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step = 0.01), np.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01)) plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.4, cmap = ListedColormap(('red', 'green'))) for i, j in enumerate(np.unique(y_set)): plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1], color = ListedColormap(('red', 'green'))(i), s = 15, marker = '*', label = j ) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) plt.title('Kernel - SVM Classifier (Training Set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() # Visuaizing the test case result figure = plt.figure(figsize = (10,10)) x_set, y_set = X_test, Y_test X1, X2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step = 0.01), np.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01)) plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), cmap = ListedColormap(('red', 'green')), alpha = 0.4 ) for i, j in enumerate(np.unique(y_set)): plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1 ], color = ListedColormap(('red', 'green'))(i), s = 15, label = j, marker = '^' ) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) plt.title("Kernel SVM - Test Case") plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() ```
github_jupyter
# Spark JDBC to Databases - [Overview](#spark-jdbc-overview) - [Setup](#spark-jdbc-setup) - [Define Environment Variables](#spark-jdbc-define-envir-vars) - [Initiate a Spark JDBC Session](#spark-jdbc-init-session) - [Load Driver Packages Dynamically](#spark-jdbc-init-dynamic-pkg-load) - [Load Driver Packages Locally](#spark-jdbc-init-local-pkg-load) - [Connect to Databases Using Spark JDBC](#spark-jdbc-connect-to-dbs) - [Connect to a MySQL Database](#spark-jdbc-to-mysql) - [Connecting to a Public MySQL Instance](#spark-jdbc-to-mysql-public) - [Connecting to a Test or Temporary MySQL Instance](#spark-jdbc-to-mysql-test-or-temp) - [Connect to a PostgreSQL Database](#spark-jdbc-to-postgresql) - [Connect to an Oracle Database](#spark-jdbc-to-oracle) - [Connect to an MS SQL Server Database](#spark-jdbc-to-ms-sql-server) - [Connect to a Redshift Database](#spark-jdbc-to-redshift) - [Cleanup](#spark-jdbc-cleanup) - [Delete Data](#spark-jdbc-delete-data) - [Release Spark Resources](#spark-jdbc-release-spark-resources) <a id="spark-jdbc-overview"></a> ## Overview Spark SQL includes a data source that can read data from other databases using Java database connectivity (**JDBC**). The results are returned as a Spark DataFrame that can easily be processed in Spark SQL or joined with other data sources. For more information, see the [Spark documentation](https://spark.apache.org/docs/2.3.1/sql-programming-guide.html#jdbc-to-other-databases). <a id="spark-jdbc-setup"></a> ## Setup <a id="spark-jdbc-define-envir-vars"></a> ### Define Environment Variables Begin by initializing some environment variables. > **Note:** You need to edit the following code to assign valid values to the database variables (`DB_XXX`). 
``` import os # Read Iguazio Data Science Platform ("the platform") environment variables into local variables V3IO_USER = os.getenv('V3IO_USERNAME') V3IO_HOME = os.getenv('V3IO_HOME') V3IO_HOME_URL = os.getenv('V3IO_HOME_URL') # Define database environment variables # TODO: Edit the variable definitions to assign valid values for your environment. %env DB_HOST = "" # Database host as a fully qualified name (FQN) %env DB_PORT = "" # Database port number %env DB_DRIVER = "" # Database driver [mysql/postgresql|oracle:thin|sqlserver] %env DB_Name = "" # Database|schema name %env DB_TABLE = "" # Table name %env DB_USER = "" # Database username %env DB_PASSWORD = "" # Database user password os.environ["PYSPARK_SUBMIT_ARGS"] = "--packages mysql:mysql-connector-java:5.1.39 pyspark-shell" ``` <a id="spark-jdbc-init-session"></a> ### Initiate a Spark JDBC Session You can select between two methods for initiating a Spark session with JDBC drivers ("Spark JDBC session"): - [Load Driver Packages Dynamically](#spark-jdbc-init-dynamic-pkg-load) (preferred) - [Load Driver Packages Locally](#spark-jdbc-init-local-pkg-load) <a id="spark-jdbc-init-dynamic-pkg-load"></a> #### Load Driver Packages Dynamically The preferred method for initiating a Spark JDBC session is to load the required JDBC driver packages dynamically from https://spark-packages.org/ by doing the following: 1. Set the `PYSPARK_SUBMIT_ARGS` environment variable to `"--packages <group>:<name>:<version> pyspark-shell"`. 2. Initiate a new spark session. The following example demonstrates how to initiate a Spark session that uses version 5.1.39 of the **mysql-connector-java** MySQL JDBC database driver (`mysql:mysql-connector-java:5.1.39`). ``` from pyspark.conf import SparkConf from pyspark.sql import SparkSession # Configure the Spark JDBC driver package # TODO: Replace `mysql:mysql-connector-java:5.1.39` with the required driver-pacakge information. 
os.environ["PYSPARK_SUBMIT_ARGS"] = "--packages mysql:mysql-connector-java:5.1.39 pyspark-shell" # Initiate a new Spark session; you can change the application name spark = SparkSession.builder.appName("Spark JDBC tutorial").getOrCreate() ``` <a id="spark-jdbc-init-local-pkg-load"></a> #### Load Driver Packages Locally You can also load the Spark JDBC driver package from the local file system of your Iguazio Data Science Platform ("the platform"). It's recommended that you use this method only if you don't have internet connection ("dark-site installations") or if there's no official Spark package for your database. The platform comes pre-deployed with MySQL, PostgreSQL, Oracle, Redshift, and MS SQL Server JDBC driver packages, which are found in the **/spark/3rd_party** directory (**$SPARK_HOME/3rd_party**). You can also copy additional driver packages or different versions of the pre-deployed drivers to the platform &mdash; for example, from the **Data** dashboard page. To load a JDBC driver package locally, you need to set the `spark.driver.extraClassPath` and `spark.executor.extraClassPath` Spark configuration properties to the path to a Spark JDBC driver package in the platform's file system. You can do this using either of the following alternative methods: - Preconfigure the path to the driver package &mdash; 1. In your Spark-configuration file &mdash; **$SPARK_HOME/conf/spark-defaults.conf** &mdash; set the `extraClassPath` configuration properties to the path to the relevant driver package: ```python spark.driver.extraClassPath = "<path to a JDBC driver package>" spark.executor.extraClassPath = "<path to a JDBC driver package>" ``` 2. Initiate a new spark session. - Configure the path to the driver package as part of the initiation of a new Spark session: ```python spark = SparkSession.builder. \ appName("<app name>"). \ config("spark.driver.extraClassPath", "<path to a JDBC driver package>"). 
\ config("spark.executor.extraClassPath", "<path to a JDBC driver package>"). \ getOrCreate() ``` The following example demonstrates how to initiate a Spark session that uses the pre-deployed version 8.0.13 of the **mysql-connector-java** MySQL JDBC database driver (**/spark/3rd_party/mysql-connector-java-8.0.13.jar**) ``` from pyspark.conf import SparkConf from pyspark.sql import SparkSession # METHOD I # Edit your Spark configuration file ($SPARK_HOME/conf/spark-defaults.conf), set the `spark.driver.extraClassPath` and # `spark.executor.extraClassPath` properties to the local file-system path to a pre-deployed Spark JDBC driver package. # Replace "/spark/3rd_party/mysql-connector-java-8.0.13.jar" with the relevant path. # spark.driver.extraClassPath = "/spark/3rd_party/mysql-connector-java-8.0.13.jar" # spark.executor.extraClassPath = "/spark/3rd_party/mysql-connector-java-8.0.13.jar" # # Then, initiate a new Spark session; you can change the application name. # spark = SparkSession.builder.appName("Spark JDBC tutorial").getOrCreate() # METHOD II # Initiate a new Spark Session; you can change the application name. # Set the same `extraClassPath` configuration properties as in Method #1 as part of the initiation command. # Replace "/spark/3rd_party/mysql-connector-java-8.0.13.jar" with the relevant path. # local file-system path to a pre-deployed Spark JDBC driver package spark = SparkSession.builder. \ appName("Spark JDBC tutorial"). \ config("spark.driver.extraClassPath", "/spark/3rd_party/mysql-connector-java-8.0.13.jar"). \ config("spark.executor.extraClassPath", "/spark/3rd_party/mysql-connector-java-8.0.13.jar"). \ getOrCreate() import pprint # Verify your configuration: run the following code to list the current Spark configurations, and check the output to verify that the # `spark.driver.extraClassPath` and `spark.executor.extraClassPath` properties are set to the correct local driver-pacakge path. 
conf = spark.sparkContext._conf.getAll() pprint.pprint(conf) ``` <a id="spark-jdbc-connect-to-dbs"></a> ## Connect to Databases Using Spark JDBC <a id="spark-jdbc-to-mysql"></a> ### Connect to a MySQL Database - [Connecting to a Public MySQL Instance](#spark-jdbc-to-mysql-public) - [Connecting to a Test or Temporary MySQL Instance](#spark-jdbc-to-mysql-test-or-temp) <a id="spark-jdbc-to-mysql-public"></a> #### Connect to a Public MySQL Instance ``` #Loading data from a JDBC source dfMySQL = spark.read \ .format("jdbc") \ .option("url", "jdbc:mysql://mysql-rfam-public.ebi.ac.uk:4497/Rfam") \ .option("dbtable", "Rfam.family") \ .option("user", "rfamro") \ .option("password", "") \ .option("driver", "com.mysql.jdbc.Driver") \ .load() dfMySQL.show() ``` <a id="spark-jdbc-to-mysql-test-or-temp"></a> #### Connect to a Test or Temporary MySQL Instance > **Note:** The following code won't work if the MySQL instance has been shut down. ``` dfMySQL = spark.read \ .format("jdbc") \ .option("url", "jdbc:mysql://172.31.33.215:3306/db1") \ .option("dbtable", "db1.fruit") \ .option("user", "root") \ .option("password", "my-secret-pw") \ .option("driver", "com.mysql.jdbc.Driver") \ .load() dfMySQL.show() ``` <a id="spark-jdbc-to-postgresql"></a> ### Connect to a PostgreSQL Database ``` # Load data from a JDBC source dfPS = spark.read \ .format("jdbc") \ .option("url", "jdbc:postgresql:dbserver") \ .option("dbtable", "schema.tablename") \ .option("user", "username") \ .option("password", "password") \ .load() dfPS2 = spark.read \ .jdbc("jdbc:postgresql:dbserver", "schema.tablename", properties={"user": "username", "password": "password"}) # Specify DataFrame column data types on read dfPS3 = spark.read \ .format("jdbc") \ .option("url", "jdbc:postgresql:dbserver") \ .option("dbtable", "schema.tablename") \ .option("user", "username") \ .option("password", "password") \ .option("customSchema", "id DECIMAL(38, 0), name STRING") \ .load() # Save data to a JDBC source dfPS.write \ 
.format("jdbc") \ .option("url", "jdbc:postgresql:dbserver") \ .option("dbtable", "schema.tablename") \ .option("user", "username") \ .option("password", "password") \ .save() dfPS2.write \ properties={"user": "username", "password": "password"}) # Specify create table column data types on write dfPS.write \ .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)") \ .jdbc("jdbc:postgresql:dbserver", "schema.tablename", properties={"user": "username", "password": "password"}) ``` <a id="spark-jdbc-to-oracle"></a> ### Connect to an Oracle Database ``` # Read a table from Oracle (table: hr.emp) dfORA = spark.read \ .format("jdbc") \ .option("url", "jdbc:oracle:thin:username/password@//hostname:portnumber/SID") \ .option("dbtable", "hr.emp") \ .option("user", "db_user_name") \ .option("password", "password") \ .option("driver", "oracle.jdbc.driver.OracleDriver") \ .load() dfORA.printSchema() dfORA.show() # Read a query from Oracle query = "(select empno,ename,dname from emp, dept where emp.deptno = dept.deptno) emp" dfORA1 = spark.read \ .format("jdbc") \ .option("url", "jdbc:oracle:thin:username/password@//hostname:portnumber/SID") \ .option("dbtable", query) \ .option("user", "db_user_name") \ .option("password", "password") \ .option("driver", "oracle.jdbc.driver.OracleDriver") \ .load() dfORA1.printSchema() dfORA1.show() ``` <a id="spark-jdbc-to-ms-sql-server"></a> ### Connect to an MS SQL Server Database ``` # Read a table from MS SQL Server dfMS = spark.read \ .format("jdbc") \ .options(url="jdbc:sqlserver:username/password@//hostname:portnumber/DB") \ .option("dbtable", "db_table_name") \ .option("user", "db_user_name") \ .option("password", "password") \ .option("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver" ) \ .load() dfMS.printSchema() dfMS.show() ``` <a id="spark-jdbc-to-redshift"></a> ### Connect to a Redshift Database ``` # Read data from a table dfRS = spark.read \ .format("com.databricks.spark.redshift") \ .option("url", 
"jdbc:redshift://redshifthost:5439/database?user=username&password=pass") \ .option("dbtable", "my_table") \ .option("tempdir", "s3n://path/for/temp/data") \ .load() # Read data from a query dfRS = spark.read \ .format("com.databricks.spark.redshift") \ .option("url", "jdbc:redshift://redshifthost:5439/database?user=username&password=pass") \ .option("query", "select x, count(*) my_table group by x") \ .option("tempdir", "s3n://path/for/temp/data") \ .load() # Write data back to a table dfRS.write \ .format("com.databricks.spark.redshift") \ .option("url", "jdbc:redshift://redshifthost:5439/database?user=username&password=pass") \ .option("dbtable", "my_table_copy") \ .option("tempdir", "s3n://path/for/temp/data") \ .mode("error") \ .save() # Use IAM role-based authentication dfRS.write \ .format("com.databricks.spark.redshift") \ .option("url", "jdbc:redshift://redshifthost:5439/database?user=username&password=pass") \ .option("dbtable", "my_table_copy") \ .option("tempdir", "s3n://path/for/temp/data") \ .option("aws_iam_role", "arn:aws:iam::123456789000:role/redshift_iam_role") \ .mode("error") \ .save() ``` <a id="spark-jdbc-cleanup"></a> ## Cleanup Prior to exiting, release disk space, computation, and memory resources consumed by the active session: - [Delete Data](#spark-jdbc-delete-data) - [Release Spark Resources](#spark-jdbc-release-spark-resources) <a id="spark-jdbc-delete-data"></a> ### Delete Data You can optionally delete any of the directories or files that you created. See the instructions in the [Creating and Deleting Container Directories](https://www.iguazio.com/docs/v3.0/data-layer/containers/working-with-containers/#create-delete-container-dirs) tutorial. For example, the following code uses a local file-system command to delete a **&lt;running user&gt;/examples/spark-jdbc** directory in the "users" container. Edit the path, as needed, then remove the comment mark (`#`) and run the code. 
``` # !rm -rf /User/examples/spark-jdbc/ ``` <a id="spark-jdbc-release-spark-resources"></a> ### Release Spark Resources When you're done, run the following command to stop your Spark session and release its computation and memory resources: ``` spark.stop() ```
github_jupyter
``` #experiment name and snapshot folder (used for model persistence) from __future__ import print_function experiment_setup_name = "tutorial.wikicat.advanced" snapshot_path = "./agentnet_snapshots/" !mkdir ./agentnet_snapshots import numpy as np from matplotlib import pyplot as plt %matplotlib inline #theano imports #the problem is too simple to be run on GPU. Seriously. %env THEANO_FLAGS='device=cpu' import theano import theano.tensor as T floatX = theano.config.floatX import lasagne %load_ext autoreload %autoreload 2 ``` # This tutorial builds above the basic tutorial and shows several advanced tools * multi-layer (and in principle, arbitrary) agent memory * different reinforcement learning algorithms * model persistence __[todo: add more]__ # Experiment setup * Here we load an experiment environment (description below) * Designing one from scratch is explained in later tutorials ``` import agentnet.experiments.wikicat as experiment print(experiment.__doc__) #Create an environment with all default parameters env = experiment.WikicatEnvironment() from sklearn.cross_validation import train_test_split attrs, categories, feature_names = env.get_dataset() train_attrs,test_attrs,train_cats,test_cats = train_test_split(attrs,categories,test_size=0.99,random_state=32) print("train size:", train_attrs.shape,train_cats.shape) print("train size:", test_attrs.shape,test_cats.shape) print("features:",feature_names[::20]) env.load_random_batch(train_attrs,train_cats,5) ``` # agent setup * An agent implementation has to contain three parts: * Memory layer(s) * in this case, we train two GRU layers [details below] * Q-values evaluation layers * in this case, a lasagne dense layer based on memory layer * Resolver - acton picker layer * in this case, the resolver has epsilon-greedy policy ### two-layer memory architecture We train two memory states: * first one, based on observations, * second one, based on first one; Note that here we update the second memory layer based on the 
CURRENT state of the first one. Instead, you can try to feed it with a previous state. The q-values are estimated on a concatenated state, effectively on both memory states together, but there is no problem with limiting q-evaluator to only one: just pass the correct gru layer as an incoming layer to the q-evaluator. ### Implementation: We concatenate both memories into 1 state to pass it through the session loop. To perform memory update, we need to slice the concatenated state back into two memory states. We do so by defining an input map function and passing it into agent. We than concatenate two new states back to form a new memory state. ``` theano.function from agentnet.resolver import EpsilonGreedyResolver from agentnet.memory.rnn import GRUCell from agentnet.memory import GRUMemoryLayer from agentnet.agent import Agent import lasagne n_hid_1=512 #first GRU memory n_hid_2=512 #second GRU memory _observation_layer = lasagne.layers.InputLayer([None]+list(env.observation_shapes),name="obs_input") _prev_gru1_layer = lasagne.layers.InputLayer([None,n_hid_1],name="prev_gru1_state_input") _prev_gru2_layer = lasagne.layers.InputLayer([None,n_hid_2],name="prev_gru2_state_input") #memory gru1 = GRUMemoryLayer(n_hid_1, _observation_layer, _prev_gru1_layer, name="gru1") gru2 = GRUMemoryLayer(n_hid_2, gru1, #note that it takes CURRENT gru1 output as input. #replacing that with _prev_gru1_state would imply taking previous one. _prev_gru2_layer, name="gru2") concatenated_memory = lasagne.layers.concat([gru1,gru2]) #q_eval n_actions = len(feature_names) q_eval = lasagne.layers.DenseLayer(concatenated_memory, #taking both memories. 
#Replacing with gru1 or gru2 would mean taking one num_units = n_actions, nonlinearity=lasagne.nonlinearities.linear,name="QEvaluator") #resolver epsilon = theano.shared(np.float32(0.1),"e-greedy.epsilon") resolver = EpsilonGreedyResolver(q_eval,epsilon=epsilon,name="resolver") from collections import OrderedDict #all together agent = Agent(_observation_layer, OrderedDict([ (gru1,_prev_gru1_layer), (gru2,_prev_gru2_layer) ]), q_eval,resolver) #Since it's a single lasagne network, one can get it's weights, output, etc weights = lasagne.layers.get_all_params(resolver,trainable=True) weights ``` ## Agent setup in detail * __Memory layers__ * One-step recurrent layer * takes input and one's previous state * returns new memory state * Can be arbitrary lasagne layer * Several one-step recurrent units are implemented in __agentnet.memory__ * Note that lasagne's default recurrent networks roll for several steps at once * in other words, __using lasagne recurrent units as memory means recurrence inside recurrence__ * Using more than one memory layer is explained in farther tutorials * __Q-values evaluation layer__ * Can be arbitrary lasagne network * returns predicted Q-values for each action * Usually depends on memory as an input * __Resolver__ - action picker * Decides on what action is taken * Normally takes Q-values as input * Currently all experiments require integer output * Several resolver layers are implemented in __agentnet.resolver__ # Interacting with environment * an agent has a method that produces symbolic environment interaction sessions * interactions result in sequences of observations, actions, q-values,etc * one has to pre-define maximum session length. 
* in this case, environment implements an indicator of whether session has ended by current tick * Since this environment also implements Objective methods, it can evaluate rewards for each [batch, time_tick] ``` #produce interaction sequences of length <= 10 (state_seq),observation_seq,agent_state,action_seq,qvalues_seq = agent.get_sessions( env, session_length=10, batch_size=env.batch_size, ) gru1_seq = agent_state[gru1] gru2_seq = agent_state[gru2] #get rewards for all actions rewards_seq = env.get_reward_sequences(state_seq,action_seq) #get indicator whether session is still active is_alive_seq = env.get_whether_alive(observation_seq) ``` # Evaluating loss function * This part is similar to the basic tutorial but for the fact that we use 3-step q-learning * we evaluate the Q-loss manually, so this entire block is in essence equivalent to `qlearning_n_step.get_elementwise_objective(...)` #### Get (prediction,reference) pairs ``` #get reference Qvalues according to Qlearning algorithm from agentnet.learning import qlearning_n_step #gamma - delayed reward coefficient - what fraction of reward is retained if it is obtained one tick later gamma = theano.shared(np.float32(0.95),name = 'q_learning_gamma') reference_Qvalues = qlearning_n_step.get_elementwise_objective(qvalues_seq, action_seq, rewards_seq, n_steps=3, gamma_or_gammas=gamma, return_reference=True) #zero-out future rewards at session end from agentnet.learning.helpers import get_end_indicator end_action_ids = get_end_indicator(is_alive_seq).nonzero() # "set reference Qvalues at end action ids to just the immediate rewards" reference_Qvalues = T.set_subtensor(reference_Qvalues[end_action_ids], rewards_seq[end_action_ids]) #prevent gradient updates over reference Qvalues (since they depend on predicted Qvalues) from theano.gradient import disconnected_grad reference_Qvalues = disconnected_grad(reference_Qvalues) from agentnet.learning.helpers import get_action_Qvalues action_Qvalues = 
get_action_Qvalues(qvalues_seq,action_seq) ``` #### Define loss functions ``` #tensor of elementwise squared errors squared_error = lasagne.objectives.squared_error(reference_Qvalues,action_Qvalues) #zero-out ticks after session ended squared_error = squared_error * is_alive_seq #all code from Evaluation Loss Function beginning to this point is equivalent to #squared_error = qlearning_n_step.get_elementwise_objective(...) #compute average of squared error sums per session mse_loss = squared_error.sum() / is_alive_seq.sum() #regularize network weights from lasagne.regularization import regularize_network_params, l2 reg_l2 = regularize_network_params(resolver,l2)*10**-4 loss = mse_loss + reg_l2 ``` #### Compute weight updates ``` updates = lasagne.updates.adadelta(loss, weights,learning_rate=0.01) ``` #### Some auxilary evaluation metrics ``` mean_session_reward = rewards_seq.sum(axis=1).mean() #... ``` # Compile train and evaluation functions ``` train_fun = theano.function([],[loss,mean_session_reward],updates=updates) evaluation_fun = theano.function([],[loss,mse_loss,reg_l2,mean_session_reward]) ``` # session visualization tools * this is a completely optional step of visualizing agent's sessions as chains of actions * usually useful to get insight on what worked and what din't * in this case, we print strings following pattern * [action_name] ([predicted action qvalue]) -> reward [reference qvalue] | next iteration * plot shows * time ticks over X, abstract values over Y * bold lines are Qvalues for actions * dots on bold lines represent what actions were taken at each moment of time * dashed lines are agent's hidden state neurons * blue vertical line - session end __Warning! 
the visualization tools are underdeveloped and only allow simple operations.__ if you found yourself struggling to make it do what you want for 5 minutes, go write your own tool [and contribute it :)] ``` from agentnet.display.sessions import print_sessions get_printables = theano.function([], [ gru2_seq,qvalues_seq, action_seq,rewards_seq,reference_Qvalues,is_alive_seq ]) def display_sessions(with_plots = False): hidden_log,qvalues_log,actions_log,reward_log, reference_qv_log, is_alive_log = get_printables() print_sessions(qvalues_log,actions_log,reward_log, is_alive_seq = is_alive_log, #hidden_seq=hidden_log, #do not plot hidden since there's too many actions already reference_policy_seq = reference_qv_log, action_names=feature_names, legend = False, #do not show legend since there's too many labeled objects plot_policy = with_plots) #visualize untrained network performance (which is mostly random) env.load_random_batch(train_attrs,train_cats,1) display_sessions(with_plots=True) ``` # Training loop ``` #tools for model persistence from agentnet.utils.persistence import save,load import os from agentnet.display import Metrics score_log = Metrics() #starting epoch epoch_counter = 1 #moving average estimation alpha = 0.1 ma_reward_current = -7. ma_reward_greedy = -7. %%time n_epochs = 100000 batch_size= 10 for i in range(n_epochs): #train env.load_random_batch(train_attrs,train_cats,batch_size) resolver.rng.seed(i) loss,avg_reward = train_fun() ##update resolver's epsilon (chance of random action instead of optimal one) if epoch_counter%10 ==0: current_epsilon = 0.05 + 0.95*np.exp(-epoch_counter/10000.) 
resolver.epsilon.set_value(np.float32(current_epsilon)) ##record current learning progress and show learning curves if epoch_counter%100 ==0: ##update learning curves full_loss, q_loss, l2_penalty, avg_reward_current = evaluation_fun() ma_reward_current = (1-alpha)*ma_reward_current + alpha*avg_reward_current score_log["expected e-greedy reward"][epoch_counter] = ma_reward_current #greedy train resolver.epsilon.set_value(0) avg_reward_greedy = evaluation_fun()[-1] ma_reward_greedy = (1-alpha)*ma_reward_greedy + alpha*avg_reward_greedy score_log["expected greedy reward"][epoch_counter] = ma_reward_greedy #back to epsilon-greedy resolver.epsilon.set_value(np.float32(current_epsilon)) print("epoch %i,loss %.5f, epsilon %.5f, rewards: ( e-greedy %.5f, greedy %.5f) "%( epoch_counter,full_loss,current_epsilon,ma_reward_current,ma_reward_greedy)) print("rec %.3f reg %.3f"%(q_loss,l2_penalty)) if epoch_counter %1000 ==0: print("Learning curves:") score_log.plot() print("Random session examples") env.load_random_batch(train_attrs,train_cats,3) display_sessions(with_plots=False) #save snapshot if epoch_counter %10000 ==0: snap_name = "{}.epoch{}.pcl".format(os.path.join(snapshot_path,experiment_setup_name), epoch_counter) save(resolver,snap_name) print("saved", snap_name) epoch_counter +=1 # Time to drink some coffee! ``` # Evaluating results ``` score_log.plot("final") print("Random session examples") env.load_random_batch(train_attrs,train_cats,10) display_sessions(with_plots=True) #load earlier snapshot. #warning - this overrides the latest network params with earlier ones. #Replace 20000 with some 100000 (or whatever last snapshot epoch) if you wish to load latest snapshot back. snap_name = "{}.epoch{}.pcl".format(os.path.join(snapshot_path,experiment_setup_name), 20000) load(resolver,snap_name) print("Random session examples: early snapshot") env.load_random_batch(train_attrs,train_cats,10) display_sessions(with_plots=True) ```
github_jupyter
# Math Part 1 ``` from __future__ import print_function import tensorflow as tf import numpy as np from datetime import date date.today() author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises" tf.__version__ np.__version__ sess = tf.InteractiveSession() ``` NOTE on notation * _x, _y, _z, ...: NumPy 0-d or 1-d arrays * _X, _Y, _Z, ...: NumPy 2-d or higer dimensional arrays * x, y, z, ...: 0-d or 1-d tensors * X, Y, Z, ...: 2-d or higher dimensional tensors ## Arithmetic Operators Q1. Add x and y element-wise. ``` _x = np.array([1, 2, 3]) _y = np.array([-1, -2, -3]) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ``` Q2. Subtract y from x element-wise. ``` _x = np.array([3, 4, 5]) _y = np.array(3) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ``` Q3. Multiply x by y element-wise. ``` _x = np.array([3, 4, 5]) _y = np.array([1, 0, -1]) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ``` Q4. Multiply x by 5 element-wise. ``` _x = np.array([1, 2, 3]) x = tf.convert_to_tensor(_x) ``` Q5. Predict the result of this. ``` _x = np.array([10, 20, 30], np.int32) _y = np.array([2, 3, 5], np.int32) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) out1 = tf.div(x, y) out2 = tf.truediv(x, y) print(np.array_equal(out1.eval(), out2.eval())) print(out1.eval(), out1.eval().dtype) # tf.div() returns the same results as input tensors. print(out2.eval(), out2.eval().dtype)# tf.truediv() always returns floating point results. ``` Q6. Get the remainder of x / y element-wise. ``` _x = np.array([10, 20, 30], np.int32) _y = np.array([2, 3, 7], np.int32) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ``` Q7. Compute the pairwise cross product of x and y. ``` _x = np.array([1, 2, 3], np.int32) _y = np.array([4, 5, 6], np.int32) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ``` ## Basic Math Functions Q8. Add x, y, and z element-wise. 
``` _x = np.array([1, 2, 3], np.int32) _y = np.array([4, 5, 6], np.int32) _z = np.array([7, 8, 9], np.int32) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) z = tf.convert_to_tensor(_y) ``` Q9. Compute the absolute value of X element-wise. ``` _X = np.array([[1, -1], [3, -3]]) X = tf.convert_to_tensor(_X) ``` Q10. Compute numerical negative value of x, elemet-wise. ``` _x = np.array([1, -1]) x = tf.convert_to_tensor(_x) ``` Q11. Compute an element-wise indication of the sign of x, element-wise. ``` _x = np.array([1, 3, 0, -1, -3]) x = tf.convert_to_tensor(_x) ``` Q12. Compute the reciprocal of x, element-wise. ``` _x = np.array([1, 2, 2/10]) x = tf.convert_to_tensor(_x) ``` Q13. Compute the square of x, element-wise. ``` _x = np.array([1, 2, -1]) x = tf.convert_to_tensor(_x) ``` Q14. Predict the results of this, paying attention to the difference among the family functions. ``` _x = np.array([2.1, 1.5, 2.5, 2.9, -2.1, -2.5, -2.9]) x = tf.convert_to_tensor(_x) ``` Q15. Compute square root of x element-wise. ``` _x = np.array([1, 4, 9], dtype=np.float32) x = tf.convert_to_tensor(_x) ``` Q16. Compute the reciprocal of square root of x element-wise. ``` _x = np.array([1., 4., 9.]) x = tf.convert_to_tensor(_x) ``` Q17. Compute $x^y$, element-wise. ``` _x = np.array([[1, 2], [3, 4]]) _y = np.array([[1, 2], [1, 2]]) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ``` Q17. Compute $e^x$, element-wise. ``` _x = np.array([1., 2., 3.], np.float32) x = tf.convert_to_tensor(_x) ``` Q18. Compute natural logarithm of x element-wise. ``` _x = np.array([1, np.e, np.e**2]) x = tf.convert_to_tensor(_x) ``` Q19. Compute the max of x and y element-wise. ``` _x = np.array([2, 3, 4]) _y = np.array([1, 5, 2]) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ``` Q20. Compute the min of x and y element-wise. ``` _x = np.array([2, 3, 4]) _y = np.array([1, 5, 2]) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ``` Q21. 
Compuete the sine, cosine, and tangent of x, element-wise. ``` _x = np.array([-np.pi, np.pi, np.pi/2]) x = tf.convert_to_tensor(_x) ``` Q22. Compute (x - y)(x - y) element-wise. ``` _x = np.array([2, 3, 4]) _y = np.array([1, 5, 1]) x = tf.convert_to_tensor(_x) y = tf.convert_to_tensor(_y) ```
github_jupyter
``` import matplotlib as mpl import matplotlib.pyplot as plt age_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35] dev_x = [38496, 42000, 46752, 49320, 53200, 56000, 62316, 64928, 67317, 68748, 73752] ax = plt.bar(age_x, dev_x) for index, value in zip(age_x, dev_x): plt.text(index, value+5000, f'{value:,}', ha='center', va='center', rotation=45) # alternatif cara untuk mengatur ukuran font, dengan menggunakan parameter fontsize plt.title('Salary By Age', fontsize=20) plt.xlabel('Age', fontsize=14) plt.ylabel('Salary (USD)', fontsize=14) plt.grid(axis='y', ls='--', alpha=.5) plt.show() ``` ``` ax = plt.bar(age_x, dev_x) # misalkan kita ingin set warna pada bar age = 30 menjadi berwarna latar merah ax.patches[5].set_facecolor('red') plt.title('Salary By Age', fontsize=20) plt.xlabel('Age') plt.ylabel('Salary (USD)') plt.grid(axis='y', ls='--', alpha=.5) plt.show() ``` ``` plt.bar(age_x, dev_x) for index, value in zip(age_x, dev_x): plt.text(index, value+7000 if index != 30 else value+12000, f'{value:,}', ha='center', va='center', color='red' if index==30 else 'black', fontsize=16 if index==30 else 10 , rotation=90) plt.title('Salary By Age', fontsize=20) plt.xlabel('Age') plt.ylabel('Salary (USD)') plt.grid(axis='y', ls='--', alpha=.5) plt.show() ``` ``` ax = plt.bar(age_x, dev_x) # misalkan kita ingin set warna pada bar age = 30 menjadi berwarna latar merah ax.patches[5].set_facecolor('red') for index, value in zip(age_x, dev_x): plt.text(index, value+7000 if index != 30 else value+12000, f'{value:,}', ha='center', va='center', color='red' if index==30 else 'black', fontsize=16 if index==30 else 10 , rotation=90) plt.title('Salary By Age', fontsize=20) plt.xlabel('Age') plt.ylabel('Salary (USD)') plt.grid(axis='y', ls='--', alpha=.5) plt.show() ``` ``` ax = plt.bar(age_x, dev_x) for index, value in enumerate(age_x): if value % 2 != 0: ax.patches[index].set_facecolor('red') plt.title('Salary By Age', fontsize=20) plt.xlabel('Age') plt.ylabel('Salary (USD)') 
plt.grid(axis='y', ls='--', alpha=.5) plt.show() ``` ``` colors = ['red' if age % 2 else 'blue' for age in age_x] ax = plt.bar(age_x, dev_x, color=colors) plt.title('Salary By Age', fontsize=20) plt.xlabel('Age') plt.ylabel('Salary (USD)') plt.grid(axis='y', ls='--', alpha=.5) plt.show() ``` ``` colors = ['red', 'green', 'blue', 'cyan', 'magenta', 'black', 'white', 'olive', 'pink', 'purple', 'brown'] ax = plt.bar(age_x, dev_x, color=colors, edgecolor='k') plt.title('Salary By Age', fontsize=20) plt.xlabel('Age') plt.ylabel('Salary (USD)') plt.grid(axis='y', ls='--', alpha=.5) plt.show() ```
github_jupyter
# Dask pipeline ## Example: Tracking the International Space Station with Dask In this notebook we will be using two APIs: 1. [Google Maps Geocoder](https://developers.google.com/maps/documentation/geocoding/overview) 2. [Open Notify API for ISS location](http://api.open-notify.org/) We will use them to keep track of the ISS location and next lead time in relation to a list of cities. To create our diagrams and intelligently parallelise data, we use Dask, especially [Dask Delayed](../refactoring/performance/dask.html#Dask-Delayed). ### 1. Imports ``` import requests import logging import sys import numpy as np from time import sleep from datetime import datetime from math import radians from dask import delayed from operator import itemgetter from sklearn.neighbors import DistanceMetric ``` ### 2. Logger ``` logger = logging.getLogger() logger.setLevel(logging.INFO) ``` ### 3. Latitude and longitude pairs from a list of cities see also [Location APIs](https://locationiq.com/) ``` def get_lat_long(address): resp = requests.get( 'https://eu1.locationiq.org/v1/search.php', params={'key': '92e7ba84cf3465', 'q': address, 'format': 'json'} ) if resp.status_code != 200: print('There was a problem with your request!') print(resp.content) return data = resp.json()[0] return { 'name': data.get('display_name'), 'lat': float(data.get('lat')), 'long': float(data.get('lon')), } get_lat_long('Berlin, Germany') locations = [] for city in ['Seattle, Washington', 'Miami, Florida', 'Berlin, Germany', 'Singapore', 'Wellington, New Zealand', 'Beirut, Lebanon', 'Beijing, China', 'Nairobi, Kenya', 'Cape Town, South Africa', 'Buenos Aires, Argentina']: locations.append(get_lat_long(city)) sleep(2) locations ``` ## 4. 
Retrieve ISS data and determine lead times for cities ``` def get_spaceship_location(): resp = requests.get('http://api.open-notify.org/iss-now.json') location = resp.json()['iss_position'] return {'lat': float(location.get('latitude')), 'long': float(location.get('longitude'))} def great_circle_dist(lon1, lat1, lon2, lat2): "Found on SO: http://stackoverflow.com/a/41858332/380442" dist = DistanceMetric.get_metric('haversine') lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2]) X = [[lat1, lon1], [lat2, lon2]] kms = 6367 return (kms * dist.pairwise(X)).max() def iss_dist_from_loc(issloc, loc): distance = great_circle_dist(issloc.get('long'), issloc.get('lat'), loc.get('long'), loc.get('lat')) logging.info('ISS is ~%dkm from %s', int(distance), loc.get('name')) return distance def iss_pass_near_loc(loc): resp = requests.get('http://api.open-notify.org/iss-pass.json', params={'lat': loc.get('lat'), 'lon': loc.get('long')}) data = resp.json().get('response')[0] td = datetime.fromtimestamp(data.get('risetime')) - datetime.now() m, s = divmod(int(td.total_seconds()), 60) h, m = divmod(m, 60) logging.info('ISS will pass near %s in %02d:%02d:%02d',loc.get('name'), h, m, s) return td.total_seconds() iss_dist_from_loc(get_spaceship_location(), locations[4]) iss_pass_near_loc(locations[4]) ``` ## 5. Create a `delayed` pipeline ``` output = [] for loc in locations: issloc = delayed(get_spaceship_location)() dist = delayed(iss_dist_from_loc)(issloc, loc) output.append((loc.get('name'), dist)) closest = delayed(lambda x: sorted(x, key=itemgetter(1))[0])(output) closest ``` ## 6. Show DAG ``` closest.visualize() ``` ## 7. `compute()` ``` closest.compute() ```
github_jupyter
# Multi-Timescale Prediction This notebook showcases some ways to use the **MTS-LSTM** from our recent publication to generate predictions at multiple timescales: [**"Rainfall-Runoff Prediction at Multiple Timescales with a Single Long Short-Term Memory Network"**](https://arxiv.org/abs/2010.07921). Let's assume we have a set of daily meteorological forcing variables and a set of hourly variables, and we want to generate daily and hourly discharge predictions. Now, we could just go and train two separate LSTMs: One on the daily forcings to generate daily predictions, and one on the hourly forcings to generate hourly ones. One problem with this approach: It takes a _lot_ of time, even if you run it on a GPU. The reason is that the hourly model would crunch through a years' worth of hourly data to predict a single hour (assuming we provide the model input sequences with the same look-back that we usually use with daily data). That's $365 \times 24 = 8760$ time steps to process for each prediction. Not only does this take ages to train and evaluate, but also the training procedure becomes quite unstable and it is theoretically really hard for the model to learn dependencies over that many time steps. What's more, the daily and hourly predictions might end up being inconsistent, because the two models are entirely unrelated. ## MTS-LSTM MTS-LSTM solves these issues: We can use a single model to predict both hourly and daily discharge, and with some tricks, we can push the model toward predictions that are consistent across timescales. ### The Intuition The basic idea of MTS-LSTM is this: we can process time steps that are far in the past at lower temporal resolution. As an example, to predict discharge of September 10 9:00am, we'll certainly need fine-grained data for the previous few days or weeks. We might also need information from several months ago, but we probably _don't_ need to know if it rained at 6:00am or 7:00am on May 15. 
It's just so long ago that the fine resolution doesn't matter anymore. ### How it's Implemented The MTS-LSTM architecture follows this principle: To predict today's daily and hourly dicharge, we start feeding daily meteorological information from up to a year ago into the LSTM. At some point, say 14 days before today, we split our processing into two branches: 1. The first branch just keeps going with daily inputs until it outputs today's daily prediction. So far, there's no difference to normal daily-only prediction. 2. The second branch is where it gets interesting: We take the LSTM state from 14 days before today, apply a linear transformation to it, and then use the resulting states as the starting point for another LSTM, which we feed the 14 days of _hourly_ data until it generates today's 24 hourly predictions. Thus, in a single forward pass through the MTS-LSTM, we've generated both daily and hourly predictions. If you prefer visualizations, here's what the architecture looks like: ![MTS-LSTM-Visualization.jpg](https://raw.githubusercontent.com/neuralhydrology/neuralhydrology/master/examples/04-Multi-Timescale/mtslstm.jpg) You can see how the first 362 input steps are done at the daily timescale (the visualization uses 362 days, but in reality this is a tunable hyperparameter). Starting with day 363, two things happen: - The _daily_ LSTM just keeps going with daily inputs. - We take the hidden and cell states from day 362 and pass them through a linear layer. Starting with these new states, the _hourly_ LSTM begins processing hourly inputs. Finally, we pass the LSTMs' outputs through a linear output layer ($\text{FC}^H$ and $\text{FC}^D$) and get our predictions. ### Some Variations Now that we have this model, we can think of a few variations: 1. Because the MTS-LSTM has an individual branch for each timescale, we can actually use a different forcings product at each timescale (e.g., daily Daymet and hourly NLDAS). 
Going even further, we can use _multiple_ sets of forcings at each timescale (e.g., daily Daymet and Maurer, but only hourly NLDAS). This can improve predictions a lot (see [Kratzert et al., 2020](https://hess.copernicus.org/preprints/hess-2020-221/)). 2. We could also use the same LSTM weights in all timescales' branches. We call this model the shared MTS-LSTM (sMTS-LSTM). In our results, the shared version generated slightly better predictions if all we have is one forcings dataset. The drawback is that the model doesn't support per-timescale forcings. Thus, if you have several forcings datasets, you'll most likely get better predictions if you use MTS-LSTM (non-shared) and leverage all your datasets. 3. We can link the daily and hourly predictions during training to nudge the model towards predictions that are consistent across timescales. We do this by means of a regularization of the loss function that increases the loss if the average daily prediction aggregated from hourly predictions does not match the daily prediction. ## Using MTS-LSTM So, let's look at some code to train and evaluate an MTS-LSTM! The following code uses the `neuralhydrology` package to train an MTS-LSTM on daily and hourly discharge prediction. For the sake of a quick example, we'll train our model on just a single basin. When you actually care about the quality of your predictions, you'll generally get much better model performance when training on hundreds of basins. ``` import pickle from pathlib import Path import matplotlib.pyplot as plt from neuralhydrology.evaluation import metrics, get_tester from neuralhydrology.nh_run import start_run, eval_run from neuralhydrology.utils.config import Config ``` Every experiment in `neuralhydrology` uses a configuration file that specifies its setup. 
Let's look at some of the relevant configuration options: ``` run_config = Config(Path("1_basin.yml")) print('model:\t\t', run_config.model) print('use_frequencies:', run_config.use_frequencies) print('seq_length:\t', run_config.seq_length) ``` `model` is obvious: We want to use the MTS-LSTM. For the sMTS-LSTM, we'd set `run_config.shared_mtslstm = True`. In `use_frequencies`, we specify the timescales we want to predict. In `seq_length`, we specify for each timescale the look-back window. Here, we'll start with 365 days look-back, and the hourly LSTM branch will get the last 14 days ($336/24 = 14$) at an hourly resolution. As we're using the MTS-LSTM (and not sMTS-LSTM), we can use different input variables at each frequency. Here, we use Maurer and Daymet as daily inputs, while the hourly model component uses NLDAS, Maurer, and Daymet. Note that even though Daymet and Maurer are daily products, we can use them to support the hourly predictions. ``` print('dynamic_inputs:') run_config.dynamic_inputs ``` ## Training We start model training of our single-basin toy example with `start_run`. ``` start_run(config_file=Path("1_basin.yml")) ``` ## Evaluation Given the trained model, we can generate and evaluate its predictions. ``` run_dir = Path("runs/test_run_1410_151521") # you'll find this path in the output of the training above. # create a tester instance and start evaluation tester = get_tester(cfg=run_config, run_dir=run_dir, period="test", init_model=True) results = tester.evaluate(save_results=False, metrics=run_config.metrics) results.keys() ``` Let's take a closer look at the predictions and do some plots, starting with the daily results. Note that units are mm/h even for daily values, since we predict daily averages. 
``` # extract observations and simulations daily_qobs = results["01022500"]["1D"]["xr"]["qobs_mm_per_hour_obs"] daily_qsim = results["01022500"]["1D"]["xr"]["qobs_mm_per_hour_sim"] fig, ax = plt.subplots(figsize=(16,10)) ax.plot(daily_qobs["date"], daily_qobs, label="Observed") ax.plot(daily_qsim["date"], daily_qsim, label="Simulated") ax.legend() ax.set_ylabel("Discharge (mm/h)") ax.set_title(f"Test period - daily NSE {results['01022500']['1D']['NSE_1D']:.3f}") # Calculate some metrics values = metrics.calculate_all_metrics(daily_qobs.isel(time_step=-1), daily_qsim.isel(time_step=-1)) print("Daily metrics:") for key, val in values.items(): print(f" {key}: {val:.3f}") ``` ...and finally, let's look more closely at the last few months' hourly predictions: ``` # extract a date slice of observations and simulations hourly_xr = results["01022500"]["1H"]["xr"].sel(date=slice("10-1995", None)) # The hourly data is indexed with two indices: The date (in days) and the time_step (the hour within that day). # As we want to get a continuous plot of several days' hours, we select all 24 hours of each day and then stack # the two dimensions into one consecutive datetime dimension. hourly_xr = hourly_xr.isel(time_step=slice(-24, None)).stack(datetime=['date', 'time_step']) hourly_xr['datetime'] = hourly_xr.coords['date'] + hourly_xr.coords['time_step'] hourly_qobs = hourly_xr["qobs_mm_per_hour_obs"] hourly_qsim = hourly_xr["qobs_mm_per_hour_sim"] fig, ax = plt.subplots(figsize=(16,10)) ax.plot(hourly_qobs["datetime"], hourly_qobs, label="Observation") ax.plot(hourly_qsim["datetime"], hourly_qsim, label="Simulation") ax.set_ylabel("Discharge (mm/h)") ax.set_title(f"Test period - hourly NSE {results['01022500']['1H']['NSE_1H']:.3f}") _ = ax.legend() ```
github_jupyter
# Helium Hydride (Tapered HeH+) Exemplar ## Step 0: Import various libraries ``` # Imports for QSCOUT import jaqalpaq from jaqalpaq.core import circuitbuilder from jaqalpaq.core.circuit import normalize_native_gates from jaqalpaq import pygsti from qscout.v1 import native_gates # Imports for basic mathematical functionality from math import pi import numpy as np # Imports for OpenFermion(-PySCF) import openfermion as of from openfermion.hamiltonians import MolecularData from openfermionpyscf import run_pyscf # Import for VQE optimizer from scipy import optimize ``` ## Step 1: SCF calculation to assmble the second-quantized Hamiltonian ``` # Set the basis set, spin, and charge of the H2 molecule basis = 'sto-3g' multiplicity = 1 charge = 1 #Charge is 1 for HeH+ # Set calculation parameters run_scf = 1 run_fci = 1 delete_input = True # Note: this option is critical as it ensures that the integrals are written out to an HDF5 file delete_output = False # Generate molecule at some bond length (0.8 Angstroms here) geometry = [('He', (0., 0., 0.)), ('H', (0., 0., 0.8))] molecule = MolecularData( geometry, basis, multiplicity, charge, filename='./HeH+_2_sto-3g_single_0.8') #Set file location of data # Run pyscf to generate new molecular data for sto-3g HeH+ molecule = run_pyscf(molecule, run_scf=run_scf, run_fci=run_fci, verbose=False) print("Bond Length in Angstroms: {}".format(0.8)) print("FCI (Exact) energy in Hartrees: {}".format(molecule.fci_energy)) ``` ## Step 2: Convert the fermionic Hamiltonian to a qubit Hamiltonian ``` #Get the Hamiltonian for HeH+ hamiltonian = molecule.get_molecular_hamiltonian() hamiltonian_ferm = of.get_fermion_operator(hamiltonian) hamiltonian_bk = of.symmetry_conserving_bravyi_kitaev(hamiltonian_ferm, active_orbitals=4, active_fermions=2) #Define terms and coefficients of our Hamiltonian terms = [] cs = [] #Coefficients for term in hamiltonian_bk.terms: paulis = [None, None] for pauli in term: paulis[pauli[0]] = pauli[1] terms += [paulis] 
cs += [hamiltonian_bk.terms[term]] ``` ## Step 3: Define UCC Ansatz circuit in JaqalPaq ``` def ansatz(theta): term_probs = [] for i in range(len(terms)): sexpr = [ 'circuit', #Define constants +-pi/2 ('let', 'pi2', pi/2), ('let', 'npi2', -pi/2), #Create a qubit register ('register', 'q', 2), ('map', 'q0', 'q', 0), ('map', 'q1', 'q', 1), #Define a hadamard macro ('macro', 'hadamard', 'a', ('sequential_block', ('gate', 'Sy', 'a'), ('gate', 'Px', 'a'), ), ), #Prepare the state |11> ('gate', 'prepare_all'), ('gate', 'Px', 'q0'), ('gate', 'Px', 'q1'), #Apply the UCC Ansatz exp[-i*theta(X1 Y0)] ('gate', 'MS', 'q1', 'q0', 'npi2', 0), ('gate', 'Rz', 'q1', theta), ('gate', 'MS', 'q1', 'q0', 'pi2', 0), ] #Change basis for measurement depending on term for j, qubit in enumerate(terms[i]): if qubit == 'X': sexpr+=('gate', 'hadamard', ('array_item', 'q', j)), if qubit == 'Y': sexpr+=('gate', 'Sxd', ('array_item', 'q', j)), sexpr+=('gate', 'measure_all'), circuit=circuitbuilder.build(sexpr, native_gates=normalize_native_gates(native_gates.NATIVE_GATES)) #Format results of simulation as a list of lists sim_result = pygsti.forward_simulate_circuit(circuit) probs = [] for state in sim_result: probs += [sim_result[state]] #Append probabilities of each state for a particular term term_probs += [probs] #Combine lists of probabilities of each term in Hamiltonian return term_probs ``` ## Step 4: Define functions to calculate energy expectation value of Ansatz state ``` #Calculate energy of one term of the Hamiltonian for one possible state def term_energy(term, state, coefficient, prob): parity = 1 for i in range(len(term)): #Change parity if state is occupied and is acted on by a pauli operator if term[i] != None and state[i] == '1': parity = -1*parity return coefficient*prob*parity #Calculate energy of the molecule for a given value of theta def calculate_energy(theta): energy = 0 probs = ansatz(theta[0]) #Convert tuple (from optimization) to float for circuit for i in 
range(len(terms)): #For each term in the hamiltonian for j in range(len(probs[0])): #For each possible state term = terms[i] state = '{0:02b}'.format(j) #convert state to binary (# of qubits) coefficient = cs[i].real prob = probs[i][j] energy += term_energy(term, state, coefficient, prob) return energy ``` ## Step 5: Minimize the energy expectation value in 𝜃 ``` #Minimize the energy using classical optimization optimize.minimize(fun=calculate_energy, x0=[0.01], method="COBYLA") #Can use "L-BFGS-B" instead ``` ## Step 6: Loop over previous steps to calculate ground state energy at different bond lengths ``` # Set the basis set, spin, and charge of the H2 molecule basis = 'sto-3g' multiplicity = 1 charge = 1 # Set calculation parameters run_scf = 1 run_fci = 1 delete_input = True # Note: this option is critical as it ensures that the integrals are written out to an HDF5 file delete_output = False optimized_energies = [] exact_energies = [] #Loop over bond lengths from 0.5 to 2.0 angstroms n_pts = 16 #Number of points bond_lengths = np.linspace(0.5,2.0,n_pts) for diatomic_bond_length in bond_lengths: # Generate molecule at some bond length geometry = [('He', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))] molecule = MolecularData( geometry, basis, multiplicity, charge, description=str(round(diatomic_bond_length, 2)), filename='./HeH+_2_sto-3g_single_dissociation') # Run pyscf molecule = run_pyscf(molecule, run_scf=run_scf, run_fci=run_fci, verbose=False) # Get the fermionic Hamiltonian for H2 and map it into qubits using the Bravyi-Kitaev encoding hamiltonian = molecule.get_molecular_hamiltonian() hamiltonian_ferm = of.get_fermion_operator(hamiltonian) hamiltonian_bk = of.symmetry_conserving_bravyi_kitaev(hamiltonian_ferm, active_orbitals=4, active_fermions=2) #Define terms and coefficients of our Hamiltonian terms = [] cs = [] #Coefficients for term in hamiltonian_bk.terms: paulis = [None, None] for pauli in term: paulis[pauli[0]] = pauli[1] terms += [paulis] 
cs += [hamiltonian_bk.terms[term]] # Minimize the expectation value of the energy using a classical optimizer (COBYLA) result = optimize.minimize(fun=calculate_energy, x0=[0.01], method="COBYLA") optimized_energies.append(result.fun) exact_energies.append(molecule.fci_energy) print("R={}\t Optimized Energy: {}".format(str(round(diatomic_bond_length, 2)), result.fun)) ``` ## Step 7: Plot the dissociation curve ``` import matplotlib import matplotlib.pyplot as pyplot # Plot the various energies for different bond lengths fig = pyplot.figure(figsize=(10,7)) pyplot.rcParams['font.size']=18 bkcolor = '#ffffff' ax = fig.add_subplot(1, 1, 1) pyplot.subplots_adjust(left=.2) ax.set_xlabel('R (Angstroms)') ax.set_ylabel(r'E (Hartrees)') ax.set_title(r'HeH+ 2-qubit bond dissociation curve') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) bond_lengths = [float(x) for x in bond_lengths] ax.plot(bond_lengths, optimized_energies, 'o', label='UCCSD', color='red') ax.plot(bond_lengths, exact_energies, '-', label='Full-CI', color='black') ax.legend(frameon=False) pyplot.show() fig.savefig("HeH+ Bond Dissociation Curve.pdf") ```
github_jupyter
# Regularization Welcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen! **You will learn to:** Use regularization in your deep learning models. Let's first import the packages you are going to use. ``` # import packages import numpy as np import matplotlib.pyplot as plt from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters import sklearn import sklearn.datasets import scipy.io from testCases import * %matplotlib inline plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' ``` **Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head. <img src="images/field_kiank.png" style="width:600px;height:350px;"> <caption><center> <u> **Figure 1** </u>: **Football field**<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption> They give you the following 2D dataset from France's past 10 games. ``` train_X, train_Y, test_X, test_Y = load_2D_dataset() ``` Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field. 
- If the dot is blue, it means the French player managed to hit the ball with his/her head - If the dot is red, it means the other team's player hit the ball with their head **Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball. **Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well. You will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem. ## 1 - Non-regularized model You will use the following neural network (already implemented for you below). This model can be used: - in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use "`lambd`" instead of "`lambda`" because "`lambda`" is a reserved keyword in Python. - in *dropout mode* -- by setting the `keep_prob` to a value less than one You will first try the model without any regularization. Then, you will implement: - *L2 regularization* -- functions: "`compute_cost_with_regularization()`" and "`backward_propagation_with_regularization()`" - *Dropout* -- functions: "`forward_propagation_with_dropout()`" and "`backward_propagation_with_dropout()`" In each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model. ``` def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1): """ Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID. 
Arguments: X -- input data, of shape (input size, number of examples) Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples) learning_rate -- learning rate of the optimization num_iterations -- number of iterations of the optimization loop print_cost -- If True, print the cost every 10000 iterations lambd -- regularization hyperparameter, scalar keep_prob - probability of keeping a neuron active during drop-out, scalar. Returns: parameters -- parameters learned by the model. They can then be used to predict. """ grads = {} costs = [] # to keep track of the cost m = X.shape[1] # number of examples layers_dims = [X.shape[0], 20, 3, 1] # Initialize parameters dictionary. parameters = initialize_parameters(layers_dims) # Loop (gradient descent) for i in range(0, num_iterations): # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID. if keep_prob == 1: a3, cache = forward_propagation(X, parameters) elif keep_prob < 1: a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob) # Cost function if lambd == 0: cost = compute_cost(a3, Y) else: cost = compute_cost_with_regularization(a3, Y, parameters, lambd) # Backward propagation. assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout, # but this assignment will only explore one at a time if lambd == 0 and keep_prob == 1: grads = backward_propagation(X, Y, cache) elif lambd != 0: grads = backward_propagation_with_regularization(X, Y, cache, lambd) elif keep_prob < 1: grads = backward_propagation_with_dropout(X, Y, cache, keep_prob) # Update parameters. 
parameters = update_parameters(parameters, grads, learning_rate) # Print the loss every 10000 iterations if print_cost and i % 10000 == 0: print("Cost after iteration {}: {}".format(i, cost)) if print_cost and i % 1000 == 0: costs.append(cost) # plot the cost plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (x1,000)') plt.title("Learning rate =" + str(learning_rate)) plt.show() return parameters ``` Let's train the model without any regularization, and observe the accuracy on the train/test sets. ``` parameters = model(train_X, train_Y) print ("On the training set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) ``` The train accuracy is 94.8% while the test accuracy is 91.5%. This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model. ``` plt.title("Model without regularization") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) ``` The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Lets now look at two techniques to reduce overfitting. ## 2 - L2 Regularization The standard way to avoid overfitting is called **L2 regularization**. 
It consists of appropriately modifying your cost function, from: $$J = -\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} \tag{1}$$ To: $$J_{regularized} = \small \underbrace{-\frac{1}{m} \sum\limits_{i = 1}^{m} \large{(}\small y^{(i)}\log\left(a^{[L](i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right) \large{)} }_\text{cross-entropy cost} + \underbrace{\frac{1}{m} \frac{\lambda}{2} \sum\limits_l\sum\limits_k\sum\limits_j W_{k,j}^{[l]2} }_\text{L2 regularization cost} \tag{2}$$ Let's modify your cost and observe the consequences. **Exercise**: Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\sum\limits_k\sum\limits_j W_{k,j}^{[l]2}$ , use : ```python np.sum(np.square(Wl)) ``` Note that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \frac{1}{m} \frac{\lambda}{2} $. ``` # GRADED FUNCTION: compute_cost_with_regularization def compute_cost_with_regularization(A3, Y, parameters, lambd): """ Implement the cost function with L2 regularization. See formula (2) above. Arguments: A3 -- post-activation, output of forward propagation, of shape (output size, number of examples) Y -- "true" labels vector, of shape (output size, number of examples) parameters -- python dictionary containing parameters of the model Returns: cost - value of the regularized loss function (formula (2)) """ m = Y.shape[1] W1 = parameters["W1"] W2 = parameters["W2"] W3 = parameters["W3"] cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost ### START CODE HERE ### (approx. 
1 line) L2_regularization_cost = (lambd/(2*m))*(np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3))) ### END CODER HERE ### cost = cross_entropy_cost + L2_regularization_cost return cost A3, Y_assess, parameters = compute_cost_with_regularization_test_case() print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1))) ``` **Expected Output**: <table> <tr> <td> **cost** </td> <td> 1.78648594516 </td> </tr> </table> Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost. **Exercise**: Implement the changes needed in backward propagation to take into account regularization. The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\frac{d}{dW} ( \frac{1}{2}\frac{\lambda}{m} W^2) = \frac{\lambda}{m} W$). ``` # GRADED FUNCTION: backward_propagation_with_regularization def backward_propagation_with_regularization(X, Y, cache, lambd): """ Implements the backward propagation of our baseline model to which we added an L2 regularization. Arguments: X -- input dataset, of shape (input size, number of examples) Y -- "true" labels vector, of shape (output size, number of examples) cache -- cache output from forward_propagation() lambd -- regularization hyperparameter, scalar Returns: gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables """ m = X.shape[1] (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache dZ3 = A3 - Y ### START CODE HERE ### (approx. 1 line) dW3 = 1./m * np.dot(dZ3, A2.T) + lambd*W3/m ### END CODE HERE ### db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True) dA2 = np.dot(W3.T, dZ3) dZ2 = np.multiply(dA2, np.int64(A2 > 0)) ### START CODE HERE ### (approx. 
1 line) dW2 = 1./m * np.dot(dZ2, A1.T) + lambd*W2/m ### END CODE HERE ### db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True) dA1 = np.dot(W2.T, dZ2) dZ1 = np.multiply(dA1, np.int64(A1 > 0)) ### START CODE HERE ### (approx. 1 line) dW1 = 1./m * np.dot(dZ1, X.T) + lambd*W1/m ### END CODE HERE ### db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True) gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1} return gradients X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case() grads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7) print ("dW1 = "+ str(grads["dW1"])) print ("dW2 = "+ str(grads["dW2"])) print ("dW3 = "+ str(grads["dW3"])) ``` **Expected Output**: <table> <tr> <td> **dW1** </td> <td> [[-0.25604646 0.12298827 -0.28297129] [-0.17706303 0.34536094 -0.4410571 ]] </td> </tr> <tr> <td> **dW2** </td> <td> [[ 0.79276486 0.85133918] [-0.0957219 -0.01720463] [-0.13100772 -0.03750433]] </td> </tr> <tr> <td> **dW3** </td> <td> [[-1.77691347 -0.11832879 -0.09397446]] </td> </tr> </table> Let's now run the model with L2 regularization $(\lambda = 0.7)$. The `model()` function will call: - `compute_cost_with_regularization` instead of `compute_cost` - `backward_propagation_with_regularization` instead of `backward_propagation` ``` parameters = model(train_X, train_Y, lambd = 0.7) print ("On the train set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) ``` Congrats, the test set accuracy increased to 93%. You have saved the French football team! You are not overfitting the training data anymore. Let's plot the decision boundary. 
``` plt.title("Model with L2-regularization") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) ``` **Observations**: - The value of $\lambda$ is a hyperparameter that you can tune using a dev set. - L2 regularization makes your decision boundary smoother. If $\lambda$ is too large, it is also possible to "oversmooth", resulting in a model with high bias. **What is L2-regularization actually doing?**: L2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes. <font color='blue'> **What you should remember** -- the implications of L2-regularization on: - The cost computation: - A regularization term is added to the cost - The backpropagation function: - There are extra terms in the gradients with respect to weight matrices - Weights end up smaller ("weight decay"): - Weights are pushed to smaller values. ## 3 - Dropout Finally, **dropout** is a widely used regularization technique that is specific to deep learning. **It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means! <!-- To understand drop-out, consider this conversation with a friend: - Friend: "Why do you need all these neurons to train your network and classify images?". - You: "Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more featurse my model learns!" - Friend: "I see, but are you sure that your neurons are learning different features and not all the same features?" - You: "Good point... Neurons in the same layer actually don't talk to each other. 
It should be definitly possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution." !--> <center> <video width="620" height="440" src="images/dropout1_kiank.mp4" type="video/mp4" controls> </video> </center> <br> <caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\_prob$ or keep it with probability $keep\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </center></caption> <center> <video width="620" height="440" src="images/dropout2_kiank.mp4" type="video/mp4" controls> </video> </center> <caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption> When you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time. ### 3.1 - Forward propagation with dropout **Exercise**: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer. **Instructions**: You would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps: 1. In lecture, we dicussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. 
Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$. 2. Set each entry of $D^{[1]}$ to be 0 with probability (`1-keep_prob`) or 1 with probability (`keep_prob`), by thresholding values in $D^{[1]}$ appropriately. Hint: to set all the entries of a matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: `X = (X < 0.5)`. Note that 0 and 1 are respectively equivalent to False and True. 3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values. 4. Divide $A^{[1]}$ by `keep_prob`. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.) ``` # GRADED FUNCTION: forward_propagation_with_dropout def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5): """ Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID. 
Arguments: X -- input dataset, of shape (2, number of examples) parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3": W1 -- weight matrix of shape (20, 2) b1 -- bias vector of shape (20, 1) W2 -- weight matrix of shape (3, 20) b2 -- bias vector of shape (3, 1) W3 -- weight matrix of shape (1, 3) b3 -- bias vector of shape (1, 1) keep_prob - probability of keeping a neuron active during drop-out, scalar Returns: A3 -- last activation value, output of the forward propagation, of shape (1,1) cache -- tuple, information stored for computing the backward propagation """ np.random.seed(1) # retrieve parameters W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] W3 = parameters["W3"] b3 = parameters["b3"] # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID Z1 = np.dot(W1, X) + b1 A1 = relu(Z1) ### START CODE HERE ### (approx. 4 lines) # Steps 1-4 below correspond to the Steps 1-4 described above. D1 = np.random.rand(A1.shape[0],A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...) D1 = D1<keep_prob # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold) A1 = np.multiply(A1, D1) # Step 3: shut down some neurons of A1 A1 = A1/keep_prob # Step 4: scale the value of neurons that haven't been shut down ### END CODE HERE ### Z2 = np.dot(W2, A1) + b2 A2 = relu(Z2) ### START CODE HERE ### (approx. 4 lines) D2 = np.random.rand(A2.shape[0],A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...) 
D2 = D2<keep_prob # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold) A2 = np.multiply(A2,D2) # Step 3: shut down some neurons of A2 A2 = A2/keep_prob # Step 4: scale the value of neurons that haven't been shut down ### END CODE HERE ### Z3 = np.dot(W3, A2) + b3 A3 = sigmoid(Z3) cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) return A3, cache X_assess, parameters = forward_propagation_with_dropout_test_case() A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7) print ("A3 = " + str(A3)) ``` **Expected Output**: <table> <tr> <td> **A3** </td> <td> [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]] </td> </tr> </table> ### 3.2 - Backward propagation with dropout **Exercise**: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache. **Instruction**: Backpropagation with dropout is actually quite easy. You will have to carry out 2 Steps: 1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`. 2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`). ``` # GRADED FUNCTION: backward_propagation_with_dropout def backward_propagation_with_dropout(X, Y, cache, keep_prob): """ Implements the backward propagation of our baseline model to which we added dropout. 
Arguments: X -- input dataset, of shape (2, number of examples) Y -- "true" labels vector, of shape (output size, number of examples) cache -- cache output from forward_propagation_with_dropout() keep_prob - probability of keeping a neuron active during drop-out, scalar Returns: gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables """ m = X.shape[1] (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache dZ3 = A3 - Y dW3 = 1./m * np.dot(dZ3, A2.T) db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True) dA2 = np.dot(W3.T, dZ3) ### START CODE HERE ### (≈ 2 lines of code) dA2 = dA2*D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation dA2 = dA2/keep_prob # Step 2: Scale the value of neurons that haven't been shut down ### END CODE HERE ### dZ2 = np.multiply(dA2, np.int64(A2 > 0)) dW2 = 1./m * np.dot(dZ2, A1.T) db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True) dA1 = np.dot(W2.T, dZ2) ### START CODE HERE ### (≈ 2 lines of code) dA1 = dA1*D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation dA1 = dA1/keep_prob # Step 2: Scale the value of neurons that haven't been shut down ### END CODE HERE ### dZ1 = np.multiply(dA1, np.int64(A1 > 0)) dW1 = 1./m * np.dot(dZ1, X.T) db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True) gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1} return gradients X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case() gradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8) print ("dA1 = " + str(gradients["dA1"])) print ("dA2 = " + str(gradients["dA2"])) ``` **Expected Output**: <table> <tr> <td> **dA1** </td> <td> [[ 0.36544439 0. -0.00188233 0. -0.17408748] [ 0.65515713 0. -0.00337459 0. -0. ]] </td> </tr> <tr> <td> **dA2** </td> <td> [[ 0.58180856 0. -0.00299679 0. 
-0.27715731] [ 0. 0.53159854 -0. 0.53159854 -0.34089673] [ 0. 0. -0.00292733 0. -0. ]] </td> </tr> </table> Let's now run the model with dropout (`keep_prob = 0.86`). It means at every iteration you shut down each neurons of layer 1 and 2 with 14% probability. The function `model()` will now call: - `forward_propagation_with_dropout` instead of `forward_propagation`. - `backward_propagation_with_dropout` instead of `backward_propagation`. ``` parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3) print ("On the train set:") predictions_train = predict(train_X, train_Y, parameters) print ("On the test set:") predictions_test = predict(test_X, test_Y, parameters) ``` Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you! Run the code below to plot the decision boundary. ``` plt.title("Model with dropout") axes = plt.gca() axes.set_xlim([-0.75,0.40]) axes.set_ylim([-0.75,0.65]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) ``` **Note**: - A **common mistake** when using dropout is to use it both in training and testing. You should use dropout (randomly eliminate nodes) only in training. - Deep learning frameworks like [tensorflow](https://www.tensorflow.org/api_docs/python/tf/nn/dropout), [PaddlePaddle](http://doc.paddlepaddle.org/release_doc/0.9.0/doc/ui/api/trainer_config_helpers/attrs.html), [keras](https://keras.io/layers/core/#dropout) or [caffe](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) come with a dropout layer implementation. Don't stress - you will soon learn some of these frameworks. <font color='blue'> **What you should remember about dropout:** - Dropout is a regularization technique. - You only use dropout during training. Don't use dropout (randomly eliminate nodes) during test time. 
- Apply dropout both during forward and backward propagation. - During training time, divide each dropout layer by keep_prob to keep the same expected value for the activations. For example, if keep_prob is 0.5, then we will on average shut down half the nodes, so the output will be scaled by 0.5 since only the remaining half are contributing to the solution. Dividing by 0.5 is equivalent to multiplying by 2. Hence, the output now has the same expected value. You can check that this works even when keep_prob is other values than 0.5. ## 4 - Conclusions **Here are the results of our three models**: <table> <tr> <td> **model** </td> <td> **train accuracy** </td> <td> **test accuracy** </td> </tr> <td> 3-layer NN without regularization </td> <td> 95% </td> <td> 91.5% </td> <tr> <td> 3-layer NN with L2-regularization </td> <td> 94% </td> <td> 93% </td> </tr> <tr> <td> 3-layer NN with dropout </td> <td> 93% </td> <td> 95% </td> </tr> </table> Note that regularization hurts training set performance! This is because it limits the ability of the network to overfit to the training set. But since it ultimately gives better test accuracy, it is helping your system. Congratulations for finishing this assignment! And also for revolutionizing French football. :-) <font color='blue'> **What we want you to remember from this notebook**: - Regularization will help you reduce overfitting. - Regularization will drive your weights to lower values. - L2 regularization and Dropout are two very effective regularization techniques.
github_jupyter
### Global and Local Scopes In Python the **global** scope refers to the **module** scope. The scope of a variable is normally defined by **where** it is (lexically) defined in the code. ``` a = 10 ``` In this case, **a** is defined inside the main module, so it is a global variable. ``` def my_func(n): c = n ** 2 return c ``` In this case, **c** was defined inside the function **my_func**, so it is **local** to the function **my_func**. In this example, **n** is also **local** to **my_func** Global variables can be accessed from any inner scope in the module, for example: ``` def my_func(n): print('global:', a) c = a ** n return c my_func(2) ``` As you can see, **my_func** was able to reference the global variable **a**. But remember that the scope of a variable is determined by where it is assigned. In particular, any variable defined (i.e. assigned a value) inside a function is local to that function, even if the variable name happens to be global too! ``` def my_func(n): a = 2 c = a ** 2 return c print(a) print(my_func(3)) print(a) ``` In order to change the value of a global variable within an inner scope, we can use the **global** keyword as follows: ``` def my_func(n): global a a = 2 c = a ** 2 return c print(a) print(my_func(3)) print(a) ``` As you can see, the value of the global variable **a** was changed from within **my_func**. In fact, we can **create** global variables from within an inner function - Python will simply create the variable and place it in the **global** scope instead of the **local scope**: ``` def my_func(n): global var var = 'hello world' return n ** 2 ``` Now, **var** does not exist yet, since the function has not run: ``` print(var) ``` Once we call the function though, it will create that global **var**: ``` my_func(2) print(var) ``` #### Beware!! Remember that whenever you assign a value to a variable without having specified the variable as **global**, it is **local** in the current scope. 
**Moreover**, it does not matter **where** the assignment in the code takes place, the variable is considered local in the **entire** scope - Python determines the scope of objects at compile-time, not at run-time. Let's see an example of this: ``` a = 10 b = 100 def my_func(): print(a) print(b) my_func() ``` So, this works as expected - **a** and **b** are taken from the global scope since they are referenced **before** being assigned a value in the local scope. But now consider the following example: ``` a = 10 b = 100 def my_func(): print(a) print(b) b = 1000 my_func() ``` As you can see, **b** in the line ``print(b)`` is considered a **local** variable - that's because the **next** line **assigns** a value to **b** - hence **b** is scoped as local by Python for the **entire** function. Of course, functions are also objects, and scoping applies equally to function objects too. For example, we can "mask" the built-in `print` Python function: ``` print = lambda x: 'hello {0}!'.format(x) def my_func(name): return print(name) my_func('world') ``` You may be wondering how we get our **real** ``print`` function back! ``` del print print('hello') ``` Yay!! If you have experience in some other programming languages you may be wondering if loops and other code "blocks" have their own local scope too. For example in Java, the following would not work: ``for (int i=0; i<10; i++) { int x = 2 * i; } system.out.println(x); `` But in Python it works perfectly fine: ``` for i in range(10): x = 2 * i print(x) ``` In this case, when we assigned a value to `x`, Python put it in the global (module) scope, so we can reference it after the `for` loop has finished running.
github_jupyter
# Ungraded Lab: Walkthrough of ML Metadata Keeping records at each stage of the project is an important aspect of machine learning pipelines. Especially in production models which involve many iterations of datasets and re-training, having these records will help in maintaining or debugging the deployed system. [ML Metadata](https://www.tensorflow.org/tfx/guide/mlmd) addresses this need by having an API suited specifically for keeping track of any progress made in ML projects. As mentioned in earlier labs, you have already used ML Metadata when you ran your TFX pipelines. Each component automatically records information to a metadata store as you go through each stage. It allowed you to retrieve information such as the name of the training splits or the location of an inferred schema. In this notebook, you will look more closely at how ML Metadata can be used directly for recording and retrieving metadata independent from a TFX pipeline (i.e. without using TFX components). You will use TFDV to infer a schema and record all information about this process. These will show how the different components are related to each other so you can better interact with the database when you go back to using TFX in the next labs. Moreover, knowing the inner workings of the library will help you adapt it for other platforms if needed. Let's get to it! ## Imports ``` from ml_metadata.metadata_store import metadata_store from ml_metadata.proto import metadata_store_pb2 import tensorflow as tf print('TF version: {}'.format(tf.__version__)) import tensorflow_data_validation as tfdv print('TFDV version: {}'.format(tfdv.version.__version__)) import urllib import zipfile ``` ## Download dataset You will be using the [Chicago Taxi](https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) dataset for this lab. Let's download the CSVs into your workspace. 
``` # Download the zip file from GCP and unzip it url = 'https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/chicago_data.zip' zip, headers = urllib.request.urlretrieve(url) zipfile.ZipFile(zip).extractall() zipfile.ZipFile(zip).close() print("Here's what we downloaded:") !ls -R data ``` ## Process Outline Here is the figure shown in class that describes the different components in an ML Metadata store: <img src='images/mlmd_overview.png' alt='image of mlmd overview'> The green box in the middle shows the data model followed by ML Metadata. The [official documentation](https://www.tensorflow.org/tfx/guide/mlmd#data_model) describe each of these and we'll show it here as well for easy reference: * `ArtifactType` describes an artifact's type and its properties that are stored in the metadata store. You can register these types on-the-fly with the metadata store in code, or you can load them in the store from a serialized format. Once you register a type, its definition is available throughout the lifetime of the store. * An `Artifact` describes a specific instance of an ArtifactType, and its properties that are written to the metadata store. * An `ExecutionType` describes a type of component or step in a workflow, and its runtime parameters. * An `Execution` is a record of a component run or a step in an ML workflow and the runtime parameters. An execution can be thought of as an instance of an ExecutionType. Executions are recorded when you run an ML pipeline or step. * An `Event` is a record of the relationship between artifacts and executions. When an execution happens, events record every artifact that was used by the execution, and every artifact that was produced. These records allow for lineage tracking throughout a workflow. By looking at all events, MLMD knows what executions happened and what artifacts were created as a result. MLMD can then recurse back from any artifact to all of its upstream inputs. 
* A `ContextType` describes a type of conceptual group of artifacts and executions in a workflow, and its structural properties. For example: projects, pipeline runs, experiments, owners etc. * A `Context` is an instance of a ContextType. It captures the shared information within the group. For example: project name, changelist commit id, experiment annotations etc. It has a user-defined unique name within its ContextType. * An `Attribution` is a record of the relationship between artifacts and contexts. * An `Association` is a record of the relationship between executions and contexts. As mentioned earlier, you will use TFDV to generate a schema and record this process in the ML Metadata store. You will be starting from scratch so you will be defining each component of the data model. The outline of steps involves: 1. Defining the ML Metadata's storage database 1. Setting up the necessary artifact types 1. Setting up the execution types 1. Generating an input artifact unit 1. Generating an execution unit 1. Registering an input event 1. Running the TFDV component 1. Generating an output artifact unit 1. Registering an output event 1. Updating the execution unit 1. Setting up and generating a context unit 1. Generating attributions and associations You can then retrieve information from the database to investigate aspects of your project. For example, you can find which dataset was used to generate a particular schema. You will also do that in this exercise. For each of these steps, you may want to have the [MetadataStore API documentation](https://www.tensorflow.org/tfx/ml_metadata/api_docs/python/mlmd/MetadataStore) open so you can lookup any of the methods you will be using to interact with the metadata store. You can also look at the `metadata_store` protocol buffer [here](https://github.com/google/ml-metadata/blob/r0.24.0/ml_metadata/proto/metadata_store.proto) to see descriptions of each data type covered in this tutorial. 
## Define ML Metadata's Storage Database The first step would be to instantiate your storage backend. As mentioned in class, there are several types supported such as fake (temporary) database, SQLite, MySQL, and even cloud-based storage. For this demo, you will just be using a fake database for quick experimentation. ``` # Instantiate a connection config connection_config = metadata_store_pb2.ConnectionConfig() # Set an empty fake database proto connection_config.fake_database.SetInParent() # Setup the metadata store store = metadata_store.MetadataStore(connection_config) ``` ## Register ArtifactTypes Next, you will create the artifact types needed and register them to the store. Since our simple exercise will just involve generating a schema using TFDV, you will only create two artifact types: one for the **input dataset** and another for the **output schema**. The main steps will be to: * Declare an `ArtifactType()` * Define the name of the artifact type * Define the necessary properties within these artifact types. For example, it is important to know the data split name so you may want to have a `split` property for the artifact type that holds datasets. * Use `put_artifact_type()` to register them to the metadata store. This generates an `id` that you can use later to refer to a particular artifact type. 
*Bonus: For practice, you can also extend the code below to create an artifact type for the statistics.* ``` # Create ArtifactType for the input dataset data_artifact_type = metadata_store_pb2.ArtifactType() data_artifact_type.name = 'DataSet' data_artifact_type.properties['name'] = metadata_store_pb2.STRING data_artifact_type.properties['split'] = metadata_store_pb2.STRING data_artifact_type.properties['version'] = metadata_store_pb2.INT # Register artifact type to the Metadata Store data_artifact_type_id = store.put_artifact_type(data_artifact_type) # Create ArtifactType for Schema schema_artifact_type = metadata_store_pb2.ArtifactType() schema_artifact_type.name = 'Schema' schema_artifact_type.properties['name'] = metadata_store_pb2.STRING schema_artifact_type.properties['version'] = metadata_store_pb2.INT # Register artifact type to the Metadata Store schema_artifact_type_id = store.put_artifact_type(schema_artifact_type) print('Data artifact type:\n', data_artifact_type) print('Schema artifact type:\n', schema_artifact_type) print('Data artifact type ID:', data_artifact_type_id) print('Schema artifact type ID:', schema_artifact_type_id) ``` ## Register ExecutionType You will then create the execution types needed. For the simple setup, you will just declare one for the data validation component with a `state` property so you can record if the process is running or already completed. ``` # Create ExecutionType for Data Validation component dv_execution_type = metadata_store_pb2.ExecutionType() dv_execution_type.name = 'Data Validation' dv_execution_type.properties['state'] = metadata_store_pb2.STRING # Register execution type to the Metadata Store dv_execution_type_id = store.put_execution_type(dv_execution_type) print('Data validation execution type:\n', dv_execution_type) print('Data validation execution type ID:', dv_execution_type_id) ``` ## Generate input artifact unit With the artifact types created, you can now create instances of those types. 
The cell below creates the artifact for the input dataset. This artifact is recorded in the metadata store through the `put_artifacts()` function. Again, it generates an `id` that can be used for reference. ``` # Declare input artifact of type DataSet data_artifact = metadata_store_pb2.Artifact() data_artifact.uri = './data/train/data.csv' data_artifact.type_id = data_artifact_type_id data_artifact.properties['name'].string_value = 'Chicago Taxi dataset' data_artifact.properties['split'].string_value = 'train' data_artifact.properties['version'].int_value = 1 # Submit input artifact to the Metadata Store data_artifact_id = store.put_artifacts([data_artifact])[0] print('Data artifact:\n', data_artifact) print('Data artifact ID:', data_artifact_id) ``` ## Generate execution unit Next, you will create an instance of the `Data Validation` execution type you registered earlier. You will set the state to `RUNNING` to signify that you are about to run the TFDV function. This is recorded with the `put_executions()` function. ``` # Register the Execution of a Data Validation run dv_execution = metadata_store_pb2.Execution() dv_execution.type_id = dv_execution_type_id dv_execution.properties['state'].string_value = 'RUNNING' # Submit execution unit to the Metadata Store dv_execution_id = store.put_executions([dv_execution])[0] print('Data validation execution:\n', dv_execution) print('Data validation execution ID:', dv_execution_id) ``` ## Register input event An event defines a relationship between artifacts and executions. You will generate the input event relationship for dataset artifact and data validation execution units. The list of event types are shown [here](https://github.com/google/ml-metadata/blob/master/ml_metadata/proto/metadata_store.proto#L187) and the event is recorded with the `put_events()` function. 
``` # Declare the input event input_event = metadata_store_pb2.Event() input_event.artifact_id = data_artifact_id input_event.execution_id = dv_execution_id input_event.type = metadata_store_pb2.Event.DECLARED_INPUT # Submit input event to the Metadata Store store.put_events([input_event]) print('Input event:\n', input_event) ``` ## Run the TFDV component You will now run the TFDV component to generate the schema of the dataset. This should look familiar since you've done this already in Week 1. ``` # Infer a schema by passing statistics to `infer_schema()` train_data = './data/train/data.csv' train_stats = tfdv.generate_statistics_from_csv(data_location=train_data) schema = tfdv.infer_schema(statistics=train_stats) schema_file = './schema.pbtxt' tfdv.write_schema_text(schema, schema_file) print("Dataset's Schema has been generated at:", schema_file) ``` ## Generate output artifact unit Now that the TFDV component has finished running and schema has been generated, you can create the artifact for the generated schema. ``` # Declare output artifact of type Schema_artifact schema_artifact = metadata_store_pb2.Artifact() schema_artifact.uri = schema_file schema_artifact.type_id = schema_artifact_type_id schema_artifact.properties['version'].int_value = 1 schema_artifact.properties['name'].string_value = 'Chicago Taxi Schema' # Submit output artifact to the Metadata Store schema_artifact_id = store.put_artifacts([schema_artifact])[0] print('Schema artifact:\n', schema_artifact) print('Schema artifact ID:', schema_artifact_id) ``` ## Register output event Analogous to the input event earlier, you also want to define an output event to record the output artifact of a particular execution unit. 
``` # Declare the output event output_event = metadata_store_pb2.Event() output_event.artifact_id = schema_artifact_id output_event.execution_id = dv_execution_id output_event.type = metadata_store_pb2.Event.DECLARED_OUTPUT # Submit output event to the Metadata Store store.put_events([output_event]) print('Output event:\n', output_event) ``` ## Update the execution unit As the TFDV component has finished running successfully, you need to update the `state` of the execution unit and record it again to the store. ``` # Mark the `state` as `COMPLETED` dv_execution.id = dv_execution_id dv_execution.properties['state'].string_value = 'COMPLETED' # Update execution unit in the Metadata Store store.put_executions([dv_execution]) print('Data validation execution:\n', dv_execution) ``` ## Setting up Context Types and Generating a Context Unit You can group the artifacts and execution units into a `Context`. First, you need to define a `ContextType` which defines the required context. It follows a similar format as artifact and event types. You can register this with the `put_context_type()` function. ``` # Create a ContextType expt_context_type = metadata_store_pb2.ContextType() expt_context_type.name = 'Experiment' expt_context_type.properties['note'] = metadata_store_pb2.STRING # Register context type to the Metadata Store expt_context_type_id = store.put_context_type(expt_context_type) ``` Similarly, you can create an instance of this context type and use the `put_contexts()` method to register to the store. 
``` # Generate the context expt_context = metadata_store_pb2.Context() expt_context.type_id = expt_context_type_id # Give the experiment a name expt_context.name = 'Demo' expt_context.properties['note'].string_value = 'Walkthrough of metadata' # Submit context to the Metadata Store expt_context_id = store.put_contexts([expt_context])[0] print('Experiment Context type:\n', expt_context_type) print('Experiment Context type ID: ', expt_context_type_id) print('Experiment Context:\n', expt_context) print('Experiment Context ID: ', expt_context_id) ``` ## Generate attribution and association relationships With the `Context` defined, you can now create its relationship with the artifact and executions you previously used. You will create the relationship between schema artifact unit and experiment context unit to form an `Attribution`. Similarly, you will create the relationship between data validation execution unit and experiment context unit to form an `Association`. These are registered with the `put_attributions_and_associations()` method. ``` # Generate the attribution expt_attribution = metadata_store_pb2.Attribution() expt_attribution.artifact_id = schema_artifact_id expt_attribution.context_id = expt_context_id # Generate the association expt_association = metadata_store_pb2.Association() expt_association.execution_id = dv_execution_id expt_association.context_id = expt_context_id # Submit attribution and association to the Metadata Store store.put_attributions_and_associations([expt_attribution], [expt_association]) print('Experiment Attribution:\n', expt_attribution) print('Experiment Association:\n', expt_association) ``` ## Retrieving Information from the Metadata Store You've now recorded the needed information to the metadata store. If we did this in a persistent database, you can track which artifacts and events are related to each other even without seeing the code used to generate it. 
See a sample run below where you investigate what dataset is used to generate the schema. (*It would be obvious which dataset is used in our simple demo because we only have two artifacts registered. Thus, assume that you have thousands of entries in the metadata store.*) ``` # Get artifact types store.get_artifact_types() # Get 1st element in the list of `Schema` artifacts. # You will investigate which dataset was used to generate it. schema_to_inv = store.get_artifacts_by_type('Schema')[0] # print output print(schema_to_inv) # Get events related to the schema id schema_events = store.get_events_by_artifact_ids([schema_to_inv.id]) print(schema_events) ``` You see that it is an output of an execution so you can look up the execution id to see related artifacts. ``` # Get events related to the output above execution_events = store.get_events_by_execution_ids([schema_events[0].execution_id]) print(execution_events) ``` You see the declared input of this execution so you can select that from the list and lookup the details of the artifact. ``` # Look up the artifact that is a declared input artifact_input = execution_events[0] store.get_artifacts_by_id([artifact_input.artifact_id]) ``` Great! Now you've fetched the dataset artifact that was used to generate the schema. You can approach this differently and we urge you to practice using the different methods of the [MetadataStore API](https://www.tensorflow.org/tfx/ml_metadata/api_docs/python/mlmd/MetadataStore) to get more familiar with interacting with the database. ### Wrap Up In this notebook, you got to practice using ML Metadata outside of TFX. This should help you understand its inner workings so you will know better how to query ML Metadata stores or even set it up for your own use cases. TFX leverages this library to keep records of pipeline runs and you will get to see more of that in the next labs. 
Next up, you will review how to work with schemas and in the next notebook, you will see how it can be implemented in a TFX pipeline.
github_jupyter
## Exercise: Pricing a European Call Option under Risk Neutrality #### John Stachurski Let's price a European option under the assumption of risk neutrality (for simplicity). Suppose that the current time is $t=0$ and the expiry date is $n$. We need to evaluate $$ P_0 = \beta^n \mathbb E_0 \max\{ S_n - K, 0 \} $$ given * the discount factor $\beta$ * the strike price $K$ * the stochastic process $\{S_t\}$ A common model for $\{S_t\}$ is $$ \ln \frac{S_{t+1}}{S_t} = \mu + \sigma \xi_{t+1} $$ where $\{ \xi_t \}$ is IID and standard normal. However, its predictions are in some ways counterfactual. For example, volatility is not stationary but rather changes over time. Here's an improved version: $$ \ln \frac{S_{t+1}}{S_t} = \mu + \sigma_t \xi_{t+1} $$ where $$ \sigma_t = \exp(h_t), \quad h_{t+1} = \rho h_t + \nu \eta_{t+1} $$ Compute the price of the option $P_0$ by Monte Carlo, averaging over realizations $S_n^1, \ldots, S_n^M$ of $S_n$ and appealing to the law of large numbers: $$ \mathbb E_0 \max\{ S_n - K, 0 \} \approx \frac{1}{M} \sum_{m=1}^M \max \{S_n^m - K, 0 \} $$ Use the following parameters: ``` β = 0.96 μ = 0.005 S0 = 10 h0 = 0 K = 100 n = 10 ρ = 0.5 ν = 0.01 M = 5_000_000 ``` **Suggestion**: Start without jitting your functions, as jitted functions are harder to debug. Chose a smaller value for `M` and try to get your code to run. Then think about jitting. The distribution of prices is heavy tailed, so the result has high variance even for large `M`. My best estimate is around $1,530. ### Solution ``` import numpy as np from numpy.random import randn from numba import jit, prange from quantecon import tic, toc ``` Here's a solution that's jitted but not parallelized. A parallelized solution is below. 
``` @jit(nopython=True) def compute_call_price(β=0.96, μ=0.005, S0=10, h0=0, K=100, n=10, ρ=0.5, ν=0.01, M=5_000_000): current_sum = 0.0 for m in range(M): s = np.log(S0) h = h0 for t in range(n): s = s + μ + np.exp(h) * randn() h = ρ * h + ν * randn() current_sum += np.maximum(np.exp(s) - K, 0) return β**n * current_sum / M tic() price = compute_call_price() toc() tic() price = compute_call_price() toc() price ``` Let's try to parallelize this task. ``` @jit(nopython=True, parallel=True) def compute_call_price_parallel(β=0.96, μ=0.005, S0=10, h0=0, K=100, n=10, ρ=0.5, ν=0.01, M=50_000_000): current_sum = 0.0 for m in prange(M): s = np.log(S0) h = h0 for t in range(n): s = s + μ + np.exp(h) * randn() h = ρ * h + ν * randn() current_sum += np.maximum(np.exp(s) - K, 0) return β**n * current_sum / M tic() price = compute_call_price_parallel() toc() tic() price = compute_call_price_parallel() toc() price ```
github_jupyter
# Weight Sampling Tutorial If you want to fine-tune one of the trained original SSD models on your own dataset, chances are that your dataset doesn't have the same number of classes as the trained model you're trying to fine-tune. This notebook explains a few options for how to deal with this situation. In particular, one solution is to sub-sample (or up-sample) the weight tensors of all the classification layers so that their shapes correspond to the number of classes in your dataset. This notebook explains how this is done. ## 0. Our example I'll use a concrete example to make the process clear, but of course the process explained here is the same for any dataset. Consider the following example. You have a dataset on road traffic objects. Let this dataset contain annotations for the following object classes of interest: `['car', 'truck', 'pedestrian', 'bicyclist', 'traffic_light', 'motorcycle', 'bus', 'stop_sign']` That is, your dataset contains annotations for 8 object classes. You would now like to train an SSD300 on this dataset. However, instead of going through all the trouble of training a new model from scratch, you would instead like to use the fully trained original SSD300 model that was trained on MS COCO and fine-tune it on your dataset. The problem is: The SSD300 that was trained on MS COCO predicts 80 different classes, but your dataset has only 8 classes. The weight tensors of the classification layers of the MS COCO model don't have the right shape for your model that is supposed to learn only 8 classes. Bummer. So what options do we have? ### Option 1: Just ignore the fact that we need only 8 classes The maybe not so obvious but totally obvious option is: We could just ignore the fact that the trained MS COCO model predicts 80 different classes, but we only want to fine-tune it on 8 classes. We could simply map the 8 classes in our annotated dataset to any 8 indices out of the 80 that the MS COCO model predicts. 
The class IDs in our dataset could be indices 1-8, they could be the indices `[0, 3, 8, 1, 2, 10, 4, 6, 12]`, or any other 8 out of the 80. Whatever we would choose them to be. The point is that we would be training only 8 out of every 80 neurons that predict the class for a given box and the other 72 would simply not be trained. Nothing would happen to them, because the gradient for them would always be zero, because these indices don't appear in our dataset. This would work, and it wouldn't even be a terrible option. Since only 8 out of the 80 classes would get trained, the model might get gradually worse at predicting the other 72 clases, but we don't care about them anyway, at least not right now. And if we ever realize that we now want to predict more than 8 different classes, our model would be expandable in that sense. Any new class we want to add could just get any one of the remaining free indices as its ID. We wouldn't need to change anything about the model, it would just be a matter of having the dataset annotated accordingly. Still, in this example we don't want to take this route. We don't want to carry around the computational overhead of having overly complex classifier layers, 90 percent of which we don't use anyway, but still their whole output needs to be computed in every forward pass. So what else could we do instead? ### Option 2: Just ignore those weights that are causing problems We could build a new SSD300 with 8 classes and load into it the weights of the MS COCO SSD300 for all layers except the classification layers. Would that work? Yes, that would work. The only conflict is with the weights of the classification layers, and we can avoid this conflict by simply ignoring them. While this solution would be easy, it has a significant downside: If we're not loading trained weights for the classification layers of our new SSD300 model, then they will be initialized randomly. 
We'd still benefit from the trained weights for all the other layers, but the classifier layers would need to be trained from scratch. Not the end of the world, but we like pre-trained stuff, because it saves us a lot of training time. So what else could we do? ### Option 3: Sub-sample the weights that are causing problems Instead of throwing the problematic weights away like in option 2, we could also sub-sample them. If the weight tensors of the classification layers of the MS COCO model don't have the right shape for our new model, we'll just **make** them have the right shape. This way we can still benefit from the pre-trained weights in those classification layers. Seems much better than option 2. The great thing in this example is: MS COCO happens to contain all of the eight classes that we care about. So when we sub-sample the weight tensors of the classification layers, we won't just do so randomly. Instead, we'll pick exactly those elements from the tensor that are responsible for the classification of the 8 classes that we care about. However, even if the classes in your dataset were entirely different from the classes in any of the fully trained models, it would still make a lot of sense to use the weights of the fully trained model. Any trained weights are always a better starting point for the training than random initialization, even if your model will be trained on entirely different object classes. And of course, in case you happen to have the opposite problem, where your dataset has **more** classes than the trained model you would like to fine-tune, then you can simply do the same thing in the opposite direction: Instead of sub-sampling the classification layer weights, you would then **up-sample** them. Works just the same way as what we'll be doing below. Let's get to it. ``` import h5py import numpy as np import shutil from misc_utils.tensor_sampling_utils import sample_tensors ``` ## 1. 
Load the trained weights file and make a copy First, we'll load the HDF5 file that contains the trained weights that we need (the source file). In our case this is "`VGG_coco_SSD_300x300_iter_400000.h5`" (download link available in the README of this repo), which are the weights of the original SSD300 model that was trained on MS COCO. Then, we'll make a copy of that weights file. That copy will be our output file (the destination file). ``` # TODO: Set the path for the source weights file you want to load. weights_source_path = 'models/VGG_coco_SSD_512x512_iter_360000.h5' # TODO: Set the path and name for the destination weights file # that you want to create. weights_destination_path = 'models/VGG_coco_SSD_512x512_iter_400000_subsampled_6_classes.h5' # Make a copy of the weights file. shutil.copy(weights_source_path, weights_destination_path) # Load both the source weights file and the copy we made. # We will load the original weights file in read-only mode so that we can't mess up anything. weights_source_file = h5py.File(weights_source_path, 'r') weights_destination_file = h5py.File(weights_destination_path) ``` ## 2. Figure out which weight tensors we need to sub-sample Next, we need to figure out exactly which weight tensors we need to sub-sample. As mentioned above, the weights for all layers except the classification layers are fine, we don't need to change anything about those. So which are the classification layers in SSD300? Their names are: ``` classifier_names = ['conv4_3_norm_mbox_conf', 'fc7_mbox_conf', 'conv6_2_mbox_conf', 'conv7_2_mbox_conf', 'conv8_2_mbox_conf', 'conv9_2_mbox_conf', 'conv10_2_mbox_conf'] ``` ## 3. Figure out which slices to pick The following section is optional. I'll look at one classification layer and explain what we want to do, just for your understanding. If you don't care about that, just skip ahead to the next section. 
We know which weight tensors we want to sub-sample, but we still need to decide which (or at least how many) elements of those tensors we want to keep. Let's take a look at the first of the classifier layers, "`conv4_3_norm_mbox_conf`". Its two weight tensors, the kernel and the bias, have the following shapes: ``` conv4_3_norm_mbox_conf_kernel = weights_source_file[classifier_names[0]][classifier_names[0]]['kernel:0'] conv4_3_norm_mbox_conf_bias = weights_source_file[classifier_names[0]][classifier_names[0]]['bias:0'] print("Shape of the '{}' weights:".format(classifier_names[0])) print() print("kernel:\t", conv4_3_norm_mbox_conf_kernel.shape) print("bias:\t", conv4_3_norm_mbox_conf_bias.shape) ``` So the last axis has 324 elements. Why is that? - MS COCO has 80 classes, but the model also has one 'backgroud' class, so that makes 81 classes effectively. - The 'conv4_3_norm_mbox_loc' layer predicts 4 boxes for each spatial position, so the 'conv4_3_norm_mbox_conf' layer has to predict one of the 81 classes for each of those 4 boxes. That's why the last axis has 4 * 81 = 324 elements. So how many elements do we want in the last axis for this layer? Let's do the same calculation as above: - Our dataset has 8 classes, but our model will also have a 'background' class, so that makes 9 classes effectively. - We need to predict one of those 9 classes for each of the four boxes at each spatial position. That makes 4 * 9 = 36 elements. Now we know that we want to keep 36 elements in the last axis and leave all other axes unchanged. But which 36 elements out of the original 324 elements do we want? Should we just pick them randomly? If the object classes in our dataset had absolutely nothing to do with the classes in MS COCO, then choosing those 36 elements randomly would be fine (and the next section covers this case, too). But in our particular example case, choosing these elements randomly would be a waste. 
Since MS COCO happens to contain exactly the 8 classes that we need, instead of sub-sampling randomly, we'll just take exactly those elements that were trained to predict our 8 classes. Here are the indices of the 9 classes in MS COCO that we are interested in: `[0, 1, 2, 3, 4, 6, 8, 10, 12]` The indices above represent the following classes in the MS COCO datasets: `['background', 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign']` How did I find out those indices? I just looked them up in the annotations of the MS COCO dataset. While these are the classes we want, we don't want them in this order. In our dataset, the classes happen to be in the following order as stated at the top of this notebook: `['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'traffic_light', 'motorcycle', 'bus', 'stop_sign']` For example, '`traffic_light`' is class ID 5 in our dataset but class ID 10 in the SSD300 MS COCO model. So the order in which I actually want to pick the 9 indices above is this: `[0, 3, 8, 1, 2, 10, 4, 6, 12]` So out of every 81 in the 324 elements, I want to pick the 9 elements above. This gives us the following 36 indices: ``` n_classes_source = 81 #classes_of_interest = [0, 3, 8, 1, 2, 10, 4, 6, 12] classes_of_interest = [0, 74,68,80,65,40,47] subsampling_indices = [] for i in range(int(324/n_classes_source)): indices = np.array(classes_of_interest) + i * n_classes_source subsampling_indices.append(indices) subsampling_indices = list(np.concatenate(subsampling_indices)) print(subsampling_indices) ``` These are the indices of the 36 elements that we want to pick from both the bias vector and from the last axis of the kernel tensor. This was the detailed example for the '`conv4_3_norm_mbox_conf`' layer. And of course we haven't actually sub-sampled the weights for this layer yet, we have only figured out which elements we want to keep. 
The piece of code in the next section will perform the sub-sampling for all the classifier layers. ## 4. Sub-sample the classifier weights The code in this section iterates over all the classifier layers of the source weights file and performs the following steps for each classifier layer: 1. Get the kernel and bias tensors from the source weights file. 2. Compute the sub-sampling indices for the last axis. The first three axes of the kernel remain unchanged. 3. Overwrite the corresponding kernel and bias tensors in the destination weights file with our newly created sub-sampled kernel and bias tensors. The second step does what was explained in the previous section. In case you want to **up-sample** the last axis rather than sub-sample it, simply set the `classes_of_interest` variable below to the length you want it to have. The added elements will be initialized either randomly or optionally with zeros. Check out the documentation of `sample_tensors()` for details. ``` # TODO: Set the number of classes in the source weights file. Note that this number must include # the background class, so for MS COCO's 80 classes, this must be 80 + 1 = 81. n_classes_source = 81 # TODO: Set the indices of the classes that you want to pick for the sub-sampled weight tensors. # In case you would like to just randomly sample a certain number of classes, you can just set # `classes_of_interest` to an integer instead of the list below. Either way, don't forget to # include the background class. That is, if you set an integer, and you want `n` positive classes, # then you must set `classes_of_interest = n + 1`. #classes_of_interest = [0, 3, 8, 1, 2, 10, 4, 6, 12] classes_of_interest = [0, 74,68,80,65,40,47] # classes_of_interest = 9 # Uncomment this in case you want to just randomly sub-sample the last axis instead of providing a list of indices. for name in classifier_names: # Get the trained weights for this layer from the source HDF5 weights file. 
kernel = weights_source_file[name][name]['kernel:0'].value bias = weights_source_file[name][name]['bias:0'].value # Get the shape of the kernel. We're interested in sub-sampling # the last dimension, 'o'. height, width, in_channels, out_channels = kernel.shape # Compute the indices of the elements we want to sub-sample. # Keep in mind that each classification predictor layer predicts multiple # bounding boxes for every spatial location, so we want to sub-sample # the relevant classes for each of these boxes. if isinstance(classes_of_interest, (list, tuple)): subsampling_indices = [] for i in range(int(out_channels/n_classes_source)): indices = np.array(classes_of_interest) + i * n_classes_source subsampling_indices.append(indices) subsampling_indices = list(np.concatenate(subsampling_indices)) elif isinstance(classes_of_interest, int): subsampling_indices = int(classes_of_interest * (out_channels/n_classes_source)) else: raise ValueError("`classes_of_interest` must be either an integer or a list/tuple.") # Sub-sample the kernel and bias. # The `sample_tensors()` function used below provides extensive # documentation, so don't hesitate to read it if you want to know # what exactly is going on here. new_kernel, new_bias = sample_tensors(weights_list=[kernel, bias], sampling_instructions=[height, width, in_channels, subsampling_indices], axes=[[3]], # The one bias dimension corresponds to the last kernel dimension. init=['gaussian', 'zeros'], mean=0.0, stddev=0.005) # Delete the old weights from the destination file. del weights_destination_file[name][name]['kernel:0'] del weights_destination_file[name][name]['bias:0'] # Create new datasets for the sub-sampled weights. weights_destination_file[name][name].create_dataset(name='kernel:0', data=new_kernel) weights_destination_file[name][name].create_dataset(name='bias:0', data=new_bias) # Make sure all data is written to our output file before this sub-routine exits. 
weights_destination_file.flush() ``` That's it, we're done. Let's just quickly inspect the shapes of the weights of the '`conv4_3_norm_mbox_conf`' layer in the destination weights file: ``` conv4_3_norm_mbox_conf_kernel = weights_destination_file[classifier_names[0]][classifier_names[0]]['kernel:0'] conv4_3_norm_mbox_conf_bias = weights_destination_file[classifier_names[0]][classifier_names[0]]['bias:0'] print("Shape of the '{}' weights:".format(classifier_names[0])) print() print("kernel:\t", conv4_3_norm_mbox_conf_kernel.shape) print("bias:\t", conv4_3_norm_mbox_conf_bias.shape) ``` Nice! Exactly what we wanted, 36 elements in the last axis. Now the weights are compatible with our new SSD300 model that predicts 8 positive classes. This is the end of the relevant part of this tutorial, but we can do one more thing and verify that the sub-sampled weights actually work. Let's do that in the next section. ## 5. Verify that our sub-sampled weights actually work In our example case above we sub-sampled the fully trained weights of the SSD300 model trained on MS COCO from 80 classes to just the 8 classes that we needed. We can now create a new SSD300 with 8 classes, load our sub-sampled weights into it, and see how the model performs on a few test images that contain objects for some of those 8 classes. Let's do it. 
``` from keras.optimizers import Adam from keras import backend as K from keras.models import load_model from models.keras_ssd300 import ssd_300 from models.keras_ssd512 import ssd_512 from keras_loss_function.keras_ssd_loss import SSDLoss from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes from keras_layers.keras_layer_DecodeDetections import DecodeDetections from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast from keras_layers.keras_layer_L2Normalization import L2Normalization from data_generator.object_detection_2d_data_generator import DataGenerator from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels from data_generator.object_detection_2d_patch_sampling_ops import RandomMaxCropFixedAR from data_generator.object_detection_2d_geometric_ops import Resize ``` ### 5.1. Set the parameters for the model. As always, set the parameters for the model. We're going to set the configuration for the SSD300 MS COCO model. ``` img_height = 480 # Height of the input images img_width = 640 # Width of the input images img_channels = 3 # Number of color channels of the input images subtract_mean = [123, 117, 104] # The per-channel mean of the images in the dataset swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we should set this to `True`, but weirdly the results are better without swapping. # TODO: Set the number of classes. n_classes = 6 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO scales = [0.04, 0.1, 0.26, 0.42, 0.58, 0.74, 0.9, 1.06] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets. # scales = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets. 
aspect_ratios = [[1.0, 2.0, 0.5], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5], [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters two_boxes_for_ar1 = True steps = [8, 16, 32, 64, 128, 256, 512] # The space between two adjacent anchor box center points for each predictor layer. offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer. clip_boxes = False # Whether or not you want to limit the anchor boxes to lie entirely within the image boundaries variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are scaled as in the original implementation normalize_coords = True ``` ### 5.2. Build the model Build the model and load our newly created, sub-sampled weights into it. ``` weights_destination_path = 'models/VGG_coco_SSD_512x512_iter_400000_subsampled_6_classes.h5' # 1: Build the Keras model K.clear_session() # Clear previous models from memory. model = ssd_512(image_size=(img_height, img_width, img_channels), n_classes=n_classes, mode='inference', l2_regularization=0.0005, scales=scales, aspect_ratios_per_layer=aspect_ratios, two_boxes_for_ar1=two_boxes_for_ar1, steps=steps, offsets=offsets, clip_boxes=clip_boxes, variances=variances, normalize_coords=normalize_coords, subtract_mean=subtract_mean, divide_by_stddev=None, swap_channels=swap_channels, confidence_thresh=0.5, iou_threshold=0.45, top_k=200, nms_max_output_size=400, return_predictor_sizes=False) print("Model built.") # 2: Load the sub-sampled weights into the model. # Load the weights that we've just created via sub-sampling. 
weights_path = weights_destination_path model.load_weights(weights_path, by_name=True) print("Weights file loaded:", weights_path) # 3: Instantiate an Adam optimizer and the SSD loss function and compile the model. adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0) model.compile(optimizer=adam, loss=ssd_loss.compute_loss) ``` ### 5.3. Load some images to test our model on We sub-sampled some of the road traffic categories from the trained SSD300 MS COCO weights, so let's try out our model on a few road traffic images. The Udacity road traffic dataset linked to in the `ssd7_training.ipynb` notebook lends itself to this task. Let's instantiate a `DataGenerator` and load the Udacity dataset. Everything here is preset already, but if you'd like to learn more about the data generator and its capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository. ``` dataset = DataGenerator() # TODO: Set the paths to your dataset here. images_path = '../../datasets/Udacity_Driving/driving_dataset_consolidated_small/' labels_path = '../../datasets/Udacity_Driving/driving_dataset_consolidated_small/labels.csv' dataset.parse_csv(images_dir=images_path, labels_filename=labels_path, input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation. include_classes='all', random_sample=False) print("Number of images in the dataset:", dataset.get_dataset_size()) ``` Make sure the batch generator generates images of size `(300, 300)`. We'll first randomly crop the largest possible patch with aspect ratio 1.0 and then resize to `(300, 300)`. 
``` convert_to_3_channels = ConvertTo3Channels() random_max_crop = RandomMaxCropFixedAR(patch_aspect_ratio=img_width/img_height) resize = Resize(height=img_height, width=img_width) generator = dataset.generate(batch_size=1, shuffle=True, transformations=[convert_to_3_channels, random_max_crop, resize], returns={'processed_images', 'processed_labels', 'filenames'}, keep_images_without_gt=False) test = DataGenerator() # TODO: Set the paths to the dataset here. Pascal_VOC_dataset_images_dir = '../datasets/ICUB_6/testsameimg' Pascal_VOC_dataset_annotations_dir = '../datasets/ICUB_6/testsameans' Pascal_VOC_dataset_image_set_filename = '../datasets/ICUB_6/testsame.txt' # The XML parser needs to know what object class names to look for and in which order to map them to integers. classes = ['background','book','cellphone','hairbrush','mouse','perfume','sunglasses'] test.parse_xml(images_dirs=[Pascal_VOC_dataset_images_dir], image_set_filenames=[Pascal_VOC_dataset_image_set_filename], annotations_dirs=[Pascal_VOC_dataset_annotations_dir], classes=classes, include_classes='all', exclude_truncated=False, exclude_difficult=False, ret=False) generator = test.generate(batch_size=4, shuffle=True, transformations=[], label_encoder=None, returns={'processed_images', 'processed_labels', 'filenames'}, keep_images_without_gt=False) # Generate samples batch_images, batch_labels, batch_filenames = next(generator) i = 0 # Which batch item to look at print("Image:", batch_filenames[i]) print() print("Ground truth boxes:\n") print(batch_labels[i]) ``` ### 5.4. Make predictions and visualize them ``` # Make a prediction y_pred = model.predict(batch_images) import numpy as np # Decode the raw prediction. 
i = 0 confidence_threshold = 0.5 y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])] np.set_printoptions(precision=2, suppress=True, linewidth=90) print("Predicted boxes:\n") print(' class conf xmin ymin xmax ymax') print(y_pred_thresh[0]) # Visualize the predictions. from matplotlib import pyplot as plt %matplotlib inline plt.figure(figsize=(20,12)) plt.imshow(batch_images[i]) current_axis = plt.gca() classes = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'traffic_light', 'motorcycle', 'bus', 'stop_sign'] # Just so we can print class names onto the image instead of IDs # Draw the predicted boxes in blue for box in y_pred_thresh[i]: class_id = box[0] confidence = box[1] xmin = box[2] ymin = box[3] xmax = box[4] ymax = box[5] label = '{}: {:.2f}'.format(classes[int(class_id)], confidence) current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='blue', fill=False, linewidth=2)) current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'blue', 'alpha':1.0}) # Draw the ground truth boxes in green (omit the label for more clarity) for box in batch_labels[i]: class_id = box[0] xmin = box[1] ymin = box[2] xmax = box[3] ymax = box[4] label = '{}'.format(classes[int(class_id)]) current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2)) #current_axis.text(box[1], box[3], label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0}) ``` Seems as if our sub-sampled weights were doing a good job, sweet. Now we can fine-tune this model on our dataset with 8 classes.
github_jupyter
## Build an MTH5 and Operate the Aurora Pipeline Outlines the process of making an MTH5 file, generating a processing config, and running the aurora processor ``` # Required imports for the program. from pathlib import Path import sys import pandas as pd from mth5.clients.make_mth5 import MakeMTH5 from mth5 import mth5, timeseries from mt_metadata.utils.mttime import get_now_utc, MTime from aurora.config.config_creator import ConfigCreator from aurora.pipelines.process_mth5 import process_mth5_run ``` Build an MTH5 file from information extracted by IRIS ``` # Set path so MTH5 file builds to current working directory. default_path = Path().cwd() # Initialize the Make MTH5 code. m = MakeMTH5(mth5_version='0.1.0') m.client = "IRIS" # Generate data frame of FDSN Network, Station, Location, Channel, Starttime, Endtime codes of interest ZUCAS04LQ1 = ['ZU', 'CAS04', '', 'LQE', '2020-06-02T19:00:00', '2020-07-13T19:00:00'] ZUCAS04LQ2 = ['ZU', 'CAS04', '', 'LQN', '2020-06-02T19:00:00', '2020-07-13T19:00:00'] ZUCAS04BF1 = ['ZU', 'CAS04', '', 'LFE', '2020-06-02T19:00:00', '2020-07-13T19:00:00'] ZUCAS04BF2 = ['ZU', 'CAS04', '', 'LFN', '2020-06-02T19:00:00', '2020-07-13T19:00:00'] ZUCAS04BF3 = ['ZU', 'CAS04', '', 'LFZ', '2020-06-02T19:00:00', '2020-07-13T19:00:00'] request_list = [ZUCAS04LQ1, ZUCAS04LQ2, ZUCAS04BF1, ZUCAS04BF2, ZUCAS04BF3] # Turn list into dataframe request_df = pd.DataFrame(request_list, columns=m.column_names)\ # Inspect the dataframe print(request_df) # Request the inventory information from IRIS inventory = m.get_inventory_from_df(request_df, data=False) # Inspect the inventory inventory ``` Builds an MTH5 file from the user defined database. 
Note: `interact=True` keeps the MTH5 open after it is done building ``` mth5_object = m.make_mth5_from_fdsnclient(request_df, interact=True) # mth5_object.open_mth5(h5_path, 'w') # h5_path = str(default_path)+'/ZU_CAS04.h5' #mth5_object.close_mth5() ``` Extract information from the open MTH5 Object ``` mth5_object # Collect information from the MTH5 Object and use it in the config files. mth5_filename = mth5_object.filename version = mth5_object.file_version # Edit and update the MTH5 metadata s = mth5_object.get_station("CAS04") s.metadata.location.declination.model = 'IGRF' s.write_metadata() # Get the available stations and runs from the MTH5 object ch_summary = mth5_object.channel_summary ch_summary available_runs = ch_summary.run.unique() sr = ch_summary.sample_rate.unique() if len(sr) != 1: print('Only one sample rate per run is available') available_stations = ch_summary.station.unique() sr[0] available_stations[0] mth5_object ``` Generate an Aurora Configuration file using MTH5 as an input ``` station_id = available_stations[0] run_id = available_runs[0] sample_rate = sr[0] config_maker = ConfigCreator() config_path = config_maker.create_run_config(station_id, run_id, mth5_filename, sample_rate) config_path ``` Run the Aurora Pipeline using the input MTH5 and Configuration File ``` show_plot='True' tf_cls = process_mth5_run( config_path, run_id, mth5_path=mth5_filename, units="MT", show_plot=False, z_file_path=None, return_collection=False, ) type(tf_cls) ``` Write the transfer functions generated by the Aurora pipeline ``` tf_cls.write_tf_file(fn="emtfxml_test.xml", file_type="emtfxml") tf_cls.write_tf_file(fn="emtfxml_test.xml", file_type="edi") tf_cls.write_tf_file(fn="emtfxml_test.xml", file_type="zmm") ```
github_jupyter
``` import wandb wandb.init(project="test") from wandb.integration.sb3 import WandbCallback ''' A large part of the code in this file was sourced from the rl-baselines-zoo library on GitHub. In particular, the library provides a great parameter optimization set for the PPO2 algorithm, as well as a great example implementation using optuna. Source: https://github.com/araffin/rl-baselines-zoo/blob/master/utils/hyperparams_opt.py ''' import optuna import pandas as pd import numpy as np from pathlib import Path import time import numpy as np import os import datetime import csv import argparse from functools import partial import gym from stable_baselines3 import PPO from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import DummyVecEnv, VecVideoRecorder, SubprocVecEnv #from wandb.integration.sb3 import WandbCallback #import wandb #env = Template_Gym() #from stable_baselines.gail import generate_expert_traj #from stable_baselines.gail import ExpertDataset timestamp = datetime.datetime.now().strftime('%y%m%d%H%M%S') config = {"policy_type": "MlpPolicy", "total_timesteps": 25000} experiment_name = f"PPO_{int(time.time())}" class Optimization(): def __init__(self): self.reward_strategy = 'sortino2' #self.input_data_file = 'data/coinbase_hourly.csv' self.params_db_file = 'sqlite:///params.db' # number of parallel jobs self.n_jobs = 1 # maximum number of trials for finding the best hyperparams self.n_trials = 100 #number of test episodes per trial self.n_test_episodes = 10 # number of evaluations for pruning per trial self.n_evaluations = 10 #self.df = pd.read_csv(input_data_file) #self.df = df.drop(['Symbol'], axis=1) #self.df = df.sort_values(['Date']) #self.df = add_indicators(df.reset_index()) #self.train_len = int(len(df) * 0.8) #self.df = df[:train_len] #self.validation_len = int(train_len * 0.8) #self.train_df = df[:validation_len] #self.test_df = df[validation_len:] def make_env(self, env_id, rank, seed=0, eval=False): """ 
Utility function for multiprocessed env. :param env_id: (str) the environment ID :param num_env: (int) the number of environment you wish to have in subprocesses :param seed: (int) the inital seed for RNG :param rank: (int) index of the subprocess """ def _init(): self.eval= eval env = gym.make("CartPole-v1") env.seed(seed + rank) return env #set_global_seeds(seed) return _init #def make_env(): #env = gym.make("CartPole-v1") #env = Monitor(env) # record stats such as returns #return env def optimize_envs(self, trial): return { 'reward_func': self.reward_strategy, 'forecast_len': int(trial.suggest_loguniform('forecast_len', 1, 200)), 'confidence_interval': trial.suggest_uniform('confidence_interval', 0.7, 0.99), } def optimize_ppo2(self,trial): return { 'n_steps': int(trial.suggest_loguniform('n_steps', 16, 2048)), 'gamma': trial.suggest_loguniform('gamma', 0.9, 0.9999), 'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1.), 'ent_coef': trial.suggest_loguniform('ent_coef', 1e-8, 1e-1), 'clip_range': trial.suggest_uniform('clip_range', 0.1, 0.4), 'n_epochs': int(trial.suggest_loguniform('n_epochs', 1, 48)), #'lam': trial.suggest_uniform('lam', 0.8, 1.) 
} def optimize_agent(self,trial): #self.env_params = self.optimize_envs(trial) env_id = "default" num_e = 1 # Number of processes to use env = gym.make("CartPole-v1") #self.train_env = DummyVecEnv([lambda: env()]) self.train_env = gym.make('CartPole-v1') #self.train_env = VecNormalize(self.train_env, norm_obs=True, norm_reward=True) #self.test_env = DummyVecEnv([lambda: env()]) self.test_env = env = gym.make('CartPole-v1') #self.test_env = VecNormalize(self.train_env, norm_obs=True, norm_reward=True) self.model_params = self.optimize_ppo2(trial) self.model = PPO(config["policy_type"], self.train_env, verbose=0, tensorboard_log=Path("./tensorboard2").name, **self.model_params) #self.model = PPO2(CustomPolicy_2, self.env, verbose=0, learning_rate=1e-4, nminibatches=1, tensorboard_log="./min1" ) last_reward = -np.finfo(np.float16).max #evaluation_interval = int(len(train_df) / self.n_evaluations) evaluation_interval = 3000 for eval_idx in range(self.n_evaluations): try: self.model.learn(evaluation_interval) except AssertionError: raise rewards = [] n_episodes, reward_sum = 0, 0.0 obs = self.test_env.reset() while n_episodes < self.n_test_episodes: action, _ = self.model.predict(obs) obs, reward, done, _ = self.test_env.step(action) reward_sum += reward if done: rewards.append(reward_sum) reward_sum = 0.0 n_episodes += 1 obs = self.test_env.reset() last_reward = np.mean(rewards) trial.report(-1 * last_reward, eval_idx) #if trial.should_prune(eval_idx): #raise optuna.structs.TrialPruned() return -1 * last_reward def optimize(self): study_name = 'ppo299_' + self.reward_strategy #study = optuna.create_study( #study_name=study_name, storage=self.params_db_file, load_if_exists=True) study = optuna.create_study( study_name=study_name, storage=self.params_db_file, load_if_exists=True) try: study.optimize(self.optimize_agent, n_trials=self.n_trials, n_jobs=self.n_jobs) except KeyboardInterrupt: pass print('Number of finished trials: ', len(study.trials)) print('Best trial:') 
trial = study.best_trial print('Value: ', trial.value) print('Params: ') for key, value in trial.params.items(): print(' {}: {}'.format(key, value)) return study.trials_dataframe() #if __name__ == '__main__': run = Optimization() run.optimize() import time import gym from stable_baselines3 import PPO from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import DummyVecEnv, VecVideoRecorder from wandb.integration.sb3 import WandbCallback import wandb config = {"policy_type": "MlpPolicy", "total_timesteps": 25000} experiment_name = f"PPO_{int(time.time())}" # Initialise a W&B run wandb.init( name=experiment_name, project="test", config=config, sync_tensorboard=True, # auto-upload sb3's tensorboard metrics monitor_gym=True, # auto-upload the videos of agents playing the game save_code=True, # optional ) def make_env(): env = gym.make("CartPole-v1") env = Monitor(env) # record stats such as returns return env env = DummyVecEnv([make_env]) env = VecVideoRecorder(env, "videos", record_video_trigger=lambda x: x % 2000 == 0, video_length=200) model = PPO(config["policy_type"], env, verbose=1, tensorboard_log=f"runs/{experiment_name}") # Add the WandbCallback model.learn( total_timesteps=config["total_timesteps"], callback=WandbCallback( gradient_save_freq=100, model_save_freq=1000, model_save_path=f"models/{experiment_name}", ), ) import gym from stable_baselines3 import PPO env = gym.make("CartPole-v1") model = PPO("MlpPolicy", env, verbose=1) model.learn(total_timesteps=10000) obs = env.reset() for i in range(1000): action, _states = model.predict(obs, deterministic=True) obs, reward, done, info = env.step(action) env.render() if done: obs = env.reset() env.close() """ Sampler for PPO hyperparams. 
:param trial: :return: """ batch_size = trial.suggest_categorical("batch_size", [8, 16, 32, 64, 128, 256, 512]) n_steps = trial.suggest_categorical("n_steps", [8, 16, 32, 64, 128, 256, 512, 1024, 2048]) gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999]) learning_rate = trial.suggest_loguniform("learning_rate", 1e-5, 1) lr_schedule = "constant" # Uncomment to enable learning rate schedule # lr_schedule = trial.suggest_categorical('lr_schedule', ['linear', 'constant']) ent_coef = trial.suggest_loguniform("ent_coef", 0.00000001, 0.1) clip_range = trial.suggest_categorical("clip_range", [0.1, 0.2, 0.3, 0.4]) n_epochs = trial.suggest_categorical("n_epochs", [1, 5, 10, 20]) gae_lambda = trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0]) max_grad_norm = trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5]) vf_coef = trial.suggest_uniform("vf_coef", 0, 1) net_arch = trial.suggest_categorical("net_arch", ["small", "medium"]) # Uncomment for gSDE (continuous actions) # log_std_init = trial.suggest_uniform("log_std_init", -4, 1) # Uncomment for gSDE (continuous action) # sde_sample_freq = trial.suggest_categorical("sde_sample_freq", [-1, 8, 16, 32, 64, 128, 256]) # Orthogonal initialization ortho_init = False # ortho_init = trial.suggest_categorical('ortho_init', [False, True]) # activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu']) activation_fn = trial.suggest_categorical("activation_fn", ["tanh", "relu"]) ''' A large part of the code in this file was sourced from the rl-baselines-zoo library on GitHub. In particular, the library provides a great parameter optimization set for the PPO2 algorithm, as well as a great example implementation using optuna. 
Source: https://github.com/araffin/rl-baselines-zoo/blob/master/utils/hyperparams_opt.py ''' import optuna import pandas as pd import numpy as np from pathlib import Path import time import gym import numpy as np import os import datetime import csv import argparse from functools import partial import time import gym from stable_baselines3 import PPO from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.vec_env import DummyVecEnv, VecVideoRecorder, SubprocVecEnv, VecNormalize #from stable_baselines3 import PPO #from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.utils import set_random_seed #from wandb.integration.sb3 import WandbCallback #import wandb #from stable_baselines.common.policies import MlpLnLstmPolicy, LstmPolicy, CnnPolicy, MlpPolicy #from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv,VecNormalize #from stable_baselines3.common import set_global_seeds #from stable_baselines import ACKTR, PPO2, SAC #from stable_baselines.deepq import DQN #from stable_baselines.deepq.policies import FeedForwardPolicy #from ..env import Template_Gym #from ..common import CustomPolicy, CustomPolicy_2, CustomLSTMPolicy, CustomPolicy_4, CustomPolicy_3, CustomPolicy_5 #from ..common import PairList, PairConfig, PairsConfigured #env = Template_Gym() #from stable_baselines.gail import generate_expert_traj #from stable_baselines.gail import ExpertDataset timestamp = datetime.datetime.now().strftime('%y%m%d%H%M%S') #pc = PairsConfigured() config = {"policy_type": "MlpPolicy", "total_timesteps": 25000} experiment_name = f"PPO_{int(time.time())}" class Optimization(): def __init__(self, config): self.reward_strategy = 'Name it' #self.input_data_file = 'data/coinbase_hourly.csv' self.params_db_file = 'sqlite:///params.db' # number of parallel jobs self.n_jobs = 1 # maximum number of trials for finding the best hyperparams 
self.n_trials = 100 #number of test episodes per trial self.n_test_episodes = 10 # number of evaluations for pruning per trial self.n_evaluations = 10 self.config = config #self.df = pd.read_csv(input_data_file) #self.df = df.drop(['Symbol'], axis=1) #self.df = df.sort_values(['Date']) #self.df = add_indicators(df.reset_index()) #self.train_len = int(len(df) * 0.8) #self.df = df[:train_len] #self.validation_len = int(train_len * 0.8) #self.train_df = df[:validation_len] #self.test_df = df[validation_len:] #def make_env(self, env_id, rank, seed=0, eval=False): """ Utility function for multiprocessed env. :param env_id: (str) the environment ID :param num_env: (int) the number of environment you wish to have in subprocesses :param seed: (int) the inital seed for RNG :param rank: (int) index of the subprocess """ #def _init(): #self.config = config #self.eval= eval #env = gym.make(config["env_name"]) #env = Monitor(env) #env = Template_Gym(config=self.config, eval=self.eval) #env.seed(seed + rank) #return env #set_global_seeds(seed) #return _init #def make_env(env_id, rank, seed=0): """ Utility function for multiprocessed env. 
:param env_id: (str) the environment ID :param num_env: (int) the number of environments you wish to have in subprocesses :param seed: (int) the inital seed for RNG :param rank: (int) index of the subprocess """ #def _init(): #env = gym.make(env_id) #env.seed(seed + rank) #return env #set_random_seed(seed) #return _init def make_env(): env = gym.make(config["env_name"]) env = Monitor(env) # record stats such as returns return env # Categorical parameter #optimizer = trial.suggest_categorical('optimizer', ['MomentumSGD', 'Adam']) # Int parameter #num_layers = trial.suggest_int('num_layers', 1, 3) # Uniform parameter #dropout_rate = trial.suggest_uniform('dropout_rate', 0.0, 1.0) # Loguniform parameter #learning_rate = trial.suggest_loguniform('learning_rate', 1e-5, 1e-2) # Discrete-uniform parameter #drop_path_rate = trial.suggest_discrete_uniform('drop_path_rate', 0.0, 1.0, 0.1) def optimize_envs(self, trial): return { 'reward_func': self.reward_strategy, 'forecast_len': int(trial.suggest_loguniform('forecast_len', 1, 200)), 'confidence_interval': trial.suggest_uniform('confidence_interval', 0.7, 0.99), } def optimize_config(self, trial): return { 'sl': trial.suggest_loguniform('sl', 1.0, 10.0), 'tp': trial.suggest_loguniform('tp', 1.0 ,10.0) } def optimize_ppo2(self,trial): return { #'n_steps': int(trial.suggest_int('n_steps', 16, 2048)), #'gamma': trial.suggest_loguniform('gamma', 0.9, 0.9999), #'learning_rate': trial.suggest_loguniform('learning_rate', 1e-5, 1.), #'ent_coef': trial.suggest_loguniform('ent_coef', 1e-8, 1e-1), #'cliprange': trial.suggest_uniform('cliprange', 0.1, 0.4), #'noptepochs': int(trial.suggest_int('noptepochs', 1, 48)), #'lam': trial.suggest_uniform('lam', 0.8, 1.) 
'batch_size': trial.suggest_categorical("batch_size", [8, 16, 32, 64, 128, 256, 512]), 'n_steps': int(trial.suggest_categorical("n_steps", [8, 16, 32, 64, 128, 256, 512, 1024, 2048])), 'gamma': trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999]), 'learning_rate': trial.suggest_loguniform("learning_rate", 1e-5, 1), #'lr_schedule' = "constant" # Uncomment to enable learning rate schedule # lr_schedule = trial.suggest_categorical('lr_schedule', ['linear', 'constant']) 'ent_coef': trial.suggest_loguniform("ent_coef", 0.00000001, 0.1), 'clip_range': trial.suggest_categorical("clip_range", [0.1, 0.2, 0.3, 0.4]), 'n_epochs': trial.suggest_categorical("n_epochs", [1, 5, 10, 20]), 'gae_lambda': trial.suggest_categorical("gae_lambda", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0]), 'max_grad_norm': trial.suggest_categorical("max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5]), 'vf_coef': trial.suggest_uniform("vf_coef", 0, 1) #'net_arch' = trial.suggest_categorical("net_arch", ["small", "medium"]) # Uncomment for gSDE (continuous actions) # log_std_init = trial.suggest_uniform("log_std_init", -4, 1) # Uncomment for gSDE (continuous action) # sde_sample_freq = trial.suggest_categorical("sde_sample_freq", [-1, 8, 16, 32, 64, 128, 256]) # Orthogonal initialization #ortho_init = False # ortho_init = trial.suggest_categorical('ortho_init', [False, True]) # activation_fn = trial.suggest_categorical('activation_fn', ['tanh', 'relu', 'elu', 'leaky_relu']) #'activation_fn': trial.suggest_categorical("activation_fn", ["tanh", "relu"]) } def optimize_lstm(self, trial): return { 'lstm': trial.suggest_categorical('optimizer', ['lstm', 'mlp']) } def ob_types(self, trial): return { 'lstm': trial.suggest_categorical('optimizer', ['lstm', 'mlp']) } def optimize_agent(self,trial): run = wandb.init( project="sb3", config=config, sync_tensorboard=True, # auto-upload sb3's tensorboard metrics monitor_gym=True, # auto-upload the videos of agents playing the game 
save_code=True, # optional ) #self.env_params = self.optimize_envs(trial) env_id = "default"+str() num_e = self.n_jobs # Number of processes to use #self.config_param = self.optimize_config(trial) #self.config.sl = self.config_param['sl'] #self.config.sl = self.config_param['tp'] #self.model_type = self.optimize_lstm(trial) #self.model_type = self.model_type['lstm'] #self.model_type = "mlp" #if self.model_type == 'mlp': #self.policy = CustomPolicy_5 #else: #self.policy = MlpPolicy #self.train_env = SubprocVecEnv([self.make_env(env_id+str('train'), i) for i in range(num_e)]) #SubprocVecEnv([make_env(env_id, i) for i in range(num_cpu)]) #self.train_env = SubprocVecEnv([self.make_env(env_id, i, eval=False) for i in range(num_e)]) #self.train_env = VecNormalize(self.train_env, norm_obs=True, norm_reward=True) #self.test_env = SubprocVecEnv([self.make_env(env_id+str("test"), i) for i in range(num_e)]) #self.test_env = SubprocVecEnv([self.make_env(env_id, i, eval=True) for i in range(num_e)]) #self.test_env = VecNormalize(self.test_env, norm_obs=True, norm_reward=True) env = gym.make("CartPole-v1") self.train_env = DummyVecEnv([lambda: env]) self.train_env = VecVideoRecorder(self.train_env, "videos", record_video_trigger=lambda x: x % 2000 == 0, video_length=200) #self.train_env = DummyVecEnv([env]) #self.train_env = VecNormalize(self.train_env, norm_obs=True, norm_reward=True) self.test_env = DummyVecEnv([lambda: env]) self.test_env = VecVideoRecorder(self.test_env, "videos", record_video_trigger=lambda x: x % 2000 == 0, video_length=200) #self.test_env = DummyVecEnv([env]) try: self.test_env.load_running_average("saves") self.train_env.load_running_average("saves") except: print('cant load') self.model_params = self.optimize_ppo2(trial) self.model = PPO(config["policy_type"], self.train_env, verbose=1, tensorboard_log=f"runs", **self.model_params ) #self.model = PPO2(CustomPolicy_2, self.env, verbose=0, learning_rate=1e-4, nminibatches=1, tensorboard_log="./min1" ) 
last_reward = -np.finfo(np.float16).max #evaluation_interval = int(len(train_df) / self.n_evaluations) evaluation_interval = 3500 for eval_idx in range(self.n_evaluations): try: #self.model.learn(evaluation_interval) self.model.learn( total_timesteps=evaluation_interval, callback=WandbCallback(gradient_save_freq=100, model_save_path=f"models/{run.id}", verbose=2, ), ) #self.test_env.save_running_average("saves") #self.train_env.save_running_average("saves") except: print('did not work') rewards = [] n_episodes, reward_sum = 0, 0.0 print('Eval') obs = self.test_env.reset() #state = None #done = [False for _ in range(self.env.num_envs)] while n_episodes < self.n_test_episodes: action, _ = self.model.predict(obs, deterministic=True) obs, reward, done, _ = self.test_env.step(action) reward_sum += reward if done: rewards.append(reward_sum) reward_sum = 0.0 n_episodes += 1 obs = self.test_env.reset() last_reward = np.mean(rewards) trial.report(-1 * last_reward, eval_idx) if trial.should_prune(): raise optuna.structs.TrialPruned() run.finish() return -1 * last_reward def optimize(self, config): self.config = config study_name = 'ppo2_single_ready' study_name = 'ppo2_single_ready_nosltp' study_name = 'ppo2_single_ready_nosltp_all_yeah' study_name = 'ppo2_eur_gbp_op' study_name = 'ppo2_gbp_chf_op' study_name = 'ppo2_gbp_chf_h1_new1' study_name = 'ppo2_gbp_chf_h4_r_new11' study_name = 'ppo2_gbp_chf_h4_r_withvolfixed' study_name = 'ppo2_gbp_chf_h4_r_withvolclosefix212' study_name = 'ppo2_gbp_chf_h4_loged_sortinonew' study_name = 'AUD_CHF_4H_SELL_C5_NEW' study_name = 'wandb' study = optuna.create_study( study_name=study_name, storage=self.params_db_file, load_if_exists=True) try: study.optimize(self.optimize_agent, n_trials=self.n_trials, n_jobs=self.n_jobs) except KeyboardInterrupt: pass print('Number of finished trials: ', len(study.trials)) print('Best trial:') trial = study.best_trial print(trial.number) print('Value: ', trial.value) print('Params: ') for key, value in 
trial.params.items(): print(' {}: {}'.format(key, value)) return study.trials_dataframe() #if __name__ == '__main__': #optimize() run = Optimization(config) run.optimize(config) ```
github_jupyter
Objective ------------------------ Try out different hypothesis to investigate the effect of lockdown measures on mobility - Assume that mobility is affected by weather, lockdown and miscellanous - Consider misc. info to be one such as week info (if it is a holisday week etc...) - Assume mobility follows a weekly pattern (people tend to spend less time in parks Mo-Fr for example). Exploit assumptions about human routines here - Consider every day independent of one another Methodology ---------------------------------- Consider - Derive features for weather (initially consider simply the medan temperature) - Lockdown index (some number) - Mobility value - is_weekend # Data Sources In order to run the cells the data has to be downloaded manually from these sources. Special thanks to the following sources for providing an open source license to access the data. * Apple mobility data: https://covid19.apple.com/mobility * Oxford stringency: https://github.com/OxCGRT/covid-policy-tracker * Weather forecast from Yr, delivered by the Norwegian Meteorological Institute and NRK: https://api.met.no/weatherapi/locationforecast/2.0/ * Historical weather data from https://mesonet.agron.iastate.edu/ASOS/ ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import datetime from ipywidgets import Dropdown,IntSlider from IPython.display import display import os %matplotlib inline from functools import reduce try: import graphviz except: !pip install graphviz import graphviz try: import pydotplus except: !pip install pydotplus from IPython.display import display import networkx as nx try: import pydot except: !pip install pydot try: from dowhy import CausalModel except: #!pip install sympy !pip install -I dowhy from dowhy import CausalModel ``` Hypothesis I ------------------ Consider daily data for Berlin Weather: historical air temperature Mobility: Apple (Transit) Stringency: OXCGRT ``` from project_lib import Project project = Project.access() 
Oxford_Stringency_Index_credentials = project.get_connected_data(name="Oxford Stringency Index") import dsx_core_utils, os, io import pandas as pd from sqlalchemy import create_engine import sqlalchemy sqla_url= "db2+ibm_db://" + Oxford_Stringency_Index_credentials['username']+ ':' + Oxford_Stringency_Index_credentials['password'] + "@"+ Oxford_Stringency_Index_credentials['host'] + ":50001/BLUDB;Security=ssl;" #sqlalchemy engine = create_engine(sqla_url, pool_size=10, max_overflow=20) conn = engine.connect() # @hidden_cell # The following code contains the credentials for a connection in your Project. # You might want to remove those credentials before you share your notebook. from project_lib import Project project = Project.access() Apple_transit_mobility_credentials = project.get_connected_data(name="Apple Transit Mobility") apple_sqla_url= "db2+ibm_db://" + Apple_transit_mobility_credentials['username']+ ':' + Apple_transit_mobility_credentials['password'] + "@"+ Apple_transit_mobility_credentials['host'] + ":50001/BLUDB;Security=ssl;" #sqlalchemy apple_engine = create_engine(apple_sqla_url, pool_size=10, max_overflow=20) apple_conn = apple_engine.connect() app_mob_df = pd.read_sql_table(Apple_transit_mobility_credentials['datapath'].split("/")[-1].lower(), apple_conn,index_col=['Timestamp']) be_app_trans_df = app_mob_df[app_mob_df.region=='Berlin'] be_app_trans_df.drop(columns=['region'],inplace=True) ox_df = pd.read_sql_table("oxford_stringency_index", conn) #ox_df.rename({'datetime_date':'date'},axis=1,inplace=True) # Stringency Germany #ox_df = pd.read_csv("/project_data/data_asset/sun/oxcgrt/OxCGRT_latest.csv") ox_df["date"] = pd.to_datetime(ox_df["date"],format="%Y%m%d") be_ox_df = ox_df[ox_df.countrycode=="DEU"] be_ox_df.index= be_ox_df['date'] be_ox_df = be_ox_df[['stringencyindex']] be_ox_df.rename({'stringencyindex':'lockdown'},axis=1,inplace=True) # Max temperature be_weather_df = 
pd.read_csv("/project_data/data_asset/mercury/weather/berlin_historical_weather.csv",index_col=[0]) be_weather_df.index = pd.to_datetime(be_weather_df.index) dfs = [be_ox_df,be_app_trans_df,be_weather_df] df_final = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs) df_final['is_weekend'] = np.where((df_final.index.weekday == 5)|(df_final.index.weekday == 6),1,0) #df_final.rename({'stringencyindex':'lockdown'},axis=1,inplace=True) #df_final.to_csv('/project_data/data_asset/mercury/germany_daily_asset_with_other_weather_params.csv') df_final.head() fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8)) axs[0].plot(df_final['mobility']) axs[1].plot(df_final['lockdown']) axs[2].plot(df_final['air_temperature']) ``` Why do I think day information is good? Looking at the graph above, it suggests that there is a strong periodic component in the mobility info. Let me plot the Power Spectral Density and check if there is any kind of periodicity in the data. ``` plt.figure(figsize=(16,8)) plt.stem(np.abs(np.fft.fft(df_final[df_final.index<=pd.to_datetime('2020-03-15')]['mobility'].values-np.mean(df_final[df_final.index<=pd.to_datetime('2020-03-15')]['mobility'].values)))) ``` Let me consider week of the day as a feature for Causal Inference. Add it as a column in the datasource. 
``` df_final.dropna() h1_causal_graph = nx.DiGraph() h1_causal_graph.add_edge('is_weekend','mobility') h1_causal_graph.add_edge('lockdown','mobility') h1_causal_graph.add_edge('air_temperature','lockdown') h1_causal_graph.add_edge('air_temperature','mobility') graph_filename_h1='causal_mobility_weather_h1.dot' nx.drawing.nx_pydot.write_dot(h1_causal_graph,graph_filename_h1) with open(graph_filename_h1) as f: dot_graph = f.read() graphviz.Source(dot_graph) h1_model = CausalModel(data=df_final.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_h1,proceed_when_unidentifiable=True) print(h1_model) h1_estimand = h1_model.identify_effect() print(h1_estimand) h1_estimate = h1_model.estimate_effect(h1_estimand,method_name='backdoor.linear_regression',test_significance=True) print(h1_estimate) ``` Validate the causal effect estimate ``` h1_ref1 = h1_model.refute_estimate(estimand=h1_estimand, estimate=h1_estimate,method_name='placebo_treatment_refuter') print(h1_ref1) h1_ref2 = h1_model.refute_estimate(estimand=h1_estimand, estimate=h1_estimate,method_name='random_common_cause') print(h1_ref2) ``` Hypothesis II ------------------ Using google mobility instead of Apple transit mobility Consider daily data for Berlin Weather: historical air temperature Mobility: Google mobility data - transit station Stringency: OXCGRT data ``` # @hidden_cell # The following code contains the credentials for a connection in your Project. # You might want to remove those credentials before you share your notebook. 
Google_mobility_credentials = project.get_connected_data(name="Google mobility") Google_mobility_df = pd.read_sql_table(Google_mobility_credentials['datapath'].split("/")[-1].lower(),conn) be_google_mobility_df = Google_mobility_df[Google_mobility_df.sub_region_1=="Berlin"][['transit_stations_percent_change_from_baseline']] be_google_mobility_df.index = pd.to_datetime(Google_mobility_df[Google_mobility_df.sub_region_1=="Berlin"]['date']) dfs2 = [be_ox_df,be_google_mobility_df,be_weather_df] df_final2 = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs2) df_final2.rename({'transit_stations_percent_change_from_baseline':'mobility','StringencyIndex':'lockdown'},axis=1,inplace=True) df_final2['is_weekend'] = np.where((df_final2.index.weekday == 5)|(df_final2.index.weekday == 6),1,0) fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8)) axs[0].plot(df_final2['mobility']) axs[1].plot(df_final2['lockdown']) axs[2].plot(df_final2['air_temperature']) h2_causal_graph = nx.DiGraph() h2_causal_graph.add_edge('is_weekend','mobility') h2_causal_graph.add_edge('lockdown','mobility') h2_causal_graph.add_edge('air_temperature','lockdown') h2_causal_graph.add_edge('air_temperature','mobility') graph_filename_h2='causal_mobility_weather_h2.dot' nx.drawing.nx_pydot.write_dot(h2_causal_graph,graph_filename_h2) with open(graph_filename_h2) as f: dot_graph = f.read() graphviz.Source(dot_graph) h2_model = CausalModel(data=df_final2.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_h2,proceed_when_unidentifiable=True) print(h2_model) h2_estimand = h2_model.identify_effect() print(h2_estimand) h2_estimate = h2_model.estimate_effect(h2_estimand,method_name='backdoor.linear_regression',test_significance=True) print(h2_estimate) h2_ref1 = h2_model.refute_estimate(estimand=h2_estimand, estimate=h2_estimate,method_name='placebo_treatment_refuter') print(h2_ref1) h2_ref2 = 
h2_model.refute_estimate(estimand=h2_estimand, estimate=h2_estimate,method_name='random_common_cause') print(h2_ref2) ``` **Remark** As Google data is available only from mid Feb whereas Apple mobility data is available since mid Jan. So, we use Apple mobility data Hypothesis III ------------------ Consider daily data for Berlin Weather: historical air temperature Mobility: Apple (Transit) Stringency: OXCGRT Clustering data ``` # @hidden_cell # The following code contains the credentials for a connection in your Project. # You might want to remove those credentials before you share your notebook. from project_lib import Project project = Project.access() Emergent_DB2_Warehouse_credentials = project.get_connection(name="db2 Warehouse ealuser") import dsx_core_utils, os, io import pandas as pd from sqlalchemy import create_engine import sqlalchemy sqla_url= "db2+ibm_db://" + Emergent_DB2_Warehouse_credentials['username']+ ':' + Emergent_DB2_Warehouse_credentials['password'] + "@"+ Emergent_DB2_Warehouse_credentials['host'] + ":50001/BLUDB;Security=ssl;" #sqlalchemy engine = create_engine(sqla_url, pool_size=10, max_overflow=20) stringency_clustering_df = pd.read_sql_query('SELECT * FROM "EALUSER"."STRINGENCY_INDEX_CLUSTERING"',engine) be_stringency_clustering_df = stringency_clustering_df[stringency_clustering_df.country=="Germany"] be_stringency_clustering_df.index = pd.to_datetime(be_stringency_clustering_df['state_date']) be_stringency_clustering_df = be_stringency_clustering_df.rename({'state_value':'lockdown'},axis=1)[['lockdown']] dfs3 = [be_stringency_clustering_df,be_app_trans_df,be_weather_df] df_final3 = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs3) df_final3.rename({'change':'mobility'},axis=1,inplace=True) df_final3['is_weekend'] = np.where((df_final3.index.weekday == 5)|(df_final3.index.weekday == 6),1,0) 
#df_final.to_csv('/project_data/data_asset/mercury/germany_daily_asset_with_other_weather_params.csv') fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8)) axs[0].plot(df_final3['mobility']) axs[1].plot(df_final3['lockdown']) axs[2].plot(df_final3['air_temperature']) h3_causal_graph = nx.DiGraph() h3_causal_graph.add_edge('is_weekend','mobility') h3_causal_graph.add_edge('lockdown','mobility') h3_causal_graph.add_edge('air_temperature','lockdown') h3_causal_graph.add_edge('air_temperature','mobility') graph_filename_h3='causal_mobility_weather_h3.dot' nx.drawing.nx_pydot.write_dot(h3_causal_graph,graph_filename_h3) with open(graph_filename_h3) as f: dot_graph = f.read() graphviz.Source(dot_graph) h3_model = CausalModel(data=df_final3.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_h3,proceed_when_unidentifiable=True) print(h3_model) h3_estimand = h3_model.identify_effect() print(h3_estimand) h3_estimate = h3_model.estimate_effect(h3_estimand,method_name='backdoor.linear_regression',test_significance=True) print(h3_estimate) h3_ref1 = h3_model.refute_estimate(estimand=h3_estimand, estimate=h3_estimate,method_name='placebo_treatment_refuter') print(h3_ref1) h3_ref2 = h3_model.refute_estimate(estimand=h3_estimand, estimate=h3_estimate,method_name='random_common_cause') print(h3_ref2) ``` **Remark** The Causal estimate has a really low p value when we use the stringency clustering data. 
So, we can also replace the raw Oxford stringency data with the stringency clustering data Hypothesis IV ------------------ Consider daily data for Berlin Weather: historical air temperature Mobility: Waze mobility data - Source: https://raw.githubusercontent.com/ActiveConclusion/COVID19_mobility/master/waze_reports/Waze_City-Level_Data.csv Stringency: OXCGRT data ``` waze_df = pd.read_csv("https://raw.githubusercontent.com/ActiveConclusion/COVID19_mobility/master/waze_reports/Waze_City-Level_Data.csv") waze_df['Date'] = pd.to_datetime(waze_df['Date']) be_waze_df = waze_df[waze_df.City=="Berlin"] be_waze_df.index = be_waze_df['Date'] be_waze_df = be_waze_df[['% Change In Waze Driven Miles/KMs']] be_waze_df.columns = ['mobility'] dfs4 = [be_ox_df,be_waze_df,be_weather_df] df_final4 = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs4) df_final4['is_weekend'] = np.where((df_final4.index.weekday == 5)|(df_final4.index.weekday == 6),1,0) #df_final4.rename({'StringencyIndex':'lockdown'},axis=1,inplace=True) df_final4 fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8)) axs[0].plot(df_final4['mobility']) axs[1].plot(df_final4['lockdown']) axs[2].plot(df_final4['air_temperature']) h4_causal_graph = nx.DiGraph() h4_causal_graph.add_edge('is_weekend','mobility') h4_causal_graph.add_edge('lockdown','mobility') h4_causal_graph.add_edge('air_temperature','lockdown') h4_causal_graph.add_edge('air_temperature','mobility') graph_filename_h4='causal_mobility_weather_h4.dot' nx.drawing.nx_pydot.write_dot(h4_causal_graph,graph_filename_h4) with open(graph_filename_h4) as f: dot_graph = f.read() graphviz.Source(dot_graph) h4_model = CausalModel(data=df_final4.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_h4,proceed_when_unidentifiable=True) print(h4_model) h4_estimand = h4_model.identify_effect() print(h4_estimand) h4_estimate = 
h4_model.estimate_effect(h4_estimand,method_name='backdoor.linear_regression',test_significance=True) print(h4_estimate) h4_ref1 = h4_model.refute_estimate(estimand=h4_estimand, estimate=h4_estimate,method_name='placebo_treatment_refuter') print(h4_ref1) h4_ref2 = h4_model.refute_estimate(estimand=h4_estimand, estimate=h4_estimate,method_name='random_common_cause') print(h4_ref2) ``` **Comments** As the data corresponds to only driving data, the plot shows that it is not really affected by the lockdown measures. Moreover, the driving mobility data is available only from 01.03.2020 Hypothesis V ------------------ Consider daily data for other cities/country such as London, New york and Singapore Weather: historical air temperature Mobility: Apple mobility (transit) Stringency: OXCGRT data 1. London - EGLL, GBR 2. New York - NYC, USA 3. Singapore - WSAP, SGP ``` app_df = pd.read_csv("/project_data/data_asset/sun/apple_mobility/applemobilitytrends-2020-10-14.csv") def region_specific_data(mobility_region,weather_station,stringency_country_code): cs_app_trans_df = app_df[(app_df.region==mobility_region)& (app_df.transportation_type=="transit")].drop(['geo_type','region','transportation_type', 'alternative_name','sub-region','country'],axis=1).transpose() cs_app_trans_df.columns= ['mobility'] # Stringency Germany if stringency_country_code == "GBR": # Consider only England cs_ox_df = ox_df[ox_df.regionname=="England"] cs_ox_df.index= cs_ox_df['date'] cs_ox_df = cs_ox_df[['stringencyindex']] elif stringency_country_code == "USA": # Consider only New York cs_ox_df = ox_df[ox_df.regionname=="New York"] cs_ox_df.index= cs_ox_df['date'] cs_ox_df = cs_ox_df[['stringencyindex']] else: cs_ox_df = ox_df[ox_df.countrycode==stringency_country_code] cs_ox_df.index= cs_ox_df['date'] cs_ox_df = cs_ox_df[['stringencyindex']] # Max temperature historical_url = 
"https://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?station={}&data=tmpc&year1=2020&month1=1&day1=1&year2=2020&month2=10&day2=28&tz=Etc%2FUTC&format=onlycomma&latlon=no&missing=M&trace=T&direct=no&report_type=1&report_type=2".format(weather_station) hist_weather_df = pd.read_csv(historical_url) # Replace missing and trace as na hist_weather_df.replace("M",np.nan,inplace=True) hist_weather_df.replace("M",np.nan,inplace=True) #Convert to float hist_weather_df['tmpc'] = hist_weather_df['tmpc'].astype(np.float64) hist_weather_df['valid'] = pd.to_datetime(hist_weather_df['valid']) hist_weather_df.rename({'valid':'time','tmpc':'air_temperature'},axis=1, inplace=True) hist_weather_df.index = hist_weather_df['time'] hist_weather_df = hist_weather_df.resample("1D").median() dfs = [cs_ox_df,cs_app_trans_df,hist_weather_df] df_final = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs) df_final.rename({'stringencyindex':'lockdown'},axis=1,inplace=True) df_final['is_weekend'] = np.where((df_final.index.weekday == 5)|(df_final.index.weekday == 6),1,0) #return df_final fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8)) axs[0].plot(df_final['mobility']) axs[1].plot(df_final['lockdown']) axs[2].plot(df_final['air_temperature']) fig.suptitle(mobility_region) plt.show() causal_graph = nx.DiGraph() causal_graph.add_edge('is_weekend','mobility') causal_graph.add_edge('lockdown','mobility') causal_graph.add_edge('air_temperature','lockdown') causal_graph.add_edge('air_temperature','mobility') graph_filename_='causal_mobility_weather_.dot' nx.drawing.nx_pydot.write_dot(causal_graph,graph_filename_) with open(graph_filename_) as f: dot_graph = f.read() graphviz.Source(dot_graph) _model = CausalModel(data=df_final.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_,proceed_when_unidentifiable=True) print(_model) _estimand = _model.identify_effect() print(_estimand) _estimate = 
_model.estimate_effect(_estimand,method_name='backdoor.linear_regression',test_significance=True) print(_estimate) _ref1 = _model.refute_estimate(estimand=_estimand, estimate=_estimate,method_name='placebo_treatment_refuter') print(_ref1) _ref2 = _model.refute_estimate(estimand=_estimand, estimate=_estimate,method_name='random_common_cause') print(_ref2) return 1 region_specific_data('London','EGLL', 'GBR') region_specific_data('New York','NYC', 'USA') region_specific_data('Singapore','WSAP', 'SGP') ``` **Comments** * For all three cities the estimator parameters given by the dowhy model are the same: "mobility ~ lockdown+is_weekend+air_temperature+lockdown*is_weekend" **Author** * Shri Nishanth Rajendran - AI Development Specialist, R² Data Labs, Rolls Royce Special thanks to Deepak Srinivasan and Alvaro Corrales Cano
github_jupyter
# Studying avoided crossing for a 1 cavity-2 qubit system, <mark>with and without thermal losses</mark> 1. **Introduction** 2. **Problem parameters** 3. **Setting up operators, Hamiltonian's, and the initial state** 4. **Demonstrating avoided crossing** * Plotting the ramp pulse generated * Solving the Master equation and plotting the results (without thermal losses) 5. **Studying the effect of various ramp times on avoided crossing** * { Case I } <u>No thermal losses</u> * { Case II } <u>Thermal losses</u> * Plotting the results 6. Calculating the Fidelity and Concurrence **Author** : Soumya Shreeram (shreeramsoumya@gmail.com)<br> **Supervisor** : Yu-Chin Chao (ychao@fnal.gov) <br> **Date**: 9th August 2019<br> This script was coded as part of the Helen Edwards Summer Internship program at Fermilab. The code studies the effect of avoided crossing for loading a photon from a qubit into the cavity. This is done by generating pulses with varying ramp times, and raising one of the qubit's frequecy above the cavity. ## 1. Introduction The Jaynes-Cumming model is used to explain light-matter interaction in a system with a qubit and a single cavity mode. The Hamiltonian $H$ can be extended to describe a 2-qubit and cavity system as, $$ H = \hbar \omega_c a^{\dagger}a+ \sum_{i=1}^2\frac{1}{2}\hbar \omega_{qi}\ \sigma_i^z + \sum_{i=1}^2\frac{1}{2} \hbar g(a^{\dagger} + a)(\sigma_i^-+\sigma_i^+)$$ which simplifies under the rotating-wave approximation as $$ H_{\rm RWA} = \hbar \omega_c a^{\dagger}a+ \sum_{i=1}^2\frac{1}{2}\hbar \omega_a \sigma_i^z + \sum_{i=1}^2\frac{1}{2} \ \hbar g\ (a^{\dagger}\sigma_i^- + a\ \sigma_i^+)$$ where $\omega_c$ and $\omega_{qi}$ are the cavity and qubit frequencies, while $a$ and $\sigma_i^-$ are the annihalation operators for the cavity and qubit respectively. Note that $i=1,2$ represents the 2 qubits. 
``` %matplotlib inline import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.axes3d import Axes3D from matplotlib.ticker import LinearLocator, FormatStrFormatter from matplotlib import cm plt.rcParams.update({'font.size': 16}) import numpy as np from numpy import ones,vstack from numpy.linalg import lstsq from math import pi from scipy.signal import find_peaks from time import sleep import sys from qutip import * ``` ## 2. Problem parameters Here we use $\hbar=1$; the coupling terms are redefined with a multiple of $2\pi$ before them for convinience. ``` def generateTimePulse(tr, th): """ Function that generates the pulse based on the input parameters @param tr :: ramp up/down time for the pulse @param th :: hold time for the pulse @return t_pulse :: np array with 4 times that define the pulse """ t_pulse = [0, 0, 0, 0] t_pulse[0] = 0 t_pulse[1] = tr + t_pulse[0] t_pulse[2] = t_pulse[1] + th t_pulse[3] = t_pulse[2] + tr print("The time pulse is: ", t_pulse) return t_pulse """------------- FREQUENCIES -----------------""" w_q1 = 2*pi*6.5; # Qubit 1 frequency w_q2 = 2*pi*6.8; # Qubit 2 frequency: range from 1-9 GHz w_f = 2*pi*7.1; # Resonator/ Filter frequency """------------- COUPLING --------------------""" g_q1f = 2*pi*0.135 # qubit 1-fitler coupling #g_q2f = 2*pi*0.415 # qubit 2-fitler coupling numF = 1 # number of filters N = 2 # number of fock states times = np.linspace(0,200,1500) """------------- DISSIPATION PARAMETERS -----""" kappa = 5*10**-3 # cavity dissipation rate n_th_a = 3*10**-3 # avg. no. of thermal bath excitation r1 = 5*10**-6 # qubit relaxation rate r2 = 1*10**-5 # qubit dephasing rate """------------- PULSE CONTROL PARAMETERS -----""" tr = 0 # ramp up and ramp down times th = 110 # hold time t_pulse = generateTimePulse(tr, th) # amplitude to raise pulse above cavity frequency (optional) d = 0.25 w_top = w_f + 2*pi*d no_ramps = 800 # number of ramps pulses sent into the Hamiltonian ``` ## 3. 
Setting up the operators, Hamiltonians, and the initial state
Demonstrating avoided crossing In this section the qubit frequency is raised above the cavity frequency by applying a linearly varying ramp time $t$ (ns). The signal is held for a time $T-2t$ before it is ramped down again. Tranformations on closed quantum states can be modelled by unitary operators. The combined time-dependent Hamiltonian for a system undergoing a tranformation that can be representated as, $$ H(t) = H_0 + \sum_{i=0}^n c_i(t)H_i$$ where $H_0$ is called the time-independent drift Hamiltonian and $H_i$ are the control Hamiltonians with a time varying amplitude $c_i(t)$. Here we write the Hamiltonian in a function-based time dependent way. See other ways [here](http://qutip.org/docs/latest/guide/dynamics/dynamics-time.html). Here the time-dependent coefficients, $f_n(t)$ of the Hamiltonian (e.g. `wf_t, w1_t,w2_t`) are expressed using Python functions ### 4.1 Functions ``` """---------------------------------------- PULSE FUNCTIONS ------------------------------------------""" def fitLine(t_pulse, i, j, w1, w2, t): """ Function generates a best fit line between [x1, y1] ->[x2, y2] Input: @param t_pulse :: np array containing the 4 points parameterizing the pulse @param i,j :: indicies of t_pulse determining the start-stop times @param w1, w2 :: lower and higher frequencies of the ramp pulse @param t :: interable time variable Returns: @polynomial(t) :: best-fit y value at t """ # compute coefficients coefficients = np.polyfit([t_pulse[i], t_pulse[j]], [w1, w2], 1) # generate best-fit polynmial polynomial = np.poly1d(coefficients) return polynomial(t) def rampUp(t_pulse, w1, w2, t): """ Generating a ramp up pulse Input: @param t_pulse :: np array containing the 4 points parameterizing the pulse @param w1, w2 :: lower and higher frequencies of the ramp pulse @param t :: interable time variable Returns: @param w :: int giving the y-value based on t """ t0 = t_pulse[0] t1 = t_pulse[1] if t0 != t1: if t < t1: return w1 + fitLine(t_pulse, 0, 1, 0, (w2-w1), 
t)*(t>t0) if t > t1: return w1 + (w2-w1)*(t>t1) else: return w1 + (w2 - w1)*(t > t1) def rampDown(t_pulse, w1, w2, t): """ Generating a ramp Down pulse Same as the ramp Up pulse given above only with the """ t2 = t_pulse[2] t3 = t_pulse[3] if t2 != t3: if t > t2: return w1 + fitLine(t_pulse, 2, 3, (w2-w1), 0, t)*(t>t2 and t<t3) if t < t2: return w1 + (w2-w1)*(t<t2) else: return w1 + (w2-w1)*(t<t2) def wq1_t(t, args=None): """ Function defines the time depended co-efficent of the qubit 1 w_q1(t) is a pulse wave going from 0 to height (w_f-w_q1) at T0_1 """ return (rampUp(t_pulse, w_q1, w_top, t) + rampDown(t_pulse, w_q1, w_top, t)-w_top) def wq1_tdown(t, args=None): """ Function defines the time depended co-efficent of the qubit 1 w_q1(t) is a pulse wave going from 0 to height (w_f-w_q1) at T0_1 """ return rampDown(t_pulse, w_q1, w_top, t) def wf_t(t, args=None): """ Function defines the time depended co-efficent of the filters (Although, there is no frequency change of the filters with time) so w_f(t) = constant """ return w_f def wq2_t(t, args=None): """ Function defines the time depended co-efficent of qubit 2 (Although, there is no frequency change of the quibit 2 with time) so w_q2(t) = constant """ return w_q2 """--------------------------------------------- HAMILTONIAN FUNCTIONS ---------------------------------------------""" def plotPulse(ax, times, t_pulse, w_q1, w_top, colorCode, label_, ramp): """ Plots the required pulse """ if ramp == True: plotting = ax.plot(times, [rampUp(t_pulse, w_q1, w_top, t)/(2*pi) for t in times], colorCode, label=label_) elif ramp == False: plotting = ax.plot(times, [rampDown(t_pulse, w_q1, w_top, t)/(2*pi) for t in times], colorCode, label=label_) if ramp == 'Custom': plotting = ax.plot(times, [(rampUp(t_pulse, w_q1, w_top, t) + rampDown(t_pulse, w_q1, w_top, t)-w_top)/(2*pi) for t in times], colorCode, label=r"$\Delta$t = %.1f ns"%(t_pulse[1]-t_pulse[0])) return plotting def labelTimes(t_r, t_H): return r"$\Delta t = %.2f {\ 
\rm ns}, t_{\rm H} = %.2f {\ \rm ns}$"%(t_r, t_H) def plotFrequencies(ax, times, wf_t, Colour, labels_, linestyle_): """ Function plots the frequencies as a function of times """ ax.plot(times, np.array(list(map(wf_t, times)))/(2*pi), Colour, linewidth=2, label=labels_, linestyle=linestyle_) ax.legend(loc = 'center left', bbox_to_anchor = (1.0, 0.5)) return def setLabels(ax, tr, th, plot_no): """ Function sets the labels of the x-y axis in the plot below """ if plot_no == 0: ax.set_ylabel("Frequency (GHz)", fontsize=16) ax.set_title(labelTimes(tr, th)) else: ax.set_xlabel("Time (ns)") ax.set_ylabel("Occupation \n probability") return def plotProb(ax, times, component, res, Colour, labels_, linestyle_): """ Function plots the occupation probabilities of the components after running mesolve """ ax.plot(times, np.real(expect(component, res.states)), Colour, linewidth=1.5, label=labels_, linestyle=linestyle_) ax.legend(loc = 'center left', bbox_to_anchor = (1.0, 0.5)) return ``` ### 4.1 Plotting the ramp pulse generated The figure below demonstrates how the combination of ramping up and down forms the required pulse.
``` fig, ax = plt.subplots(1, 1, figsize=(7,5)) t_pulse1 = [t_pulse[0], t_pulse[1]+2.5, t_pulse[2]-2.5, t_pulse[3]] t_pulse2 = [t_pulse[0], (t_pulse[3]-t_pulse[0])/2+t_pulse[0], (t_pulse[3]-t_pulse[0])/2+t_pulse[0], t_pulse[3]] # plotting the pulses plotPulse(ax, times, t_pulse, w_q1, w_top, 'g--', r"$\Delta$t = Ramp up", True) plotPulse(ax, times, t_pulse, w_q1, w_top, 'b--', r"$\Delta$t = Ramp down", False) plotPulse(ax, times, t_pulse, w_q1, w_top, 'r', ' ', 'Custom') plotPulse(ax, times, t_pulse1, w_q1, w_top, '#03fcba', ' ', 'Custom') plotPulse(ax, times, t_pulse2, w_q1, w_top, '#c4f2f1', ' ', 'Custom') # guide lines ax.axvline(x=t_pulse[0], color='#f2d4c4', linestyle='--') ax.axvline(x=t_pulse[3], color='#f2d4c4', linestyle='--') ax.axvline(x=t_pulse2[2], color='#f2d4c4', linestyle='--') setXYlabel(ax, 'Time (ns)', 'Frequency (Hz)', False, '') ax.legend(loc="upper right") fig.tight_layout() ``` ### 4.2 Solving the Master equation and plotting the results (without thermal losses) ``` opts = Options(nsteps = 50000, atol = 1e-30) # time dependent Hamiltonian H_t = [H0, [Hq1, wq1_t]] # Evolving the system res1 = mesolve(H_t, psi0, times, [], []) fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12,7)) labels_ = ["cavity", "qubit 1", "qubit 2"] w_list = [wf_t, wq1_t, wq2_t] colors_ = ['#b4bfbc', 'b', '#b0ed3e'] linestyle_ = ['--', '-', '-'] components_ = [nc, n1, n2] for i in [0, 1, 2]: plotFrequencies(axes[0], times, w_list[i], colors_[i], labels_[i], linestyle_[i]) setLabels(axes[0], tr, th, 0) for i in [0, 1, 2]: plotProb(axes[1], times, components_[i], res1, colors_[i], labels_[i], linestyle_[i]) setLabels(axes[1], tr, th, 1) fig.tight_layout() ``` ## 5. 
Studying the effect of various ramp times on avoided crossing ``` def showProgress(idx, n): """ Function prints the progress bar for a running function @param idx :: iterating index @param n :: total number of iterating variables/ total length """ j = (idx+1)/n sys.stdout.write('\r') sys.stdout.write("[%-20s] %d%%" % ('='*int(20*j), 100*j)) sys.stdout.flush() sleep(0.25) return def findIndex(times, t4): """ Function finds the index in the times array at required point t4 @param times :: np array contains the times at which H is evaluated @param t4 :: the point at which the pulse ends @returns param idx_array[0] :: the index of t4 in the times array """ idx_array = [] for i, t in enumerate(times): if t >= t4 and t < t4+1: idx_array.append(i) return idx_array[0] def genTimePulses(rampList): """ Generates pulses with variable ramp times @param rampList :: List of ramp-time increments (dt values) added to the base pulse edges """ ramp_vals = np.empty((0, 4)) for dt in rampList: t_new = [t_pulse[0], t_pulse[1]+dt, t_pulse[2]-dt, t_pulse[3]] ramp_vals = np.append(ramp_vals, [t_new], axis=0) return ramp_vals def printShape(ramp_dt_array): print("\nDimensions of the resultant 2D array:", np.shape(ramp_dt_array)) return # get the point after the ramp down excitation t_idx = findIndex(times, t_pulse[3]) # generating a range of pulse with varying ramp times rampList = np.linspace(t_pulse[1], (t_pulse[3]-t_pulse[0])/2+t_pulse[0], no_ramps)-t_pulse[1] # generates the pulses ramp_vals = genTimePulses(rampList) ``` ### { Case I } No thermal losses: Evaluating the excited state population at <mark>all times</mark> of the pulse. The excited state population is studied for a range of different ramp pulses.
``` #ramp_dt_array2D = evaluateHam2D(ramp_vals, True, no_ramps, H0, Hq1, wq1_t) no_loss = True exp_vals = [] ramp_exp_arr = [] ramp_dt_array2D = np.empty((0, len(times))) for i in range(no_ramps): t_pulse = ramp_vals[i][:] # time dependent Hamiltonian H_t = [H0, [Hq1, wq1_t]] # Evolving the system with/without thermal losses if no_loss == True: output = mesolve(H_t, psi0, times, [], []) else: output = mesolve(H_t, psi0, times, c_ops, []) exp_vals = np.real(expect(n1, output.states)) exp_val = np.mean(exp_vals[t_idx:-1]) ramp_dt_array2D = np.append(ramp_dt_array2D, [exp_vals], axis=0) ramp_exp_arr.append(exp_val) # progress bar showProgress(i, no_ramps) printShape(ramp_dt_array2D) ``` ### { Case II } <u>Thermal losses</u>: Evaluating the excited state population at the <mark>end of ramp down</mark> of the pulse. The excited state population is studied for a range of different ramp pulses. ``` no_loss = False exp_valsi = [] ramp_exp_arri = [] ramp_dt_array2Di = np.empty((0, len(times))) for i in range(no_ramps): t_pulse = ramp_vals[i][:] # time dependent Hamiltonian H_t = [H0, [Hq1, wq1_t]] # Evolving the system with/without thermal losses if no_loss == True: output = mesolve(H_t, psi0, times, [], []) else: output = mesolve(H_t, psi0, times, c_ops, []) exp_valsi = np.real(expect(n1, output.states)) exp_vali = np.mean(exp_valsi[t_idx:-1]) ramp_dt_array2Di = np.append(ramp_dt_array2Di, [exp_valsi], axis=0) ramp_exp_arri.append(exp_vali) # progress bar showProgress(i, no_ramps) printShape(ramp_dt_array2Di) ``` ### 5.1 Plotting the result obtained for different ramp times <mark>without thermal losses</mark> ``` def plotForVariousRamps(rampList, times, ramp_exp_arr, t_eval): """ Plots the variation in the excitation probability as a function of times and ramp up/down times @param rampList :: array of times by which the ramp time is increased @param times :: array of times at which H is evaluated @praem ramp_dt_array2D :: 2D array of occupation probabilities resulting for 
evaluating at various ramp times """ fig, ax = plt.subplots(1, 2, figsize=(11,4)) ax[0].plot(rampList, ramp_exp_arr, 'k.-', markerfacecolor='r', markeredgecolor='r', markersize=8) setXYlabel(ax[0], r'Ramp times $t$ (ns)', 'Excited population', True, '%d cavity'%(numF) ) Colors_ = ['r', 'b', 'g', '#ffd500'] for i,j in enumerate([0, findIndex(rampList, 18.3), findIndex(rampList, 36.7), findIndex(rampList, 55)]): ax[1].hlines(ramp_exp_arr[j], times[t_eval], times[-1], color=Colors_[i], linewidth=2.5, label=r'$\Delta t =$ %.2f'%rampList[j]) ax[1].legend() setXYlabel(ax[1], 'Times (ns)', 'Final occupation probabilty', False, 'Occupation probabilty vs times for various ramps\n' ) fig.tight_layout() return def plot3Dramps(rampList, times, ramp_dt_array2D): """ 3D plot of the variation in the excitation probability as a function of times and ramp up/down times @param rampList :: array of times by which the ramp time is increased @param times :: array of times at which H is evaluated @param ramp_dt_array2D :: 2D array of occupation probabilities resulting for evaluating at various ramp times """ fig = plt.figure(figsize=(12,7)) ax = fig.gca(projection='3d') X, Y = np.meshgrid(rampList, times) surf = ax.plot_surface(X, Y, np.transpose(ramp_dt_array2D), rstride=1, cstride=1, cmap=cm.gist_heat, linewidth=1, antialiased=False) #surf2 = ax.plot_wireframe(X, Y, np.transpose(ramp_dt_array2D), rstride=40, cstride=40, color='k', linewidth=0.5) # Add a color bar, axis properties fig.colorbar(surf, shrink=0.5, aspect=10) ax.set_xlabel('\nRamp times' + r'$\ \Delta t$ (ns)') ax.set_ylabel('\nTime (ns)') ax.set_zlabel('\nOccupation Probabilities'); ax.set_title(labelTimes(tr, th)) ax.view_init(16, 25) plt.show() return def FourierTransformOf(rampList, ramp_exp_arr): """ Function calculates the Fourier Transform of the input x-y data @param rampList :: x-values e.g.
array of times @param ramp_exp_arr :: real valued array whose FFT is calculated @returns freq_arr :: x-values in the frequency domain power :: Fourier transformed values of input ramp_exp_arr """ # fft of ramp_exp_arr ramp_FFT = np.fft.rfft(ramp_exp_arr) power = np.real(ramp_FFT)*np.real(ramp_FFT)+np.imag(ramp_FFT)*np.imag(ramp_FFT) # generating the FFT frequency array start_pt = 1/rampList[-1] freq_arr = np.linspace(start_pt, start_pt*len(power), len(power)) return freq_arr, power def plotFFT(ax, rampList, ramp_exp_arr): """ Function finds the peaks in the FFT spectrum and plots the results @param rampList :: x-values e.g. array of times @param ramp_exp_arr :: real valued array whose FFT is calculated """ rampList_FFT, ramp_exp_arr_FFT = FourierTransformOf(rampList, ramp_exp_arr) # find peaks peak, _ = find_peaks(ramp_exp_arr_FFT, distance=100) # plot ax.plot(rampList_FFT[1:], ramp_exp_arr_FFT[1:], color='#d97829', linestyle=':', marker= '.', markersize=8) ax.plot(rampList_FFT[peak], ramp_exp_arr_FFT[peak], 'ro') setXYlabel(ax, 'Frequency (GHz)', r'$\mathcal{F}\ [n_1]:$ 1 cavity', True, '(x, y) = (%.1f, %.2f)'%(ramp_exp_arr_FFT[peak], rampList_FFT[peak])) fig.tight_layout() return ramp_exp_arr_FFT[peak], rampList_FFT[peak] def printResults(y, x): print(' Power value: ', y) print(' Frequency value: ', x) return plotForVariousRamps(rampList, times, ramp_exp_arr, t_idx) ``` Plotting the Fourier Transform of the above plot showing Excited population as a function of Ramp times (ns). The plot below helps to summarize the shift between slow and fast modes.
``` fig, ax = plt.subplots(1, 2, figsize=(8,4)) br_pt = 20 yf_peak, xf_peak = plotFFT(ax[0], rampList[:findIndex(rampList, br_pt)], ramp_exp_arr[:findIndex(rampList, br_pt)]) ax[0].set_xlim(0.01, 1.5) yf_peak1, xf_peak1 = plotFFT(ax[1], rampList[findIndex(rampList, br_pt+5):], ramp_exp_arr[findIndex(rampList, br_pt+5):]) ax[1].set_xlim(0, 0.5) print('Small ramp times (t<%.2f):'%br_pt) printResults(yf_peak, xf_peak) print('\nLarge ramp tines (t>%.2f):'%(br_pt+5)) printResults(yf_peak1, xf_peak1) ``` 3D plot summing up the above two plots. ``` plot3Dramps(rampList, times, ramp_dt_array2D) ``` ### 5.2 Plotting the result obtained for different ramp times <mark>with thermal losses</mark> ``` plotForVariousRamps(rampList, times, ramp_exp_arri, t_idx) plot3Dramps(rampList, times, ramp_dt_array2Di) ``` ## 6. Calculating the Fidelity and Concurrence ``` # extract the final state from the result of the simulation rho_final = res1.states[-1] # trace out the resonator mode and print the two-qubit density matrix rho_qubits = ptrace(rho_final, [1, 2]) rho_qubits # compare to the ideal result of the sqrtiswap gate (plus phase correction) for the current initial state rho_qubits_ideal = ket2dm(tensor(phasegate(0), phasegate(-pi/2)) * sqrtiswap() * tensor(basis(2,0), basis(2,1))) rho_qubits_ideal print('Fidelity = ', fidelity(rho_qubits, rho_qubits_ideal)) print('Concurrence = ', concurrence(rho_qubits)) ```
github_jupyter
# seaborn.jointplot --- Seaborn's `jointplot` displays a relationship between 2 variables (bivariate) as well as 1D profiles (univariate) in the margins. This plot is a convenience class that wraps [JointGrid](http://seaborn.pydata.org/generated/seaborn.JointGrid.html#seaborn.JointGrid). ``` %matplotlib inline import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np plt.rcParams['figure.figsize'] = (20.0, 10.0) plt.rcParams['font.family'] = "serif" ``` The multivariate normal distribution is a nice tool to demonstrate this type of plot as it is sampling from a multidimensional Gaussian and there is natural clustering. I'll set the covariance matrix equal to the identity so that the X and Y variables are uncorrelated -- meaning we will just get a blob ``` # Generate some random multivariate data x, y = np.random.RandomState(8).multivariate_normal([0, 0], [(1, 0), (0, 1)], 1000).T df = pd.DataFrame({"x":x,"y":y}) ``` Default plot ``` p = sns.jointplot(data=df,x='x', y='y') ``` Currently, `jointplot` wraps `JointGrid` with the following options for `kind`: - scatter - reg - resid - kde - hex Scatter is the default parameters ``` p = sns.jointplot(data=df,x='x', y='y',kind='scatter') ``` 'reg' plots a linear regression line. Here the line is close to flat because we chose our variables to be uncorrelated ``` p = sns.jointplot(data=df,x='x', y='y',kind='reg') ``` 'resid' plots the residual of the data to the regression line -- which is not very useful for this specific example because our regression line is almost flat and thus the residual is almost the same as the data. 
``` x2, y2 = np.random.RandomState(9).multivariate_normal([0, 0], [(1, 0), (0, 1)], len(x)).T df2 = pd.DataFrame({"x":x,"y":y2}) p = sns.jointplot(data=df,x='x', y='y',kind='resid') ``` `kde` plots a kernel density estimate in the margins and converts the interior into a shaded contour plot ``` p = sns.jointplot(data=df,x='x', y='y',kind='kde') ``` 'hex' bins the data into hexagons with histograms in the margins. At this point you probably see the "pre-cooked" nature of `jointplot`. It provides nice defaults, but if you wanted, for example, a KDE on the margin of this hexplot you will need to use `JointGrid`. ``` p = sns.jointplot(data=df,x='x', y='y',kind='hex') ``` `stat_func` can be used to provide a function for computing a summary statistic from the data. The full x, y data vectors are passed in, so the function must provide one value or a tuple from many. As an example, I'll provide `tmin`, which when used in this way will return the smallest value of x that was greater than its corresponding value of y. ``` from scipy.stats import tmin p = sns.jointplot(data=df, x='x', y='y',kind='kde',stat_func=tmin) # tmin is computing roughly the equivalent of the following print(df.loc[df.x>df.y,'x'].min()) ``` Change the color ``` p = sns.jointplot(data=df, x='x', y='y', kind='kde', color="#99ffff") ``` ``` p = sns.jointplot(data=df, x='x', y='y', kind='kde', ratio=1) ``` Create separation between 2D plot and marginal plots with `space` ``` p = sns.jointplot(data=df, x='x', y='y', kind='kde', space=2) ``` `xlim` and `ylim` can be used to adjust the field of view ``` p = sns.jointplot(data=df, x='x', y='y', kind='kde', xlim=(-15,15), ylim=(-15,15)) ``` Pass additional parameters to the marginal plots with `marginal_kws`.
You can pass similar options to `joint_kws` and `annot_kws` ``` p = sns.jointplot(data=df, x='x', y='y', kind='kde', marginal_kws={'lw':5, 'color':'red'}) ``` Finalize ``` sns.set(rc={'axes.labelsize':30, 'figure.figsize':(20.0, 10.0), 'xtick.labelsize':25, 'ytick.labelsize':20}) from itertools import chain p = sns.jointplot(data=df, x='x', y='y', kind='kde', xlim=(-3,3), ylim=(-3,3), space=0, stat_func=None, marginal_kws={'lw':3, 'bw':0.2}).set_axis_labels('X','Y') p.ax_marg_x.set_facecolor('#ccffccaa') p.ax_marg_y.set_facecolor('#ccffccaa') for l in chain(p.ax_marg_x.axes.lines,p.ax_marg_y.axes.lines): l.set_linestyle('--') l.set_color('black') plt.text(-1.7,-2.7, "Joint Plot", fontsize = 55, color='Black', fontstyle='italic') fig, ax = plt.subplots(1,1) sns.set(rc={'axes.labelsize':30, 'figure.figsize':(20.0, 10.0), 'xtick.labelsize':25, 'ytick.labelsize':20}) from itertools import chain p = sns.jointplot(data=df, x='x', y='y', kind='kde', xlim=(-3,3), ylim=(-3,3), space=0, stat_func=None, ax=ax, marginal_kws={'lw':3, 'bw':0.2}).set_axis_labels('X','Y') p.ax_marg_x.set_facecolor('#ccffccaa') p.ax_marg_y.set_facecolor('#ccffccaa') for l in chain(p.ax_marg_x.axes.lines,p.ax_marg_y.axes.lines): l.set_linestyle('--') l.set_color('black') plt.text(-1.7,-2.7, "Joint Plot", fontsize = 55, color='Black', fontstyle='italic') # p = sns.jointplot(data=df, # x='x', # y='y', # kind='kde', # xlim=(-3,3), # ylim=(-3,3), # space=0, # stat_func=None, # ax=ax[1], # marginal_kws={'lw':3, # 'bw':0.2}).set_axis_labels('X','Y') # p.ax_marg_x.set_facecolor('#ccffccaa') # p.ax_marg_y.set_facecolor('#ccffccaa') # for l in chain(p.ax_marg_x.axes.lines,p.ax_marg_y.axes.lines): # l.set_linestyle('--') # l.set_color('black') p.savefig('../../figures/jointplot.png') ```
github_jupyter
# Think Bayes: Chapter 9 This notebook presents code and exercises from Think Bayes, second edition. Copyright 2016 Allen B. Downey MIT License: https://opensource.org/licenses/MIT ``` from __future__ import print_function, division % matplotlib inline import warnings warnings.filterwarnings('ignore') import math import numpy as np from thinkbayes2 import Pmf, Cdf, Suite, Joint import thinkplot ``` ## Improving Reading Ability From DASL(http://lib.stat.cmu.edu/DASL/Stories/ImprovingReadingAbility.html) > An educator conducted an experiment to test whether new directed reading activities in the classroom will help elementary school pupils improve some aspects of their reading ability. She arranged for a third grade class of 21 students to follow these activities for an 8-week period. A control classroom of 23 third graders followed the same curriculum without the activities. At the end of the 8 weeks, all students took a Degree of Reading Power (DRP) test, which measures the aspects of reading ability that the treatment is designed to improve. > Summary statistics on the two groups of children show that the average score of the treatment class was almost ten points higher than the average of the control class. A two-sample t-test is appropriate for testing whether this difference is statistically significant. The t-statistic is 2.31, which is significant at the .05 level. I'll use Pandas to load the data into a DataFrame. ``` import pandas as pd df = pd.read_csv('drp_scores.csv', skiprows=21, delimiter='\t') df.head() ``` And use `groupby` to compute the means for the two groups. ``` grouped = df.groupby('Treatment') for name, group in grouped: print(name, group.Response.mean()) ``` The `Normal` class provides a `Likelihood` function that computes the likelihood of a sample from a normal distribution. 
``` from scipy.stats import norm class Normal(Suite, Joint): def Likelihood(self, data, hypo): """ data: sequence of test scores hypo: mu, sigma """ mu, sigma = hypo likes = norm.pdf(data, mu, sigma) return np.prod(likes) ``` The prior distributions for `mu` and `sigma` are uniform. ``` mus = np.linspace(20, 80, 101) sigmas = np.linspace(5, 30, 101) ``` I use `itertools.product` to enumerate all pairs of `mu` and `sigma`. ``` from itertools import product control = Normal(product(mus, sigmas)) data = df[df.Treatment=='Control'].Response control.Update(data) ``` After the update, we can plot the probability of each `mu`-`sigma` pair as a contour plot. ``` thinkplot.Contour(control, pcolor=True) thinkplot.Config(xlabel='mu', ylabel='sigma') ``` And then we can extract the marginal distribution of `mu` ``` pmf_mu0 = control.Marginal(0) thinkplot.Pdf(pmf_mu0) thinkplot.Config(xlabel='mu', ylabel='Pmf') ``` And the marginal distribution of `sigma` ``` pmf_sigma0 = control.Marginal(1) thinkplot.Pdf(pmf_sigma0) thinkplot.Config(xlabel='sigma', ylabel='Pmf') ``` **Exercise:** Run this analysis again for the treatment group. What is the distribution of the difference between the groups? What is the probability that the average "reading power" for the treatment group is higher? What is the probability that the variance of the treatment group is higher? ``` # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # It looks like there is a high probability that the mean of # the treatment group is higher, and the most likely size of # the effect is 9-10 points. # It looks like the variance of the treated group is substantially # smaller, which suggests that the treatment might be helping # low scorers more than high scorers. ``` ## Paintball Suppose you are playing paintball in an indoor arena 30 feet wide and 50 feet long.
You are standing near one of the 30 foot walls, and you suspect that one of your opponents has taken cover nearby. Along the wall, you see several paint spatters, all the same color, that you think your opponent fired recently. The spatters are at 15, 16, 18, and 21 feet, measured from the lower-left corner of the room. Based on these data, where do you think your opponent is hiding? Here's the Suite that does the update. It uses `MakeLocationPmf`, defined below. ``` class Paintball(Suite, Joint): """Represents hypotheses about the location of an opponent.""" def __init__(self, alphas, betas, locations): """Makes a joint suite of parameters alpha and beta. Enumerates all pairs of alpha and beta. Stores locations for use in Likelihood. alphas: possible values for alpha betas: possible values for beta locations: possible locations along the wall """ self.locations = locations pairs = [(alpha, beta) for alpha in alphas for beta in betas] Suite.__init__(self, pairs) def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. hypo: pair of alpha, beta data: location of a hit Returns: float likelihood """ alpha, beta = hypo x = data pmf = MakeLocationPmf(alpha, beta, self.locations) like = pmf.Prob(x) return like def MakeLocationPmf(alpha, beta, locations): """Computes the Pmf of the locations, given alpha and beta. Given that the shooter is at coordinates (alpha, beta), the probability of hitting any spot is inversely proportionate to the strafe speed. alpha: x position beta: y position locations: x locations where the pmf is evaluated Returns: Pmf object """ pmf = Pmf() for x in locations: prob = 1.0 / StrafingSpeed(alpha, beta, x) pmf.Set(x, prob) pmf.Normalize() return pmf def StrafingSpeed(alpha, beta, x): """Computes strafing speed, given location of shooter and impact. 
alpha: x location of shooter beta: y location of shooter x: location of impact Returns: derivative of x with respect to theta """ theta = math.atan2(x - alpha, beta) speed = beta / math.cos(theta)**2 return speed ``` The prior probabilities for `alpha` and `beta` are uniform. ``` alphas = range(0, 31) betas = range(1, 51) locations = range(0, 31) suite = Paintball(alphas, betas, locations) suite.UpdateSet([15, 16, 18, 21]) ``` To visualize the joint posterior, I take slices for a few values of `beta` and plot the conditional distributions of `alpha`. If the shooter is close to the wall, we can be somewhat confident of his position. The farther away he is, the less certain we are. ``` locations = range(0, 31) alpha = 10 betas = [10, 20, 40] thinkplot.PrePlot(num=len(betas)) for beta in betas: pmf = MakeLocationPmf(alpha, beta, locations) pmf.label = 'beta = %d' % beta thinkplot.Pdf(pmf) thinkplot.Config(xlabel='Distance', ylabel='Prob') ``` Here are the marginal posterior distributions for `alpha` and `beta`. ``` marginal_alpha = suite.Marginal(0, label='alpha') marginal_beta = suite.Marginal(1, label='beta') print('alpha CI', marginal_alpha.CredibleInterval(50)) print('beta CI', marginal_beta.CredibleInterval(50)) thinkplot.PrePlot(num=2) thinkplot.Cdf(Cdf(marginal_alpha)) thinkplot.Cdf(Cdf(marginal_beta)) thinkplot.Config(xlabel='Distance', ylabel='Prob') ``` To visualize the joint posterior, I take slices for a few values of `beta` and plot the conditional distributions of `alpha`. If the shooter is close to the wall, we can be somewhat confident of his position. The farther away he is, the less certain we are. ``` betas = [10, 20, 40] thinkplot.PrePlot(num=len(betas)) for beta in betas: cond = suite.Conditional(0, 1, beta) cond.label = 'beta = %d' % beta thinkplot.Pdf(cond) thinkplot.Config(xlabel='Distance', ylabel='Prob') ``` Another way to visualize the posterio distribution: a pseudocolor plot of probability as a function of `alpha` and `beta`. 
``` thinkplot.Contour(suite.GetDict(), contour=False, pcolor=True) thinkplot.Config(xlabel='alpha', ylabel='beta', axis=[0, 30, 0, 20]) ``` Here's another visualization that shows posterior credible regions. ``` d = dict((pair, 0) for pair in suite.Values()) percentages = [75, 50, 25] for p in percentages: interval = suite.MaxLikeInterval(p) for pair in interval: d[pair] += 1 thinkplot.Contour(d, contour=False, pcolor=True) thinkplot.Text(17, 4, '25', color='white') thinkplot.Text(17, 15, '50', color='white') thinkplot.Text(17, 30, '75') thinkplot.Config(xlabel='alpha', ylabel='beta', legend=False) ``` **Exercise:** From [John D. Cook](http://www.johndcook.com/blog/2010/07/13/lincoln-index/) "Suppose you have a tester who finds 20 bugs in your program. You want to estimate how many bugs are really in the program. You know there are at least 20 bugs, and if you have supreme confidence in your tester, you may suppose there are around 20 bugs. But maybe your tester isn't very good. Maybe there are hundreds of bugs. How can you have any idea how many bugs there are? There’s no way to know with one tester. But if you have two testers, you can get a good idea, even if you don’t know how skilled the testers are. Suppose two testers independently search for bugs. Let k1 be the number of errors the first tester finds and k2 the number of errors the second tester finds. Let c be the number of errors both testers find. The Lincoln Index estimates the total number of errors as k1 k2 / c [I changed his notation to be consistent with mine]." So if the first tester finds 20 bugs, the second finds 15, and they find 3 in common, we estimate that there are about 100 bugs. What is the Bayesian estimate of the number of errors based on this data? ``` # Solution goes here # Solution goes here # Solution goes here # Solution goes here ``` **Exercise:** The GPS problem. 
According to [Wikipedia]()  > GPS included a (currently disabled) feature called Selective Availability (SA) that adds intentional, time varying errors of up to 100 meters (328 ft) to the publicly available navigation signals. This was intended to deny an enemy the use of civilian GPS receivers for precision weapon guidance. > [...] > Before it was turned off on May 2, 2000, typical SA errors were about 50 m (164 ft) horizontally and about 100 m (328 ft) vertically.[10] Because SA affects every GPS receiver in a given area almost equally, a fixed station with an accurately known position can measure the SA error values and transmit them to the local GPS receivers so they may correct their position fixes. This is called Differential GPS or DGPS. DGPS also corrects for several other important sources of GPS errors, particularly ionospheric delay, so it continues to be widely used even though SA has been turned off. The ineffectiveness of SA in the face of widely available DGPS was a common argument for turning off SA, and this was finally done by order of President Clinton in 2000. Suppose it is 1 May 2000, and you are standing in a field that is 200m square. You are holding a GPS unit that indicates that your location is 51m north and 15m west of a known reference point in the middle of the field. However, you know that each of these coordinates has been perturbed by a "feature" that adds random errors with mean 0 and standard deviation 30m. 1) After taking one measurement, what should you believe about your position? Note: Since the intentional errors are independent, you could solve this problem independently for X and Y. But we'll treat it as a two-dimensional problem, partly for practice and partly to see how we could extend the solution to handle dependent errors. You can start with the code in gps.py. 2) Suppose that after one second the GPS updates your position and reports coordinates (48, 90). What should you believe now? 
3) Suppose you take 8 more measurements and get: (11.903060613102866, 19.79168669735705) (77.10743601503178, 39.87062906535289) (80.16596823095534, -12.797927542984425) (67.38157493119053, 83.52841028148538) (89.43965206875271, 20.52141889230797) (58.794021026248245, 30.23054016065644) (2.5844401241265302, 51.012041625783766) (45.58108994142448, 3.5718287379754585) At this point, how certain are you about your location? ``` # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here ``` **Exercise:** [The Flea Beetle problem from DASL](http://lib.stat.cmu.edu/DASL/Datafiles/FleaBeetles.html) Datafile Name: Flea Beetles Datafile Subjects: Biology Story Names: Flea Beetles Reference: Lubischew, A.A. (1962) On the use of discriminant functions in taxonomy. Biometrics, 18, 455-477. Also found in: Hand, D.J., et al. (1994) A Handbook of Small Data Sets, London: Chapman & Hall, 254-255. Authorization: Contact Authors Description: Data were collected on the genus of flea beetle Chaetocnema, which contains three species: concinna (Con), heikertingeri (Hei), and heptapotamica (Hep). Measurements were made on the width and angle of the aedeagus of each beetle. The goal of the original study was to form a classification rule to distinguish the three species. Number of cases: 74 Variable Names: Width: The maximal width of aedeagus in the forpart (in microns) Angle: The front angle of the aedeagus (1 unit = 7.5 degrees) Species: Species of flea beetle from the genus Chaetocnema Suggestions: 1. Plot CDFs for the width and angle data, broken down by species, to get a visual sense of whether the normal distribution is a good model. 2. Use the data to estimate the mean and standard deviation for each variable, broken down by species. 3. Given a joint posterior distribution for `mu` and `sigma`, what is the likelihood of a given datum? 4. 
Write a function that takes a measured width and angle and returns a posterior PMF of species. 5. Use the function to classify each of the specimens in the table and see how many you get right. ``` import pandas as pd df = pd.read_csv('flea_beetles.csv', delimiter='\t') df.head() # Solution goes here ```
github_jupyter
**Documentation for getting started with ipyleaflet:** https://ipyleaflet.readthedocs.io **Video tutorial for this:** https://www.youtube.com/watch?v=VW1gYD5eB6E ## Create default interactive map ``` # import the package import ipyleaflet # define m as a default map m = ipyleaflet.Map() # display map m ``` ## Customize default map settings ``` # import some classes from the package, so "ipyleaflet." no longer needs to be typed just before them from ipyleaflet import Map, FullScreenControl, LayersControl, DrawControl, MeasureControl, ScaleControl # define a map with new center and zoom settings m = Map(center=[30, -85], zoom=3, scroll_wheel_zoom=True) # set display height at 500 pixels m.layout.height="500px" # display map m ``` ## Add widget controls to interactive map interface ``` # add full screen control, default position is top left m.add_control(FullScreenControl()) # add layers control m.add_control(LayersControl(position="topright")) # add draw control m.add_control(DrawControl(position="topleft")) # add measure control m.add_control(MeasureControl()) # add scale control m.add_control(ScaleControl(position="bottomleft")) ``` ## Add basemaps ``` # import some classes from the package, so "ipyleaflet." 
no longer needs to be typed just before them from ipyleaflet import basemaps, TileLayer # add OpenTopoMap basemap layer m.add_layer(basemaps.OpenTopoMap) # add Esri.WorldImagery basemap layer m.add_layer(basemaps.Esri.WorldImagery) # display map m # define a tile layer for Google Maps google_map = TileLayer( url="https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}", attribution="Google", name="Google Maps", ) # add layer to map m.add_layer(google_map) # define a tile layer for Google Satellite Imagery google_satellite = TileLayer( url="https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}", attribution="Google", name="Google Satellite" ) # add layer to map m.add_layer(google_satellite) # display map m # disable the map attribution label m.attribution_control = False ``` ## Add markers ``` # import marker class from package from ipyleaflet import Marker # define three markers marker1 = Marker(name='marker1', location=(40, -100)) marker2 = Marker(name='marker2', location=(30, -90)) marker3 = Marker(name='marker3', location=(20, -80)) # add them as layers m.add_layer(marker1) m.add_layer(marker2) m.add_layer(marker3) # display map m ``` ## Add marker cluster ``` # import classes from package from ipyleaflet import Map, Marker, MarkerCluster # define three markers marker1 = Marker(name='marker1', location=(50, -100)) marker2 = Marker(name='marker2', location=(30, -110)) marker3 = Marker(name='marker3', location=(40, -90)) # define marker cluster marker_cluster = MarkerCluster( markers=(marker1, marker2, marker3), name="marker cluster" ) # add marker cluster as map layer m.add_layer(marker_cluster) # display map m ```
github_jupyter
``` import seaborn as sns from matplotlib import pyplot as plt import numpy as np import pandas as pd #load dataset into the notebook data = pd.read_csv('titanic.csv') data.head() #get all coumns in small caps data.columns.str.lower() #lets look the mean of survival using gender data.groupby('Sex')[['Survived']].mean() #here we see that the survival rate for females was hire than that of men #this shows that about 20% of men survived and 75% of females survived #lets group them further by class data.groupby(['Sex','Pclass'])[['Survived']].mean().unstack() # this shows tha most females that survived outcomes survival of mens from both # first , second and third classses # It also show that most of people in the first class # survival rate was hire than the restt # the survival reduces as you move from first class to third class #the above can also be written using pivot_table function as shown below data.pivot_table('Survived', index='Sex', columns='Pclass') #let then check the survival using #we will group into different ages i.e 0 to 18 , 18 to 35 and 35 to 80 age = pd.cut(data['Age'], [0, 18,35, 80]) data.pivot_table('Survived', ['Sex', age], 'Pclass').unstack() #The results also shows similar results to girl child in the first class #as the survival as also in over 90% unlike boychild #but for boychild in the ages between 0-18 their survival was abit high #it also shows people of age 35 to 80 in both genders in the third class #did not survived many like for mens its alsmost everyone died #lets now compute the total for each class survival using margin keyword in the pivod table function data.pivot_table('Survived', index='Sex', columns='Pclass', margins=True) #the females were the ost survived unlike males #First class people survived more than the others members #the rate of survival generally was abit low about 40% ``` ## Analysis of The Dataset ``` import copy import warnings warnings.filterwarnings('ignore') ``` ## Description for Columns in the dataset - 
survival - Survival (0 = No; 1 = Yes) - class - Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd) - name - Name - sex - Sex - age - Age - sibsp - Number of Siblings/Spouses Aboard - parch - Number of Parents/Children Aboard - ticket - Ticket Number - fare - Passenger Fare - cabin - Cabin - embarked - Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton) - boat - Lifeboat (if survived) - body - Body number (if did not survive and body was recovered) ### This May also help : https://data.world/nrippner/titanic-disaster-dataset ``` # create new df df = copy.copy(data) df.shape #check sample data df.head() #checking information from the df df.info() ``` ## Findings.. - Age , Cabin has nulls data - It has 891 entries - has fields with object data type ...Need to be cleaned to correct types - 12 columns are in the df ``` #checking stats information about the numericals df.describe().T ``` ## Findings .. - Looking on real Data ..we have columns like Survived , Age , sibsp , parch , fare - The Age , sibsp , parch , fare seems to be unevenly distributed by checking on quartiles ## Checking THe Data Quality ``` #We gonna check the percentage of nulls in each column field. nulls_sums = df.isna().sum() percent_nulls = nulls_sums /(len(df)) percent_nulls ``` ## Findings .. 
- Cabin has 77.1% , Age has 19.7% and Embarked has 0.225% nulls - Since Cabin has a very high amount of nulls , I will drop the column from the df - For Age , I will use median to replace the nulls since it is not a good idea to remove this either row_wise or column as it will affect the data greatly - For Embarked I will drop the rows with nulls as they are few ``` #remove the cabin col df.drop('Cabin' , axis = 1 , inplace = True) #fill nulls with median or mean for age age_median = df['Age'].median(skipna = True) df['Age'].fillna(age_median, inplace = True) #drop the rows with nulls for embarked #will use boolean to filter out nulls df = df[df['Embarked'].isna() != True] df.shape # create a copy of df df1 = df.copy() df1.shape , df.shape ``` ## Detecting Outliers - Will Use boxplot ``` plt.figure(figsize = (14, 7)) # create one row with four columns for four plots plt.subplot(1,4,1) # Draw for age sns.boxplot(y= df['Age']) plt.title("CHeck age outliers") # for fare plt.subplot(1,4,2) sns.boxplot(y= df['Fare']) plt.title("CHeck Fare outliers") # siblings plt.subplot(1,4,3) sns.boxplot(y= df['SibSp']) plt.title("CHeck sibsp outliers") # for children plt.subplot(1,4,4) sns.boxplot(y= df['Parch']) plt.title("CHeck Parch outliers") ``` ## Findings - From the above 4 attributes we see all have outliers, as there are many points outside the whisker range - Fare is the one with most outliers ``` #Lets now check survival rate with regard to siblings sns.catplot(x = 'SibSp' , col = "Survived" , data = df , kind = 'count') ``` ## Findings ... - Most of those who survived were those that were single siblings aboard - The rest, many of them never survived ``` #Lets now check survival rate with regard to parents aboard sns.catplot(x = 'Parch' , col = "Survived" , data = df , kind = 'count') ``` ## Findings ... - Single parents also survived most - From the above two plots.
**WE can conclude that parch and sibsp shows whether a sibling is accompanied by parent or not** - I will Merge the two cols labels(1 or 0) to see if a single person is with another one else ``` #if you add sibsp and parch and is over 0 , return 1 else zero def checkAccopany(x): if (x['Parch'] + x['SibSp'] > 0): return 1 else: return 0 # create the new merged col df['is_Accompanied'] = df.apply(checkAccopany , axis = 1) df.head() #use survival and new is_accompanied col to check sns.catplot(x = 'is_Accompanied' , col = "Survived" , data = df , kind = 'count') ``` ## Findings - Those who were not accompanied mostly perished more than those accompanied - Those who were accompanied survived more than the other ones. ``` #now checking about fare.. #i will use distplot...shows distribution and histgrams combined plt.figure(figsize = (12 , 7)) sns.distplot(df['Fare']) plt.title("Fare Distributiron") ``` ## Findings ... - The fare is more skewed to the right. (more data is on the right) - The skewness need to be removed... can use logs to standard it. ``` #using log function to try balance skewness plt.figure(figsize = (12 , 7)) sns.distplot(df['Fare'].map(lambda x: np.log(x) if x >0 else 0)) plt.title(" Logarithmic Fare Distributiron") ``` ## We have made the data be less skewed for uniformity.. - The fare column can now be replaced with log values since is more uniform ``` #perform logs to fare col df['Fare'] = df['Fare'].map(lambda x: np.log(x) if x >0 else 0) ## LEts now check sex and class distribution using survival sns.catplot(x = 'Sex' ,y = "Survived" , data = df , col = 'Pclass', kind = 'bar') ``` ## Findings... - Females were likely to have survived most. - Those in the first class also survived more ``` #using embarked sns.catplot(x = 'Sex' ,y = "Survived" , data = df , col = 'Embarked', kind = 'bar') ``` ## Findings .. 
- Those who boarded from Port Label **C** are likely to have Survived more than others ``` ##Checking Age survival rates plt.figure(figsize = (12 , 6)) sns.distplot(df['Age']) ``` ## Most of the people aboard were likely to be of the age group 20 --- 40 ``` #Checking survival based on ages # 1. Those who survived plt.figure(figsize = (12 , 6)) sns.distplot(df[df['Survived'] == 1]['Age']) plt.title("Distribution Of Survived") ``` ## Those with less than 60 years were most likely to survive. - greater chances of survival were between 30 and 35 years ``` #Checking survival based on ages # 2. Those who didn't survive plt.figure(figsize = (12 , 6)) sns.distplot(df[df['Survived'] == 0]['Age']) plt.title("Distribution Of who did not Survived") ``` ## Aged were likely not to survive. - It is skewed to the left.. more of the aged did not survive ``` ##Survival based on fare.. sns.boxplot(x = 'Survived' , y = 'Fare' , data = df) ``` ## Most Survived are likely to have paid more fare. - Those who survived have a mean fare greater than non-survivors
github_jupyter
``` import numpy as np import pandas as pd import scipy as sp import matplotlib.pyplot as plt import matplotlib as mpl import seaborn as sns sns.set() from IPython.core.pylabtools import figsize import statsmodels.api as sm from patsy import dmatrix df = pd.read_csv('~/src/properties_2016.csv') df.tail() train_2016_df = pd.read_csv('~/src/train_2016.csv') train_2016_df.tail() df.tail() cat = ['parcelid', 'airconditioningtypeid', 'architecturalstyletypeid', 'buildingqualitytypeid', \ 'buildingclasstypeid', 'decktypeid', 'fips', 'hashottuborspa', 'heatingorsystemtypeid', \ 'propertycountylandusecode', 'propertylandusetypeid', 'propertyzoningdesc', \ 'pooltypeid10', 'pooltypeid2', 'pooltypeid7', \ 'rawcensustractandblock', 'censustractandblock', 'regionidcounty', 'regionidcity', \ 'regionidzip', 'regionidneighborhood', 'typeconstructiontypeid', 'yearbuilt', \ 'assessmentyear', 'taxdelinquencyyear', 'fireplaceflag', 'storytypeid', 'taxdelinquencyflag'] cat cat_df = df[cat] cat_df.tail() result_df = pd.merge(cat_df, train_2016_df) result_df.tail() # fillna를 위한 values 만들기, fillna 적용하기(Nan >> nulls_) values = {} for i in cat: values[i] = "nulls_" + i[:10] result_df.fillna(values, inplace=True) result_df.tail() def find_others(pvalue, a, b): others = [] length = len(pvalue) for i in range(length): if pvalue.values[i] > 0.01: others.append(pvalue.index[i][a:b]) del others[0] del others[-1] return others # # transatctiondate 제거 # del result_df['transactiondate'] # result_df.tail() # 수식 만들기 formula = "logerror ~ " for i in cat[1:2]: formula += "C(" + i + ") + " formula = formula[:-3] y = result_df.iloc[:, -1:] X = result_df.iloc[:, :-1] model = sm.OLS.from_formula(formula, data=result_df) print(model.fit().summary()) sm.stats.anova_lm(model.fit()) result_df.head() # 빈도 확인 for i in cat[1:]: print(result_df[i].value_counts()) # others = [3, 9] # for i in others: # result_df.loc[result_df['airconditioningtypeid'] == i] = "nulls_airconditi" # 
print(result_df.groupby(['airconditioningtypeid']).size().reset_index) formula = "logerror ~ " for i in cat[:]: formula += "C(" + i + ") + " formula = formula[:-3] formula # # formula for all(박제) # formula = 'logerror ~ C(parcelid) + C(airconditioningtypeid) + C(architecturalstyletypeid) \ # + C(buildingqualitytypeid) + C(buildingclasstypeid) + C(decktypeid) + C(fips) \ # + C(hashottuborspa) + C(heatingorsystemtypeid) + C(propertycountylandusecode) \ # + C(propertylandusetypeid) + C(propertyzoningdesc) + C(pooltypeid10) + C(pooltypeid2) \ # + C(pooltypeid7) + C(rawcensustractandblock) + C(censustractandblock) + C(regionidcounty) \ # + C(regionidcity) + C(regionidzip) + C(regionidneighborhood) + C(typeconstructiontypeid) \ # + C(yearbuilt) + C(assessmentyear) + C(taxdelinquencyyear) + C(fireplaceflag) \ # + C(storytypeid) + C(taxdelinquencyflag)' # formula formula = 'logerror ~ C(parcelid) + C(airconditioningtypeid) + C(architecturalstyletypeid) \ + C(buildingqualitytypeid) + C(buildingclasstypeid) + C(decktypeid) + C(fips) \ + C(hashottuborspa) + C(heatingorsystemtypeid) + C(propertycountylandusecode) \ + C(propertylandusetypeid) + C(pooltypeid10) + C(pooltypeid2) \ + C(pooltypeid7) + C(regionidcounty) \ + C(regionidcity) + C(regionidzip) + C(regionidneighborhood) + C(typeconstructiontypeid) \ + C(yearbuilt) + C(assessmentyear) + C(taxdelinquencyyear) + C(fireplaceflag) \ + C(storytypeid) + C(taxdelinquencyflag)' formula # VIF(C) result_df.iloc[:, 15:].tail() ## pvalues가 큰 값 제거하는 프로그램 # 1.yearbuilt : .. 
> 3 # 2.regionidneighborhood : # 수식 만들기 formula = "logerror ~ C(regionidneighborhood)" model = sm.OLS.from_formula(formula, data=result_df) result = model.fit() print(result.summary()) # sm.stats.anova_lm(model.fit()) a = model.fit().pvalues a[a < 0.01] a.index[1][-7:-3] # pvalues 만들기 pvalue = result.pvalues pvalue.tail() pvalue = pvalue.reset_index() pvalue.tail() pvalue.index.values pvalue.tail() others = [] length = len(pvalue) for i in range(length): if pvalue.iloc[:,-1:].values[i] > 0.01: idx = pvalue.index.values[i] + 1 others.append(idx) others len(others), len(pvalue), type(others) result_copy = result_df.copy() result_copy.tail() result_copy["regionidneighborhood"].replace(others, 1, inplace=True) result_copy["regionidneighborhood"].tail() result_copy[15:].tail() result_copy['regionidneighborhood'].value_counts() pvalue.index.values[0] + 1 others = [] length = len(pvalue) for i in range(length): if pvalue.values[i] > 0.01: others.append(pvalue.index[i][a:b]) xxx_df['A'].index.values.tolist() def find_others(pvalue, a, b): others = [] length = len(pvalue) for i in range(length): if pvalue.values[i] > 0.01: others.append(pvalue.index[i][a:b]) del others[0] del others[-1] list(np.array(others).astype(float)) return others others = find_others(pvalue, -7, -3) others result_df["yearbuilt"].replace(to_replace=others, "others", inplace=True) result_df["yearbuilt"].tail() # 수식 만들기 formula = "logerror ~ C(yearbuilt)" model = sm.OLS.from_formula(formula, data=result_df) result = model.fit() print(result.summary()) # sm.stats.anova_lm(model.fit()) result_df['yearbuilt'].tail() xxx_df = pd.DataFrame() xxx_df['A'] = [1, 2, 3, 4] xxx_df['B'] = [1, 2, 3, 4] xxx_df xxx_df['A'].index.values.tolist() result_copy["regionidneighborhood"].replace(others, value="others", inplace=True) result_copy["regionidneighborhood"].tail() type(others) xxx_df.replace([1, 2, 3], 9, inplace=True) xxx_df xxx_df # 수식 만들기 formula = "logerror ~ C(yearbuilt)" model = sm.OLS.from_formula(formula, 
data=result_df) print(model.fit().summary2()) # sm.stats.anova_lm(model.fit()) ``` ## _poolfamily_ ``` pool = ['parcelid', 'hashottuborspa', 'poolcnt', 'pooltypeid10', 'pooltypeid2', 'pooltypeid7'] pool_df = df[pool] pool_df.tail() pool_df.isna().sum() result = pd.merge(pool_df, train_2016_df) result.tail() result.isna().sum() result.fillna(0, inplace=True) result.tail() X = result.iloc[:, 1:6] y = result.iloc[:, 6:7] model = sm.OLS.from_formula("logerror ~ C(hashottuborspa) + C(poolcnt) + C(pooltypeid10) + \ C(pooltypeid2) + C(pooltypeid7) + 0", data = result) print(model.fit().summary()) ``` ## _architecturalstyletypeid_ ``` arch_df = df[["parcelid", "architecturalstyletypeid"]] arch_df.tail() arch_count = df.groupby(["architecturalstyletypeid"]).size().reset_index(name="counts") arch_count arch_df = pd.merge(arch_df, train_2016_df) arch_df.tail() arch_dummy = pd.get_dummies(arch_df['architecturalstyletypeid'], columns=['two', 'three', 'seven', 'eight', 'ten', 'twentyone']) arch_dummy.tail() result_df = pd.concat([arch_dummy, train_2016_df], axis=1) result_df.tail() result_df.columns = ['Bungalow', 'CapeCod', 'Contemporary', 'Conventional', \ 'FrenchProvincial', 'RanchRambler', 'parcelid', 'logerror', 'transactiondate'] result_df.tail() model_architect = sm.OLS.from_formula("logerror ~ C(Bungalow) + C(CapeCod) + C(Contemporary) +\ C(Conventional) + C(FrenchProvincial) + C(RanchRambler)", data=result_df) sm.stats.anova_lm(model_architect.fit()) # 전체 print(model_architect.fit().summary()) model_architect = sm.OLS.from_formula("logerror ~ C(Bungalow) + C(Contemporary) +\ C(Conventional) + C(FrenchProvincial) + C(RanchRambler)", data=result_df) sm.stats.anova_lm(model_architect.fit()) # CapeCod 제거 print(model_architect.fit().summary()) arch_df.tail() model_arch = sm.OLS.from_formula("logerror ~ C(architecturalstyletypeid) + 0", data=arch_df) sm.stats.anova_lm(model_arch.fit()) arch_count = df.groupby(["architecturalstyletypeid"]).size().reset_index(name="counts") 
arch_count ``` ## _construction_ ``` construction = ['architecturalstyletypeid', 'typeconstructiontypeid', \ 'buildingclasstypeid', 'buildingqualitytypeid'] construction construction_df = df[construction] construction_df.tail() construction_df.isna().sum() df1 = pd.get_dummies(construction_df['architecturalstyletypeid']) df1 train_2016_df.tail() result_df = pd.merge(construction_df, train_2016_df) result_df.tail() result_df construction_df1 = construction_df.groupby(["architecturalstyletypeid"]).size().reset_index(name="counts") construction_df1 construction_df2 = construction_df.groupby(["typeconstructiontypeid"]).size().reset_index(name="counts") construction_df2 construction_df3 = construction_df.groupby(["buildingclasstypeid"]).size().reset_index(name="counts") construction_df3 construction_df4 = construction_df.groupby(["buildingqualitytypeid"]).size().reset_index(name="counts") construction_df4 construction result_df1 = result_df.dropna() result_df1.tail() result_df2 = result_df.fillna(0) result_df2.tail() result_df.tail() construction_df1 = result_df.groupby(["architecturalstyletypeid"]).size().reset_index(name="counts") construction_df1 sample = ["secon", "three", "seven", "eight", "ten", "twentyone"] df1 = pd.get_dummies(result_df['architecturalstyletypeid']) df1.head() len(df1) architect_df = pd.DataFrame(columns=sample) architect_df.head() architect_df result2 = pd.concat([df1, train_2016_df]) result2.tail() pd df1.iloc[:, :1].tail() model0 = sm.OLS.from_formula("logerror ~ C(architecturalstyletypeid) + C(typeconstructiontypeid) \ + C(buildingclasstypeid) + C(buildingqualitytypeid) + 0", data=result_df2) print(model0.fit().summary()) dmatrix("architecturalstyletypeid", construction_df) model1 = sm.OLS.from_formula("logerror ~ C(architecturalstyletypeid) + 0", data=result_df) print(model1.fit().summary()) sns.stripplot(x="architecturalstyletypeid", y="logerror", data=result_df, jitter=True, alpha=.3) sns.pointplot(x="architecturalstyletypeid", 
y="logerror", data=result_df, dodge=True, color='r') plt.show() model_architect = sm.OLS.from_formula("logerror ~ C(architecturalstyletypeid) + 0", data=result_df) sm.stats.anova_lm(model_architect.fit()) result_df.tail() pd.get_dummies() ``` ## _카테고리 데이터 모델링_ ``` df.tail() cat = ['parcelid', 'airconditioningtypeid', 'architecturalstyletypeid', 'buildingqualitytypeid', \ 'buildingclasstypeid', 'decktypeid', 'fips', 'heatingorsystemtypeid', \ 'propertycountylandusecode', 'propertylandusetypeid', 'propertyzoningdesc', \ 'rawcensustractandblock', 'censustractandblock', 'regionidcounty', 'regionidcity', \ 'regionidzip', 'regionidneighborhood', 'typeconstructiontypeid', 'yearbuilt', \ 'assessmentyear', 'taxdelinquencyyear'] cat cat_df = df[cat] cat_df.tail() result_df = pd.merge(cat_df, train_2016_df) result_df.tail() values = {} for i in cat: values[i] = "nulls_" + i[:10] values result_df.fillna(values, inplace=True) del result_df['transactiondate'] result_df.tail() formula = "logerror ~ " for i in cat[1:]: formula += "C(" + i + ") + " formula = formula[:-3] formula y = result_df.iloc[:, -1:] X = result_df.iloc[:, :-1] model = sm.OLS.from_formula(formula, data=result_df) print(model.fit().summary()) result_df ```
github_jupyter
# Read datasets ``` import pandas as pd countries_of_the_world = pd.read_csv('../datasets/countries-of-the-world.csv') countries_of_the_world.head() mpg = pd.read_csv('../datasets/mpg.csv') mpg.head() student_data = pd.read_csv('../datasets/student-alcohol-consumption.csv') student_data.head() young_people_survey_data = pd.read_csv('../datasets/young-people-survey-responses.csv') young_people_survey_data.head() import matplotlib.pyplot as plt import seaborn as sns ``` # Count plots In this exercise, we'll return to exploring our dataset that contains the responses to a survey sent out to young people. We might suspect that young people spend a lot of time on the internet, but how much do they report using the internet each day? Let's use a count plot to break down the number of survey responses in each category and then explore whether it changes based on age. As a reminder, to create a count plot, we'll use the catplot() function and specify the name of the categorical variable to count (x=____), the Pandas DataFrame to use (data=____), and the type of plot (kind="count"). Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt. ``` survey_data = young_people_survey_data # Create count plot of internet usage sns.catplot(x="Internet usage", data=survey_data, kind="count") # Show plot plt.show() # Change the orientation of the plot sns.catplot(y="Internet usage", data=survey_data, kind="count") # Show plot plt.show() survey_data["Age Category"] = ['Less than 21' if x < 21 else '21+' for x in survey_data['Age']] # Create column subplots based on age category sns.catplot(y="Internet usage", data=survey_data, kind="count", col="Age Category") # Show plot plt.show() ``` # Bar plots with percentages Let's continue exploring the responses to a survey sent out to young people. The variable "Interested in Math" is True if the person reported being interested or very interested in mathematics, and False otherwise. 
What percentage of young people report being interested in math, and does this vary based on gender? Let's use a bar plot to find out. As a reminder, we'll create a bar plot using the catplot() function, providing the name of categorical variable to put on the x-axis (x=____), the name of the quantitative variable to summarize on the y-axis (y=____), the Pandas DataFrame to use (data=____), and the type of categorical plot (kind="bar"). Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt. ``` survey_data["Interested in Math"] = [True if x > 3 else False for x in survey_data['Mathematics']] # Create a bar plot of interest in math, separated by gender sns.catplot(x="Gender", y="Interested in Math", data=survey_data, kind="bar") # Show plot plt.show() ``` # Customizing bar plots In this exercise, we'll explore data from students in secondary school. The "study_time" variable records each student's reported weekly study time as one of the following categories: "<2 hours", "2 to 5 hours", "5 to 10 hours", or ">10 hours". Do students who report higher amounts of studying tend to get better final grades? Let's compare the average final grade among students in each category using a bar plot. Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt. ``` # Create bar plot of average final grade in each study category sns.catplot(x="study_time", y="G3", data=student_data, kind="bar") # Show plot plt.show() # Rearrange the categories sns.catplot(x="study_time", y="G3", data=student_data, kind="bar", order=["<2 hours", "2 to 5 hours", "5 to 10 hours", ">10 hours"]) # Show plot plt.show() # Turn off the confidence intervals sns.catplot(x="study_time", y="G3", data=student_data, kind="bar", order=["<2 hours", "2 to 5 hours", "5 to 10 hours", ">10 hours"], ci=None) # Show plot plt.show() ``` # Create and interpret a box plot Let's continue using the student_data dataset. 
In an earlier exercise, we explored the relationship between studying and final grade by using a bar plot to compare the average final grade ("G3") among students in different categories of "study_time". In this exercise, we'll try using a box plot look at this relationship instead. As a reminder, to create a box plot you'll need to use the catplot() function and specify the name of the categorical variable to put on the x-axis (x=____), the name of the quantitative variable to summarize on the y-axis (y=____), the Pandas DataFrame to use (data=____), and the type of plot (kind="box"). We have already imported matplotlib.pyplot as plt and seaborn as sns. ``` # Specify the category ordering study_time_order = ["<2 hours", "2 to 5 hours", "5 to 10 hours", ">10 hours"] # Create a box plot and set the order of the categories sns.catplot(x="study_time", y="G3", data=student_data, kind='box', order=study_time_order) # Show plot plt.show() ``` ## Question Which of the following is a correct interpretation of this box plot? Possible Answers: The median grade among students studying less than 2 hours is 10.0. # Omitting outliers Now let's use the student_data dataset to compare the distribution of final grades ("G3") between students who have internet access at home and those who don't. To do this, we'll use the "internet" variable, which is a binary (yes/no) indicator of whether the student has internet access at home. Since internet may be less accessible in rural areas, we'll add subgroups based on where the student lives. For this, we can use the "location" variable, which is an indicator of whether a student lives in an urban ("Urban") or rural ("Rural") location. Seaborn has already been imported as sns and matplotlib.pyplot has been imported as plt. As a reminder, you can omit outliers in box plots by setting the sym parameter equal to an empty string (""). 
``` # Create a box plot with subgroups and omit the outliers sns.catplot(x="internet", y="G3", data=student_data, kind='box', hue="location", sym="") # Show plot plt.show() ``` # Adjusting the whiskers In the lesson we saw that there are multiple ways to define the whiskers in a box plot. In this set of exercises, we'll continue to use the student_data dataset to compare the distribution of final grades ("G3") between students who are in a romantic relationship and those that are not. We'll use the "romantic" variable, which is a yes/no indicator of whether the student is in a romantic relationship. Let's create a box plot to look at this relationship and try different ways to define the whiskers. We've already imported Seaborn as sns and matplotlib.pyplot as plt. ``` # Shorten the whiskers to 0.5 times the IQR sns.catplot(x="romantic", y="G3", data=student_data, kind="box", whis=0.5) # Show plot plt.show() # Extend the whiskers to the 5th and 95th percentile sns.catplot(x="romantic", y="G3", data=student_data, kind="box", whis=[5, 95]) # Show plot plt.show() # Set the whiskers at the min and max values sns.catplot(x="romantic", y="G3", data=student_data, kind="box", whis=[0, 100]) # Show plot plt.show() ``` # Customizing point plots Let's continue to look at data from students in secondary school, this time using a point plot to answer the question: does the quality of the student's family relationship influence the number of absences the student has in school? Here, we'll use the "famrel" variable, which describes the quality of a student's family relationship from 1 (very bad) to 5 (very good). As a reminder, to create a point plot, use the catplot() function and specify the name of the categorical variable to put on the x-axis (x=____), the name of the quantitative variable to summarize on the y-axis (y=____), the Pandas DataFrame to use (data=____), and the type of categorical plot (kind="point").
We've already imported Seaborn as sns and matplotlib.pyplot as plt. ``` # Create a point plot of family relationship vs. absences sns.catplot(x="famrel", y="absences", data=student_data, kind="point") # Show plot plt.show() # Add caps to the confidence interval sns.catplot(x="famrel", y="absences", data=student_data, kind="point", capsize=0.2) # Show plot plt.show() # Remove the lines joining the points sns.catplot(x="famrel", y="absences", data=student_data, kind="point", capsize=0.2, join=False) # Show plot plt.show() ``` # Point plots with subgroups Let's continue exploring the dataset of students in secondary school. This time, we'll ask the question: is being in a romantic relationship associated with higher or lower school attendance? And does this association differ by which school the students attend? Let's find out using a point plot. We've already imported Seaborn as sns and matplotlib.pyplot as plt. Use sns.catplot() and the student_data DataFrame to create a point plot with relationship status ("romantic") on the x-axis and number of absences ("absences") on the y-axis. Create subgroups based on the school that they attend ("school") ``` # Create a point plot with subgroups sns.catplot(x="romantic", y="absences", data=student_data, kind="point", hue="school") # Show plot plt.show() # Turn off the confidence intervals for this plot sns.catplot(x="romantic", y="absences", data=student_data, kind="point", hue="school", ci=None) # Show plot plt.show() # Import median function from numpy from numpy import median # Plot the median number of absences instead of the mean sns.catplot(x="romantic", y="absences", data=student_data, kind="point", hue="school", ci=None, estimator=median) # Show plot plt.show() ```
github_jupyter
# Preprocessing To begin the training process, the raw images first had to be preprocessed. For the most part, this meant removing the banners that contained image metadata while retaining as much useful image data as possible. To remove the banners, I used a technique called "reflective padding" which meant I remove the banner region, then pad the edges with its own reflection. An example of this is shown here: In order to remove the banners, however, they must first be detected. This was done using kernels in OpenCV to detect vertical and horizontal lines within the image. For instance, let's say you start with this image: ``` from pathlib import Path import matplotlib.pyplot as plt import matplotlib.image as mpimg file = '../data/Raw_Data/Particles/L2_000b4469b73e3fb3558d20b33b91fcb0.jpg' img = mpimg.imread(file) fig, ax = plt.subplots(1, 1, figsize=(10,10)) ax.set_axis_off() ax.imshow(img) ``` The first step would be to create a binary mask of the image where all pixels above a threshold becomes 255 and all pixels below the threshold becomes 0. Since the banners in our images are mostly white, the threshold value chosen was 250. This is to ensure it is mostly only the banner that is left in the mask. ``` import cv2 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # binarization only works if the image is first converted to greyscale ret, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY) # binarize the image using 250 as the threshold value fig, ax = plt.subplots(1, 1, figsize=(10,10)) ax.set_axis_off() ax.imshow(thresh) ``` Next, use [erosion and dilation](https://docs.opencv.org/2.4/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.html) to find where the vertical and horizontal lines are within the image. 
By successively replacing pixels with the minimum (erosion) then maximum value (dilation) over the area of a kernel, largely vertical regions of the image are maintained using a tall thin kernel while a short long kernel maintains the largely horizontal regions of the image. ``` # Find the vertical and horizontal lines in the image verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 13)) horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 1)) kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)) img_v = cv2.erode(thresh, verticle_kernel, iterations = 3) vert_lines_img = cv2.dilate(img_v, verticle_kernel, iterations = 3) img_h = cv2.erode(thresh, horizontal_kernel, iterations = 3) hori_lines_img = cv2.dilate(img_h, horizontal_kernel, iterations = 3) fig, ax = plt.subplots(1, 2, figsize=(20,20)) ax[0].set_axis_off() ax[0].imshow(vert_lines_img) ax[1].set_axis_off() ax[1].imshow(hori_lines_img) ```
github_jupyter
<a href="https://colab.research.google.com/github/GavinHacker/recsys_model/blob/master/7_recbaserecall.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # 使用基于电影相似度进行推荐的方法进行召回 ### install library ``` !pip install pymysql from google.colab import drive drive.mount('/content/drive') ``` ## Functional code ``` import pandas as pd import pymysql import pymysql.cursors from functools import reduce import numpy as np import pandas as pd import uuid import datetime #from pyfm import pylibfm from sklearn.feature_extraction import DictVectorizer from sklearn.metrics.pairwise import pairwise_distances np.set_printoptions(precision=3) np.set_printoptions(suppress=True) from sklearn.datasets import dump_svmlight_file from sklearn.preprocessing import OneHotEncoder import pickle as pkl from sklearn.metrics import roc_auc_score, mean_squared_error from sklearn.datasets import load_svmlight_file from sklearn.linear_model import LogisticRegression def get_connection(): return pymysql.connect(host='rm-2zeqqm6994abi7b6dqo.mysql.rds.aliyuncs.com', user='noone', password='Huawei12#$', db='recsys', port=3306, charset ='utf8', use_unicode=True) ``` ## 获取最新的comment,实际recsys_core实现的时候使用mqlog消息 ``` def get_comment_data(): df_comment_new_data = pd.read_sql_query("select * from comment_new where newdata = 1 ", get_connection()) df_comment_new_data_ldim = df_comment_new_data.loc[:,['ID','MOVIEID','USERID']] return df_comment_new_data_ldim ``` ## 获取基于相似度推荐的电影集合 ``` def get_ibmovie_by_movieid(movieid, connection): sql = 'select DISTINCT recmovieid from ibmovie where movieid = \'%s\'' % movieid try: with connection.cursor() as cursor: cout=cursor.execute(sql) return cursor.fetchall() except Exception as e: print(e) connection.close() return None ``` ## 对recmovie表插入数据,保留原始movieid,即根据哪个电影推荐而来 ``` def insert_or_update_recmovie(movieid, userid, srcmovieid, connection): _id = uuid.uuid4() time_now = datetime.datetime.now() q_sql = 
'select id from recmovie where userid=\'%s\' and movieid=\'%s\'' % (userid, movieid) i_sql = 'insert into recmovie (id, userid, movieid, rectime, srcmovieid) values (\'%s\', \'%s\', \'%s\', \'%s\', \'%s\')' % (_id, userid, movieid, time_now, srcmovieid) exist_list = None try: with connection.cursor() as cursor: #print(q_sql) cout=cursor.execute(q_sql) exist_list = cursor.fetchall() if len(exist_list) > 0: with connection.cursor() as cursor: for item in exist_list: u_sql = 'update recmovie set rectime=\'%s\' where id=\'%s\'' % (time_now, item[0]) cursor.execute(u_sql) else: with connection.cursor() as cursor: cursor.execute(i_sql) except Exception as e: print(e) connection.close() return None ``` * test code insert_or_update_recmovie('10430817','cf2349f9c01f9a5cd4050aebd30ab74f',conn) ``` def update_comment_new_data_flag(rid, connection): sql = 'update comment_new set newdata = 0 where id = \'%s\'' % rid try: with connection.cursor() as cursor: cout=cursor.execute(sql) except Exception as e: print(e) connection.close() ``` ## 根据用户的打分进行相似电影召回,暂不限数量,具体召回数量取决于用户看过的电影的相似电影集合交集 ``` def func_main(): df_comment_new_data_ldim = get_comment_data() conn = get_connection() for i in df_comment_new_data_ldim.index: print(df_comment_new_data_ldim.iloc[i]['MOVIEID'], df_comment_new_data_ldim.iloc[i]['USERID']) ibmovie_list = get_ibmovie_by_movieid(df_comment_new_data_ldim.iloc[i]['MOVIEID'], get_connection()) for j in ibmovie_list: insert_or_update_recmovie(j[0],'cf2349f9c01f9a5cd4050aebd30ab74f', df_comment_new_data_ldim.iloc[i]['MOVIEID'], conn) update_comment_new_data_flag(df_comment_new_data_ldim.iloc[i]['ID'], conn) conn.commit() #func_main() ```
github_jupyter
# 03 - Registering a Model in your Workspace Now that we have trained a set of models and identified the run containing the best model, we want to deploy the model for inferencing. ``` import environs e_vars = environs.Env() e_vars.read_env('../workshop.env') USER_NAME = e_vars.str("USER_NAME") EXPERIMENT_NAME = e_vars.str('EXPERIMENT_NAME') ENVIRONMENT_NAME = e_vars.str("ENVIRONMENT_NAME") DATASET_NAME = e_vars.str("DATASET_NAME") SERVICE_NAME = e_vars.str("SERVICE_NAME") MODEL_NAME = e_vars.str("MODEL_NAME") if not USER_NAME: raise NotImplementedError("Please enter your username in the `.env` file and run this cell again.") from azureml.core import Workspace, Experiment ws = Workspace.from_config() experiment = Experiment(ws, EXPERIMENT_NAME) ``` ### Find the Best Run We can use the SDK to search through our runs to determine which was the best run. In our case, we'll use RMSE to determine the best metric. ``` from tqdm import tqdm def find_best_run(experiment, metric, goal='minimize'): runs = {} run_metrics = {} # Create dictionaries containing the runs and the metrics for all runs containing the metric for r in tqdm(experiment.get_runs(include_children=True)): metrics = r.get_metrics() if metric in metrics.keys(): runs[r.id] = r run_metrics[r.id] = metrics if goal == 'minimize': min_run = min(run_metrics, key=lambda k: run_metrics[k][metric]) return runs[min_run] else: max_run = max(run_metrics, key=lambda k: run_metrics[k][metric]) return runs[max_run] best_run = find_best_run(experiment, 'rmse', 'minimize') # Display the metrics best_run.get_metrics() ``` ### Register a model from best run We have already identified which run contains the "best model" by our evaluation criteria. Each run has a file structure associated with it that contains various files collected during the run. Since a run can have many outputs we need to tell AML which file from those outputs represents the model that we want to use for our deployment. 
We can use the `run.get_file_names()` method to list the files associated with the run, and then use the `run.register_model()` method to place the model in the workspace's model registry. When using `run.register_model()` we supply a `model_name` that is meaningful for our scenario and the `model_path` of the model relative to the run. In this case, the model path is what is returned from `run.get_file_names()` ``` # View the files in the run for f in best_run.get_file_names(): if 'logs' not in f: print(f) # Register the model with the workspace model = best_run.register_model(model_name=MODEL_NAME, model_path='outputs/model.pkl') ``` Once a model is registered, it is accessible from the list of models on the AML workspace. If you register models with the same name multiple times, AML keeps a version history of those models for you. The `Model.list()` lists all models in a workspace, and can be filtered by name, tags, or model properties. ``` # Find all models called "diabetes_regression_model" and display their version numbers from azureml.core.model import Model models = Model.list(ws, name=MODEL_NAME) for m in models: print(m.name, m.version) ``` <br><br><br><br><br> ###### Copyright (c) Microsoft Corporation. All rights reserved. ###### Licensed under the MIT License.
github_jupyter
``` from datascience import * import seaborn as sns import numpy as np import pandas as pd %matplotlib inline import matplotlib.pyplot as plt import statsmodels.formula.api as smf from matplotlib.lines import Line2D plt.style.use('seaborn') #Data clarification #Rank - Current World ranking based on 4 last competitions #Name - Name of the participant #Country - Country of origin of participant #Age - Age of participant #Average Arrow - Average lifetime competitive arrow score, reference: http://texasarchery.info/wp-content/uploads/2015/08/NASP-JOAD-How-to-score.jpg base = pd.read_csv('Cleaned Data Men age and avg arrow.csv') base2 = pd.read_csv('Cleaned Data Men age and avg arrow.csv') base = base.drop(columns=['Country']) base #quick mean age calculation for paper base['Age'].mean() BaseByAge = base.sort_values(by=['Age'],ascending= False) #Calculate how many arhers of each age there are #Would like to show if there is a strong preference for certain ages AgeAmount = base.groupby('Age',as_index = False).count() #list(AgeAmount) AgeAmount #Time to clean the data after some unexpected results CleanedAgeA = AgeAmount.drop(columns=['Name','Score','Average Arrow']) CleanedAgeA = CleanedAgeA.rename(columns={'World Rank': '# of Archers'}) #Goal here was to keep the original cleaned data set,unfortunately this did not work, but is fixed later CleanedAgeAB = CleanedAgeA CleanedAgeAB['cum_sum'] = CleanedAgeA['# of Archers'].cumsum() CleanedAgeAB['Cumulative %'] = 100*CleanedAgeA.cum_sum/CleanedAgeA['# of Archers'].sum() CleanedAgeAB #I Realize now that calculating the percentage was entirely pointless # as our data has 100 points, therefore the cumulative sum is the same as the #percentage df1 = CleanedAgeAB.drop(columns=['cum_sum',"# of Archers"]) df2 = CleanedAgeAB.drop(columns=['cum_sum',"Cumulative %"]) #Goal: Make a chart showcasing most archers are very young CleanedAgeAB.plot.area(x = 'Age', y = ["# of Archers",'Cumulative %'],alpha = 0.5) #Goal: make a graph 
representing the presence of each age in the dataset #CleanedAgeA = CleanedAgeA.drop(columns = ['cum_sum','Cumulative %']) ax = CleanedAgeA.plot.bar(x = 'Age',y = '# of Archers',legend = False,colormap = 'Accent') ax.set_ylabel("# of Archers") #The following data could probably be grouped into age brackets to produce a cleaner graph, but this one is more accurate #Question 2 Graphs begin here #Goal: Produce a graph showing performance for archers aged under 25 and 25 and over #1st, seperate the data by age groups ElderData = BaseByAge.iloc[0:44] YoungData = BaseByAge.iloc[44:100] #Find the means of desired variables df1 = ElderData.mean() df2 = YoungData.mean() CombinedMeans = pd.concat([df1,df2], axis = 1) CombinedMeans.columns = ['Older','Younger'] CombinedMeans #Creating a small multiple figure in order to show difference in performance #of younger and older archers fig, axs = plt.subplots(1,4) row1 = CombinedMeans.iloc[0] row2 = CombinedMeans.iloc[1] row3 = CombinedMeans.iloc[2] row4 = CombinedMeans.iloc[3] fig.subplots_adjust(wspace = 1) axs[1].set_ylim(50,51) axs[1].set_ylabel('Average World Placement') axs[2].set_ylim(80,90) axs[2].set_ylabel('Average Score') axs[0].set_ylim(20,30) axs[0].set_ylabel('Average Age') axs[3].set_ylim(9,9.1) axs[3].set_ylabel('Average Arrow Points') row1.plot.bar(ax = axs[1]) row2.plot.bar(ax = axs[2]) row3.plot.bar(ax = axs[0]) row4.plot.bar(ax = axs[3]) #Question 3: Since Recurrent shoulder pains are more prevalent in #archers aged 20 and up, can this be observed in their performance or sport retirement age? #Question 4: Since there seem to be many factors such as stress, anxiety, probability of injury, and heart rate control that can influence the performance of an archer, is there a formula for the "perfect" archer? #Question 5: If the above question is true, can these trends be noticed in the world of professional archery today? 
#Goal: Compile a data set comparing the performance of archers younger than 20, and archers older than 20,but younger than 23 in order to minimize the effect of longer training by older archers #We should end up with data about archers ages 17-19 and 20-23 #print(BaseByAge.iloc[51:88]) injuryYoung = BaseByAge.iloc[88:100] injuryOld = BaseByAge.iloc[51:88] #Since we are evaluating performance based on age again, we can do the same calculations as we did before df1 = injuryYoung.mean() df2 = injuryOld.mean() CombinedMeans = pd.concat([df1,df2], axis = 1) CombinedMeans.columns = ['Younger','Older'] CombinedMeans #This data does not support the conclusion of the research paper #plausible causes: more training could mean easily overcoming any disadvantages the pain could cause #Archers who feel chronic pain do not perform as well and do not compete at the highest levels, #meaning they are not represented in our dataset #without access to medical records, we cannot establish if some archers stop due to shoulder pain, making us unable #to associate pain with retirement age fig, axs = plt.subplots(1,4) row1 = CombinedMeans.iloc[0] row2 = CombinedMeans.iloc[1] row3 = CombinedMeans.iloc[2] row4 = CombinedMeans.iloc[3] fig.subplots_adjust(wspace = 1) axs[1].set_ylim(45,60) axs[1].set_ylabel('Average World Placement') axs[2].set_ylim(70,90) axs[2].set_ylabel('Average Score') axs[0].set_ylim(15,25) axs[0].set_ylabel('Average Age') axs[3].set_ylim(9,9.1) axs[3].set_ylabel('Average Arrow Points') row1.plot.bar(ax = axs[1]) row2.plot.bar(ax = axs[2]) row3.plot.bar(ax = axs[0]) row4.plot.bar(ax = axs[3]) #Based on the articles,Let us summarize what each found to be the best age for an archer #Article 1: an archer of age under 35 #Article 2: an archer of age over 25 #Article 3: an archer of age under 20 - although this data does not agree with our research so far #Article 4: an archer of around the age of late 20's/early 30's but younger than 50 #Article 5: an archer of around the 
age of late 20's/early 30's but younger than 50 #This leaves us with the optimal age of around 27-35 #Time to test if this trend stands true for the current world of archery #Goal: Compare and contrast different "generations" of archers in order to see which performs the best #print(BaseByAge.iloc[0:4]) ageGroup1 = BaseByAge.iloc[73:100] ageGroup2 = BaseByAge.iloc[30:73] ageGroup3 = BaseByAge.iloc[11:30] ageGroup4 = BaseByAge.iloc[5:11] ageGroup5 = BaseByAge.iloc[0:4] df1 = ageGroup1.mean() df2 = ageGroup2.mean() df3 = ageGroup3.mean() df4 = ageGroup4.mean() df5 = ageGroup5.mean() CombinedMeans = pd.concat([df1,df2,df3,df4,df5], axis = 1) CombinedMeans.columns = ['17-21','22-26','27-31','32-34','35-37'] CombinedMeans #Would like to space x labels out so they are more readable fig, axs = plt.subplots(1,4) fig.set_tight_layout row1 = CombinedMeans.iloc[0] row2 = CombinedMeans.iloc[1] row3 = CombinedMeans.iloc[2] row4 = CombinedMeans.iloc[3] fig.subplots_adjust(wspace = 1.3) axs[1].set_ylim(30,65) axs[1].set_ylabel('Average World Placement') axs[2].set_ylim(70,115) axs[2].set_ylabel('Average Score') axs[0].set_ylim(17,37) axs[0].set_ylabel('Average Age') axs[3].set_ylim(9.04,9.12) axs[3].set_ylabel('Average Arrow Points') row1.plot.bar(ax = axs[1]) row2.plot.bar(ax = axs[2]) row3.plot.bar(ax = axs[0]) row4.plot.bar(ax = axs[3]) df = pd.read_csv("Cleaned Data Men age and avg arrow.csv", index_col = 0) df = df.drop(columns=['Country']) df.columns = ["Name", "Score", "Age", "Average_Arrow"] model = 'Score ~ %s'%(" + ".join(df.columns.values[1:])) linear_regression = smf.ols(model, data = df).fit() linear_regression.summary() linear_regression.params std_err = linear_regression.params - linear_regression.conf_int()[0] std_err bd_df = pd.DataFrame({'coef' : linear_regression.params.values[1:], 'err' : std_err.values[1:], 'name' : std_err.index.values [1:]}) bd_df #START OF PROJECT 2 #Initialization of new data df2 = pd.read_csv("New Archery Data.csv", index_col = 0) df3 = 
pd.read_csv("Younger.csv", index_col = 0) df4 = pd.read_csv("Older.csv", index_col = 0) df2.columns = ["Name", "Score","Age", "Average_Arrow"] sns.lmplot(x = "Age", y = "Score", data = df3 , x_estimator = np.mean, x_ci = .95) #Adding some statistics that may be necessary for the calculations df2.sort_values(by=['Age'],ascending= True) AgeAmount = df2.groupby('Age',as_index = False).count() CleanedAgeA = AgeAmount.drop(columns=['Score','Average_Arrow']) CleanedAgeA = CleanedAgeA.rename(columns={'Name': '# of Archers'}) CleanedAgeAB = CleanedAgeA CleanedAgeAB['Cumulative Amount'] = CleanedAgeA['# of Archers'].cumsum() CleanedAgeAB['% of total'] = (CleanedAgeAB['# of Archers'] /199 * 100) CleanedAgeAB['Cumulative %'] = CleanedAgeAB['% of total'].cumsum() CleanedAgeAB = CleanedAgeAB.round(2) df = CleanedAgeAB df #Now that we have a base new dataset, it's time to do some analysis #TO DO LIST: #1 Present the data at hand in an appealing way #2 Find out if there is a visible decline at the age of around 35 - How fast do we age? 
#3 Find out if people older than 28 tend to perform better than those 17-28 - Psych skills of elite archers #4 Find out if people of around 20-30 perform better than those who are around 40-50 - Lars and Bo - heart rate and focus #1 shows us Most of the archers are between 20 and 30 #Most archers quit before they reach their best age df.plot.area(x = 'Age', y = ["# of Archers",'Cumulative %'],alpha = 0.5) #df.plot.(x = 'Age', y = ["# of Archers","Cumulative %"]) #2 Here we can see that archers steadily increase their performance until 33 #and performance decreases after sns.lmplot(x = "Age", y = "Score", data = df3 , x_estimator = np.mean, x_ci = .95) sns.lmplot(x = "Age", y = "Score", data = df4 , x_estimator = np.mean, x_ci = .95) #3 n3y = pd.read_csv("17-28.csv", index_col = 0) n3o = pd.read_csv("28+.csv",index_col = 0) n3b = pd.read_csv("29-33.csv",index_col=0) n3y.mean().plot(kind = 'bar') fig, axs = plt.subplots(1,3,sharey = True) row1 = n3y.mean() row2 = n3o.mean() row3 = n3b.mean() #axs[0].set_ylabel() axs[0].set_xlabel("Ages 17-28") axs[1].set_xlabel("Ages 28-50") axs[2].set_xlabel("Ages 28-33") row1.plot.bar(ax = axs[0]) row2.plot.bar(ax = axs[1]) row3.plot.bar(ax = axs[2]) axs[0].axhline(54, color = 'r') axs[1].axhline(54, color = 'r') axs[1].axhline(58, color = 'orange') axs[2].axhline(54, color = 'r') axs[2].axhline(58, color = 'orange') #This shows us there is a difference in performance of archers aged # 17-28 and 28+, but also if we take into account the results of goal#2 #We get an even better score for this age group of archers #4 n4y = pd.read_csv("20-30.csv", index_col = 0) n4o = pd.read_csv("40-50.csv", index_col = 0) fig, axs = plt.subplots(1,2,sharey = True) row1 = n4y.mean() row2 = n4o.mean() #axs[0].set_ylabel() axs[0].set_xlabel("Ages 20-30") axs[1].set_xlabel("Ages 40-50") row1.plot.bar(ax = axs[0]) row2.plot.bar(ax = axs[1]) #axs[0].axhline(54, color = 'r') ```
github_jupyter
``` CLR = { 'blue': ['#e0f3ff', '#aadeff', '#2bb1ff', '#15587f', '#0b2c40'], 'gold': ['#fff3dc', '#ffebc7', '#ffddab', '#b59d79', '#5C4938'], 'red': ['#ffd8e8', '#ff9db6', '#ff3e72', '#6B404C', '#521424'], 'gray': ['#eeeeee', '#bbbbbb', '#999999', '#666666', '#333333'], } import pathlib import matplotlib.pyplot as plt import matplotlib.patches as ppt d_loop = [(10, 0.9326189901912585), (20, 3.431747987633571), (30, 7.900712997070514), (40, 23.176472008344717), (50, 30.817242004559375), (60, 37.93227899586782), (70, 47.18338800012134), (80, 61.70217400358524), (90, 78.50472899735905), (100, 97.76843499275856), (110, 118.02218899538275), (120, 141.43822199548595), (200, 439.25697400118224), (250, 717.5912509992486)] d_vectorized = [(10, 1.4458689984166995), (20, 5.35220600431785), (30, 10.920982997049578), (40, 17.124196005170234), (50, 24.84369200828951), (60, 35.458537997328676), (70, 48.63317900162656), (80, 62.86764900141861), (90, 79.89497699600179), (100, 98.36747299414128), (110, 119.616230003885), (120, 141.98320099967532), (200, 388.0320140015101), (250, 581.6465469979448)] d_lookup = [(10, 0.06900400330778211), (20, 0.11245900532230735), (30, 0.15335599891841412), (40, 0.4065209941472858), (50, 0.37285100552253425), (60, 0.6236290064407513), (70, 0.8157380070770159), (80, 1.0058790066977963), (90, 1.3296270044520497), (100, 1.5350780013250187), (110, 1.9105770043097436), (120, 2.305513000464998), (200, 6.753337002010085), (250, 9.713487001135945), (400, 30.437451001489535), (500, 47.650370994233526), (1000, 191.76472099206876), (2000, 738.4437500004424)] def fig_before(title: str, xlabel, ylabel): fig = plt.figure() ax = fig.add_subplot(111) ax.set_title(title) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) return fig, ax out_dir = pathlib.Path('opt/') def fig_after(fig, ax, patches=None, fname=None): if patches: ax.legend(handles=patches) if display: plt.show(fig) if fname: for out_file in [str(out_dir/fname) + s for s in ('.png', '.svg')]: print('saving to', 
out_file) fig.savefig(out_file) fig.clear() plt.close(fig) fig, ax = fig_before('Performance Comparison', 'Token Count', 'Miliseconds') ax.set_xscale('log') data = (d_loop, d_vectorized, d_lookup) colors = (CLR['red'][2], CLR['blue'][2], CLR['gray'][4]) names = ('loop', 'vectorized', 'lookup') patches = [] for d, color, name in zip(data, colors, names): x, y = zip(*d) ax.plot([q**2 for q in x], y, color=color) patches.append(ppt.Patch(color=color, label=name)) ax.axvline(x=200**2, color='black', ls='dashed', lw=1) fig_after(fig, ax, patches=patches, fname='benchmark') for ds in data: print() for k, v in ds: print(k, v) ```
github_jupyter
## Data Visualization - Pie Chart: Compare Percentages - Bar Chart: Compare Scores across groups - Histogram: Show frequency of values/value range - Line Chart: Show trend of Scores - Scatter Plot: Show Relationship between a pair of Scores - Map: Show Geo Distribution of data |Type|Variable Y|Variable X| |:--:|:--:|:--:| |Pie Chart|Fractions|None| |Bar Chart|Numbers|Categories| |Histogram|Integer|Categories/Value Range| |Line Chart|Numbers|Time/Date/Period| |Scatter Plot|Numbers|Numbers| |Map|Latitude|Longtitude| ### Sign up for Plot.ly 1. Sign up for Plot.ly: https://plot.ly/Auth/login/?action=signup# 2. Get your API token: Settings -> API Keys -> Regenerate Key -> Copy your newly created key 3. Save your API key somewhere <div class="alert alert-block alert-warning"> **<b>Reminder</b>** Free account can only call Plot.ly API 100 times per day and generate up to 25 graphs.</div> ``` import plotly.plotly as py #Import library and give it an abbreviated name import plotly.graph_objs as go #go: graph object from plotly import tools py.sign_in('USER NAME', 'API TOKEN') #fill in your user name and API token import package (as py) ! 
pip3 install plotly ``` *** ## Pie Chart ``` labels = ['Female','Male'] values = [40,20] trace = go.Pie(labels=labels, values=values) py.iplot([trace]) a=[1,2,3] type(a) a={'name':'junior'} type(a) a[0] #change data labels by re-defining parameter "textinfo" labels = ['Female','Male'] values = [40,20] trace = go.Pie(labels=labels, values=values, textinfo='label+value') py.iplot([trace], filename='pie_chart') #change color setting by re-defining "marker" parameter labels = ['Female','Male'] values = [40,20] trace = go.Pie(labels=labels, values=values, marker={'colors':['red','blue']}) py.iplot([trace], filename='pie_chart') #turn the pie chart into a donut by re-defining "hole" parameter labels = ['Female','Male'] values = [40,20] trace = go.Pie(labels=labels, values=values, hole=0.2, marker={'colors':['red','blue']}) py.iplot([trace], filename='pie_chart') #change the graph size to 400*300 and add a title by re-defining "width" and "height" in "layout" labels = ['Female','Male'] values = [40,20] trace = go.Pie(labels=labels, values=values) layout=go.Layout(width=500,height=500,title='Gender Distribution') fig=go.Figure([trace],layout) py.iplot(fig, filename='pie_chart') ``` #### <font style="color: blue">Practice:</font> --- <font style="color: blue"> Please download the Hong Kong census data about educational attainment from <a href='https://juniorworld.github.io/python-workshop-2018/doc/Hong Kong Census Educational Attainment.csv'>this link</a>. <p>Create a pie chart to visualize the percentages of different education levels in 2016. The pie chart should meet following requirements:</p> 1. Donut style 2. 
Change slice colors </font> ``` #Write down your code here #--------------------------------------------------------- import pandas as pd edu_table=pd.read_csv('doc\Hong Kong Census Educational Attainment.csv') labels=edu_table['Level'] values=edu_table['2016'] trace=go.Pie(labels=labels,values=values,hole=0.2,marker={'colors':['red','blue','pink','orange','green','grey','yellow']}) py.iplot([trace],filename='pie chart') edu_table edu_table['2016'] edu_table.loc[0] ``` *** ## Bar Chart <br>For more details: https://plot.ly/python/reference/#bar ``` x = ['Female','Male'] y = [1.6,1.8] trace = go.Bar(x=x,y=y) py.iplot([trace], filename='bar_chart') #Widen the gap between bars by increasing "bargap" parameters in layout x = ['Female','Male'] y = [40,20] trace = go.Bar(x=x,y=y) layout = go.Layout(bargap=0.5) fig = go.Figure([trace],layout) py.iplot(fig, filename='bar_chart') #Grouped bar chart x = ['Female','Male'] y1 = [40,20] y2 = [30,50] trace1 = go.Bar(x=x,y=y1,name='class1') trace2 = go.Bar(x=x,y=y2,name='class2') py.iplot([trace1,trace2], filename='bar_chart') #Stacked/Relative bar chart by re-defining "barmode" in layout x = ['Female','Male'] y1 = [40,20] y2 = [30,50] trace1 = go.Bar(x=x,y=y1,name='class1') trace2 = go.Bar(x=x,y=y2,name='class2') layout = go.Layout(barmode='stack') fig = go.Figure([trace1,trace2],layout) py.iplot(fig, filename='bar_chart') #100% Stacked bar chart by re-defining "barnorm" as "percent" in layout x = ['Female','Male'] y1 = [40,20] y2 = [30,50] trace1 = go.Bar(x=x,y=y1) trace2 = go.Bar(x=x,y=y2) layout = go.Layout(barmode='stack',barnorm='percent') fig = go.Figure([trace1,trace2],layout) py.iplot(fig, filename='bar_chart') x = ['Female','Male'] y1 = [40,20] y2 = [30,50] trace1 = go.Bar(x=x,y=y1) trace2 = go.Bar(x=x,y=y2) layout = go.Layout(barmode='stack',barnorm='fraction',yaxis={'tickformat':'%'}) fig = go.Figure([trace1,trace2],layout) py.iplot(fig, filename='bar_chart') ``` #### <font style="color: blue">Practice:</font> --- 
<font style="color: blue"> Please refer to "Hong Kong Census Educational Attainment.csv". <p>Create a bar chart to visualize the percentages of different education levels in different years, i.e. 2006, 2011 and 2016. The bar chart should meet following requirements:</p> 1. A bar represents a year 2. 100% Stacked bar chart: higher education levels stacked on top of lower ones and the bar's full length is 100% 2. The gap between bar groups = 0.2 </font> ``` #Write down your code here #--------------------------------------------------------- x=['year 2016','year 2011','year 2006'] y1 = edu_table.loc[0][1:] y2 = edu_table.loc[1][1:] y3 = edu_table.loc[2][1:] y4 = edu_table.loc[3][1:] y5 = edu_table.loc[4][1:] y6 = edu_table.loc[5][1:] y7 = edu_table.loc[6][1:] label=edu_table['Level'] trace1 = go.Bar(x=x,y=y1,name=label[0]) trace2 = go.Bar(x=x,y=y2,name=label[1]) trace3 = go.Bar(x=x,y=y3,name=label[2]) trace4 = go.Bar(x=x,y=y4,name=label[3]) trace5 = go.Bar(x=x,y=y5,name=label[4]) trace6 = go.Bar(x=x,y=y6,name=label[5]) trace7 = go.Bar(x=x,y=y7,name=label[6]) layout = go.Layout(barnorm='percent',barmode='stack') fig=go.Figure(data=[trace1,trace2,trace3,trace4,trace5,trace6,trace7],layout=layout) py.iplot(fig,filename='barchart') x=['year 2016','year 2011','year 2006'] label=edu_table['Level'] data=[] for i in range(7): trace=go.Bar(x=x,y=edu_table.loc[i][1:],name=label[i]) data.append(trace) layout = go.Layout(barnorm='percent',barmode='stack') fig=go.Figure(data,layout) py.iplot(fig,filename='barchart') edu_table2=edu_table[['2006','2011','2016']] edu_table2 ``` *** ## Break *** ## Histogram Histogram is a special type of bar chart where one's y value is its count. It is used to show data distribution: viusalize the skewness and central tendency. 
<br>For more details: https://plot.ly/python/reference/#histogram ``` a=[1,2,3,3,4,4,4,5,5,6,7,3,3,2] trace=go.Histogram(x=a) py.iplot([trace],filename='Histogram') #Change the bins by re-defining "size" parameter in xbins a=[1,2,3,3,4,4,4,5,5,6,7,3,3,2] trace=go.Histogram(x=a,xbins={'size':1}) py.iplot([trace],filename='Histogram') #Convert into a 100% Histogram whose y value is percentage of getting a value #Re-define the "histnorm" to a "percent" mode a=[1,2,3,3,4,4,4,5,5,6,7,3,3,2] trace=go.Histogram(x=a,xbins={'size':1},histnorm='probability') layout=go.Layout(yaxis={'tickformat':'%'}) fig=go.Figure([trace],layout) py.iplot(fig,filename='Histogram') #Decrease every element in "a" by one unit to create a new list "b" #Grouped Histogram a=[1,2,3,3,4,4,4,5,5,6,7,3,3,2] b=[i-1 for i in a] #Write your code here trace1=go.Histogram(x=a,xbins={'size':1}) trace2=go.Histogram(x=b,xbins={'size':1}) py.iplot([trace1,trace2],filename='Histogram') #Overlay Histogram of a and b #Increase the transparency by re-defining "opacity" parameter #Change color by re-defining "color" parameter in "marker" #Change the value of "barmode" parameter in layout to "overlay" trace1=go.Histogram(x=a,xbins={'size':1},opacity=0.5,marker={'color':'blue'}) trace2=go.Histogram(x=b,xbins={'size':1},opacity=0.5,marker={'color':'red'}) layout=go.Layout(barmode='overlay') fig=go.Figure([trace1,trace2],layout) py.iplot(fig,filename='Histogram') ``` #### <font style="color: blue">Practice:</font> --- <font style="color: blue"> <font style="color: blue"> Please download YouTube Popularity data from <a href='https://juniorworld.github.io/python-workshop-2018/doc/Youtube.csv'>this link</a>. <p>Create three Histograms to visualize the distribution of views, likes, dislikes and comments. The histograms should meet following requirements:</p> 1. One basic histogram to show distribution of "views" 2. One basic histogram to show distribution of "log(views)" 3. 
One 100% overlay histogram to show distributions of log(likes), log(dislikes) and log(comments) Hint: to apply logarithmic transformation, you can use numpy's log10 function. For example: to calcualte the logrithm of a variable "a". </font> >```python import numpy as np a=np.log10(a)``` ``` #Write your code here pop_table=pd.read_csv('doc/Youtube.csv') pop_table.head() a=pop_table['views'] trace=go.Histogram(x=a) py.iplot([trace],file='histogram') import numpy as np a=np.log10(pop_table['views']) trace=go.Histogram(x=a) py.iplot([trace],file='histogram') a=np.log10(pop_table['likes']) b=np.log10(pop_table['dislikes']) c=np.log10(pop_table['comments']) trace1=go.Histogram(x=a,opacity=0.5,name='likes') trace2=go.Histogram(x=b,opacity=0.5,name='dislikes') trace3=go.Histogram(x=c,opacity=0.5,name='comments') layout=go.Layout(barmode='overlay') fig=go.Figure([trace1,trace2,trace3],layout) py.iplot(fig,filename='Histogram') ``` ## Line Chart In Plot.ly, line chart is defined as a special scatter plot whose scatters are connected by lines. 
<br>For more details: https://plot.ly/python/reference/#scatter ``` #create your first line chart x=[1,2,3] y=[10,22,34] trace1=go.Scatter(x=x,y=y,mode='lines') #mode='lines','markers','lines+markers' py.iplot([trace1],filename='line chart') #add markers to it by changing mode to "lines+markers" x=[1,2,3] y=[10,22,34] trace1=go.Scatter(x=x,y=y,mode='lines+markers') py.iplot([trace1],filename='line chart') #make it a dashed line by re-defining the "dash" parameters in "line" #try other alternative shapes: "solid", "dot", "dash", "longdash", "dashdot", or "longdashdot" x=[1,2,3] y=[10,22,34] trace1=go.Scatter(x=x,y=y,mode='lines+markers',line={'dash':'dash'}) py.iplot([trace1],filename='line chart') #fill the area below x=[1,2,3] y=[-10,22,34] trace1=go.Scatter(x=x,y=y,mode='lines',fill='tozeroy') #mode='lines' py.iplot([trace1],filename='line chart') #add another trace to it x=[1,2,3] y1=[10,22,34] y2=[34,22,10] trace1=go.Scatter(x=x,y=y1,mode='lines') trace2=go.Scatter(x=x,y=y2,mode='lines') py.iplot([trace1,trace2],filename='line chart') #change the range of axis x=[1,2,3] y1=[10,22,34] y2=[34,22,10] trace1=go.Scatter(x=x,y=y1,mode='lines') trace2=go.Scatter(x=x,y=y2,mode='lines') layout=go.Layout(yaxis={'range':[0,35]},xaxis={'range':[0,3]}) fig=go.Figure([trace1,trace2],layout) py.iplot(fig,filename='line chart') #stacked line chart by re-defining "stackgroup" parameter x=[1,2,3] y1=[10,22,34] y2=[34,22,10] trace1=go.Scatter(x=x,y=y1,mode='lines',stackgroup='1') trace2=go.Scatter(x=x,y=y2,mode='lines',stackgroup='1') py.iplot([trace1,trace2],filename='line chart') ``` #### <font style="color: blue">Practice:</font> --- <font style="color: blue"> <font style="color: blue"> Please download stock price data from <a href='https://juniorworld.github.io/python-workshop-2018/doc/stock.csv'>this link</a>. <p>Create a line chart to visualize the trend of these five listed companies. The line chart should meet following requirements:</p> 1. 
Name lines after companies </font> ``` #Write your code here ``` ## Scatter Plot <br>For more details: https://plot.ly/python/reference/#scatter ``` #create your first scatter plot x=[1,2,3,4,5] y=[10,22,34,40,50] trace1=go.Scatter(x=x,y=y,mode='markers') py.iplot([trace1],filename='scatter') #style the markers x=[1,2,3,4,5] y=[10,22,34,40,50] trace1=go.Scatter(x=x,y=y,mode='markers',marker={'size':10,'color':'red'}) py.iplot([trace1],filename='scatter') #assign different sizes and colors to markers x=[1,2,3,4,5] y=[10,22,34,40,50] trace1=go.Scatter(x=x,y=y,mode='markers',marker={'size':y,'color':x}) py.iplot([trace1],filename='scatter') #assign color according to values in colorscale #"Colorscale" options: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis x=[1,2,3,4,5] y=[10,22,34,40,50] trace1=go.Scatter(x=x,y=y,mode='markers',marker={'size':y,'color':x,'colorscale':'Rainbow'}) py.iplot([trace1],filename='scatter') #give names to them x=[1,2,3,4,5] y=[10,22,34,40,50] trace1=go.Scatter(x=x,y=y,mode='markers',marker={'size':y,'color':x,'colorscale':'Rainbow'},text=['a','b','c','d','e']) py.iplot([trace1],filename='scatter') #try plotting scatters in a 3D space x=[1,2,3,4,5] y=[10,22,34,40,50] z=[2,3,4,5,6] trace1=go.Scatter3d(x=x,y=y,z=z,mode='markers') py.iplot([trace1],filename='scatter') #Change axis titles x=[1,2,3,4,5] y=[10,22,34,40,50] z=[2,3,4,5,6] trace1=go.Scatter3d(x=x,y=y,z=z,mode='markers') layout=go.Layout(scene={'xaxis':{'title':'length'},'yaxis':{'title':'width'},'zaxis':{'title':'height'}}) fig=go.Figure([trace1],layout) py.iplot(fig,filename='scatter') ``` #### <font style="color: blue">Practice:</font> --- <font style="color: blue"> <font style="color: blue"> Please download box office data from <a href='https://juniorworld.github.io/python-workshop-2018/doc/movies.csv'>this link</a>. <p>Create a 3D scatter plot to visualize these movies. 
The scatter plot should meet following requirements:</p> 1. X axis represents "Production Budget" 2. Y axis represents "Box Office" 3. Z axis represents "ROI" (Return on Investment) 4. Size scatters according to their "IMDB Ratings" 5. Color scatters according to their "Genre" 6. Name scatters after movies </font> ``` import pandas as pd movies=pd.read_csv('doc\movies.csv') colors=[] for genre in movies['Genre']: if genre =='Comedy': colors.extend([1]) else: colors.extend([len(genre)]) np.unique(movies['Genre']) #Write your code here x=movies['Production Budget (millions)'] y=movies['Box Office (millions)'] z=movies['ROI'] trace1=go.Scatter3d(x=x,y=y,z=z,mode='markers',marker={'size':movies['Rating IMDB']*2,'color':colors,'colorscale':'Rainbow'}) layout=go.Layout(title='movies',scene={'xaxis':{'title':'Production Budget (millions)'},'yaxis':{'title':'Box Office (millions)'},'zaxis':{'title':'ROI'}}) fig=go.Figure([trace1],layout) py.iplot(fig,filename='scatter') ``` <div class="alert alert-block alert-info"> **<b>Tips</b>** Two tools to better work with colors in Python: <br>1. W3S color palette: https://www.w3schools.com/colors/colors_palettes.asp <br>2. colorlover: https://github.com/jackparmer/colorlover</div>
github_jupyter
# Quantum Kernel Alignment with Qiskit Runtime <br> **Classification with Support Vector Machines**<br> Classification problems are widespread in machine learning applications. Examples include credit card risk, handwriting recognition, and medical diagnosis. One approach to tackling classification problems is the support vector machine (SVM) [1,2]. This supervised learning algorithm uses labeled data samples to train a model that can predict to which class a test sample belongs. It does this by finding a separating hyperplane maximizing the margin between data classes. Often, data is not linearly separable in the original space. In these cases, the kernel trick is used to implicitly encode a transformation of the data into a higher-dimensional feature space, through the inner product between pairs of data points, where the data may become separable. **Quantum Kernels**<br> Quantum computers can be used to encode classical data in a quantum-enhanced feature space. In 2019, IBM introduced an algorithm called the quantum kernel estimator (QKE) for computing quantum kernels [3]. This algorithm uses quantum circuits with data provided classically and offers an efficient way to evaluate inner products between data in a quantum feature space. For two data samples $\theta$ and $\theta'$, the kernel matrix is given as $$ K(\theta, \theta') = \lvert\langle 0^n \rvert U^\dagger(\theta) U(\theta') \lvert 0^n \rangle \rvert^2, $$ where $U(\theta)$ prepares the quantum feature state. Quantum kernels used in a classification framework inherit the convex optimization program of the SVM and avoid common limitations of variational quantum classifiers. A key observation of this paper was that a necessary condition for a computational advantage requires quantum circuits for the kernel that are hard to simulate classically. 
More recently, IBM proved that quantum kernels can offer superpolynomial speedups over any classical learner on a learning problem based on the hardness of the discrete logarithm problem [4]. This means that quantum kernels can someday offer quantum advantage on suitable problems. **Quantum Kernels that Exploit Structure in Data**<br> An important approach in the search for practical quantum advantage in machine learning is to identify quantum kernels for learning problems that have underlying structure in the data. We've taken a step in this direction in our recent paper [5], where we introduced a broad class of quantum kernels that exploit group structure in data. Examples of learning problems for data with group structure could include learning permutations or classifying translations. We call this new class of kernels _covariant quantum kernels_ as they are related to covariant quantum measurements. The quantum feature map is defined by a unitary representation $D(\theta)$ of a group $G$ for some element $\theta \in G$, and a fiducial reference state $\lvert\psi\rangle = V\lvert0^n\rangle$ prepared by a unitary circuit $V$. The kernel matrix is given as $$ K(\theta, \theta') = \vert\langle 0^n \rvert V^\dagger D^\dagger(\theta) D(\theta') V \lvert 0^n \rangle \rvert^2. \qquad (1) $$ In general, the choice of the fiducial state is not known _a priori_ and can significantly impact the performance of the classifier. Here, we use a method called quantum kernel alignment (QKA) to find a good fiducial state for a given group. **Aligning Quantum Kernels on a Dataset**<br> In practice, SVMs require a choice of the kernel function. Sometimes, symmetries in the data can inform this selection, other times it is chosen in an ad hoc manner. Kernel alignment is one approach to learning a kernel on a given dataset by iteratively adapting it to have high similarity to a target kernel informed from the underlying data distribution [6]. 
As a result, the SVM with an aligned kernel will likely generalize better to new data than with an unaligned kernel. Using this concept, we introduced in [5] an algorithm for quantum kernel alignment, which provides a way to learn a quantum kernel from a family of kernels. Specifically, the algorithm optimizes the parameters in a quantum circuit to maximize the alignment of a kernel while converging to the maximum SVM margin. In the context of covariant quantum kernels, we extend Eq. $(1)$ to $$ K_\lambda(\theta,\theta') = \lvert\langle 0^n \rvert V^\dagger_\lambda D^\dagger(\theta) D(\theta') V_\lambda \lvert 0^n \rangle \rvert^2, \qquad (2) $$ and use QKA to learn a good fiducial state parametrized by $\lambda$ for a given group. **Covariant Quantum Kernels on a Specific Learning Problem**<br> Let's try out QKA on a learning problem. In the following, we'll consider a binary classification problem we call _labeling cosets with error_ [5]. In this problem, we will use a group and a subgroup to form two cosets, which will represent our data classes. We take the group $G = SU(2)^{\otimes n}$ for $n$ qubits, which is the special unitary group of $2\times2$ matrices and has wide applicability in nature, for example, the Standard Model of particle physics and in many condensed matter systems. We take the graph-stabilizer subgroup $S_{\mathrm{graph}} \in G$ with $S_{\mathrm{graph}} = \langle \{ X_i \otimes_{k:(k,i) \in \mathcal{E}} Z_k \}_{i \in \mathcal{V}} \rangle$ for a graph $(\mathcal{E},\mathcal{V})$ with edges $\mathcal{E}$ and vertices $\mathcal{V}$. Note that the stabilizers fix a stabilizer state such that $D_s \lvert \psi\rangle = \lvert \psi\rangle$. This observation will be useful a bit later. To generate the dataset, we write the rotations of the group as $D(\theta_1, \theta_2, 0)=\exp(i \theta_1 X) \exp(i \theta_2 Z) \in SU(2)$, so that each qubit is parametrized by the first two Euler angles (the third we set to zero). 
Then, we draw randomly two sets of angles $\mathbf{\theta}_\pm \in [-\pi/4, \pi/4]^{2n}$ for the $n$-qubit problem. From these two sets, we construct a binary classification problem by forming two left-cosets (representing the two classes) with those angles, $C_\pm = D(\mathbf{\theta}_\pm) S_{\mathrm{graph}}$ where $D(\mathbf{\theta}_\pm) = \otimes_{k=1}^n D(\theta_\pm^{2k-1}, \theta_\pm^{2k}, 0)$. Note that the elements of the cosets can again be written in terms of Euler angles. We build training and testing sets by randomly drawing elements from $C_\pm$ such that the dataset has samples $i=1,...,m$ containing the first two Euler angles for each qubit $\mathbf{\theta}_{y_i} = (\theta_{y_i}^{1}, \theta_{y_i}^{2}, \theta_{y_i}^{3}, \theta_{y_i}^{4}, ..., \theta_{y_i}^{2n-1}, \theta_{y_i}^{2n})$ and labels $y_i \in \{-1,1\}$ that indicate to which coset a sample belongs. Next, we select a fiducial state. A natural candidate is the stabilizer state we encountered above. Why? Because this is a subgroup invariant state, $D_s\lvert\psi\rangle = \lvert\psi\rangle$, which causes the data for a given coset to be mapped to a unique state: $D(\mathbf{\theta}_\pm)D_s \lvert\psi\rangle = D(\mathbf{\theta}_\pm) \lvert\psi\rangle$. This means the classifier only needs to distinguish the _two_ states $D(\mathbf{\theta}_\pm) \lvert\psi\rangle \langle \psi\rvert D^\dagger(\mathbf{\theta}_\pm)$ for every element of the coset. In this tutorial, we will add a small Gaussian error with variance $0.01$ to the Euler angles of the dataset. This noise will perturb these two states, but if the variance is sufficiently small, we expect the states will still be classified correctly. Let's consider a parametrized version of the stabilizer state, associated with the coupling graph $(\mathcal{E},\mathcal{V})$ given by the device connectivity, as our fiducial state and then use kernel alignment to find its optimal parameters. 
Specifically, we'll replace the initial layers of Hadamards in the graph state with $y$-rotations by an angle $\lambda$, $$ \lvert \psi_\lambda\rangle = V_\lambda \lvert 0^n\rangle = \prod_{(k,t) \in \mathcal{E}} CZ_{k,t} \prod_{k \in \mathcal{V}} \exp\left(i \frac{\lambda}{2} Y_k\right)\lvert 0^n\rangle, $$ where $CZ=\mathrm{diag}(1,1,1,-1)$. Then, given two samples from our dataset, $\mathbf{\theta}$ and $\mathbf{\theta}'$, the kernel matrix is evaluated as in Eq. $(2)$. If we initialize the kernel with $\lambda \approx 0$, we expect the quantum kernel alignment algorithm to converge towards the optimal $\lambda = \pi/2$ and the classifier to yield 100\% test accuracy. Let's define two specific problem instances to test these ideas out. We'll be using the quantum device `ibmq_montreal`, with coupling map shown below: <br> <img src="images/chip.png" width="500"> <br> We'll pick two different subgraphs, one for 7 qubits and one for 10, to define our problem instances. Using these subgraphs, we'll generate the corresponding datasets as described above, and then align the quantum kernel with QKA to learn a good fiducial state. <br> <img src="images/subgraphs.png" width="550"> <br> **Speeding up Algorithms with Qiskit Runtime**<br> QKA is an iterative quantum-classical algorithm, in which quantum hardware is used to execute parametrized quantum circuits for evaluating the quantum kernel matrices with QKE, while a classical optimizer tunes the parameters of those circuits to maximize the alignment. Iterative algorithms of this type can be slow due to latency between the quantum and classical calculations. Qiskit Runtime is a new architecture that can speed up iterative algorithms like QKA by co-locating classical computations with the quantum hardware executions. In this tutorial, we'll use QKA with Qiskit Runtime to learn a good quantum kernel for the _labeling cosets with error_ problem defined above. <br> **References**<br> [1] B. E. Boser, I. M. Guyon, and V. N. 
Vapnik, Proceedings of the Fifth Annual Workshop on Computational Learning Theory, COLT ’92 (Association for Computing Machinery, New York, NY, USA, 1992) pp. 144-152 [link](https://doi.org/10.1145/130385.130401) <br> [2] V. Vapnik, The Nature of Statistical Learning Theory, Information Science and Statistics (Springer New York, 2013) [link](https://books.google.com/books?id=EqgACAAAQBAJ) <br> [3] V. Havlíček, A. D. Córcoles, K. Temme, A. W. Harrow, A. Kandala, J. M. Chow, and J. M. Gambetta, Nature 567, 209-212 (2019) [link](https://doi.org/10.1038/s41586-019-0980-2) <br> [4] Y. Liu, S. Arunachalam, and K. Temme, arXiv:2010.02174 (2020) [link](https://arxiv.org/abs/2010.02174) <br> [5] J. R. Glick, T. P. Gujarati, A. D. Córcoles, Y. Kim, A. Kandala, J. M. Gambetta, K. Temme, arXiv:2105.03406 (2021) [link](https://arxiv.org/abs/2105.03406)<br> [6] N. Cristianini, J. Shawe-taylor, A. Elisseeff, and J. Kandola, Advances in Neural Information Processing Systems 14 (2001) [link](https://proceedings.neurips.cc/paper/2001/file/1f71e393b3809197ed66df836fe833e5-Paper.pdf) <br> # Load your IBM Quantum account and get the quantum backend We'll be using the 27-qubit device `ibmq_montreal` for this tutorial. ``` import sys sys.path.insert(0, '..') # Add qiskit_runtime directory to the path from qiskit import IBMQ IBMQ.load_account() provider = IBMQ.get_provider(project='qiskit-runtime') # Change this to your provider. backend = provider.get_backend('ibmq_montreal') ``` # Invoke the Quantum Kernel Alignment program Before executing the runtime program for QKA, we need to prepare the dataset and configure the input parameters for the algorithm. ### 1. Prepare the dataset First, we load the dataset from the `csv` file and then extract the labeled training and test samples. Here, we'll look at the 7-qubit problem, shown above in subfigure a). A second dataset is also available for the 10-qubit problem in b). 
``` import pandas as pd df = pd.read_csv('../qiskit_runtime/qka/aux_file/dataset_graph7.csv',sep=',', header=None) # alterative problem: dataset_graph10.csv data = df.values ``` Let's take a look at the data to see how it's formatted. Each row of the dataset contains a list of Euler angles, followed by the class label $\pm1$ in the last column. For an $n$-qubit problem, there are $2n$ features corresponding to the first two Euler angles for each qubit (recall discussion above). The rows alternate between class labels. ``` print(df.head(4)) ``` Now, let's explicitly construct the training and test samples (denoted `x`) and their labels (denoted `y`). ``` import numpy as np # choose number of training and test samples per class: num_train = 10 num_test = 10 # extract training and test sets and sort them by class label train = data[:2*num_train, :] test = data[2*num_train:2*(num_train+num_test), :] ind=np.argsort(train[:,-1]) x_train = train[ind][:,:-1] y_train = train[ind][:,-1] ind=np.argsort(test[:,-1]) x_test = test[ind][:,:-1] y_test = test[ind][:,-1] ``` ### 2. Configure the QKA algorithm The first task is to set up the feature map and its entangler map, which specifies the arrangement of $CZ$ gates in the fiducial state. We will choose this to match the connectivity of the problem subgraph, pictured above. We also initialize the fiducial state parameter $\lambda$ with `initial_point`. 
``` from qiskit_runtime.qka import FeatureMap d = np.shape(data)[1]-1 # feature dimension is twice the qubit number em = [[0,2],[3,4],[2,5],[1,4],[2,3],[4,6]] # we'll match this to the 7-qubit graph # em = [[0,1],[2,3],[4,5],[6,7],[8,9],[1,2],[3,4],[5,6],[7,8]] # we'll match this to the 10-qubit graph fm = FeatureMap(feature_dimension=d, entangler_map=em) # define the feature map initial_point = [0.1] # set the initial parameter for the feature map ``` Let's print out the circuit for the feature map (the circuit for the kernel will be a feature map for one data sample composed with an inverse feature map for a second sample). The first part of the feature map is the fiducial state, which is prepared with a layer of $y$ rotations followed by $CZ$s. Then, the last two layers of $z$ and $x$ rotations in the circuit denote the group representation $D(\theta)$ for a data sample $\theta$. Note that a single-qubit rotation is defined as $RP(\phi) = \exp(- i [\phi/2] P)$ for $P \in {X, Y, Z}$. ``` from qiskit.tools.visualization import circuit_drawer circuit_drawer(fm.construct_circuit(x=x_train[0], parameters=initial_point), output='text', fold=200) ``` Next, we set the values for the SVM soft-margin penalty `C` and the number of SPSA iterations `maxiters` we use to align the quantum kernel. ``` C = 1 # SVM soft-margin penalty maxiters = 10 # number of SPSA iterations ``` Finally, we decide how to map the virtual qubits of our problem graph to the physical qubits of the hardware. For example, in the 7-qubit problem, we can directly map the virtual qubits `[0, 1, 2, 3, 4, 5, 6]` to the physical qubits `[10, 11, 12, 13, 14, 15, 16]` of the device. This allows us to avoid introducing SWAP gates for qubits that are not connected, which can increase the circuit depth. ``` initial_layout = [10, 11, 12, 13, 14, 15, 16] # see figure above for the 7-qubit graph # initial_layout = [9, 8, 11, 14, 16, 19, 22, 25, 24, 23] # see figure above for the 10-qubit graph ``` ### 3. 
Set up and run the program We're almost ready to run the program. First, let's take a look at the program metadata, which includes a description of the input parameters and their default values. ``` print(provider.runtime.program('quantum-kernel-alignment')) ``` We see that this program has several input parameters, which we'll configure below. To run the program, we'll set up its two main components: `inputs` (the input parameters from the program metadata) and `options` (the quantum backend). We'll also define a callback function so that the intermediate results of the algorithm will be printed as the program runs. Note that each step of the algorithm for the settings we've selected here takes approximately 11 minutes. ``` def interim_result_callback(job_id, interim_result): print(f"interim result: {interim_result}\n") program_inputs = { 'feature_map': fm, 'data': x_train, 'labels': y_train, 'initial_kernel_parameters': initial_point, 'maxiters': maxiters, 'C': C, 'initial_layout': initial_layout } options = {'backend_name': backend.name()} job = provider.runtime.run(program_id="quantum-kernel-alignment", options=options, inputs=program_inputs, callback=interim_result_callback, ) print(job.job_id()) result = job.result() ``` ### 4. Retrieve the results of the program Now that we've run the program, we can retrieve the output, which is the aligned kernel parameter and the aligned kernel matrix. Let's also plot this kernel matrix (we'll subtract off the diagonal to show the contrast between the remaining entries). The kernel matrix is expected to have a block-diagonal structure. This reflects the fact that the kernel maps the input data effectively to just two states (modulo the small noise we added to the data; recall the discussion above). That is, data in the same coset (same class label) have a larger overlap than do data from different cosets. 
``` print(f"aligned_kernel_parameters: {result['aligned_kernel_parameters']}") from matplotlib import pyplot as plt from pylab import cm plt.rcParams['font.size'] = 20 plt.imshow(result['aligned_kernel_matrix']-np.identity(2*num_train), cmap=cm.get_cmap('bwr', 20)) plt.show() ``` # Use the results of the program to test an SVM on new data Equipped with the aligned kernel and its optimized parameter, we can use the `sklearn` package to train an SVM and then evaluate its classification accuracy on new test points. Note that a second kernel matrix built from the test points is needed for the SVM decision function. ``` from qiskit_runtime.qka import KernelMatrix from sklearn.svm import SVC from sklearn import metrics # train the SVM with the aligned kernel matrix: kernel_aligned = result['aligned_kernel_matrix'] model = SVC(C=C, kernel='precomputed') model.fit(X=kernel_aligned, y=y_train) # test the SVM on new data: km = KernelMatrix(feature_map=fm, backend=backend, initial_layout=initial_layout) kernel_test = km.construct_kernel_matrix(x1_vec=x_test, x2_vec=x_train, parameters=result['aligned_kernel_parameters']) labels_test = model.predict(X=kernel_test) accuracy_test = metrics.balanced_accuracy_score(y_true=y_test, y_pred=labels_test) print(f"accuracy test: {accuracy_test}") import qiskit.tools.jupyter %qiskit_version_table %qiskit_copyright ```
github_jupyter
``` import networkx import collections %load_ext autoreload %autoreload 2 from pymedphys._experimental import tree, graphviz module_dependencies = tree.get_module_dependencies() internal_modules = set(module_dependencies.keys()) root = 'pymedphys' top_level_api = [item for item in module_dependencies[root] if not item[2].startswith('_')] module_apis = [item[0] for item in top_level_api if item[0] == item[1]] second_level_apis = {} for module in module_apis: second_level_apis[module] = [item for item in module_dependencies[module] if not item[2].startswith('_')] exposure_module_maps = { f"{root}.{item[2]}": item[1] for item in top_level_api if item[0] != item[1] } for module, second_level_api in second_level_apis.items(): exposure_module_maps = { **exposure_module_maps, **{f"{module}.{item[2]}": item[1] for item in second_level_api} } exposure_module_maps def create_svg(api_names, module_name, href, module_dependencies, internal_modules): di_graph = networkx.DiGraph() di_graph.add_node(module_name) traversal_nodes = {module_name} while traversal_nodes: node = traversal_nodes.pop() raw_dependencies = module_dependencies[node] for dependency in raw_dependencies: if ( not dependency[2].startswith('_') and not dependency[1] in di_graph and dependency[1] in internal_modules ): traversal_nodes.add(dependency[1]) di_graph.add_node(dependency[1]) di_graph.add_edge(node, dependency[1]) for api_name in api_names: di_graph.add_node(api_name) di_graph.add_edge(api_name, module_name) edges = "" for edge in di_graph.edges: edges = edges + f'"{edge[0]}" -> "{edge[1]}";\n' graphviz.dot_string_to_svg( f""" digraph sample {{ {{ node [shape=rectangle]; }} rankdir = LR; {edges} }} """, f'{module_name}.svg' ) api_name, module_name = list(exposure_module_maps.items())[0] api_name, module_name exposure_module_maps module_api_map = collections.defaultdict(lambda: []) for key, item in exposure_module_maps.items(): module_api_map[item].append(key) module_api_map 
href="https://github.com/pymedphys/pymedphys/tree/main/lib/pymedphys" # create_svg(api_name, module_name, href, module_dependencies, internal_modules) module_api_map for module_name, api_names in module_api_map.items(): create_svg(api_names, module_name, module_dependencies, internal_modules) # internal_modules ```
github_jupyter
# Evaluation metrics for classification models ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os pd.options.mode.chained_assignment = None %matplotlib inline ``` ### Back with the credit card default dataset ``` # Loading the dataset DATA_DIR = '../data' FILE_NAME = 'credit_card_default.csv' data_path = os.path.join(DATA_DIR, FILE_NAME) ccd = pd.read_csv(data_path, index_col="ID") ccd.rename(columns=lambda x: x.lower(), inplace=True) ccd.rename(columns={'default payment next month':'default'}, inplace=True) # getting the groups of features bill_amt_features = ['bill_amt'+ str(i) for i in range(1,7)] pay_amt_features = ['pay_amt'+ str(i) for i in range(1,7)] numerical_features = ['limit_bal','age'] + bill_amt_features + pay_amt_features # Creating creating binary features ccd['male'] = (ccd['sex'] == 1).astype('int') ccd['grad_school'] = (ccd['education'] == 1).astype('int') ccd['university'] = (ccd['education'] == 2).astype('int') ccd['married'] = (ccd['marriage'] == 1).astype('int') # simplifying pay features pay_features= ['pay_' + str(i) for i in range(1,7)] for x in pay_features: ccd.loc[ccd[x] <= 0, x] = 0 # simplifying delayed features delayed_features = ['delayed_' + str(i) for i in range(1,7)] for pay, delayed in zip(pay_features, delayed_features): ccd[delayed] = (ccd[pay] > 0).astype(int) # creating a new feature: months delayed ccd['months_delayed'] = ccd[delayed_features].sum(axis=1) ``` ## Splitting and standarizing the dataset ``` numerical_features = numerical_features + ['months_delayed'] binary_features = ['male','married','grad_school','university'] X = ccd[numerical_features + binary_features] y = ccd['default'].astype(int) ## Split from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=5/30, random_state=25) ## Standarize from sklearn.preprocessing import StandardScaler scaler = StandardScaler() 
scaler.fit(X_train[numerical_features]) X_train.loc[:, numerical_features] = scaler.transform(X_train[numerical_features]) # Standarize also the testing set X_test.loc[:, numerical_features] = scaler.transform(X_test[numerical_features]) ``` ## Performance metrics ``` from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=25, max_features=6, max_depth=4, random_state=61) rf.fit(X_train, y_train) from sklearn.metrics import confusion_matrix def CM(y_true, y_pred): M = confusion_matrix(y_true, y_pred) out = pd.DataFrame(M, index=["Obs Paid", "Obs Default"], columns=["Pred Paid", "Pred Default"]) return out threshold = 0.5 y_pred_prob = rf.predict_proba(X_test)[:,1] y_pred = (y_pred_prob > threshold).astype(int) CM(y_test, y_pred) from sklearn.metrics import precision_score, recall_score, accuracy_score precision = precision_score(y_test, y_pred) recall = recall_score(y_test, y_pred) accuracy = accuracy_score(y_test, y_pred) print("Precision: {:0.1f}%, Recall: {:.1f}%, Accuracy: {:0.1f}%".format(100*precision, 100*recall, 100*accuracy)) ``` ## Visualization methods for evaluating classification models ### Visualizing probabilities ``` plt.hist(y_pred_prob, bins=25, ec='k'); fig, ax = plt.subplots(figsize=(8,5)) sns.kdeplot(y_pred_prob[y_test==1], shade=True, color='red', label="Defaults", ax=ax) sns.kdeplot(y_pred_prob[y_test==0], shade=True, color='green', label="Paid", ax=ax) ax.set_title("Distribution of predicted probabilies", fontsize=16) ax.legend() plt.grid(); ``` ### ROC and precision-recall curves ``` threshold = 0.4 y_pred_prob = rf.predict_proba(X_test)[:,1] y_pred = (y_pred_prob > threshold).astype(int) CM(y_test, y_pred) precision = precision_score(y_test, y_pred) recall = recall_score(y_test, y_pred) accuracy = accuracy_score(y_test, y_pred) print("Precision: {:0.1f}%, Recall: {:.1f}%, Accuracy: {:0.1f}%".format(100*precision, 100*recall, 100*accuracy)) from sklearn.metrics import precision_recall_curve precs, recs, 
ths = precision_recall_curve(y_test, y_pred_prob) fig, ax = plt.subplots(figsize=(8,5)) ax.plot(ths, precs[1:], label='Precision') ax.plot(ths, recs[1:], label='Recall') ax.set_title('Precision and recall for different thresholds', fontsize=16) ax.set_xlabel('Theshold', fontsize=14) ax.set_ylabel('Precision, Recall', fontsize=14) ax.set_xlim(0.1,0.7) ax.legend(); ax.grid(); fig, ax = plt.subplots(figsize=(8,5)) ax.plot(precs, recs) ax.set_title('Precision-recall curve', fontsize=16) ax.set_xlabel('Precision', fontsize=14) ax.set_ylabel('Recall', fontsize=14) ax.set_xlim(0.3,0.7) ax.grid(); from sklearn.metrics import roc_curve fpr, tpr, ths = roc_curve(y_test, y_pred_prob) fig, ax = plt.subplots(figsize=(8,5)) ax.plot(fpr, tpr) ax.set_title('ROC curve', fontsize=16) ax.set_xlabel('False positive rate', fontsize=14) ax.set_ylabel('Recall, true negative rate', fontsize=14) ax.grid(); ``` ### Defining a custom metric for classification ``` def class_cost(y_true, y_pred, cost_fn=1, cost_fp=1): M = confusion_matrix(y_true, y_pred) N = len(y_true) FN = M[1,0] FP = M[0,1] return (cost_fn*FN + cost_fp*FP)/N class_cost(y_test, y_pred) thresholds = np.arange(0.05, 0.95, 0.01) costs = [] for th in thresholds: y_pred = (y_pred_prob > th).astype(int) costs.append(class_cost(y_test, y_pred, cost_fn=3, cost_fp=1)) costs = np.array(costs) fig, ax = plt.subplots(figsize=(8,5)) ax.plot(thresholds, costs) ax.set_title('Cost vs threshold', fontsize=16) ax.set_xlabel('Threshold', fontsize=14) ax.set_ylabel('Cost', fontsize=14) ax.grid(); min_cost_th = thresholds[costs.argmin()] min_cost_th y_pred = (y_pred_prob > min_cost_th).astype(int) precision = precision_score(y_test, y_pred) recall = recall_score(y_test, y_pred) print("Precision: {:0.1f}%, Recall: {:.1f}%".format(100*precision, 100*recall)) CM(y_test, y_pred) ```
github_jupyter
``` from __future__ import print_function from textwrap import dedent import pytablewriter table_name = "example_table" headers = ["int", "float", "str", "bool", "mix", "time"] data = [ [0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"], [2, "-2.23", "foo", False, None, "2017-12-23 12:34:51+0900"], [3, 0, "bar", "true", "inf", "2017-03-03 22:44:55+0900"], [-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"], ] for name in pytablewriter.TableWriterFactory.get_format_names(): print(name) for name in pytablewriter.TableWriterFactory.get_extensions(): print(name) from pytablewriter import MarkdownTableWriter writer = MarkdownTableWriter() writer.table_name = "zone" writer.headers = ["zone_id", "country_code", "zone_name"] writer.value_matrix = [ ["1", "AD", "Europe/Andorra"], ["2", "AE", "Asia/Dubai"], ["3", "AF", "Asia/Kabul"], ["4", "AG", "America/Antigua"], ["5", "AI", "America/Anguilla"], ] writer.write_table() writer = pytablewriter.CsvTableWriter() writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.SpaceAlignedTableWriter() writer.headers = ["PID", "USER", "PR", "NI", "VIRT", "RES", "SHR", "S", "%CPU", "%MEM", "TIME+", "COMMAND"] writer.value_matrix = csv1 = [ [32866, "root", 20, 0, 48344, 3924, 3448, "R", 5.6, 0.2, "0:00.03", "top"], [1, "root", 20, 0, 212080, 7676, 5876, "S", 0, 0.4, "1:06.56", "systemd"], [2, "root", 20, 0, 0, 0, 0, "S", 0, 0, "0:01.92", "kthreadd"], [4, "root", 0, -20, 0, 0, 0, "S", 0, 0, "0:00.00", "kworker/0:0H"], ] writer.write_table() writer = pytablewriter.TsvTableWriter() writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.HtmlTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.JavaScriptTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.JsonTableWriter() 
#writer.table_name = "Timezone" writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.JsonLinesTableWriter() writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.JsonTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.LatexMatrixWriter() writer.table_name = "A" writer.value_matrix = [ [0.01, 0.00125, 0.0], [1.0, 99.9, 0.01], [1.2, 999999.123, 0.001], ] writer.write_table() ``` \begin{equation} A = \left( \begin{array}{rrr} 0.01 & 0.0012 & 0.000 \\ 1.00 & 99.9000 & 0.010 \\ 1.20 & 999999.1230 & 0.001 \\ \end{array} \right) \end{equation} ``` writer = pytablewriter.LatexMatrixWriter() writer.table_name = "B" writer.value_matrix = [ ["a_{11}", "a_{12}", "\\ldots", "a_{1n}"], ["a_{21}", "a_{22}", "\\ldots", "a_{2n}"], [r"\vdots", "\\vdots", "\\ddots", "\\vdots"], ["a_{n1}", "a_{n2}", "\\ldots", "a_{nn}"], ] writer.write_table() ``` \begin{equation} B = \left( \begin{array}{llll} a_{11} & a_{12} & \ldots & a_{1n} \\ a_{21} & a_{22} & \ldots & a_{2n} \\ \vdots & \vdots & \ddots & \vdots \\ a_{n1} & a_{n2} & \ldots & a_{nn} \\ \end{array} \right) \end{equation} ``` writer = pytablewriter.LatexTableWriter() writer.headers = headers writer.value_matrix = data writer.write_table() ``` \begin{array}{r | r | l | l | l | l} \hline \verb|int| & \verb|float| & \verb|str| & \verb|bool| & \verb|mix| & \verb|time| \\ \hline \hline 0 & 0.10 & hoge & True & 0 & \verb|2017-01-01 03:04:05+0900| \\ \hline 2 & -2.23 & foo & False & & \verb|2017-12-23 12:34:51+0900| \\ \hline 3 & 0.00 & bar & True & \infty & \verb|2017-03-03 22:44:55+0900| \\ \hline -10 & -9.90 & & False & NaN & \verb|2017-01-01 00:00:00+0900| \\ \hline \end{array} ``` from pytablewriter import MarkdownTableWriter writer = MarkdownTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() from 
pytablewriter import MarkdownTableWriter writer = MarkdownTableWriter() writer.table_name = "write example with a margin" writer.headers = headers writer.value_matrix = data writer.margin = 1 # add a whitespace for both sides of each cell writer.write_table() writer = pytablewriter.MediaWikiTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.NumpyTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.PandasDataFrameWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.PandasDataFrameWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.is_datetime_instance_formatting = False writer.write_table() writer = pytablewriter.PythonCodeTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.PythonCodeTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.is_datetime_instance_formatting = False writer.write_table() writer = pytablewriter.RstGridTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.RstSimpleTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.RstCsvTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.LtsvTableWriter() writer.headers = headers writer.value_matrix = data writer.write_table() writer = pytablewriter.TomlTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() from datetime import datetime import 
pytablewriter as ptw writer = ptw.JavaScriptTableWriter() writer.headers = ["header_a", "header_b", "header_c"] writer.value_matrix = [ [-1.1, "2017-01-02 03:04:05", datetime(2017, 1, 2, 3, 4, 5)], [0.12, "2017-02-03 04:05:06", datetime(2017, 2, 3, 4, 5, 6)], ] print("// without type hints: column data types detected automatically by default") writer.table_name = "without type hint" writer.write_table() print("// with type hints: Integer, DateTime, String") writer.table_name = "with type hint" writer.type_hints = [ptw.Integer, ptw.DateTime, ptw.String] writer.write_table() from datetime import datetime import pytablewriter as ptw writer = ptw.PythonCodeTableWriter() writer.value_matrix = [ [-1.1, float("inf"), "2017-01-02 03:04:05", datetime(2017, 1, 2, 3, 4, 5)], [0.12, float("nan"), "2017-02-03 04:05:06", datetime(2017, 2, 3, 4, 5, 6)], ] # column data types detected automatically by default writer.table_name = "python variable without type hints" writer.headers = ["float", "infnan", "string", "datetime"] writer.write_table() # set type hints writer.table_name = "python variable with type hints" writer.headers = ["hint_int", "hint_str", "hint_datetime", "hint_str"] writer.type_hints = [ptw.Integer, ptw.String, ptw.DateTime, ptw.String] writer.write_table() writer = pytablewriter.MarkdownTableWriter() writer.from_csv(dedent("""\ "i","f","c","if","ifc","bool","inf","nan","mix_num","time" 1,1.10,"aa",1.0,"1",True,Infinity,NaN,1,"2017-01-01 00:00:00+09:00" 2,2.20,"bbb",2.2,"2.2",False,Infinity,NaN,Infinity,"2017-01-02 03:04:05+09:00" 3,3.33,"cccc",-3.0,"ccc",True,Infinity,NaN,NaN,"2017-01-01 00:00:00+09:00" """)) writer.write_table() writer = pytablewriter.MarkdownTableWriter() writer.table_name = "ps" writer.from_csv( dedent("""\ USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.0 0.4 77664 8784 ? Ss May11 0:02 /sbin/init root 2 0.0 0.0 0 0 ? S May11 0:00 [kthreadd] root 4 0.0 0.0 0 0 ? I< May11 0:00 [kworker/0:0H] root 6 0.0 0.0 0 0 ? 
I< May11 0:00 [mm_percpu_wq] root 7 0.0 0.0 0 0 ? S May11 0:01 [ksoftirqd/0] """), delimiter=" ") writer.write_table() from pytablewriter import MarkdownTableWriter from pytablewriter.style import Align, Style writer = MarkdownTableWriter() writer.table_name = "specify alignment for each column manually" writer.headers = ["left", "right", "center", "auto (int)", "auto (str)", "None (same as AUTO)"] writer.value_matrix = [ [0, "r", "center align", 0, "a", "n"], [11, "right align", "c", 11, "auto", "none"], ] # set alignments for each column writer.styles = [ Style(align=Align.LEFT), Style(align=Align.RIGHT), Style(align=Align.CENTER), Style(align=Align.AUTO), Style(align=Align.AUTO), None, ] writer.write_table() from pytablewriter import MarkdownTableWriter from pytablewriter.style import Style writer = MarkdownTableWriter() writer.table_name = "set style by styles" writer.headers = [ "auto align", "left align", "center align", "bold", "italic", "bold italic ts", ] writer.value_matrix = [ [11, 11, 11, 11, 11, 11], [1234, 1234, 1234, 1234, 1234, 1234], ] # specify styles for each column writer.styles = [ Style(), Style(align="left"), Style(align="center"), Style(font_weight="bold"), Style(font_style="italic"), Style(font_weight="bold", font_style="italic", thousand_separator=","), ] writer.write_table() from pytablewriter import MarkdownTableWriter from pytablewriter.style import Style writer = MarkdownTableWriter() writer.headers = ["A", "B", "C",] writer.value_matrix = [[11, 11, 11], [1234, 1234, 1234]] writer.table_name = "set style by index" writer.set_style(1, Style(align="center", font_weight="bold")) writer.set_style(2, Style(thousand_separator=" ")) writer.write_table() writer.write_null_line() writer.table_name = "set style by header" writer.set_style("B", Style(font_style="italic")) writer.write_table() import pytablewriter writer = pytablewriter.MarkdownTableWriter() writer.headers = ["int", "float", "str", "bool", "mix", "time"] writer.value_matrix = [ 
[0, 0.1, "hoge", True, 0, "2017-01-01 03:04:05+0900"], [2, "-2.23", "foo", False, None, "2017-12-23 45:01:23+0900"], [3, 0, "bar", "true", "inf", "2017-03-03 33:44:55+0900"], [-10, -9.9, "", "FALSE", "nan", "2017-01-01 00:00:00+0900"], ] print(writer.dumps()) from pytablewriter import MarkdownTableWriter from pytablewriter.style import ThousandSeparator writer = MarkdownTableWriter() writer.headers = ["wo_format", "comma_i", "space_f"] writer.value_matrix = [ [1000, 1234567, 1234567.8], [1000, 1234567, 1234567.8], [1000, 1234567, 1234567.8], ] writer.styles = [ Style(thousand_separator=ThousandSeparator.NONE), Style(thousand_separator=ThousandSeparator.COMMA), Style(thousand_separator=ThousandSeparator.SPACE), ] writer.write_table() from pytablewriter import LatexTableWriter from pytablewriter.style import Style, FontSize writer = LatexTableWriter() writer.table_name = "style test: font size" writer.headers = ["none", "empty_style", "tiny", "small", "medium", "large"] writer.value_matrix = [[111, 111, 111, 111, 111, 111], [1234, 1234, 1234, 1234, 1234, 1234]] writer.styles = [ None, Style(), Style(font_size=FontSize.TINY), Style(font_size=FontSize.SMALL), Style(font_size=FontSize.MEDIUM), Style(font_size=FontSize.LARGE), ] writer.write_table() ``` \begin{array}{r | r | r | r | r | r} \hline \verb|none| & \verb|empty_style| & \verb|tiny| & \verb|small| & \verb|medium| & \verb|large| \\ \hline \hline 111 & 111 & \tiny 111 & \small 111 & \normalsize 111 & \large 111 \\ \hline 1234 & 1234 & \tiny 1234 & \small 1234 & \normalsize 1234 & \large 1234 \\ \hline \end{array} ``` from pytablewriter import UnicodeTableWriter writer = UnicodeTableWriter() writer.table_name = table_name writer.headers = headers writer.value_matrix = data writer.write_table() ```
github_jupyter
``` import pickle import numpy as np import seaborn as sns import matplotlib.pyplot as plt with open('cdrk_lastepisode_heat.pickle', 'rb') as f: last_heat = pickle.load(f) with open('cdrk_heat_unique0.pickle', 'rb') as f: heat_uniq0 = pickle.load(f) with open('cdrk_heat_freq0.pickle', 'rb') as f: heat_freq0 = pickle.load(f) with open('cdrk_heat_unique1.pickle', 'rb') as f: heat_uniq1 = pickle.load(f) with open('cdrk_heat_freq1.pickle', 'rb') as f: heat_freq1 = pickle.load(f) # with open('cdrk_random_corr0.pickle', 'rb') as f: # rand_corr0 = pickle.load(f) # with open('cdrk_recent_corr0.pickle', 'rb') as f: # recen_corr0 = pickle.load(f) with open('cdrk_classic_Q.pickle', 'rb') as f: Q = pickle.load(f) num_episodes = len(heat_freq0) num_actions = 15 num_sub = 500 np.unique(last_heat[:, 0, :, :], return_counts=True)[1]/num_sub/num_actions**2 np.unique(last_heat[:, 0, :, :], return_counts=True) full_freq0 = np.zeros((num_episodes, num_actions)) for i in range(num_episodes): full_freq0[i, heat_uniq0[i].astype(int)] = heat_freq0[i] np.argmax(np.sum(full_freq0, axis=0)) max_price = np.zeros(num_episodes) max_freq = np.zeros(num_episodes) bottom8_freq = np.zeros(num_episodes) bottom3_freq = np.zeros(num_episodes) for i in range(num_episodes): max_price[i] = np.max(heat_uniq0[i]) max_freq[i] = np.argmax(full_freq0[i, :]) bottom8_freq[i] = np.sum(full_freq0[i, :10]) bottom3_freq[i] = np.sum(full_freq0[i, :3]) plt.figure(figsize=(8, 6)) ax = sns.heatmap(last_heat[-1, 1, :, :], cbar=False, annot=True) plt.xlabel('Classic player') plt.ylabel('Deep player') cbar = ax.figure.colorbar(ax.collections[0]) cbar.set_ticks([0, 2, 4, 6, 8, 10, 12, 14]) # cbar.set_ticks([1.43, 1.51, 1.59, 1.67, 1.75, 1.83, 1.91, 1.99]) fig = ax.get_figure() # fig.savefig('.eps', format='eps', dpi=200, bbox_inches='tight', pad_inches=0.1) fig, ax = plt.subplots(figsize=(8, 6), dpi=120) ax.plot(bottom8_freq/112500, color='tab:blue', label=r'Price $\leq$ 1.79') ax.plot(full_freq0[:, 10]/112500, 
color='tab:orange', label =r'Price = 1.83') ax.set_ylabel('Percent') ax.set_xlabel('Episodes') ax.legend(loc='best') ax.grid(True) # plt.savefig('.eps', format='eps', dpi=1000, bbox_inches='tight', pad_inches=0.1) plt.show() np.unique(max_price, return_counts=True) fig, ax = plt.subplots(figsize=(8, 6), dpi=120) ax.plot(1.43 + 0.04*max_price, color='tab:blue', label='Highest') ax.plot(1.43 + 0.04*max_freq, color='tab:orange', label ='Most frequent') ax.set_ylabel('Price') ax.yaxis.set_ticks(np.arange(1.43, 2.0, 0.04)) ax.set_xlabel('Episodes') ax.legend(loc='best') ax.grid(True) # plt.savefig('.eps', format='eps', dpi=1000, bbox_inches='tight', pad_inches=0.1) plt.show() full_freq1 = np.zeros((num_episodes, num_actions)) for i in range(num_episodes): full_freq1[i, heat_uniq1[i].astype(int)] = heat_freq1[i] max_price1 = np.zeros(num_episodes) max_freq1 = np.zeros(num_episodes) bottom8_freq1 = np.zeros(num_episodes) bottom3_freq1 = np.zeros(num_episodes) for i in range(num_episodes): max_price1[i] = np.max(heat_uniq1[i]) max_freq1[i] = np.argmax(full_freq1[i, :]) bottom8_freq1[i] = np.sum(full_freq1[i, :8]) bottom3_freq1[i] = np.sum(full_freq1[i, :3]) fig, ax = plt.subplots(figsize=(18, 6), dpi=120) ax.plot(bottom8_freq1/112500, color='tab:blue', label=r'Price $\leq$ 1.71') ax.plot(bottom3_freq1/112500, color='tab:orange', label =r'Price $\leq$ 1.51') ax.set_ylabel('Percent') ax.set_xlabel('Episodes') ax.legend(loc='best') ax.grid(True) # plt.savefig('.eps', format='eps', dpi=500, bbox_inches='tight', pad_inches=0.1) plt.show() fig, ax = plt.subplots(figsize=(18, 6), dpi=120) ax.plot(1.43 + 0.04*max_price1, color='tab:blue', label='Highest price') ax.plot(1.43 + 0.04*max_freq1, color='tab:orange', label ='Most frequent price') ax.set_ylabel('Price') ax.yaxis.set_ticks(np.arange(1.43, 2.0, 0.04)) ax.set_xlabel('Episodes') ax.legend(loc='best') ax.grid(True) # plt.savefig('.eps', format='eps', dpi=500, bbox_inches='tight', pad_inches=0.1) plt.show() ind N = 2000 - 30 
ind = np.arange(N+1, N+31) width = 0.5 # plt.style.use('default') cm = plt.get_cmap('tab20') plt.rcParams["axes.prop_cycle"] = plt.cycler('color', [cm(1.*i/num_actions) for i in range(num_actions)]) p = [] fig, ax = plt.subplots(figsize=(8,6), dpi=120) for k in range(num_actions): p.append(plt.bar(ind, full_freq1[N:N+30, k]/112500, width, bottom = np.sum(full_freq1[N:N+30, :k], axis=1)/112500)) plt.legend((p[0][0], p[1][0], p[2][0], p[3][0], p[4][0], p[5][0], p[6][0], p[7][0], p[8][0], p[9][0], p[10][0], p[11][0], p[12][0], p[13][0], p[14][0]), ('1.43', '1.47', '1.51', '1.55', '1.59', '1.63', '1.67', '1.71', '1.75', '1.79', '1.83', '1.87', '1.91', '1.95', '1.99'), bbox_to_anchor=(1.0, 1.0)) plt.xticks(ind) plt.xticks(rotation=70) ax.set_xlabel('Episodes') ax.set_ylabel('Percent') # plt.savefig('.eps', format='eps', dpi=1000, bbox_inches='tight', pad_inches=0.1) plt.show() ```
github_jupyter
# Deterministic Inputs, Noisy “And” gate model (DINA) This notebook will show you how to train and use the DINA. First, we will show how to get the data (here we use Math1 from math2015 as the dataset). Then we will show how to train a DINA and persist its parameters. At last, we will show how to load the parameters from the file and evaluate on the test dataset. The script version can be found in [DINA.py](DINA.py) ## Data Preparation Before we process the data, we need to first acquire the dataset which is shown in [prepare_dataset.ipynb](prepare_dataset.ipynb) ``` # Data preprocessing, split train/valid/test data import numpy as np import random import json train_ratio = 0.8 valid_ratio = 0 # Q matrix np.savetxt("../../data/math2015/Math1/q_m.csv", np.loadtxt("../../data/math2015/Math1/q.txt", dtype=int), delimiter=',', fmt='%d') # response matrix, split dataset R = (np.loadtxt("../../data/math2015/Math1/data.txt") == 1).astype(float) stu_num, prob_num = R.shape[0], R.shape[1] train_logs, valid_logs, test_logs = [], [], [] for stu in range(stu_num): stu_logs = [] for prob in range(prob_num): log = {'user_id': int(stu), 'item_id': int(prob), 'score': R[stu][prob]} stu_logs.append(log) random.shuffle(stu_logs) train_logs += stu_logs[: int(train_ratio * prob_num)] valid_logs += stu_logs[int(train_ratio * prob_num): int(train_ratio * prob_num) + int(valid_ratio * prob_num)] test_logs += stu_logs[int(train_ratio * prob_num) + int(valid_ratio * prob_num):] with open("../../data/math2015/Math1/train_data.json", 'w', encoding='utf8') as file: json.dump(train_logs, file, indent=4, ensure_ascii=False) with open("../../data/math2015/Math1/valid_data.json", 'w', encoding='utf8') as file: json.dump(valid_logs, file, indent=4, ensure_ascii=False) with open("../../data/math2015/Math1/test_data.json", 'w', encoding='utf8') as file: json.dump(test_logs, file, indent=4, ensure_ascii=False) print(train_logs[0], test_logs[0]) # Load the data from files q_m = 
np.loadtxt("../../data/math2015/Math1/q_m.csv", dtype=int, delimiter=',') prob_num, know_num = q_m.shape[0], q_m.shape[1] # training data with open("../../data/math2015/Math1/train_data.json", encoding='utf-8') as file: train_set = json.load(file) stu_num = max([x['user_id'] for x in train_set]) + 1 R = -1 * np.ones(shape=(stu_num, prob_num)) for log in train_set: R[log['user_id'], log['item_id']] = log['score'] # testing data with open("../../data/math2015/Math1/test_data.json", encoding='utf-8') as file: test_set = json.load(file) len(train_set), len(test_set) ``` ## Training and Persistence ``` import logging logging.getLogger().setLevel(logging.INFO) from EduCDM import DINA cdm = DINA(R, q_m, stu_num, prob_num, know_num, skip_value=-1) cdm.train(epoch=2, epsilon=1e-3) cdm.save("dina.params") ``` ## Loading and Testing ``` cdm.load("dina.params") rmse, mae = cdm.eval(test_set) print("RMSE: %.6f, MAE: %.6f" % (rmse, mae)) ``` ## Incremental Training ``` new_data = [{'user_id': 0, 'item_id': 0, 'score': 1.0}, {'user_id': 1, 'item_id': 2, 'score': 0.0}] cdm.inc_train(new_data, epoch=2, epsilon=1e-3) ``` ## Evaluate User's State ``` stu_rec = np.array([0, 1, -1, 0, -1, 0, 1, 1, 0, 1, 0, 1, 0, -1, -1, -1, -1, 0, 1, -1]) dia_id, dia_state = cdm.transform(stu_rec) print("id of user's state is %d, state is " % dia_id + str(dia_state)) # To see the relation between dia_id and dia_state dia_state == cdm.all_states[dia_id] ```
github_jupyter
## Ejercicio 1 Dada la siguiente lista: > ```ejer_1 = [1,2,3,4,5]``` Inviertela para que quede de la siguiente manera > ```ejer_1 = [5,4,3,2,1]``` ## Ejercicio 2 Eleva todos los elementos de la lista al cuadrado > ```ejer_2 = [1,2,3,4,5]``` ## Ejercicio 3 Crea una lista nueva con todas las combinaciones de las siguientes dos listas: > ```ejer_3_1 = ["Hola", "amigo"]``` > > ```ejer_3_2 = ["Que", "tal"]``` Obten el siguiente output: ['Hola Que', 'Hola tal', 'amigo Que', 'amigo tal'] ## Ejercicio 4 Dada la siguiente lista, encuentra el valor 45, sustituyelo por el 0 > ```ejer_4 = [20, 47, 19, 29, 45, 67, 78, 90]``` ## Ejercicio 5 Dada la siguiente lista, elimina todos los valores iguales a 3 > ```ejer_5 = [3, 20, 3, 47, 19, 3, 29, 45, 67, 78, 90, 3, 3]``` ``` ejer_5 = [3, 20, 3, 47, 19, 3, 29, 45, 67, 78, 90, 3, 3] lista_nueva = [] for i in ejer_5: if i != 3: lista_nueva.append(i) print(lista_nueva) lista_nueva2 = [] for k in range(len(ejer_5)): if ejer_5[k] != 3: lista_nueva2.append(ejer_5[k]) print(lista_nueva2) ejer_5 = [3, 20, 3, 47, 19, 3, 29, 45, 67, 78, 90, 3, 3] for i in range(len(ejer_5)): if ejer_5[i] == 3: ejer_5.remove(3) print(ejer_5) print(ejer_5) ``` ## Ejercicio 6 1. Crea una tupla con 3 elementos 2. Crea otra tupla con un elemento y comprueba su tipo 3. Crea una tupla con elementos de diferentes tipos 4. Imprime por pantalla el primer y ultimo elemento de la tupla del apartado 3. Usa `len` para el ultimo 5. Añade un elemento a la tupla del apartado 3. 6. Eliminar un elemento de la tupla del apartado 5, que se encuentre más o menos en la mitad. 7. Convierte la tupla del apartado 5 en una lista ## Ejercicio 7 Concatena todos los elementos de la tupla en un unico string. 
Para ello utiliza el metodo `.join()` de los Strings > ```ejer_7 = ("cien", "cañones", "por", "banda")``` Resultado: `cien cañones por banda` ## Ejercicio 8 Obten el tercer elemento de la siguiente tupla, y el tercero empezando por la cola > ```ejer_8 = (3, 20, 3, 47, 19, 3, 29, 45, 67, 78, 90, 3, 3)``` ## Ejercicio 9 1. ¿Cuántas veces se repite el 3 en la siguiente tupla? 2. Crea una tupla nueva con los elementos desde la posicion 5 a la 10. 3. ¿Cuántos elementos tiene la tupla `ejer_9`? > ```ejer_9 = (3, 20, 3, 47, 19, 3, 29, 45, 67, 78, 90, 3, 3, 5, 2, 4, 7, 9, 4, 2, 4, 3, 3, 4, 6, 7)``` ## Ejercicio 10 Comprueba si el numero 60 esta en la tupla del ejercicio 9 ## Ejercicio 11 1. Convierte la tupla del apartado 10 en una lista 2. Convierte la tupla del apartado 10 en un set 3. Convierte la tupla del apartado 10 en un diccionario. Usa también los indices ## Ejercicio 12 Convierte la siguiente tupla en un diccionario > ```ejer_12 = [("x", 1), ("x", 2), ("x", 3), ("y", 1), ("y", 2), ("z", 1)]``` ``` ejer_12 = [("x", 1), ("x", 2), ("x", 3), ("y", 1), ("y", 2), ("z", 1)] dict(ejer_12) dic = {} dic['x'] = 1 dic['x'] = 2 dic['x'] = 3 dic['y'] = 1 dic['y'] = 2 dic['z'] = 1 dic ``` ## Ejercicio 13 1. Crea una lista ordenada ascendente con las claves del diccionario 2. Crea otra lista ordenada descendente con los valores 3. Añade una nueva clave/valor 4. Busca la clave 2 dentro del diccionario 5. Itera la clave y el valor del diccionario con un unico for > ```ejer_13 = {4:78, 2:98, 8:234, 5:29}``` ## Ejercicio 14 Junta ambos diccionarios. Para ello, utiliza `update()` > ```ejer_14_1 = {1: 11, 2: 22}``` > > ```ejer_14_2 = {3: 33, 4: 44}``` ## Ejercicio 15 Suma todos los valores del diccionario > ```ejer_15 = {1: 11, 2: 22, 3: 33, 4: 44, 5: 55}``` ## Ejercicio 16 Multiplica todos los valores del diccionario > ```ejer_16 = {1: 11, 2: 22, 3: 33, 4: 44, 5: 55}``` ## Ejercicio 17 1. Crea un set de tres elementos 2. Añade un cuarto 3. Elimina el ultimo elemento añadido 4. 
Elimina el elemento 10, si está presente. Usa `discard()`
github_jupyter
# Building ERDDAP Datasets This notebook documents the process of creating XML fragments for nowcast system run results files for inclusion in `/results/erddap-datasets/datasets.xml` which is symlinked to `/opt/tomcat/content/erddap/datasets.xml` on the `skookum` ERDDAP server instance. The contents are a combination of: * instructions for using the `GenerateDatasetsXml.sh` and `DasDds.sh` tools found in the `/opt/tomcat/webapps/erddap/WEB-INF/` directory * instructions for forcing the server to update the datasets collection via the `/results/erddap/flags/` directory * code and metadata to transform the output of `GenerateDatasetsXml.sh` into XML fragments that are ready for inclusion in `/results/erddap-datasets/datasets.xml` This is a snapshot of the `erddap-datasets/ERDDAP_datasets.ipynb` notebook that is used to maintain the `datasets.xml` file. Please see https://bitbucket.org/salishsea/erddap-datasets for the active, version controlled version of this notebook, and the production ``datasets.xml` file. ``` from collections import OrderedDict from lxml import etree ``` **NOTE** The next cell mounts the `/results` filesystem on `skookum` locally. It is intended for use if when this notebook is run on a laptop or other non-Waterhole machine that has `sshfs` installed and a mount point for `/results` available in its root filesystem. Don't execute the cell if that doesn't describe your situation. ``` !sshfs skookum:/results /results ``` The `metadata` dictionary below contains information for dataset attribute tags whose values need to be changed, or that need to be added for all datasets. The keys are the dataset attribute names. The values are dicts containing a required `text` item and perhaps an optional `after` item. The value associated with the `text` key is the text content for the attribute tag. 
When present, the value associated with the `after` key is the name of the dataset attribute after which a new attribute tag containing the `text` value is to be inserted. ``` metadata = OrderedDict([ ('coverage_content_type', { 'text': 'modelResult', 'after': 'cdm_data_type', }), ('infoUrl', { 'text': 'https://salishsea-meopar-docs.readthedocs.io/en/latest/results_server/index.html#salish-sea-model-results', }), ('institution', {'text': 'UBC EOAS'}), ('institution_fullname', { 'text': 'Earth, Ocean & Atmospheric Sciences, University of British Columbia', 'after': 'institution', }), ('license', { 'text': '''The Salish Sea MEOPAR NEMO model results are copyright 2013-2021 by the Salish Sea MEOPAR Project Contributors and The University of British Columbia. They are licensed under the Apache License, Version 2.0. http://www.apache.org/licenses/LICENSE-2.0''', }), ('project', { 'text':'Salish Sea MEOPAR NEMO Model', 'after': 'title', }), ('creator_name', { 'text': 'Salish Sea MEOPAR Project Contributors', 'after': 'project', }), ('creator_email', { 'text': 'sallen@eos.ubc.ca', 'after': 'creator_name', }), ('creator_url', { 'text': 'https://salishsea-meopar-docs.readthedocs.io/', 'after': 'creator_email', }), ('acknowledgement', { 'text': 'MEOPAR, ONC, Compute Canada', 'after': 'creator_url', }), ('drawLandMask', { 'text': 'over', 'after': 'acknowledgement', }), ]) ``` The `datasets` dictionary below provides the content for the dataset `title` and `summary` attributes. The `title` attribute content appears in the the datasets list table (among other places). It should be `<`80 characters long, and note that only the 1st 40 characters will appear in the table. The `summary` attribute content appears (among other places) when a user hovers the cursor over the `?` icon beside the `title` content in the datasets list table. 
The text that is inserted into the `summary` attribute tag by code later in this notebook is the `title` content followed by the `summary` content, separated by a blank line. The keys of the `datasets` dict are the `datasetID` strings that are used in many places by the ERDDAP server. They are structured as follows: * `ubc` to indicate that the dataset was produced at UBC * `SS` to indicate that the dataset is a product of the Salish Sea NEMO model * a few letters to indicate the model runs that produce the dataset: * `n` to indicate that the dataset is from a nowcast run, * `f` for forecast, * `f2` for forecast2 (aka preliminary forecast), * `hg` for hindcast-green * `ng` for nowcast-green, * `a` for atmospheric forcing, * a description of the dataset variables; e.g. `PointAtkinsonSSH` or `3DuVelocity` * the time interval of values in the dataset; e.g. `15m`, `1h`, `1d` * the dataset version; e.g. `V16-10`, or `V1` Versioning was changed to a [CalVer](http://calver.org/) type scheme in Oct-2016. Thereafter versions are of the form `Vyymm` and indicate the year and month when the dataset entered production. So: * `ubcSSnPointAtkinsonSSH15mV1` is the version 1 dataset of 15 minute averaged sea surface height values at Point Atkinson from `PointAtkinson.nc` output files * `ubcSSn3DwVelocity1hV2` is the version 2 dataset of 1 hr averaged vertical (w) velocity values over the entire domain from `SalishSea_1h_*_grid_W.nc` output files * `ubcSSnSurfaceTracers1dV1` is the version 1 dataset of daily averaged surface tracer values over the entire domain from `SalishSea_1d_*_grid_T.nc` output files * `ubcSSnBathymetry2V16-07` is the version 16-07 dataset of longitude, latitude, and bathymetry of the Salish Sea NEMO model grid that came into use in Jul-2016. The corresponding NEMO-generated mesh mask variables are in the `ubcSSn2DMeshMaskDbo2V16-07` (y, x variables), and the `ubcSSn3DMeshMaskDbo2V16-07` (z, y, x variables) datasets. 
The dataset version part of the `datasetID` is used to indicate changes in the variables contained in the dataset. For example, the transition from the `ubcSSn3DwVelocity1hV1` to the `ubcSSn3DwVelocity1hV2` dataset occurred on 24-Jan-2016 when we started to output vertical eddy viscosity and diffusivity values at the `w` grid points. All dataset ids end with their version identifier and their `summary` ends with a notation about the variables that they contain; e.g. ``` v1: wVelocity variable ``` When the a dataset version is incremented a line describing the change is added to the end of its `summary`; e.g. ``` v1: wVelocity variable v2: Added eddy viscosity & diffusivity variables ve_eddy_visc & ve_eddy_diff ``` ``` datasets = { 'ubcSSnBathymetry2V1' :{ 'type': 'geolocation bathymetry', 'title': 'Salish Sea NEMO Model Grid, Geo-location and Bathymetry, v1', 'summary':'''Longitude, latitude, and bathymetry of the Salish Sea NEMO model grid. The bathymetry values are those calculated by NEMO from the input bathymetry file. NEMO modifies the input bathymetry to remove isolated holes, and too-small partial steps. The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. v1: longitude, latitude and bathymetry variables ''', 'fileNameRegex': '.*SalishSea2_NEMO_bathy\.nc$' }, 'ubcSSnBathymetry2V16-07' :{ 'type': 'geolocation bathymetry', 'title': 'Salish Sea NEMO Model Grid, Geo-location and Bathymetry, v16-07', 'summary':'''Longitude, latitude, and bathymetry of the Salish Sea NEMO model grid. The bathymetry values are those calculated by NEMO from the input bathymetry file. NEMO modifies the input bathymetry to remove isolated holes, and too-small partial steps. The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. 
v1: longitude, latitude and bathymetry variables v16-07: same variables, bathymetry uniformly deepened by 1 grid level, smoothed at Juan de Fuca & Johnstone Strait open boundaries, Fraser River lengthened, bathymetry deepened near mouth of Fraser River ''', 'fileNameRegex': '.*downbyone2_NEMO_bathy\.nc$' }, 'ubcSSn2DMeshMask2V1': { 'type': 'geolocation bathymetry', 'title': 'Salish Sea NEMO Model Grid, 2D Mesh Mask, v1', 'summary':'''NEMO grid variable value for the u-v plane of the Salish Sea NEMO model Arakawa-C grid. The values are those calculated by NEMO from the input coordinates and bathymetry files. The variable names are those used by NEMO-3.4, see the NEMO-3.4 book (http://www.nemo-ocean.eu/Media/Files/NEMO_book_V3_4.pdf) for details, or the long_name attributes of the variables for succinct descriptions of the variables. The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. v1: e1t, e2t, e1u, e2u, e1v, e2v, e1f, e2f, glamt, gphit, glamu, gphiu, glamv, gphiv, tmaskutil, umaskutil, vmaskutil, fmaskutil, ff, mbathy variables ''', 'fileNameRegex': '.*mesh_mask_SalishSea2\.nc$', }, 'ubcSSn2DMeshMask2V16-07': { 'type': 'geolocation bathymetry', 'title': 'Salish Sea NEMO Model Grid, 2D Mesh Mask, v16-07', 'summary':'''NEMO grid variable value for the u-v plane of the Salish Sea NEMO model Arakawa-C grid. The values are those calculated by NEMO from the input coordinates and bathymetry files. The variable names are those used by NEMO-3.6, see the NEMO-3.6 book (http://www.nemo-ocean.eu/Media/Files/NEMO_book_V3_6.pdf) for details, or the long_name attributes of the variables for succinct descriptions of the variables. The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. 
v1: e1t, e2t, e1u, e2u, e1v, e2v, e1f, e2f, glamt, gphit, glamu, gphiu, glamv, gphiv, tmaskutil, umaskutil, vmaskutil, fmaskutil, ff, mbathy variables v16-07: e1t, e2t, e1u, e2u, e1v, e2v, e1f, e2f, glamt, gphit, glamu, gphiu, glamv, gphiv, glamf, gphif, tmaskutil, umaskutil, vmaskutil, fmaskutil, ff, mbathy variables ''', 'fileNameRegex': '.*mesh_mask_downbyone2\.nc$', }, 'ubcSSn3DMeshMask2V1': { 'type': 'geolocation bathymetry', 'title': 'Salish Sea NEMO Model Grid, 3D Mesh Mask, v1', 'summary':'''NEMO grid variable value for the Salish Sea NEMO model Arakawa-C grid. The values are those calculated by NEMO from the input coordinates and bathymetry files. The variable names are those used by NEMO-3.4, see the NEMO-3.4 book (http://www.nemo-ocean.eu/Media/Files/NEMO_book_V3_4.pdf) for details, or the long_name attributes of the variables for succinct descriptions of the variables. The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. v1: e3t, e3u, e3v, e3w, gdept, gdepu, gdepv, gdepw, tmask, umask, vmask, fmask variables ''', 'fileNameRegex': '.*mesh_mask_SalishSea2\.nc$' }, 'ubcSSn3DMeshMask2V16-07': { 'type': 'geolocation bathymetry', 'title': 'Salish Sea NEMO Model Grid, 3D Mesh Mask, v16-07', 'summary':'''NEMO grid variable value for the Salish Sea NEMO model Arakawa-C grid. The values are those calculated by NEMO from the input coordinates and bathymetry files. The variable names are those used by NEMO-3.6, see the NEMO-3.6 book (http://www.nemo-ocean.eu/Media/Files/NEMO_book_V3_6.pdf) for details, or the long_name attributes of the variables for succinct descriptions of the variables. The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. 
v1: e3t_0, e3u_0, e3v_0, e3w_0, gdept_0, gdepu, gdepv, gdepw_0, tmask, umask, vmask, fmask variables v16-07: e3t, e3u, e3v, e3w, gdept, gdepu, gdepv, gdepw, tmask, umask, vmask, fmask variables ''', 'fileNameRegex': '.*mesh_mask_downbyone2\.nc$' }, 'ubcSSnPointAtkinsonSSH15mV1': { 'type': 'tide gauge', 'title': 'Nowcast, Point Atkinson, Sea Surface Height, 15min, v1', 'summary': '''Sea surface height values averaged over 15 minute intervals from Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point closest to the Point Atkinson tide gauge station on the north side of English Bay, near Vancouver, British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: ssh variable ''', 'fileNameRegex': '.*PointAtkinson\.nc$', }, 'ubcSSnCampbellRiverSSH15mV1': { 'type': 'tide gauge', 'title': 'Nowcast, Campbell River, Sea Surface Height, 15min, v1', 'summary': '''Sea surface height values averaged over 15 minutes intervals from Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point closest to the Campbell River tide gauge station at the north end of the Strait of Georgia, near Campbell River, British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: ssh variable ''', 'fileNameRegex': '.*CampbellRiver\.nc$', }, 'ubcSSnCherryPointSSH15mV1': { 'type': 'tide gauge', 'title': 'Nowcast, Cherry Point, Sea Surface Height, 15min, v1', 'summary': '''Sea surface height values averaged over 15 minutes intervals from Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point closest to the Cherry Point tide gauge station in the southern Strait of Georgia, near Birch Bay, Washington. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. 
v1: ssh variable ''', 'fileNameRegex': '.*CherryPoint\.nc$', }, 'ubcSSnFridayHarborSSH15mV1': { 'type': 'tide gauge', 'title': 'Nowcast, Friday Harbor, Sea Surface Height, 15min, v1', 'summary': '''Sea surface height values averaged over 15 minutes intervals from Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point closest to the Friday Harbor tide gauge station at San Juan Island in Haro Strait, near Friday Harbor, Washington. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: ssh variable ''', 'fileNameRegex': '.*FridayHarbor\.nc$', }, 'ubcSSnNanaimoSSH15mV1': { 'type': 'tide gauge', 'title': 'Nowcast, Nanaimo, Sea Surface Height, 15min, v1', 'summary': '''Sea surface height values averaged over 15 minutes intervals from Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point closest to the Nanaimo tide gauge station on the west side of the central Strait of Georgia, near Nanaimo, British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: ssh variable ''', 'fileNameRegex': '.*Nanaimo\.nc$', }, 'ubcSSnNeahBaySSH15mV1': { 'type': 'tide gauge', 'title': 'Nowcast, Neah Bay, Sea Surface Height, 15min, v1', 'summary': '''Sea surface height values averaged over 15 minutes intervals from Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point closest to the Neah Bay tide gauge station on the south side of the west end of the Juan de Fuca Strait, near Neah Bay, Washington. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. 
v1: ssh variable ''', 'fileNameRegex': '.*NeahBay\.nc$', }, 'ubcSSnVictoriaSSH15mV1': { 'type': 'tide gauge', 'title': 'Nowcast, Victoria, Sea Surface Height, 15min, v1', 'summary': '''Sea surface height values averaged over 15 minutes intervals from Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point closest to the Victoria tide gauge station on the north side of the east end of the Juan de Fuca Strait, in the Victoria Inner Harbour, near Victoria, British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: ssh variable ''', 'fileNameRegex': '.*Victoria\.nc$', }, 'ubcSSnSandHeadsSSH15mV1': { 'type': 'tide gauge', 'title': 'Nowcast, Sand Heads, Sea Surface Height, 15min, v1', 'summary': '''Sea surface height values averaged over 15 minutes intervals from Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point closest to the Sand Heads light station on the east side of the central Strait of Georgia, near Steveston, British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: ssh variable ''', 'fileNameRegex': '.*Sandheads\.nc$', }, 'ubcSSn3DTracerFields1hV1': { 'type': '3d fields', 'title': 'Nowcast, Salish Sea, 3d Tracer Fields, Hourly, v1', 'summary': '''3d salinity and water temperature field values averaged over 1 hour intervals from Salish Sea NEMO model nowcast runs. The values are calculated for the entire model grid that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. 
v1: salinity (practical) and temperature variables ''', 'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_T\.nc$', }, 'ubcSSnSurfaceTracerFields1hV1': { 'type': 'surface fields', 'title': 'Nowcast, Salish Sea, Surface Tracer Fields, Hourly, v1', 'summary': '''2d sea surface height and rainfall rate field values averaged over 1 hour intervals from Salish Sea NEMO model nowcast runs. The values are calculated for the surface of the model grid that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: sea surface height and rainfall rate variables ''', 'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_T\.nc$', }, 'ubcSSn3DuVelocity1hV1': { 'type': '3d fields', 'title': 'Nowcast, Salish Sea, 3d u Velocity Field, Hourly, v1', 'summary': '''3d zonal (u) component velocity field values averaged over 1 hour intervals from Salish Sea NEMO model nowcast runs. The values are calculated for the entire model grid that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: uVelocity variable ''', 'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_U\.nc$', }, 'ubcSSn3DvVelocity1hV1': { 'type': '3d fields', 'title': 'Nowcast, Salish Sea, 3d v Velocity Field, Hourly, v1', 'summary': '''3d meridional (v) component velocity field values averaged over 1 hour intervals from Salish Sea NEMO model nowcast runs. The values are calculated for the entire model grid that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. 
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: vVelocity variable ''', 'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_V\.nc$', }, 'ubcSSn3DwVelocity1hV1': { 'type': '3d fields', 'title': 'Nowcast, Salish Sea, 3d w Velocity Field, Hourly, v1', 'summary': '''3d vertical (w) component velocity field values averaged over 1 hour intervals from Salish Sea NEMO model nowcast runs. The values are calculated for the entire model grid that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset. v1: wVelocity variable ''', 'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_W\.nc$', }, 'ubcSSaSurfaceAtmosphereFieldsV1': { 'type': 'surface fields', 'title': 'HRDPS, Salish Sea, Atmospheric Forcing Fields, Hourly, v1', 'summary': '''2d hourly atmospheric field values from the Environment Canada HRDPS atmospheric forcing model that are used to force the Salish Sea NEMO model. The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound, and Johnstone Strait on the coasts of Washington State and British Columbia. Geo-location data for the atmospheric forcing grid are available in the ubcSSaAtmosphereGridV1 dataset. Atmospheric field values are interpolated on to the Salish Sea NEMO model grid (ubcSSnBathymetry2V1 dataset) on-the-fly by NEMO. 
v1: atmospheric pressure, precipitation rate, 2m specific humidity, 2m air temperature, short-wave radiation flux, long-wave radiation flux, 10m u wind component, 10m v wind component variables ''', 'fileNameRegex': '.*ops_y\d{4}m\d{2}d\d{2}\.nc$', }, } datasets['ubcSSn3DwVelocity1hV2'] = datasets['ubcSSn3DwVelocity1hV1'] datasets['ubcSSn3DwVelocity1hV2'].update({ 'title': datasets['ubcSSn3DwVelocity1hV1']['title'].replace(', v1', ', v2'), 'summary': datasets['ubcSSn3DwVelocity1hV1']['summary'] + ''' v2: Added eddy viscosity & diffusivity variables ve_eddy_visc & ve_eddy_diff''', }) datasets['ubcSSn3DTracerFields1hV16-10'] = datasets['ubcSSn3DTracerFields1hV1'] datasets['ubcSSn3DTracerFields1hV16-10'].update({ 'title': datasets['ubcSSn3DTracerFields1hV1']['title'].replace(', v1', ', v16-10'), 'summary': datasets['ubcSSn3DTracerFields1hV1']['summary'] + ''' v16-10: NEMO-3.6; ubcSSnBathymetry2V16-07 bathymetry; see infoUrl link for full details. Changed salinity variable to reference salinity. Made temperature variable explicitly potential temperature. Added squared buoyancy frequency variable.''', }) datasets['ubcSSnSurfaceTracerFields1hV16-10'] = datasets['ubcSSnSurfaceTracerFields1hV1'] datasets['ubcSSnSurfaceTracerFields1hV16-10'].update({ 'title': datasets['ubcSSnSurfaceTracerFields1hV1']['title'].replace(', v1', ', v16-10'), 'summary': datasets['ubcSSnSurfaceTracerFields1hV1']['summary'] + ''' v16-10: NEMO-3.6; ubcSSnBathymetry2V16-07 bathymetry; see infoUrl link for full details. Added mixed layer thickness defined by sigma theta variable. 
Deleted rainfall rate variable.''', }) datasets['ubcSSn3DuVelocity1hV16-10'] = datasets['ubcSSn3DuVelocity1hV1'] datasets['ubcSSn3DuVelocity1hV16-10'].update({ 'title': datasets['ubcSSn3DuVelocity1hV1']['title'].replace(', v1', ', v16-10'), 'summary': datasets['ubcSSn3DuVelocity1hV1']['summary'] + ''' v16-10: NEMO-3.6; ubcSSnBathymetry2V16-07 bathymetry; see infoUrl link for full details.''' }) datasets['ubcSSn3DvVelocity1hV16-10'] = datasets['ubcSSn3DvVelocity1hV1'] datasets['ubcSSn3DvVelocity1hV16-10'].update({ 'title': datasets['ubcSSn3DvVelocity1hV1']['title'].replace(', v1', ', v16-10'), 'summary': datasets['ubcSSn3DvVelocity1hV1']['summary'] + ''' v16-10: NEMO-3.6; ubcSSnBathymetry2V16-07 bathymetry; see infoUrl link for full details.''' }) ``` The `dataset_vars` dictionary below is used to rename variables from the often cryptic NEMO names to the names that appear in the ERDDAP generated files and web content. The keys are the NEMO variable names to replace. The values are dicts that map the variable names to use in ERDDAP to the `destinationName` attribute name. ``` dataset_vars = { 'sossheig': {'destinationName': 'ssh'}, 'vosaline': {'destinationName': 'salinity'}, 'votemper': {'destinationName': 'temperature'}, 'vozocrtx': {'destinationName': 'uVelocity'}, 'vomecrty': {'destinationName': 'vVelocity'}, 'vovecrtz': {'destinationName': 'wVelocity'}, } ``` A few convenient functions to reduce code repetition: ``` def print_tree(root): """Display an XML tree fragment with indentation. """ print(etree.tostring(root, pretty_print=True).decode('ascii')) def find_att(root, att): """Return the dataset attribute element named att or raise a ValueError exception if it cannot be found. 
""" e = root.find('.//att[@name="{}"]'.format(att)) if e is None: raise ValueError('{} attribute element not found'.format(att)) return e def replace_yx_with_lonlat(root): new_axes = { 'y': {'sourceName': 'nav_lon', 'destinationName': 'longitude'}, 'x': {'sourceName': 'nav_lat', 'destinationName': 'latitude'}, } for axis in root.findall('.//axisVariable'): if axis.find('.//sourceName').text in new_axes: key = axis.find('.//sourceName').text new_axis = etree.Element('axisVariable') etree.SubElement(new_axis, 'sourceName').text = new_axes[key]['sourceName'] etree.SubElement(new_axis, 'destinationName').text = new_axes[key]['destinationName'] axis.getparent().replace(axis, new_axis) ``` Now we're ready to produce a dataset!!! Use the `/opt/tomcat/webapps/erddap/WEB-INF/GenerateDatasetsXml.sh` script generate the initial version of an XML fragment for a dataset: ``` $ cd /opt/tomcat/webapps/erddap/WEB-INF/ $ bash GenerateDatasetsXml.sh EDDGridFromNcFiles /results/SalishSea/nowcast/ ``` The `EDDGridFromNcFiles` and `/results/SalishSea/nowcast/` arguments tell the script which `EDDType` and what parent directory to use, avoiding having to type those in answer to prompts. Answer the remaining prompts, for example: ``` File name regex (e.g., ".*\.nc") (default="") ? .*SalishSea_1h_\d{8}_\d{8}_grid_W\.nc$ Full file name of one file (default="") ? /results/SalishSea/nowcast/28jan16/SalishSea_1h_20160128_20160128_grid_W.nc ReloadEveryNMinutes (e.g., 10080) (default="") ? 10080 ``` Other examples of file name regex are: * `.*PointAtkinson.nc$` * `.*SalishSea_1d_\d{8}_\d{8}_grid_W\.nc$` The output is written to `/results/erddap/logs/GenerateDatasetsXml.out` Now, we: * set the `datasetID` we want to use * parse the output of `GenerateDatasetsXml.sh` into an XML tree data structure * set the `datasetID` dataset attribute value * re-set the `fileNameRegex` dataset attribute value because it looses its `\` characters during parsing(?) 
* edit and add dataset attributes from the `metadata` dict
* set the `title` and `summary` dataset attributes from the `datasets` dict
* set the names of the grid `x` and `y` axis variables
* rename data variables as specified in the `dataset_vars` dict

```
def update_xml(root, datasetID, metadata, datasets, dataset_vars):
    """Edit the GenerateDatasetsXml.sh output tree in-place for ERDDAP.

    :param root: Root element of the parsed dataset XML fragment; mutated in-place.
    :param datasetID: Key into the ``datasets`` dict; also stored as the
        dataset element's ``datasetID`` attribute.
    :param metadata: Dict of dataset attribute names mapped to dicts with a
        ``text`` value and (optionally) an ``after`` key naming the existing
        attribute element the new one is inserted after.
    :param datasets: Dict of per-dataset ``title``, ``summary``, ``type``,
        and ``fileNameRegex`` values.
    :param dataset_vars: Dict mapping NEMO variable names to their ERDDAP
        ``destinationName`` values.
    """
    root.attrib['datasetID'] = datasetID
    # Re-set fileNameRegex because its backslashes are lost during parsing.
    root.find('.//fileNameRegex').text = datasets[datasetID]['fileNameRegex']
    title = datasets[datasetID]['title']
    summary = find_att(root, 'summary')
    # summary becomes "<title>\n\n<summary>"; the title att is inserted
    # immediately after the summary att element.
    summary.text = '{0}\n\n{1}'.format(title, datasets[datasetID]['summary'])
    e = etree.Element('att', name='title')
    e.text = title
    summary.addnext(e)
    for att, info in metadata.items():
        e = etree.Element('att', name=att)
        e.text = info['text']
        try:
            # Insert the new att element after the one named info['after'].
            root.find('.//att[@name="{}"]'.format(info['after'])).addnext(e)
        except KeyError:
            # No 'after' key: overwrite the text of the existing att element.
            find_att(root, att).text = info['text']
    # Rename the grid axis variables: x -> gridX, y -> gridY.
    for axis_name in root.findall('.//axisVariable/destinationName'):
        if axis_name.text in ('x', 'y'):
            axis_name.text = 'grid{}'.format(axis_name.text.upper())
    if datasets[datasetID]['type'] == 'tide gauge':
        replace_yx_with_lonlat(root)
    # Rename NEMO data variables to their ERDDAP destinationName values.
    for var_name in root.findall('.//dataVariable/destinationName'):
        if var_name.text in dataset_vars:
            var_name.text = dataset_vars[var_name.text]['destinationName']


# Parse the GenerateDatasetsXml.sh output and apply the edits above.
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse('/results/erddap/logs/GenerateDatasetsXml.out', parser)
root = tree.getroot()
datasetID = 'ubcSSn3DvVelocity1hV16-10'
update_xml(root, datasetID, metadata, datasets, dataset_vars)
```

Inspect the resulting dataset XML fragment below and edit the dicts and code cell above until it is what is required for the dataset:

```
print_tree(root)
```

Extra processing steps are required for some types of datasets.
See:

* [Surface Field Datasets](#Surface-Field-Datasets)
* [Model Grid Geo-location and Bathymetry Datasets](#Model-Grid-Geo-location-and-Bathymetry-Datasets)
* [EC HDRPS Atmospheric Forcing Datasets](#EC-HDRPS-Atmospheric-Forcing-Datasets)

Store the XML fragment for the dataset:

```
# Serialize the edited tree to the fragments directory, named by datasetID.
with open('/results/erddap-datasets/fragments/{}.xml'.format(datasetID), 'wb') as f:
    f.write(etree.tostring(root, pretty_print=True))
```

Edit `/results/erddap-datasets/datasets.xml` to include the XML fragment for the dataset that was stored by the above cell.
That file is symlinked to `/opt/tomcat/content/erddap/datasets.xml`.

Create a flag file to signal the ERDDAP server process to load the dataset:

```
$ cd /results/erddap/flag/
$ touch <datasetID>
```

If the dataset does not appear on https://salishsea.eos.ubc.ca/erddap/info/, check `/results/erddap/logs/log.txt` for error messages from the dataset load process (they may not be at the end of the file because ERDDAP is pretty chatty).

Once the dataset has been successfully loaded and you are happy with the metadata that ERDDAP is providing for it, commit the changes in `/results/erddap-datasets/` and push them to GitHub.

## Surface Field Datasets

The `/opt/tomcat/webapps/erddap/WEB-INF/GenerateDatasetsXml.sh` script produces an XML fragment that uses all of the dimensions that it finds in the sample file it parses, and includes only the variables that have all of those dimensions.
To produce an XML fragment for surface fields we need to do some additional work:

* Delete the depth axis
* Delete all of the `dataVariable` elements
* Add `dataVariable` elements for the surface variables

```
# Remove the depth axis: surface datasets have no vertical dimension.
for axis in root.findall('.//axisVariable'):
    if axis.find('.//destinationName').text == 'depth':
        axis.getparent().remove(axis)
        break

# Drop all generated dataVariable elements; only the surface variables
# are rebuilt below.
for var in root.findall('.//dataVariable'):
    var.getparent().remove(var)

# Sea surface height variable: sossheig -> ssh.
var = etree.SubElement(root, 'dataVariable')
etree.SubElement(var, 'sourceName').text = 'sossheig'
etree.SubElement(var, 'destinationName').text = 'ssh'
etree.SubElement(var, 'dataType').text = 'float'
attrs = etree.SubElement(var, 'addAttributes')
# 'null' removes the attribute from the ERDDAP-served metadata.
etree.SubElement(attrs, 'att', name='_ChunkSize').text = 'null'
etree.SubElement(attrs, 'att', name='coordinates').text = 'null'

# Rainfall rate variable (name unchanged).
var = etree.SubElement(root, 'dataVariable')
etree.SubElement(var, 'sourceName').text = 'rain_rate'
etree.SubElement(var, 'destinationName').text = 'rain_rate'
etree.SubElement(var, 'dataType').text = 'float'
attrs = etree.SubElement(var, 'addAttributes')
etree.SubElement(attrs, 'att', name='_ChunkSize').text = 'null'
etree.SubElement(attrs, 'att', name='coordinates').text = 'null'

# Replace the auto-generated keywords with ones for the surface variables.
find_att(root, 'keywords').text = (
    'model results, height, local, sea, sea surface height, sossheig, source, surface, time_counter')

print_tree(root)

with open('/results/erddap-datasets/fragments/{}.xml'.format(datasetID), 'wb') as f:
    f.write(etree.tostring(root, pretty_print=True))
```

## Model Grid Geo-location and Bathymetry Datasets

Model grid geo-location and bathymetry datasets require a lot of hand editing because they are not model generated.
Here is an example of a finished one: ``` parser = etree.XMLParser(remove_blank_text=True) tree = etree.parse('/results/erddap-datasets/fragments/ubcSSnBathymetry2V1.xml', parser) root = tree.getroot() print_tree(root) ``` ## EC HDRPS Atmospheric Forcing Datasets ### Atmospheric Forcing Grid Geo-location Dataset Use the `/opt/tomcat/webapps/erddap/WEB-INF/GenerateDatasetsXml.sh` script generate the initial version of an XML fragment for the dataset: ``` $ cd /opt/tomcat/webapps/erddap/WEB-INF/ $ bash GenerateDatasetsXml.sh EDDGridFromNcFiles /results/forcing/atmospheric/GEM2.5/operational/ ops_y\d{4}m\d{2}d\d{2}.nc$ /results/forcing/atmospheric/GEM2.5/operational/ops_y2016m03d07.nc 10080 ``` Like the model grid geo-location and bathymetry dataset, the atmospheric forcing grid dataset requires a lot of hand editing. Here is the finished dataset: ``` parser = etree.XMLParser(remove_blank_text=True) tree = etree.parse('/results/erddap-datasets/fragments/ubcSSaAtmosphereGridV1.xml', parser) root = tree.getroot() print_tree(root) ``` ### Atmospheric Forcing Model Fields * Change the value of the `recursive` element to `false` so that the `/results/forcing/atmospheric/GEM2.5/operational/fcst/` directory is excluded * Add Environment Canada acknowledgement and terms & conditions of use to `license` element * Add Environment Canada to `acknowledgement` element ``` root.find('.//recursive').text = 'false' find_att(root, 'license').text += ''' This dataset is derived from a product of the Environment Canada HRDPS (High Resolution Deterministic Prediction System) model. 
The Terms and conditions of use of Meteorological Data from Environment Canada are available at http://dd.weather.gc.ca/doc/LICENCE_GENERAL.txt.</att>''' find_att(root, 'acknowledgement').text += ', Environment Canada' for axis in root.findall('.//axisVariable'): axis_name = axis.find('.//sourceName').text if 'time' not in axis_name: attrs = axis.find('.//addAttributes') etree.SubElement(attrs, 'att', name='grid_spacing').text = 'null' etree.SubElement(attrs, 'att', name='units').text = 'null' etree.SubElement(attrs, 'att', name='long_name').text = axis_name.upper() etree.SubElement(attrs, 'att', name='standard_name').text = axis_name print_tree(root) with open('/results/erddap-datasets/fragments/{}.xml'.format(datasetID), 'wb') as f: f.write(etree.tostring(root, pretty_print=True)) ```
github_jupyter
<a href="https://colab.research.google.com/github/iotanalytics/IoTTutorial/blob/main/code/detection_and_segmentation/Anomaly_Detection_with_Autoencoder_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # **Anomaly Detection with Autoencoder** Autoencoders are neural networks designed to learn a low dimensional representation given some input data. They consist of two components: an encoder (which learns to map input data to a low dimensional representation, termed the bottleneck), and a decoder (which learns to map this low dimensional representation back to the original input data). By structuring the learning problem in this manner, the encoder network learns an efficient “compression” function which maps input data to a salient lower dimension representation, such that the decoder network is able to successfully reconstruct the original input data. The model is trained by minimizing the reconstruction error: the difference (mean squared error) between the original input and the reconstructed output produced by the decoder. 
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAqMAAAFdCAYAAAA372ScAAAgAElEQVR4Ae2dXbLmqpG1fWN7OD2RGprti56OI/q2I3oMvvq+M4/dser0OjsLg179IAGphwgdJCAheRLE2tpVdf70RYIABCAAAQhAAAIQgMAgAn8aNC7DQgACEIAABCAAAQhA4AsxyiKAAAQgAAEIQAACEBhGADE6DD0DQwACEIAABCAAAQggRlkDEIAABCAAAQhAAALDCCBGh6FnYAhAAAIQgAAEIAABxChrAAIQgAAEIAABCEBgGAHE6DD0DAwBCEAAAhCAAAQggBhlDUAAAhCAAAQgAAEIDCOAGB2GnoEhAAEIQAACEIAABBCjrAEIQAACEIAABCAAgWEEEKPD0DMwBCAAAQhAAAIQgABilDUAAQhAAAIQgAAEIDCMAGJ0GHoGhgAEIAABCEAAAhBAjLIGIAABCEAAAhCAAASGEUCMDkPPwBCAAAQgAAEIQAACiFHWAAQgAAEIQAACEIDAMAKI0WHoGRgCEIAABCAAAQhAADHKGoAABCAAAQhAAAIQGEYAMToMPQNDAAIQgAAEIAABCCBGWQMQgAAEIAABCEAAAsMIIEaHoWdgCEAAAhCAAAQgAAHEKGsAAhCAAAQgAAEIQGAYAcToMPQMDAEIQAACEIAABCCAGGUNQAACEIAABCAAAQgMI4AYHYaegSEAAQhAAAIQgAAEEKOsAQhAAAIQgAAEIACBYQQQo8PQMzAEIAABCEAAAhCAAGKUNQABCEAAAhCAAAQgMIwAYnQYegaGAAQgAAEIQAACEECMsgYgAAEIQAACEIAABIYRQIwOQ8/AEIAABCAAAQhAAAKIUdYABCAAAQhAAAIQgMAwAojRYegZGAIQgAAEIAABCEAAMcoagAAEIAABCEAAAhAYRgAxOgw9A0MAAhCAAAQgAAEIIEZZAxCAAAQgAAEIQAACwwggRoehZ2AIQAACEIAABCAAAcQoawACEIAABCAAAQhAYBgBxOgw9AwMAQhAAAIQgAAEIIAYZQ1AAAIQgAAEIAABCAwjgBgdhp6BIQABCEAAAhCAAAQQo6wBCEAAAhCAAAQgAIFhBBCjw9AzMAQgAAEIQAACEIAAYpQ1AAEIQAACEIAABCAwjABidBh6BoYABCAAAQhAAAIQQIyyBiAAAQhAAAIQgAAEhhFAjA5Dz8AQgAAEIAABCEAAAohR1gAEIAABCEAAAhCAwDACiNFh6BkYAhCAAAQgAAEIQAAxyhqAAAQgAAEIQAACEBhGADE6DD0DQwACEHiWwL9+++3L17MjMxoEIACBNgHEaJsNNRCAAARSEfjb3//x9ee//PVLOQkCEIDALAQQo7NEAj8gMICARImvp4bXl7mnx3xqbrOPI+6I0dmjhH8QeB8BxOj7Ys6MIfCTgEThjx8/fooTCRQ9P5E0jsbT2KRnCSBGn+XNaBCAwD4CiNF9nGgFgXQELEwkDJ/8WoYYHbeUHHPlJAhAAAKzEECMzhIJ/IDAwwQsTPx19KkvlYjRhwMdhnPMEaMBCrcQgMBwAojR4SHAAQg8T0CCUOJTl4SJBanKW2mrTeyvtI/9+yvsVt7yQeWxL/veah/9OGurMXRFe5ftFXSytY3zPbZxTPGSrexU/imVY9pO+Z6v4Gqn8Tyu7rfGdb8xrrapzWOrr09zox4CEMhHADGaL6bMCAIfCUgMWGiosYWHREUrbbVRf64v7WtCJYqW8r4mVGL/ZXs9b/m9ZWvBVPqsZ9l5LM/Nz863xlUfW3Pfso1jeyznWz5vjSk7z6M19hYrjd+ya82zNo9P/tdiQRkEIJCbAGI0d3yZHQSqBCweLC78LKHQShYyEhhliiKmrCufLVC2xoo2sW/5GcfXs/3yXI7YSmDJPvYZ7VsC0Lz2CDT1H30r5xPH073qPW5p53FbPpd9e17KbbvXZ9vKJ9lucY5ziD7IxvNXu
a/YnnsIQAACiFHWAAReSMDCwoIjCgiXlVhKm1gf7WN57V5tJYjU355kEaX2Nd+26l2nvGbrOam+TNHPo7bq61Pfrfls2X3qN863nM8nW81RY7f8ct+teo/nfo7E2LbkEIDAOwkgRt8Zd2b9YgItkbVXBNWEWRQgn9DG8T+1Vb1FUE0wqt5jlyKpVR7HdN+yrSULKvVVpi1bz1H2R5J9ll1tTPXVGjfatljZtla/Vadx3X/JuZyf223NobThGQIQeDcBxOi748/sX0igJTpc3hJmKm8JjChAPiFVW/XTGifax35rAkptYxvdO3kcjSXb1mVfoq37bdWpfouX62R/JMW5tOxafUfbci7uy7bKy+Q6xUX3tWtrDbi/PX64LTkEIAABEUCMsg4g8DICFhTKy0viSVdNzNiuVhcFyCecamuRt6etx60JKNnHsaNvHsdz2so1RrS1X/azVhfFm9s7d53sj6Toc8uu1bdszarmr/qzrfKY1N51W5xc1+pffe7xI47NPQQgAAHEKGsAAi8iIKFgQbGVl2JFiLaEThQgn3DaB/X3KcV+az7JPrbRvVMcR7Z63rps5zza675MFm+1edhWjI8k2Zlzy641brSt+av+bKs8JrWPdXr+dEX7eC87z0H3JAhAAAKfCCBGPxGiHgKJCFhwSCzUxIZFhPIyua4UMmoXBUhpVz6rrURabYyyrZ7tc21c1Xts9ad7p1a56/fk9jP2azv7VZuH2lvs12zdR5nbZ9m27FrjRtsWK9vW6rfqSj+3nqMfrTls2VMHAQi8jwBi9H0xZ8YvJrAlKIVF4qEloixWauLLdbL9lDxGrZ+arftW+5q4cb3ysj7Wtfpu9Rv9LPtVX+67NQ+Vi4falcl91+q2+pXdVr9XbO3TFg/V1XyO84s+6p4EAQhA4BMBxOgnQtRDIAkBi42tr26aqsVoKTqiver0rEv3FkhHxKj9cD/OS9weQ+0tlNxWY8fymq1927It5+p+tvqOY7t9zOWj7HWpbfTZPtXGVbtYb7tYrvpaKttEW/trf2r2bqP+t3xWv2WKY9n/WOb70o5nCEAAAohR1gAEXkIgCo2tKVtIqH2Z3IdFlnILF5eVNrVnj2GbmLeETm3sOH5tHJWpv9Z49r1mKzv3X/PJ/qiPVlKb2thb4+7xueaPfWiNGct1X0vqV3UxHr63z7WxWza2db7FquYPZRCAwDsIIEbfEWdmCYE/vmC2hIgRSWxYeLgs5hYs6keXnmNZbNu6d3uNU16qa6VoF8dvtXe57WTjudl3t6nlW2OoT/dVs3VZHHurP7d3Hu08juz3pGgbx7TPyrdSy75l437LWJbPe/1vjUM5BCCQkwBiNGdcmRUEIAABCEAAAhBYggBidIkw4SQEIAABCEAAAhDISQAxmjOuzAoCEIAABCAAAQgsQQAxukSYcBICEIAABCAAAQjkJIAYzRlXZgUBCEAAAhCAAASWIIAYXSJMOAkBCEAAAhCAAARyEkCM5owrs4IABCAAAQhAAAJLEECMLhEmnIQABCAAAQhAAAI5CSBGc8aVWUEAAhCAAAQgAIElCCBGlwgTTkIAAhCAAAQgAIGcBBCjOePKrCAAAQhAAAIQgMASBBCjS4QJJyEAAQhAAAIQgEBOAojRnHFlVhCAAAQgAAEIQGAJAojRJcKEkxCAAAQgAAEIQCAnAcRozrgyKwhAAAIQgAAEILAEAcToEmHCSQhAAAIQgAAEIJCTAGI0Z1yZFQQgAAEIQAACEFiCAGJ0iTDhJAQgAAEIQAACEMhJADGaM67MCgIQgAAEIAABCCxBADG6RJhwEgIQgAAEIAABCOQkgBjNGVdmBQEIQAACEIAABJYggBhdIkw4CQEIQAACEIAABHISQIzmjCuzggAEIAABCEAAAksQQIwuESachAAEIAABCEAAAjkJIEZzxpVZQQACEIAABCAAgSUIIEaXCBNOQgACEIAABCAAgZwEEKM548qsIAABCEAAAhCAwBIEE
KNLhAknIQABCEAAAhCAQE4CiNGccWVWEIAABCAAAQhAYAkCiNElwoSTEIAABCAAAQhAICcBxGjOuDIrCEAAAhCAAAQgsAQBxOgSYcJJCEAAAhCAAAQgkJMAYjRnXJkVBCAAAQhAAAIQWIIAYnSJMOEkBCAAAQhAAAIQyEkAMZozrswKAhCAAAQgAAEILEEAMbpEmHASAhCAAAQgAAEI5CSAGM0ZV2YFAQhAAAIQgAAEliCAGF0iTDgJAQhAAAIQgAAEchJAjOaMK7OCAAQgAAEIQAACSxBAjC4RJpyEAAQgAAEIQAACOQkgRnPGlVlBAAIQgAAEIACBJQggRpcIE05CAAIQgAAEIACBnAQQoznjyqwgAAEIQAACEIDAEgQQo0uECSchAAEIQAACEIBATgKI0ZxxfeWs/vP//7+vf/322yvnzqQhAIG5Cej9pIsEAQj8OwHE6L8zoWRRAv/xP//99af/+icv/EXjh9sQyEhAPyDzbsoYWebUkwBitCdN+hpKwC98C1K+kg4NB4ND4PUE9CW0fC+9HgoAIFAhgBitQKFoTQLxpS9Bqmd+LbZmLPEaAisT0A/CevfoPRQv3kcrRxXf7ySAGL2TLn0/SqAUoz4EdADwlfTRUDAYBF5LQO+brXfRa8EwcQhsEECMbsChai0CrQOAr6RrxRFvIbAiAX8N3XoP8WV0xcji8xMEEKNPUGaMRwhsHQJRkPKV9JFwMAgEXkNg62uof0OjHDH6miXBRA8SQIweBEbzeQl8EqM+FNSOQ2HeOOIZBFYhsOdrqN87iNFVooqfIwggRkdQZ8xbCOwVozoULEj5SnpLKOgUAukJ7P0aihhNvxSYYAcCiNEOEOliDgJHxKgPCNkgSOeIH15AYBUCEqJ+hxzJ+Y3MKhHGz6cJIEafJs54txE4I0Z9kHBI3BYWOoZAGgL6wZX3TJpwMpGJCCBGJwoGrlwjcOWQkCjlK+k1/lhDIDOBM7+W9w+7zvmhN/MKYW5XCCBGr9DDdioCV8WoBakODH51P1VocQYCwwjoXaB3ggXllRwxOiyMDDw5AcTo5AHCvf0EeohRHzQ6NDg49rOnJQQyEtA7oPd7JSMn5gSBqwQQo1cJYj8NgZ6HRvxKOs0EcQQCEHiEQM+vof4BVzk/4D4SPgZZkABidMGg4XKdQG8xakGqfvm1fZ05pRDIRqD311DEaLYVwnzuIIAYvYMqfQ4hcIcY9UGivvmqMSSsDAqBxwhoj3vP35HzDnkslAy0GAHE6GIBw902gTvFqA8mDpM2f2ogsCoB/eaD98eq0cPvDAQQoxmiyBx+EnjiMJEo5SspCw4CeQjc/TXUP8gq54fZPOuGmfQlgBjty5PeBhJ4SoxGQcqfJR0YcIaGwAUCT30NRYxeCBKmryGAGH1NqPNP9Ekx6gOGr6T51xUzzEVAIlRfKEe8L/gymmstMZt+BBCj/VjS02ACIw4XvpIODjrDQ+AAgVEi1D+8IkYPBIumryKAGH1VuHNPdpQY9UHDV9Lc64vZrU1gtBDVewIxuvYawvv7CCBG72NLzw8TGC1G+Ur6cMAZDgI7CIz4s6H+AbXMEaM7AkaTVxJAjL4y7DknPYMY9eEjX/jLTTnXGbNah8AMX0P9TlCOGF1n7eDpswQQo8/yZrQbCcwkRnXwyB8OnxsDTtcQaBCY6WsoYrQRJIohEAggRgMMbtcmMJsY9SEkQcpX0rXXFt6vQ2C2r6F+Dyjnh9N11hGePksAMfosb0a7kcCsYlSHEF9Jbww8XUPg6+vnD3wSe1H8zXaPGGWpQqBOADFa50LpggRmFqM+FHUY8ZV0wcWFy1MT0L5aZf9PDRLnIDCIAGJ0EHiG7U9ghcOIr6T9406P7yWgH+xm/xrqH0SV82X0vWuVmW8TQIxu86F2IQKriNEoSPlKutACw9WpCKzyNRQxOtWywZlJCSBGJw0Mbh0nsJIY9QEln/lacjzWWLyXgL+Grrjf2evvXbfMfJsAYnSbD
7ULEVjxcIpfSRdCjasQGEJAQnTVfa69jhgdsmwYdAECiNEFgoSL+wisfEhZlPJr+32xptX7CEjI+TcKq+aI0fetW2a8jwBidB8nWi1AYHUx6gOWA2uBxYaLjxFY/Wuo97Vy9vZjy4aBFiOAGF0sYLjbJpBFjOrQ0lz4StqONTXvICDxlmlfI0bfsW6Z5XECiNHjzLCYlECmQ8uClMNr0sWGW7cS0A9iWvvxq2KGe/bzrcuGzhcmgBhdOHi4/iuBbGLUh68OML6S/v5/2FHEzSJ7/uvqfs+T1nvmvfyeSDJTCOwngBjdz4qWkxPIeoDxlfTr51cyxfdN19u+ovlraOZ9/LaYTn5k4N5EBBCjEwUDV64RyHyISZC+9SCTSMkeW38Fj7nm/Kb0hji/dQ+/aR0z13MEEKPnuGE1IYHsguXNB1n22EYR6nvE6D/5M6MTvmdxCQJ3EECM3kGVPocQyC5YEKP5xImFZy1HjOaL95v38JBDgUGXIYAYvRgq/Wrpz3/56zLX3/7+j4szntccMTpvbK56lj22iNHf/2Ja9jhnFKM6U1Y6A3Vmk+YjgBi9GBPE6EWAHc05yDrCnKyr7LFFjCJGJ9tyu91BjO5GRcMNAojRDTh7qhCjeyg90ya7YMn4VWXvysgeW8QoYnTvXpitHWJ0tois6Q9i9GLcEKMXAXY0zy5YEKP5/gxhTYS6TOv5TUnvUvbwehFHjK4Xsxk9RoxejApi9CLAjuYcZB1hTtZV9thagMYcMZrvh4+MP1AiRid7WS7qDmL0YuAQoxcBdjTPLlgyHmR7w589tlGE+h4xihjduz9GtkOMjqSfZ2zE6MVYIkYvAuxonl2wIEbziRMLz1qOGM0X74x7GDHa8RB7cVeI0YvBR4xeBNjRHDHaEeZkXWWPLWKUv8A02Zbb7Q5idDcqGm4QQIxuwNlThRjdQ+mZNtkFS8avKntXRvbYIkYRo3v3wmztEKOzRWRNfxCjF+P2pBj98ePHl6+z/8iwXhxZU3bBghjN92vbmgh1mdbzm5Lepezh9SL+lBj12afxdH/2DNQ6I81HADF6MSZPiVFtPm1CX3Ez+l65r9ZGlX3WxEGWNbJf6UWKBWjMEaP5fvjI+AOlzpTWedOzXGebz7/amD77nLfGRozOeU4gRi/G5Qkx6k2osXxpM3rTxQ0a71VfbkjVZ02I0ayRRYzmjez3zPRuYw9/81jlTmdKec70ft46A+OZV97XzkCtM9J8BBCjF2Oihd1747m/KDYtQp3HTecy565zPzFXXdbEQZY1sojRvJH9npneX+zhbx6r3OlMiWdMz/vaGSguWis+55T77HPuOsToKqvo6wsxejFWWvw9N5/70ibyhqq56E3n3BtUz7KrbUL1rbqsiYMsa2QRo3kj+z0zvbvYw988VrnTmeJzq2fuMzCecZGJy527Ts9bZ6DqSfMRQIxejIkWds8N6L7iRvzkojaeN6A3IWKUP2/2ad2sVJ9dpMQ/K+p7zflNSe+w7HHmz4z+dfd56XPtk3j02efcZ6DP0jL/1N+b9txMc0WMXoyGFna52K8+HxGi5QZsiVD7pI2aNXGQZY0sX0bzRvZ7ZnqXsYe/eaxypzPF50uv/OwZuMcXxOicKwsxejEuvcVo3ITeNMp1aaP53s8q0yW7T0JULwq1zZo4yLJGFjGaN7LfM9M7jT38zWOVO50pvUSo+mmdgT7rfAb62fneM1D2pPkIIEYvxkQLu9dGrG1CuacxtOG82cr8yPjqJ2viIMsaWcRo3sh+z0zvOfbwN49V7nSmHDmDPrVVf1Ew6l5Xee7F5099xvrY9yqM3+AnYvRilLWw40I/e6+NpU0YN6I3ocrixvP9mbHU1wzJc+vpCwdZT5rH+rojntGD7LH1nxONueY8W7ozzuo7e5xn+jOjvWKpM+XMWVTa+AyMfule/dfOwNJ+77P6JM1HADF6MSZa2Hs3QatdbRPKLW9E1bdsj5bPIEY9L79kLobgD3MOs
j9QPHoT46n7O1L22EYR6vsZxaj37B1xVp/Z4zyLGO25Z7Umjp5DZft4Bsb3h/3seQbesXajz9yfI4AYPcftDyst7HJjHX3WZlY/tU2iMm3EXptRY82QfKg57+ETB1kPisf70Bp1HJXX1vHxXn+1yB5bC9CYzyhGy1j/GqVrT+o7e5xnEaOKVNyzuj+bZHv0zIvtoxCtvTtU1uv807i1Mc7OHbt+BBCjF1lqYceNdfRem0x9lBsklmmz99qMV146F1H9Yq75yZd4/dLgxAMH2QlonUzKeJbr+eow2WMbRajvVxCjPd8nWjPZ4zyTGC337NlYyu7ouef2W0LU7xD72esMdL9X30nY9yWAGL3IUwvbG+toHjei3VB/8VK5NnuvjXj2hWP/euaap/yJ15X+Ociu0Ltu63jW1vXV3rPH1gI05jOKUcXRce61b7021G/2OM8kRnvFUuvg6Nnn9npXKO66nPwcc43R6wyMY3lM8vEEEKMXY6CF7Y11NNcG84aTG76v5Vc2fPRL/cyUNFf5FK+z/nGQnSXXz87x7C1Is8c2ilDfzypGtVoc5x771qtPfWaP82xitEcstQbiGbP3vvaO0BpoXWfHKf1R/6T5CCBGL8ZEC7tc7J+etQl1xU2he202ldtezyr3Fevc5miuPs8m+9g7l0+eu+/P+MhBtp9a7xi6P8cvxlN1V1P22FqAxryXGHVseucx1r6/Emf5lz3OV8Vo7xi6P8cv5ntjKZuj55Day07jO9kXn3PK3aasOzOebeKYHpt8PAHE6MUYaGF7kR/JtdHiptC9yrwR1VfciLX6I+O5rfo8k2Snyy+Iu/PIZq+/HGT7SIntXfGMayTen4lnnE322EYR6vseYvSuOLf2v8Y7m7RGssf5ihh9OpZ796z88vlyJJedx1Du+bkPrzHV+VKZ68/mHvPsOsXuHgKI0YtctbCPbgptqLjRtAlrm8xt5KLGabU7Mr76OJM8vl8Yd+Ser/o+88LgINsf2TviF/uMsVT51ZQ9thagMe8hRlfYt14b8jV7nK+I0Vljqf195AxyW9lpTp7X1hkY29n+bK6+SPMRQIxejIkW9plNEQ9r3W9tRLm4tWGPjN9DGFxE9m/mnpt88wvq3xrtKOAg2wHp5iZlLHutt+yxjSLU9z3E6J3hLmOt5ytJ9tnjfEWMXmH7yfZKLLXHj5xBbnvkDJR/9rF2VrrPPbn6Ic1HADF6MSZa2Hs2wJk2FqlysddG7CUOLmL7wzzO64oQVYccZH9gHXLjWCqOvno5kj22FqAxn1mMOtYWFHq+mtRH9jjPKEavxlJ7/cz5tscmri/5qWuP3VYb9UGajwBi9GJMemyO1sbxRpSLGkebXmWt9nvK1ccsKc5Jfl19SXCQjYusY6k49ohlOZPssY0i1PezilHH2u+nq/vWsVY/2eM8mxjtEUvt9z1nz5k2cY1FX8/0ZZte69XrlrwPAcToRY5a2F7kvXMf7HJR42hjZhGj8cWiefZIHGQ9KB7vw7H0etVz75Q9thagMZ9RjN4Za/WdPc4ziVHHMgq+M/v2TjGqvuWnr6vnn85o9UWajwBi9GJMtLBbItTi0Yd0q12rPG5E3ffYiOpndBIzM+npDwfZmMjGeN71os8e2yhCfT+jGI37tnes1V/2OM8iRuOeVUyvxFL2rTPsyhkoW/tmf3ucgVfmOuYN+45REaMX46yFvXcjHtlI3ohyzxuxNc6Rcm3uGZLm5KuXPxxkvUge68dxVH5Xyh5bC9CYzyhGFWMLhN6xVt/Z4zyLGFXsesXykxhVva8jZ5XOQPnoa2ucI/2qP9J8BBCjF2Oihd3aCBaU3kwtMaryeGnT2Ubu6b7XRlQ/WRMHWdbI5v/LaVGE+n5GMXrnCtN7jj18J+F7+t46m3Su+Sz71K52Bspj26u+ddYeKVd/pPkIIEYvxkQLu7UR4kZUu9pmihtQ99qwurwB5Z6ea7atcbfK1VfWxEGWNbKI0byR/Z6Z3nns4
W8eq9zpTGmdOTq3fJa1zrGtM1AMZN+ybY27Va7+SPMRQIxejIkWdmvhawN5I8YNVROdalu2l2uy82ZtjXOkXGNkTRxkWSOLGM0b2e+Z6V3HHv7mscqdzpTWGVSKUbX1eaZ7XT4b47PKlJTrsk1rnCPl7nsVvm/xEzF6MdJa2K2N4I3mDeXNFvPWxrCN8p4bUWNnTRxkWSOLGM0b2e+Z6V3HHv7mscqdzpRPZ6Dmovj67NOZpnufc7W5uk7t1L41xtFy9UuajwBi9GJMtLBbm6HcbN5cMa8N7/rem1B+qs+siYMsa2QRo3kj+z0zvffYw988VrnTmdI6AyUiFVcln2tl3pqn2t1xBtqf1riUjyGAGL3IXQu7tRG1kXRtLX7VlZften4RtY/qO2viIMsaWcRo3sh+z0zvQfbwN49V7nSm+Hwpc4vRK2dg2efV5y1fVmGe0U/E6MWoamG3Noc24pYYla2vOwVo9A8x+s8v/23l1fKZ/lmYi9vmsHl2kVJbi5rzm5LehdnjnHEPfxKjW2eg1vfTZ6DGI81HADF6MSZa2FHslfcWpBabrdxfQZWXffR81vhZEwdZ1sjyZTRvZL9nhhj9ZrHSnc6U1hm1df4p3vE8fOoMRIzOuboQoxfj8kmMapOWG9KbrrWB7yxHjPJl9OKSH2Ke/QcNvoz+/oUse5zf9mXUZ1k8A33+KXf9kzlidMgr/OOgiNGPiLYb7BWjcQOO2oTa8IhRxOj2ip6zNrtIQYwiRufceZ+92voyapFZnn8jz0DE6OeYjmiBGL1IfY8Y9YacIUeMIkYvLvkh5ojRIdgfHVTv0uxxfuuX0RnOPvuAGH10W+8eDDG6G1W9IWK0zmVEKQfZCOrPjJk9tnwZ5cvoMzup/yh7voxaCM6QI0b7r4EePSJGL1JEjF4E2NE8u2DJ+FVlb/izxxYxihjduxdma4cYnS0ia/qDGL0YN8ToRYAdzbMLFsToun/EoiY2P5VpPb8p6V3KHl4v4ojR9WI2o/aJ4YwAABa4SURBVMeI0YtRQYxeBNjRnIOsI8zJusoe25owRYzm++Ej4w+UiNHJXpaLuoMYvRg4xOhFgB3NswuWjAfZ3vBnjy1ilF/T790Ls7VDjM4WkTX9QYxejBti9CLAjubZBQtiNN+XspoIdRlfRvPFO+MeRox2PMRe3BVi9GLwEaMXAXY0R4x2hDlZV9ljawEac8QoYnSybVh1BzFaxULhQQKI0YPAas21GVe5JJ6zpuyCJeNXlb1rMXtsowj1PWIUMbp3f4xspzNllfNPfpLmJIAYnTMueHWCQHbBghjNJ04sPGs5YjRfvN+8h0+80jF5EQHE6IuCnX2qiNG8Ec4eW8Qof4Ep7+5lZhD4TAAx+pkRLRYhkF2wvPmrSvbYIkYRo4u8ZnETArcQQIzegpVORxDILFg0tzeLUf25tDdeI/bRqDEV38x7WD9wvHkPj1pXjLsGAcToGnHCyx0Esh5kmpcOahIE3kBAgi3rXkaMvmEFM8czBBCjZ6hhMyWBjAcYh9eUSw2nbiaQ9Ssp+/nmhUP3yxJAjC4bOhwvCWQSo3wNLaPL8xsJZPtKihh94ypmznsIIEb3UKLNEgQyiFHNQQcWv5ZfYsnh5AMEMn0lRYw+sGAYYkkCiNElw4bTNQKri1EL0drcKIPAmwlIkGb4SooYffMqZu5bBBCjW3SoW4rAqmLUIpSvoUstN5wdQGB1QYoYHbBoGHIJAojRJcKEk3sIrChGLUT3zI82EIDA7/8e6aqiFDHKCoZAnQBitM6F0gUJrCRGLUL5GrrgQsPlKQisKEgRo1MsHZyYkABidMKg4NI5AquIUQvRc7PECgIQiAQk8Gr/B6sZyxCjMXLcQ+CbAGL0mwV3ixNYQYxyGC2+yHB/SgKr/I179v+UywenJiCAGJ0gCLjQh8DMYpSvoX1iTC8Q2CIw+1dSxOhW9Kh7MwHE6Jujn2zuM4pR+aSLPxuabLExnWkJSPDN+C7g/
00/7ZLBsQkIIEYnCAIu9CEw2wEkf/gS0ie29AKBIwT0w9+MopT3wZEo0vZNBBCjb4p28rnOJEZ16PA1NPmCY3rTE5hNkCJGp18yODiIAGJ0EHiG7U9gBjHK19D+caVHCFwh4K+kM/ztesTolUhim5kAYjRzdF82t9FilK+hL1twTHcpAjN8JUWMLrVkcPZBAojRB2Ez1L0ERolRvobeG1d6h0AvAqO/kiJGe0WSfrIRQIxmi+iL5zNCjGpM/mzoixcdU1+SwKivpIjRJZcLTj9AADH6AGSGeIbAk2KUr6HPxJRRIHAXAf0Q+eQ7g3/a6a5I0m8GAojRDFFkDj8JPHWwaBy+hrLoIJCDwJNfSfkymmPNMIv+BBCj/ZnS4yACd4tR9c9hMii4DAuBGwn4K+nd7xDeHzcGka6XJoAYXTp8OB8J3HmQ6BDha2ikzT0E8hHQPr/7PZKPGjOCwHUCiNHrDOlhEgJ3HCLqk68ZkwQYNyDwAAH90Kk9f8e/S8q75IEAMsSSBBCjS4YNp2sEeotR9cfX0BppyiCQm4AFae93CmI097phducJIEbPs8NyMgK9Dg71w6ExWXBxBwIDCEiU9nqv8LfpBwSQIZchgBhdJlQ4+olAj0NDffA19BNp6iHwLgK9fm3PD7nvWjfMdj8BxOh+VrScnMBVMcpBMXmAcQ8CAwn0+ErKO2ZgABl6agKI0anDg3NHCJwVo3wNPUKZthB4NwEJyrPvGsTou9cOs28TQIy22VCzGIGjB4TaczgsFmTchcAEBM4KUt43EwQPF6YkgBidMiw4dYbAETGqtvzZ0DOUsYEABEzgqChFjJocOQR+JYAY/ZUHTwsT2CNG/TUUIbpwoHEdAhMROCJIEaMTBQ5XpiKAGJ0qHDhzhcAnMWohemUMbCEAAQiUBPTD7R5RihgtyfEMgd8JIEZZCWkIbIlRDoE0YWYiEJiWwKe/cc97aNrQ4dhgAojRwQFg+H4EamJUZfxKvh9jeoIABD4TaH0lRYx+ZkeLdxJAjL4z7ilnHcWo7nnxpwwzk4LAEgRqX0l5Jy0ROpwcQAAxOgA6Q95DwGIUIXoPX3qFAASOE4hfSRGjx/lh8Q4CiNF3xPkVs7QI5dfyrwg3k4TAMgQsSBGjy4QMRx8mgBh9GDjD3UcAEXofW3qGAASuE+AddZ0hPeQkgBjNGVdmBQEIQAACEIAABJYggBhdIkw4CQEIQAACEIAABHISQIzmjCuzggAEIAABCEAAAksQQIwuESachAAEIAABCEAAAjkJIEZzxpVZQQACEIAABCAAgSUIIEaXCBNOQgACEIAABCAAgZwEEKM548qsIAABCEAAAhCAwBIEEKNLhAknIQABCEAAAhCAQE4CiNGccWVWEIAABCAAAQhAYAkCiNElwoSTEIAABCAAAQhAICcBxGjOuDIrCEAAAhCAAAQgsAQBxOgSYcJJCEAAAhCAAAQgkJMAYjRnXJkVBCAAAQhAAAIQWIIAYnSJMOEkBCAAAQhAAAIQyEkAMZozrswKAhCAAAQgAAEILEEAMbpEmHASAhCAAAQgAAEI5CSAGM0ZV2YFAQhAAAIQgAAEliCAGF0iTDgJAQhAAAIQgAAEchJAjOaMK7OCAAQgAAEIQAACSxBAjC4RJpyEAAQgAAEIQAACOQkgRnPGlVlBAAIQgAAEIACBJQggRpcIE05CAAIQgAAEIACBnAQQoznjyqwgAAEIQAACEIDAEgQQo0uECSchAAEIQAACEIBATgKI0ZxxZVYQgAAEIAABCEBgCQKI0SXChJMQgAAEIAABCEAgJwHEaM64MisIQAACEIAABCCwBAHE6BJhwkkIQAACEIAABCCQkwBiNGdcmRUEIAABCEAAAhBYggBidIkw4WQGAv/67bcvXaR5CDgmzufx7OvnWmG9jIkI3MdwX33UGd8jqzBFjN4cKS/OWn7z0L907/F/KUz+4DnX8hFT//Hjx9ef//LXEUNPO2YtNio7mtxPy65V7
5goLro/M3ZrzKvl9u1qP7PbOzYxH+nz3/7+j5/7dKa10OIRmZX3LZvZyu33KL96jq89q4t0nABi9DizQxZ6sflQ0YHnQ0/lTyaN+/SYT86vNpYPFTP3i2LUy8LroObrG8t0CJiJY+NYqW5vcj+t9b1VrzrZ2Y8j4+7172w7+3TWfhU7xdxx9zoYGQe/N0b6sDd29tXcnKt8Bf81T89hlL8aX9x6jG/+e+NHu28CiNFvFrfcaYHr8obzS6LHwj/isF/2R2xWb1tj7niMmJteVIoD6XcCioVf3o5LjNleTu5HtrX0qV42HldtZ0lvWS9+N4m94qB5Kx8VixnXQmtN2lfzMkMxVdkKyXMYGW+tuR7j+322AvfZfESMPhQRLfTaC0Ibsbzipoh1tZe06lXuJFuVKfe96v3C92ZRm+zJbCJPz9ls1MbtlMe2sY3Z2165610XbWv1joP72LK3T+47+lmO4/5WyzUPzU9XTF6rLotzjzGK9rZxf27n51q9+1eu9mpTstWz6tSP+4x2Lm+1sb3qa/audz9xfJXJJye3VT+ZkmPjOZmlWcR5m6PbKne9GZZ8ynr36z7KetnHteB6jx37173G9dixbTmOx+uZl76qb5fZT+W6oo/RB9fV6j0f17lP2btuq3+3kb37MJc4rtdAbKN2tlMe+5Ktn92f597y0X177q3xPVaco23jWHE823geHoN8PwHE6H5Wl1pqEWuhxo2iDlXmBawF7Xsv+rJebbzwZW8bOxfH0b3bl/2Uftg+U645at5mGefmOvMxR+Vqr0ttynpzK+vczmPZ3vF0/3pWUr1tYl/R3vWxD5XZhzifFe9LBpqXrhizyMY8lJcMa4xi/7X6yKwct+w/+qF7p7JfPavecYw++z7aq6zsI9qqrvTF9fZh9dzz9zzER2Wep7nF/BND16uPaOf7st4+uN7jR3vZuN72LlP7sg/773ndkWt8++r+Xabc/snveG/fYlk5N7Up6zWWypSUl3P2s2x1uc/a+LFv27l9aV/Wy1ZtVG5/5JPsVeYUx3Cd28e6sv9y/NhWdUrK7a9z9+PxyfcTQIzuZ3WpZW3jqEMvXi9+LXqVecPU6rXwXe8NZudq47hMbZX0/IZklmLoy+zMRG3MoyyLbVXntrpXnS7d+3KszLi017PatOzdp2Ojdp5DHNv1q+fm4Ng4Fwcl1+tZ977UTjzcJjJy2c/K/+tjq97t3EZjOMUyj+0YuZ18if7pXmWq/2Rfq1eZ+459lePaxwy5GXoukUu8dwxUZuatejOM9epf5ZGl65W73mVqG+9jvcd3mZ69Lu3nzw5v/o/909jxUrku+aXcPJRHP2O956v2StFez663vep93+q/Vu+2ZZ8qj3VlvefhNsrdv9oqOQ66V31tfrZ3G89D5a6zrexdHss8Vlkvf1RGOk4AMXqc2SkLLeRy46ijcvF6YyjfU6+Frz6ctsZxn26bPY8sde9LjGJd5OAYterV1i8ltYkpxqJm73rba6zaFft0P7LJlswhvrxV5hjE+zh31bdsYjvft/pxvfIaZ8erFiP1qWRf3Vfs55O9621b5q73+Oo7YyrjuZeh2plRi4vqdcVkO8WwZr93fK8B9V3rJ45517199ZzienSd10/M5a/rldeS6+M81c5juL60d33kq/E8Ztmf+ynL7VOrXu09ltvGONiu9M9tnatednF83buvyM33rlebmPRclsV67tsEEKNtNl1rahtHA2hxx8VbbqBP9bJVG6e947h95rz2kvF8P3Eq42A75VsvIseiZu9YRXvd+5KN7mNyP2V5bLPqveYkJppjTF7zqtd9q942ZlS2c32rH9crdx+Rc4yXyuNl29K/2M8ne9e7rzJ3vfrUva7oX9l+1WfH2/57vppr5Bn5+96MbFvm5hbLY5++b9Xb3uOVue0++eF2vXP7L7+Uor+uU176redYX/NL9erPfauN7r3mW/aud3u1c1+qi/Vq437iONGfVn30xe1jHGynf
Cu5XRxf95GlnnWpre9dH/uulcV67tsEEKNtNt1qvIi9CfXspDItYCdvDG8g13sDqFzt3Yfbx3qP4z6Vl/3Euqz3JRszct7iJDu1EWff61n3flbuOMT+HMuaveo0ppLt3Z/7V+7kMtno3pfrV881HzFpcazVi1cZN7VzWY3RVr3bx35VplSWqdzxMnuP62fbuK3r4zhqs6f/uF5kH1l5vAy5GMU1EOdpnuZurnsZ2t78S46uL/uXTx7L9+5DbXUpucyx8vNTcbH/GlfJz8pVJr98b9/ic6vefZX17l99+V55THHNx7E8vutt435cr9wpjlPW61n+6XKd7tW/kuujD7rXFVNtfNmqvDZ/1SmVY6tcY6ucdJwAYvQ4s0MWXtDeJF7A3hDl4vXGiPVuE229IZSXfau97e1s2aasd7tMuVmam3OVu67kYNbioDrbOHd7c3e58xiX0l5961Jq2bv/aGufNIbrM8TJDOL8So6Rg+uUx+R+Yn3ktFUf+49+qLxmpzFi37KJz7pXmWyVok++j+1dFvNoq76cZKd2yt3GdSvnmqOuGgPNK5b7PjJwWczN+FMMXV+Or2fVuT72rXuPr1zP5Rw8/t1x0Tj2VWNFf3Vv/0r/VafUqndftXqV2VZj+/lnYfg1fvSlNb77qdVHe40T28iuNj/HoVWvPkp/a3NU33H8OLbta/X20yzI9xNAjO5ndaqlFm7rUoeuc+feYMqVvLjdTrnrSpujbWyfNTfLyMX3ritZul5M3MZlymMq68u+ynr34z7K+mhf1tm29MF9rZi35hg5aF5lu7JebSIf3ZdtWn2Udn62fWmn+pjc3mVuv9fe7d2P7eKcyr7VNrZz/aq55+68NjfXOY9tdO9y5bFOTI7Wuy/3U9rHMdy2zG17d0zsWxyvLPOzfYxt5Z/Lncf60lZtnFwX28f+dO827lt52T7auJ3a1GxdX/rgcueur/Ud63RfG8c+lnUudx+xPt67nnw/AcToflZDWlqMDhmcQSEAAQhAAAIQgMDNBBCjNwO+2j1i9CpB7CEAAQhAAAIQmJkAYnTm6PzfrxDKXw1M7jLuQQACEIAABCAAgd0EEKO7UdEQAhCAAAQgAAEIQKA3AcRob6L0BwEIQAACEIAABCCwmwBidDcqGkIAAhCAAAQgAAEI9CaAGO1NlP4gAAEIQAACEIAABHYTQIzuRkVDCEAAAhCAAAQgAIHeBBCjvYnSHwQgAAEIQCA5gfiPvPv/bOT8yX8Bxn48OWby0A6ZHmJ0CHYGhQAEIAABCKxLwCLQ/6tM/5vYen5SGOr/uvT0mOtGbV7PEaPzxgbPIAABCEAAAtMSkOjUJTEoMernJx1GjD5J+76xEKP3saVnCEAAAhCAQHoCEoQSozFZpJZfTt3G9RaT/rKqcqVYb5soen0vu/JyH7Yjn58AYnT+GOEhBCAAAQhAYFoCFobRQQtU1UXBqXslCUYLUJW5vcpU53q3l00cxzYu87NyxGiMxBr3iNE14oSXEIAABCAAgSkJSABKRMbkMuVOFp96ronNWBbvbW/h6Wfl6l/lCNBIZb17xOh6McNjCEAAAhCAwDQEWiJR4jOK0diuJjY1IdvU6qO9J2/Rixg1kTVzxOiaccNrCEAAAhCAwBQELAijMy47IkajANW9xGfL3mOpXu0QoyayZo4YXTNueA0BCEAAAhAYSkAC0KJRXzT97NxfOe1k/LKpNqq34LSojP2oLtarTldMslOZco+rnLQWAcToWvHCWwhAAAIQgMBwAhJ8FosWiX6WMIwi0c6q3mIyilHbWVS6vfqwTWzjeuW1NojRSGiNe8ToGnHCSwhAAAIQgMBUBCT6WpccdZ2djs+6t/h0ufIyxTrff2pT1vM8PwHE6PwxwkMIQAACEIBAKgISlhajqSbGZE4RQIyewoYRBCAAAQhAAAJnCUiM6lfv+jU7CQKIUdYABCAAAQhAAAIQgMAwAojRYegZGAIQgAAEIAABCEAAMcoagAAEI
AABCEAAAhAYRgAxOgw9A0MAAhCAAAQgAAEIIEZZAxCAAAQgAAEIQAACwwggRoehZ2AIQAACEIAABCAAAcQoawACEIAABCAAAQhAYBgBxOgw9AwMAQhAAAIQgAAEIIAYZQ1AAAIQgAAEIAABCAwjgBgdhp6BIQABCEAAAhCAAAQQo6wBCEAAAhCAAAQgAIFhBBCjw9AzMAQgAAEIQAACEIAAYpQ1AAEIQAACEIAABCAwjABidBh6BoYABCAAAQhAAAIQQIyyBiAAAQhAAAIQgAAEhhFAjA5Dz8AQgAAEIAABCEAAAohR1gAEIAABCEAAAhCAwDACiNFh6BkYAhCAAAQgAAEIQAAxyhqAAAQgAAEIQAACEBhGADE6DD0DQwACEIAABCAAAQggRlkDEIAABCAAAQhAAALDCCBGh6FnYAhAAAIQgAAEIAABxChrAAIQgAAEIAABCEBgGAHE6DD0DAwBCEAAAhCAAAQggBhlDUAAAhCAAAQgAAEIDCOAGB2GnoEhAAEIQAACEIAABBCjrAEIQAACEIAABCAAgWEEEKPD0DMwBCAAAQhAAAIQgABilDUAAQhAAAIQgAAEIDCMAGJ0GHoGhgAEIAABCEAAAhBAjLIGIAABCEAAAhCAAASGEUCMDkPPwBCAAAQgAAEIQAACiFHWAAQgAAEIQAACEIDAMAKI0WHoGRgCEIAABCAAAQhAADHKGoAABCAAAQhAAAIQGEYAMToMPQNDAAIQgAAEIAABCCBGWQMQgAAEIAABCEAAAsMI/C98qj2yfGFR6gAAAABJRU5ErkJggg==) # Use autoencoder to get the threshold for anomaly detection It is important to note that the mapping function learned by an autoencoder is specific to the training data distribution, i.e., an autoencoder will typically not succeed at reconstructing data which is significantly different from data it has seen during training. This property of learning a distribution specific mapping (as opposed to a generic linear mapping) is particularly useful for the task of anomaly detection. Applying an autoencoder for anomaly detection follows the general principle of first modeling normal behaviour and subsequently generating an anomaly score for a new data sample. To model normal behaviour we train the autoencoder on a normal data sample. This way, the model learns a mapping function that successfully reconstructs normal data samples with a very small reconstruction error (the difference between the actual sample and the version reconstructed by the model). This behavior is replicated at test time, where the reconstruction error is small for normal data samples, and large for abnormal data samples. 
To identify anomalies, we use the reconstruction error score as an anomaly score and flag samples with reconstruction errors above a given threshold.

```
import numpy as np
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers
from matplotlib import pyplot as plt
```

load the data, here we used benchmark data in kaggle

```
# NAB (Numenta Anomaly Benchmark) timeseries hosted on GitHub.
master_url_root = "https://raw.githubusercontent.com/numenta/NAB/master/data/"

# "small_noise" series: contains no anomalies -- used later as the training data.
df_small_noise_url_suffix = "artificialNoAnomaly/art_daily_small_noise.csv"
df_small_noise_url = master_url_root + df_small_noise_url_suffix
df_small_noise = pd.read_csv(
    df_small_noise_url, parse_dates=True, index_col="timestamp"
)

# "daily_jumpsup" series: contains an anomalous jump -- used later as the test data.
df_daily_jumpsup_url_suffix = "artificialWithAnomaly/art_daily_jumpsup.csv"
df_daily_jumpsup_url = master_url_root + df_daily_jumpsup_url_suffix
df_daily_jumpsup = pd.read_csv(
    df_daily_jumpsup_url, parse_dates=True, index_col="timestamp"
)

print(df_small_noise.head())

print(df_daily_jumpsup.head())
```

Visualize the data: time series with anomalies and without anomalies

```
# Normal (training) series.
fig, ax = plt.subplots()
df_small_noise.plot(legend=False, ax=ax)
plt.show()

# Anomalous (test) series.
fig, ax = plt.subplots()
df_daily_jumpsup.plot(legend=False, ax=ax)
plt.show()
```

Wrap up the function to preprocess the time series data, create sequences using raw data through time_steps set in advance.

```
# Normalize and save the mean and std we get,
# for normalizing test data.

# Window length for one model input sample
# (presumably 288 = one day of 5-minute readings -- TODO confirm).
TIME_STEPS = 288

# Generated training sequences for use in the model.
def create_sequences(values, time_steps=TIME_STEPS): output = [] for i in range(len(values) - time_steps + 1): output.append(values[i : (i + time_steps)]) return np.stack(output) def AE_anomaly_detection(x_train, x_test, time_steps=TIME_STEPS): training_mean = x_train.mean() training_std = x_train.std() df_training_value = (x_train - training_mean) / training_std print("Number of training samples:", len(df_training_value)) x_train = create_sequences(df_training_value.values) print("Training input shape: ", x_train.shape) model = keras.Sequential( [ layers.Input(shape=(x_train.shape[1], x_train.shape[2])), layers.Conv1D( filters=32, kernel_size=7, padding="same", strides=2, activation="relu" ), layers.Dropout(rate=0.2), layers.Conv1D( filters=16, kernel_size=7, padding="same", strides=2, activation="relu" ), layers.Conv1DTranspose( filters=16, kernel_size=7, padding="same", strides=2, activation="relu" ), layers.Dropout(rate=0.2), layers.Conv1DTranspose( filters=32, kernel_size=7, padding="same", strides=2, activation="relu" ), layers.Conv1DTranspose(filters=1, kernel_size=7, padding="same"), ] ) model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse") history = model.fit( x_train, x_train, epochs=50, batch_size=128, validation_split=0.1, callbacks=[ keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, mode="min") ], ) # Get train MAE loss. x_train_pred = model.predict(x_train) train_mae_loss = np.mean(np.abs(x_train_pred - x_train), axis=1) plt.hist(train_mae_loss, bins=50) plt.xlabel("Train MAE loss") plt.ylabel("No of samples") plt.show() # Get reconstruction loss threshold. threshold = np.max(train_mae_loss) print("Reconstruction error threshold: ", threshold) ##### test ... test_mean = x_test.mean() test_std = x_test.std() ####### prepare the test data df_test_value = (x_test - test_mean) / test_std #fig, ax = plt.subplots() #df_test_value.plot(legend=False, ax=ax) #plt.show() # Create sequences from test values. 
x_test = create_sequences(df_test_value.values) print("Test input shape: ", x_test.shape) # Get test MAE loss. x_test_pred = model.predict(x_test) test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1) test_mae_loss = test_mae_loss.reshape((-1)) plt.hist(test_mae_loss, bins=50) plt.xlabel("test MAE loss") plt.ylabel("No of samples") plt.show() # Detect all the samples which are anomalies. anomalies = test_mae_loss > threshold print("Number of anomaly samples: ", np.sum(anomalies)) #print("Indices of anomaly samples: ", np.where(anomalies)) return anomalies ##### plot anomalies anomalies = AE_anomaly_detection(df_small_noise, df_daily_jumpsup, time_steps=TIME_STEPS) # data i is an anomaly if samples [(i - timesteps + 1) to (i)] are anomalies test_mean = df_daily_jumpsup.mean() test_std = df_daily_jumpsup.std() df_test_value = (df_daily_jumpsup - test_mean) / test_std anomalous_data_indices = [] for data_idx in range(TIME_STEPS - 1, len(df_test_value) - TIME_STEPS + 1): if np.all(anomalies[data_idx - TIME_STEPS + 1 : data_idx]): anomalous_data_indices.append(data_idx) df_subset = df_daily_jumpsup.iloc[anomalous_data_indices] fig, ax = plt.subplots() df_daily_jumpsup.plot(legend=False, ax=ax) df_subset.plot(legend=False, ax=ax, color="r") plt.show() ``` ## Pros and Cons of Autoencoder Algorithm for Anomaly Detection **Pros**: - An autoencoeder can perform tasks that a linear program cannot. When an element of the neural network fails, it can continue without any problem with their parallel nature. - An autoencoder constructed by neural network learns and does not need to be reprogrammed. - It can be implemented in any application. **Cons**: - The neural network needs training to operate. - The architecture of a neural network is different from the architecture of microprocessors therefore needs to be emulated. - Requires high processing time for large neural network
github_jupyter
# Robot Class

In this project, we'll be localizing a robot in a 2D grid world. The basis for simultaneous localization and mapping (SLAM) is to gather information from a robot's sensors and motions over time, and then use information about measurements and motion to re-construct a map of the world.

### Uncertainty

As you've learned, robot motion and sensors have some uncertainty associated with them. For example, imagine a car driving up hill and down hill; the speedometer reading will likely overestimate the speed of the car going up hill and underestimate the speed of the car going down hill because it cannot perfectly account for gravity. Similarly, we cannot perfectly predict the *motion* of a robot. A robot is likely to slightly overshoot or undershoot a target location.

In this notebook, we'll look at the `robot` class that is *partially* given to you for the upcoming SLAM notebook. First, we'll create a robot and move it around a 2D grid world. Then, **you'll be tasked with defining a `sense` function for this robot that allows it to sense landmarks in a given world**! It's important that you understand how this robot moves, senses, and how it keeps track of different landmarks that it sees in a 2D grid world, so that you can work with its movement and sensor data.

---

Before we start analyzing robot motion, let's load in our resources and define the `robot` class. You can see that this class initializes the robot's position and adds measures of uncertainty for motion. You'll also see a `sense()` function which is not yet implemented, and you will learn more about that later in this notebook.
# import some resources
import numpy as np
try:
    # Plotting is only used by the visualization cells further below;
    # guard the import so the robot class itself also works headless.
    import matplotlib.pyplot as plt
except ImportError:
    plt = None
import random
# %matplotlib inline  (notebook magic)


# the robot class
class robot:
    """A robot living in a square 2D grid world.

    The robot tracks its own pose (self.x, self.y), moves with additive
    uniform motion noise, and senses landmarks with additive uniform
    measurement noise.
    """

    # --------
    # init:
    #   creates a robot with the specified parameters and initializes
    #   the location (self.x, self.y) to the center of the world
    #
    def __init__(self, world_size=100.0, measurement_range=30.0,
                 motion_noise=1.0, measurement_noise=1.0):
        self.world_size = world_size
        self.measurement_range = measurement_range
        self.x = world_size / 2.0
        self.y = world_size / 2.0
        self.motion_noise = motion_noise
        self.measurement_noise = measurement_noise
        self.landmarks = []
        self.num_landmarks = 0

    # returns a random float in [-1.0, 1.0)
    def rand(self):
        return random.random() * 2.0 - 1.0

    # --------
    # move: attempts to move robot by dx, dy. If outside world
    #       boundary, then the move does nothing and instead returns failure
    #
    def move(self, dx, dy):
        """Try to move by (dx, dy) with motion noise; return success flag."""
        x = self.x + dx + self.rand() * self.motion_noise
        y = self.y + dy + self.rand() * self.motion_noise

        if x < 0.0 or x > self.world_size or y < 0.0 or y > self.world_size:
            return False
        else:
            self.x = x
            self.y = y
            return True

    # --------
    # sense: returns x- and y- distances to landmarks within visibility range
    #        because not all landmarks may be in this range, the list of measurements
    #        is of variable length. Set measurement_range to -1 if you want all
    #        landmarks to be visible at all times
    #
    def sense(self):
        """Measure the noisy (dx, dy) offsets to every visible landmark.

        Returns a list of [landmark_index, dx, dy] entries, one per
        landmark within self.measurement_range of the robot (all landmarks
        when measurement_range is -1). Each axis gets its own noise sample
        in [-measurement_noise, measurement_noise).
        """
        measurements = []
        for index in range(self.num_landmarks):
            # distance from the robot to this landmark
            dx = self.landmarks[index][0] - self.x
            dy = self.landmarks[index][1] - self.y
            # independent measurement noise on each axis
            dx += self.rand() * self.measurement_noise
            dy += self.rand() * self.measurement_noise
            # a range of -1 means unlimited visibility; otherwise keep only
            # landmarks whose offsets are within range on both axes
            if self.measurement_range == -1 or \
                    (abs(dx) <= self.measurement_range and abs(dy) <= self.measurement_range):
                measurements.append([index, dx, dy])
        return measurements

    # --------
    # make_landmarks:
    # make random landmarks located in the world
    #
    def make_landmarks(self, num_landmarks):
        self.landmarks = []
        for i in range(num_landmarks):
            self.landmarks.append([round(random.random() * self.world_size),
                                   round(random.random() * self.world_size)])
        self.num_landmarks = num_landmarks

    # called when print(robot) is called; prints the robot's location
    def __repr__(self):
        return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y)


# ## Define a world and a robot
#
# Next, let's instantiate a robot object. As you can see in `__init__` above,
# the robot class takes in a number of parameters including a world size and
# some values that indicate the sensing and movement capabilities of the robot.
In the next example, we define a small 10x10 square world, a measurement range that is half that of the world and small values for motion and measurement noise. These values will typically be about 10 times larger, but we just want to demonstrate this behavior on a small scale. You are also free to change these values and note what happens as your robot moves!

```
world_size = 10.0 # size of world (square)
measurement_range = 5.0 # range at which we can sense landmarks
motion_noise = 0.2 # noise in robot motion
measurement_noise = 0.2 # noise in the measurements

# instantiate a robot, r
r = robot(world_size, measurement_range, motion_noise, measurement_noise)

# print out the location of r
print(r)
```

## Visualizing the World

In the given example, we can see/print out that the robot is in the middle of the 10x10 world at (x, y) = (5.0, 5.0), which is exactly what we expect!

However, it's kind of hard to imagine this robot in the center of a world, without visualizing the grid itself, and so in the next cell we provide a helper visualization function, `display_world`, that will display a grid world in a plot and draw a red `o` at the location of our robot, `r`. The details of how this function works can be found in the `helpers.py` file in the home directory; you do not have to change anything in this `helpers.py` file.

```
# import helper function
from helpers import display_world

# define figure size
plt.rcParams["figure.figsize"] = (5,5)

# call display_world and display the robot in its grid world
print(r)
display_world(int(world_size), [r.x, r.y])
```

## Movement

Now you can really picture where the robot is in the world! Next, let's call the robot's `move` function. We'll ask it to move some distance `(dx, dy)` and we'll see that this motion is not perfect by the placement of our robot `o` and by the printed out position of `r`.
Try changing the values of `dx` and `dy` and/or running this cell multiple times; see how the robot moves and how the uncertainty in robot motion accumulates over multiple movements.

#### For a `dx` = 1, does the robot move *exactly* one spot to the right? What about `dx` = -1? What happens if you try to move the robot past the boundaries of the world?

```
# choose values of dx and dy (negative works, too)
dx = 1
dy = 2
r.move(dx, dy)

# print out the exact location
print(r)

# display the world after movement, note that this is the same call as before
# the robot tracks its own movement
display_world(int(world_size), [r.x, r.y])
```

## Landmarks

Next, let's create landmarks, which are measurable features in the map. You can think of landmarks as things like notable buildings, or something smaller such as a tree, rock, or other feature.

The robot class has a function `make_landmarks` which randomly generates locations for the number of specified landmarks. Try changing `num_landmarks` or running this cell multiple times to see where these landmarks appear. We have to pass these locations as a third argument to the `display_world` function and the list of landmark locations is accessed similar to how we find the robot position `r.landmarks`.

Each landmark is displayed as a purple `x` in the grid world, and we also print out the exact `[x, y]` locations of these landmarks at the end of this cell.

```
# create any number of landmarks
num_landmarks = 3
r.make_landmarks(num_landmarks)

# print out our robot's exact location
print(r)

# display the world including these landmarks
display_world(int(world_size), [r.x, r.y], r.landmarks)

# print the locations of the landmarks
print('Landmark locations [x,y]: ', r.landmarks)
```

## Sense

Once we have some landmarks to sense, we need to be able to tell our robot to *try* to sense how far they are away from it. It will be up to you to code the `sense` function in our robot class.
The `sense` function uses only internal class parameters and returns a list of the measured/sensed x and y distances to the landmarks it senses within the specified `measurement_range`.

### TODO: Implement the `sense` function

Follow the `##TODO's` in the class code above to complete the `sense` function for the robot class. Once you have tested out your code, please **copy your complete `sense` code to the `robot_class.py` file in the home directory**. By placing this complete code in the `robot_class` Python file, we will be able to reference this class in a later notebook.

The measurements have the format, `[i, dx, dy]` where `i` is the landmark index (0, 1, 2, ...) and `dx` and `dy` are the measured distance between the robot's location (x, y) and the landmark's location (x, y). This distance will not be perfect since our sense function has some associated `measurement noise`.

---

In the example in the following cell, we have given our robot a range of `5.0` so any landmarks that are within that range of our robot's location, should appear in a list of measurements. Not all landmarks are guaranteed to be in our visibility range, so this list will be variable in length.

*Note: the robot's location is often called the **pose** or `[Pxi, Pyi]` and the landmark locations are often written as `[Lxi, Lyi]`. You'll see this notation in the next notebook.*

```
# try to sense any surrounding landmarks
measurements = r.sense()

# this will print out an empty list if `sense` has not been implemented
print(measurements)
```

**Refer back to the grid map above. Do these measurements make sense to you? Are all the landmarks captured in this list (why/why not)?**

---

## Data

#### Putting it all together

To perform SLAM, we'll collect a series of robot sensor measurements and motions, in that order, over a defined period of time. Then we'll use only this data to re-construct the map of the world with the robot and landmark locations.
You can think of SLAM as performing what we've done in this notebook, only backwards. Instead of defining a world and robot and creating movement and sensor data, it will be up to you to use movement and sensor measurements to reconstruct the world!

In the next notebook, you'll see this list of movements and measurements (which you'll use to re-construct the world) listed in a structure called `data`. This is an array that holds sensor measurements and movements in a specific order, which will be useful to call upon when you have to extract this data and form constraint matrices and vectors.

`data` is constructed over a series of time steps as follows:

```
data = []

# after a robot first senses, then moves (one time step)
# that data is appended like so:
data.append([measurements, [dx, dy]])

# for our example movement and measurement
print(data)

# in this example, we have only created one time step (0)
time_step = 0

# so you can access robot measurements:
print('Measurements: ', data[time_step][0])

# and its motion for a given time step:
print('Motion: ', data[time_step][1])
```

### Final robot class

Before moving on to the last notebook in this series, please make sure that you have copied your final, completed `sense` function into the `robot_class.py` file in the home directory. We will be using this file in the final implementation of SLAM!
github_jupyter
### Model features - augmentation (6 image generated) - 2 dropout layer - adam optimizer with learning rate decay ``` NAME = '2dropout-augmentation' LOAD = True import sys import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import cv2 import random from tqdm import tqdm from matplotlib import pyplot as plt from sklearn.model_selection import train_test_split import keras from keras import optimizers from keras.applications.vgg16 import VGG16 from keras.models import Model from keras.layers import Dense, Dropout, Flatten, BatchNormalization # import third-party library sys.path.append('./my_lib/') from data_augmentation import DataAugmentation # import data csv_train = pd.read_csv('../input/labels.csv') csv_test = pd.read_csv('../input/sample_submission.csv') # read training CSV csv_train.head(10) # read test csv csv_test.head(10) # reduce dataset for test # csv_train = csv_train.head(200) # csv_test = csv_test.head(200) # Generate Labels targets_series = pd.Series(csv_train['breed']) # print(targets_series) one_hot = pd.get_dummies(targets_series, sparse = True) labels = np.asarray(one_hot) n_check = random.randint(0, len(labels)-1) print(csv_train['breed'][n_check], 'is encoded as', ''.join((str(i) for i in labels[n_check]))) im_size = 90 x_train = [] y_train = [] x_test = [] for i, (f, breed) in enumerate(tqdm(csv_train.values)): img = cv2.imread('../input/train/{}.jpg'.format(f)) x_train.append(cv2.resize(img, (im_size, im_size))) y_train.append(labels[i]) ``` Use external module to execute data augmentation. 
The module execute:
- [ ] Inversion
- [ ] Sobel derivative
- [ ] Scharr derivative
- [ ] Laplacian <!--**(error not used for now)**-->
- [ ] Blur
- [ ] Gaussian blur [disable]
- [ ] Median blur
- [ ] Bilateral blur
- [x] Horizontal flips
- [x] Rotation

```
# Augment the training set: only horizontal flips and rotation are enabled.
# NOTE(review): this appends to x_train while iterating DataAugmentation(x_train, ...);
# if DataAugmentation walks its input lazily, the newly appended images would be
# re-augmented -- confirm that it snapshots its input up front.
for i, images in enumerate(tqdm(DataAugmentation(x_train, options={'inverse': False,
 'sobel_derivative': False,
 'scharr_derivative': False,
 'laplacian': False,
 'blur': False,
 'gaussian_blur': False,
 'median_blur': False,
 'bilateral_blur': False,
 'horizontal_flips': True,
 'rotation': True,
 # 'rotation_config': [(10,1.2)],
 'shuffle_result': False}))):
    for image in images:
        if i == 4:
            # preview the augmented versions of sample #4
            plt.imshow(image, cmap = 'gray', interpolation = 'bicubic')
            plt.show()
        # each augmented image keeps the label of its source image
        x_train.append(image)
        y_train.append(y_train[i])

print('dataset became:', len(x_train))

# check train: show a random training image with its one-hot label
n_check = random.randint(0, len(y_train)-1)
print('label:', ''.join((str(i) for i in y_train[n_check])))
plt.imshow(x_train[n_check], cmap = 'gray', interpolation = 'bicubic')
plt.show()

# load and resize the test images
for f in tqdm(csv_test['id'].values):
    img = cv2.imread('../input/test/{}.jpg'.format(f))
    x_test.append(cv2.resize(img, (im_size, im_size)))

# build np array and normalise them to [0, 1]
x_train_raw = np.array(x_train, np.float32) / 255.
y_train_raw = np.array(y_train, np.uint8)
x_test_raw = np.array(x_test, np.float32) / 255.
print("x_train shape:", x_train_raw.shape)
print("y_train shape:", y_train_raw.shape)
print("x_test shape:", x_test_raw.shape)

num_classes = y_train_raw.shape[1]
# breed names come from the sample-submission header (skip the 'id' column)
classes = csv_test.columns.values[1:]
```

Using the __stratify__ parameter on __treain_test_split__ the split should be equally distributed per classes.

**TODO:** Add cross validation

```
X_train, X_valid, Y_train, Y_valid = train_test_split(x_train_raw, y_train_raw, test_size=0.20, random_state=42, stratify=y_train_raw)
```

**TODO:** try the optimizer _rmsprop_ instead of _adam_.
```
# Create the base pre-trained model: VGG16 convolutional base with ImageNet
# weights, without its fully-connected classifier head.
base_model = VGG16(weights="imagenet", include_top=False, input_shape=(im_size, im_size, 3))

# Add new top layers for this classification task
x = base_model.output
x = Flatten()(x)
x = Dense(4096, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(4096, activation='relu')(x)
x = Dropout(0.5)(x)
x = BatchNormalization()(x)
# one softmax output per breed
predictions = Dense(num_classes, activation='softmax')(x)

# This is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)

# First: train only the top layers (which were randomly initialized);
# freezing the VGG16 base preserves its pre-trained features.
for layer in base_model.layers:
    layer.trainable = False

model.compile(loss='categorical_crossentropy', 
              optimizer='rmsprop', 
              metrics=['accuracy'])

# Save the best weights (by validation loss) after every epoch and stop
# early once val_loss has not improved for 5 epochs.
callbacks_list = [
    keras.callbacks.ModelCheckpoint('../output/weights.' + NAME + '.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1),
    keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1)]

model.summary()

# load previously backed-up weights to resume training
if LOAD:
    model.load_weights('../output/weights.2dropout-augmentation.40-1.75.hdf5')

history = model.fit(X_train, Y_train, epochs=40, batch_size=48, validation_data=(X_valid, Y_valid), callbacks=callbacks_list, verbose=1)

# list all data in history
print(history.history.keys())

# summarize history for accuracy
# NOTE(review): the 'acc'/'val_acc' keys match older Keras releases; newer
# versions name them 'accuracy'/'val_accuracy' -- confirm installed version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# predict class probabilities for the test images
preds = model.predict(x_test_raw, verbose=1)

# save prediction to csv in the Kaggle submission format
frame = pd.DataFrame(preds, index=csv_test['id'].tolist(), columns=classes)
frame.to_csv("../output/predicted-{}.csv".format(NAME), index_label='id')
frame.head(10) # check predict n_check = random.randint(0, len(x_test_raw)-1) plt.imshow(x_test_raw[n_check], cmap = 'gray_r', interpolation = 'bicubic') plt.show() pre = model.predict(np.array([x_test_raw[n_check]])) arg_max = np.argmax(pre) print(np.max(pre), arg_max, classes[arg_max]) ```
github_jupyter
# 09 - Decision Trees by [Alejandro Correa Bahnsen](albahnsen.com/) version 0.2, May 2016 ## Part of the class [Machine Learning for Risk Management](https://github.com/albahnsen/ML_RiskManagement) This notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US). Special thanks goes to [Kevin Markham](https://github.com/justmarkham) *Adapted from Chapter 8 of [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/)* Why are we learning about decision trees? - Can be applied to both regression and classification problems - Many useful properties - Very popular - Basis for more sophisticated models - Have a different way of "thinking" than the other models we have studied ## Lesson objectives Students will be able to: - Explain how a decision tree is created - Build a decision tree model in scikit-learn - Tune a decision tree model and explain how tuning impacts the model - Interpret a tree diagram - Describe the key differences between regression and classification trees - Decide whether a decision tree is an appropriate model for a given problem # Part 1: Regression trees Major League Baseball player data from 1986-87: - **Years** (x-axis): number of years playing in the major leagues - **Hits** (y-axis): number of hits in the previous year - **Salary** (color): low salary is blue/green, high salary is red/yellow ![Salary data](images/salary_color.png) Group exercise: - The data above is our **training data**. - We want to build a model that predicts the Salary of **future players** based on Years and Hits. - We are going to "segment" the feature space into regions, and then use the **mean Salary in each region** as the predicted Salary for future players. - Intuitively, you want to **maximize** the similarity (or "homogeneity") within a given region, and **minimize** the similarity between different regions. 
Rules for segmenting: - You can only use **straight lines**, drawn one at a time. - Your line must either be **vertical or horizontal**. - Your line **stops** when it hits an existing line. ![Salary regions](images/salary_regions.png) Above are the regions created by a computer: - $R_1$: players with **less than 5 years** of experience, mean Salary of **\$166,000 ** - $R_2$: players with **5 or more years** of experience and **less than 118 hits**, mean Salary of **\$403,000 ** - $R_3$: players with **5 or more years** of experience and **118 hits or more**, mean Salary of **\$846,000 ** **Note:** Years and Hits are both integers, but the convention is to use the **midpoint** between adjacent values to label a split. These regions are used to make predictions on **out-of-sample data**. Thus, there are only three possible predictions! (Is this different from how **linear regression** makes predictions?) Below is the equivalent regression tree: ![Salary tree](images/salary_tree.png) The first split is **Years < 4.5**, thus that split goes at the top of the tree. When a splitting rule is **True**, you follow the left branch. When a splitting rule is **False**, you follow the right branch. For players in the **left branch**, the mean Salary is \$166,000, thus you label it with that value. (Salary has been divided by 1000 and log-transformed to 5.11.) For players in the **right branch**, there is a further split on **Hits < 117.5**, dividing players into two more Salary regions: \$403,000 (transformed to 6.00), and \$846,000 (transformed to 6.74). ![Salary tree annotated](images/salary_tree_annotated.png) **What does this tree tell you about your data?** - Years is the most important factor determining Salary, with a lower number of Years corresponding to a lower Salary. - For a player with a lower number of Years, Hits is not an important factor determining Salary. 
- For a player with a higher number of Years, Hits is an important factor determining Salary, with a greater number of Hits corresponding to a higher Salary. **Question:** What do you like and dislike about decision trees so far? ## Building a regression tree by hand Your **training data** is a tiny dataset of [used vehicle sale prices](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/vehicles_train.csv). Your goal is to **predict price** for testing data. 1. Read the data into a Pandas DataFrame. 2. Explore the data by sorting, plotting, or split-apply-combine (aka `group_by`). 3. Decide which feature is the most important predictor, and use that to create your first splitting rule. - Only binary splits are allowed. 4. After making your first split, split your DataFrame into two parts, and then explore each part to figure out what other splits to make. 5. Stop making splits once you are convinced that it strikes a good balance between underfitting and overfitting. - Your goal is to build a model that generalizes well. - You are allowed to split on the same variable multiple times! 6. Draw your tree, labeling the leaves with the mean price for the observations in that region. - Make sure nothing is backwards: You follow the **left branch** if the rule is true, and the **right branch** if the rule is false. ## How does a computer build a regression tree? **Ideal approach:** Consider every possible partition of the feature space (computationally infeasible) **"Good enough" approach:** recursive binary splitting 1. Begin at the top of the tree. 2. For **every feature**, examine **every possible cutpoint**, and choose the feature and cutpoint such that the resulting tree has the lowest possible mean squared error (MSE). Make that split. 3. Examine the two resulting regions, and again make a **single split** (in one of the regions) to minimize the MSE. 4. 
Keep repeating step 3 until a **stopping criterion** is met: - maximum tree depth (maximum number of splits required to arrive at a leaf) - minimum number of observations in a leaf ### Demo: Choosing the ideal cutpoint for a given feature ``` # vehicle data import pandas as pd import zipfile with zipfile.ZipFile('../datasets/vehicles_train.csv.zip', 'r') as z: f = z.open('vehicles_train.csv') train = pd.io.parsers.read_table(f, index_col=False, sep=',') # before splitting anything, just predict the mean of the entire dataset train['prediction'] = train.price.mean() train year = 0 train['pred'] = train.loc[train.year<year, 'price'].mean() train.loc[train.year>=year, 'pred'] = train.loc[train.year>=year, 'price'].mean() (((train['price'] - train['pred'])**2).mean()) ** 0.5 train_izq = train.loc[train.year<0].copy() train_izq.year.unique() def error_año(train, year): train['pred'] = train.loc[train.year<year, 'price'].mean() train.loc[train.year>=year, 'pred'] = train.loc[train.year>=year, 'price'].mean() return round(((((train['price'] - train['pred'])**2).mean()) ** 0.5), 2) def error_miles(train, miles): train['pred'] = train.loc[train.miles<miles, 'price'].mean() train.loc[train.miles>=miles, 'pred'] = train.loc[train.miles>=miles, 'price'].mean() return round(((((train['price'] - train['pred'])**2).mean()) ** 0.5), 2) ``` **Recap:** Before every split, this process is repeated for every feature, and the feature and cutpoint that produces the lowest MSE is chosen. 
## Building a regression tree in scikit-learn ``` # encode car as 0 and truck as 1 train['vtype'] = train.vtype.map({'car':0, 'truck':1}) # define X and y feature_cols = ['year', 'miles', 'doors', 'vtype'] X = train[feature_cols] y = train.price # instantiate a DecisionTreeRegressor (with random_state=1) from sklearn.tree import DecisionTreeRegressor treereg = DecisionTreeRegressor(random_state=1) treereg # use leave-one-out cross-validation (LOOCV) to estimate the RMSE for this model import numpy as np from sklearn.cross_validation import cross_val_score scores = cross_val_score(treereg, X, y, cv=14, scoring='mean_squared_error') np.mean(np.sqrt(-scores)) ``` ## What happens when we grow a tree too deep? - Left: Regression tree for Salary **grown deeper** - Right: Comparison of the **training, testing, and cross-validation errors** for trees with different numbers of leaves ![Salary tree grown deep](images/salary_tree_deep.png) The **training error** continues to go down as the tree size increases (due to overfitting), but the lowest **cross-validation error** occurs for a tree with 3 leaves. 
## Tuning a regression tree Let's try to reduce the RMSE by tuning the **max_depth** parameter: ``` # try different values one-by-one treereg = DecisionTreeRegressor(max_depth=1, random_state=1) scores = cross_val_score(treereg, X, y, cv=14, scoring='mean_squared_error') np.mean(np.sqrt(-scores)) ``` Or, we could write a loop to try a range of values: ``` # list of values to try max_depth_range = range(1, 8) # list to store the average RMSE for each value of max_depth RMSE_scores = [] # use LOOCV with each value of max_depth for depth in max_depth_range: treereg = DecisionTreeRegressor(max_depth=depth, random_state=1) MSE_scores = cross_val_score(treereg, X, y, cv=14, scoring='mean_squared_error') RMSE_scores.append(np.mean(np.sqrt(-MSE_scores))) %matplotlib inline import matplotlib.pyplot as plt # plot max_depth (x-axis) versus RMSE (y-axis) plt.plot(max_depth_range, RMSE_scores) plt.xlabel('max_depth') plt.ylabel('RMSE (lower is better)') # max_depth=3 was best, so fit a tree using that parameter treereg = DecisionTreeRegressor(max_depth=3, random_state=1) treereg.fit(X, y) # "Gini importance" of each feature: the (normalized) total reduction of error brought by that feature pd.DataFrame({'feature':feature_cols, 'importance':treereg.feature_importances_}) ``` ## Creating a tree diagram ``` # create a Graphviz file from sklearn.tree import export_graphviz export_graphviz(treereg, out_file='tree_vehicles.dot', feature_names=feature_cols) # At the command line, run this to convert to PNG: # dot -Tpng tree_vehicles.dot -o tree_vehicles.png ``` ![Tree for vehicle data](images/tree_vehicles.png) Reading the internal nodes: - **samples:** number of observations in that node before splitting - **mse:** MSE calculated by comparing the actual response values in that node against the mean response value in that node - **rule:** rule used to split that node (go left if true, go right if false) Reading the leaves: - **samples:** number of observations in that node - 
**value:** mean response value in that node - **mse:** MSE calculated by comparing the actual response values in that node against "value" ## Making predictions for the testing data ``` # read the testing data with zipfile.ZipFile('../datasets/vehicles_test.csv.zip', 'r') as z: f = z.open('vehicles_test.csv') test = pd.io.parsers.read_table(f, index_col=False, sep=',') test['vtype'] = test.vtype.map({'car':0, 'truck':1}) test ``` **Question:** Using the tree diagram above, what predictions will the model make for each observation? ``` # use fitted model to make predictions on testing data X_test = test[feature_cols] y_test = test.price y_pred = treereg.predict(X_test) y_pred # calculate RMSE from sklearn.metrics import mean_squared_error np.sqrt(mean_squared_error(y_test, y_pred)) ``` # Part 2: Classification trees **Example:** Predict whether Barack Obama or Hillary Clinton will win the Democratic primary in a particular county in 2008: ![Obama-Clinton decision tree](images/obama_clinton_tree.jpg) **Questions:** - What are the observations? How many observations are there? - What is the response variable? - What are the features? - What is the most predictive feature? - Why does the tree split on high school graduation rate twice in a row? - What is the class prediction for the following county: 15% African-American, 90% high school graduation rate, located in the South, high poverty, high population density? - What is the predicted probability for that same county? 
## Comparing regression trees and classification trees |regression trees|classification trees| |---|---| |predict a continuous response|predict a categorical response| |predict using mean response of each leaf|predict using most commonly occuring class of each leaf| |splits are chosen to minimize MSE|splits are chosen to minimize Gini index (discussed below)| ## Splitting criteria for classification trees Common options for the splitting criteria: - **classification error rate:** fraction of training observations in a region that don't belong to the most common class - **Gini index:** measure of total variance across classes in a region ### Example of classification error rate Pretend we are predicting whether someone buys an iPhone or an Android: - At a particular node, there are **25 observations** (phone buyers), of whom **10 bought iPhones and 15 bought Androids**. - Since the majority class is **Android**, that's our prediction for all 25 observations, and thus the classification error rate is **10/25 = 40%**. Our goal in making splits is to **reduce the classification error rate**. 
Let's try splitting on gender: - **Males:** 2 iPhones and 12 Androids, thus the predicted class is Android - **Females:** 8 iPhones and 3 Androids, thus the predicted class is iPhone - Classification error rate after this split would be **5/25 = 20%** Compare that with a split on age: - **30 or younger:** 4 iPhones and 8 Androids, thus the predicted class is Android - **31 or older:** 6 iPhones and 7 Androids, thus the predicted class is Android - Classification error rate after this split would be **10/25 = 40%** The decision tree algorithm will try **every possible split across all features**, and choose the split that **reduces the error rate the most.** ### Example of Gini index Calculate the Gini index before making a split: $$1 - \left(\frac {iPhone} {Total}\right)^2 - \left(\frac {Android} {Total}\right)^2 = 1 - \left(\frac {10} {25}\right)^2 - \left(\frac {15} {25}\right)^2 = 0.48$$ - The **maximum value** of the Gini index is 0.5, and occurs when the classes are perfectly balanced in a node. - The **minimum value** of the Gini index is 0, and occurs when there is only one class represented in a node. - A node with a lower Gini index is said to be more "pure". 
Evaluating the split on **gender** using Gini index: $$\text{Males: } 1 - \left(\frac {2} {14}\right)^2 - \left(\frac {12} {14}\right)^2 = 0.24$$ $$\text{Females: } 1 - \left(\frac {8} {11}\right)^2 - \left(\frac {3} {11}\right)^2 = 0.40$$ $$\text{Weighted Average: } 0.24 \left(\frac {14} {25}\right) + 0.40 \left(\frac {11} {25}\right) = 0.31$$ Evaluating the split on **age** using Gini index: $$\text{30 or younger: } 1 - \left(\frac {4} {12}\right)^2 - \left(\frac {8} {12}\right)^2 = 0.44$$ $$\text{31 or older: } 1 - \left(\frac {6} {13}\right)^2 - \left(\frac {7} {13}\right)^2 = 0.50$$ $$\text{Weighted Average: } 0.44 \left(\frac {12} {25}\right) + 0.50 \left(\frac {13} {25}\right) = 0.47$$ Again, the decision tree algorithm will try **every possible split**, and will choose the split that **reduces the Gini index (and thus increases the "node purity") the most.** ### Comparing classification error rate and Gini index - Gini index is generally preferred because it will make splits that **increase node purity**, even if that split does not change the classification error rate. - Node purity is important because we're interested in the **class proportions** in each region, since that's how we calculate the **predicted probability** of each class. - scikit-learn's default splitting criteria for classification trees is Gini index. Note: There is another common splitting criteria called **cross-entropy**. It's numerically similar to Gini index, but slower to compute, thus it's not as popular as Gini index. 
## Building a classification tree in scikit-learn We'll build a classification tree using the Titanic data: ``` # read in the data with zipfile.ZipFile('../datasets/titanic.csv.zip', 'r') as z: f = z.open('titanic.csv') titanic = pd.read_csv(f, sep=',', index_col=0) # encode female as 0 and male as 1 titanic['Sex'] = titanic.Sex.map({'female':0, 'male':1}) # fill in the missing values for age with the median age titanic.Age.fillna(titanic.Age.median(), inplace=True) # create a DataFrame of dummy variables for Embarked embarked_dummies = pd.get_dummies(titanic.Embarked, prefix='Embarked') embarked_dummies.drop(embarked_dummies.columns[0], axis=1, inplace=True) # concatenate the original DataFrame and the dummy DataFrame titanic = pd.concat([titanic, embarked_dummies], axis=1) # print the updated DataFrame titanic.head() ``` - **Survived:** 0=died, 1=survived (response variable) - **Pclass:** 1=first class, 2=second class, 3=third class - What will happen if the tree splits on this feature? - **Sex:** 0=female, 1=male - **Age:** numeric value - **Embarked:** C or Q or S ``` # define X and y feature_cols = ['Pclass', 'Sex', 'Age', 'Embarked_Q', 'Embarked_S'] X = titanic[feature_cols] y = titanic.Survived # fit a classification tree with max_depth=3 on all data from sklearn.tree import DecisionTreeClassifier treeclf = DecisionTreeClassifier(max_depth=3, random_state=1) treeclf.fit(X, y) # create a Graphviz file export_graphviz(treeclf, out_file='tree_titanic.dot', feature_names=feature_cols) # At the command line, run this to convert to PNG: # dot -Tpng tree_titanic.dot -o tree_titanic.png ``` ![Tree for Titanic data](images/tree_titanic.png) Notice the split in the bottom right: the **same class** is predicted in both of its leaves. That split didn't affect the **classification error rate**, though it did increase the **node purity**, which is important because it increases the accuracy of our predicted probabilities. 
``` # compute the feature importances pd.DataFrame({'feature':feature_cols, 'importance':treeclf.feature_importances_}) ``` # Part 3: Comparing decision trees with other models **Advantages of decision trees:** - Can be used for regression or classification - Can be displayed graphically - Highly interpretable - Can be specified as a series of rules, and more closely approximate human decision-making than other models - Prediction is fast - Features don't need scaling - Automatically learns feature interactions - Tends to ignore irrelevant features - Non-parametric (will outperform linear models if relationship between features and response is highly non-linear) ![Trees versus linear models](images/tree_vs_linear.png) **Disadvantages of decision trees:** - Performance is (generally) not competitive with the best supervised learning methods - Can easily overfit the training data (tuning is required) - Small variations in the data can result in a completely different tree (high variance) - Recursive binary splitting makes "locally optimal" decisions that may not result in a globally optimal tree - Doesn't tend to work well if the classes are highly unbalanced - Doesn't tend to work well with very small datasets
github_jupyter
# Location Set Covering Problem (LSCP) *Authors:* [Germano Barcelos](https://github.com/gegen07), [James Gaboardi](https://github.com/jGaboardi), [Levi J. Wolf](https://github.com/ljwolf), [Qunshan Zhao](https://github.com/qszhao) The Location Set Covering Problem was formulated by Toregas, et al. (1971). They observed that emergency services must be placed according to a response time, since there is an allowable maximum service time when deciding how to handle an emergency. Therefore they proposed a model, named LSCP, that aims to: _Minimize the number of facilities needed and locate them so that every demand area is covered within a predefined maximal service distance or time._ Church L., Murray, A. (2018) **LSCP can be written as:** $\begin{array} \displaystyle \textbf{Minimize} & \sum_{j=1}^{n}{x_j} && (1) \\ \displaystyle \textbf{Subject to:} & \sum_{j\in N_i}{x_j} \geq 1 & \forall i & (2) \\ & x_j \in \{0,1\} & \forall j & (3) \\ \end{array}$ $\begin{array} \displaystyle \textbf{Where:}\\ & & \displaystyle i & \small = & \textrm{index referencing nodes of the network as demand} \\ & & j & \small = & \textrm{index referencing nodes of the network as potential facility sites} \\ & & S & \small = & \textrm{maximal acceptable service distance or time standard} \\ & & d_{ij} & \small = & \textrm{shortest distance or travel time between nodes } i \textrm{ and } j \\ & & N_i & \small = & \{j | d_{ij} < S\} \\ & & x_j & \small = & \begin{cases} 1, \text{if a facility is located at node } j\\ 0, \text{otherwise} \\ \end{cases} \end{array}$ _This excerpt above was quoted from Church L., Murray, A. (2018)_ This tutorial solves the LSCP using a `spopt.locate.coverage.LSCP` instance that depends on a 2D array representing the costs between facility candidate sites and demand points. For that, it uses a 10x10 lattice with simulated points to calculate the costs. 
``` from spopt.locate.coverage import LSCP from spopt.locate.util import simulated_geo_points import numpy import geopandas import pulp import spaghetti from shapely.geometry import Point import matplotlib.pyplot as plt ``` Since the model needs a distance cost matrix, we should define some variables. The comments explain what each of these variables is for, except the solver. The solver, assigned below as `pulp.PULP_CBC_CMD`, is an interface to an optimization solver developed by [COIN-OR](https://github.com/coin-or/Cbc). If you want to use another optimization interface, such as Gurobi or CPLEX, see this [guide](https://coin-or.github.io/pulp/guides/how_to_configure_solvers.html) that explains how to achieve this. ``` CLIENT_COUNT = 100 # quantity demand points FACILITY_COUNT = 5 # quantity supply points MAX_COVERAGE = 8 # maximum service radius in meters # Random seeds for reproducibility CLIENT_SEED = 5 FACILITY_SEED = 6 solver = pulp.PULP_CBC_CMD(msg=False) ``` ## Lattice 10x10 Create a 10x10 lattice with 9 vertical lines in the interior. ``` lattice = spaghetti.regular_lattice((0, 0, 10, 10), 9, exterior=True) ntw = spaghetti.Network(in_data=lattice) ``` Transform the spaghetti instance into a geodataframe. ``` street = spaghetti.element_as_gdf(ntw, arcs=True) street_buffered = geopandas.GeoDataFrame( geopandas.GeoSeries(street["geometry"].buffer(0.2).unary_union), crs=street.crs, columns=["geometry"], ) ``` Plotting the network created by spaghetti, we can verify that it resembles a district with blocks and streets. ``` street.plot() ``` ## Simulate points in a network The function `simulated_geo_points` simulates points inside a network. In this case, it uses a 10x10 lattice network created using the spaghetti package. Below we use the function defined above and simulate the points inside the lattice bounds. 
``` client_points = simulated_geo_points(street_buffered, needed=CLIENT_COUNT, seed=CLIENT_SEED) facility_points = simulated_geo_points( street_buffered, needed=FACILITY_COUNT, seed=FACILITY_SEED ) ``` Plotting the 100 client and 5 facility points, we can see that the function generates dummy points within the 10x10 area created by our lattice in the previous cells. ``` fig, ax = plt.subplots(figsize=(6, 6)) street.plot(ax=ax, alpha=0.8, zorder=1, label='streets') facility_points.plot(ax=ax, color='red', zorder=2, label='facility candidate sites ($n$=5)') client_points.plot(ax=ax, color='black', label='clients points ($n$=100)') plt.legend(loc='upper left', bbox_to_anchor=(1.05, 1)) ``` ## Transform simulated points to real points To use a cost matrix or geodataframes we have to pay attention to some details. The simulated client and facility points don't belong to the network, so if we calculated the distances now we would receive a wrong result. Before calculating distances we snap the points to the network and then calculate the distances. Below we snap the points that do not spatially belong to the network and create new geodataframes of the snapped points. ``` ntw.snapobservations(client_points, "clients", attribute=True) clients_snapped = spaghetti.element_as_gdf( ntw, pp_name="clients", snapped=True ) ntw.snapobservations(facility_points, "facilities", attribute=True) facilities_snapped = spaghetti.element_as_gdf( ntw, pp_name="facilities", snapped=True ) ``` Now the plot looks more organized, as the points belong to the network. 
The network created is plotted below with facility points and clients points: ``` fig, ax = plt.subplots(figsize=(6, 6)) street.plot(ax=ax, alpha=0.8, zorder=1, label='streets') facilities_snapped.plot(ax=ax, color='red', zorder=2, label='facility candidate sites ($n$=5)') clients_snapped.plot(ax=ax, color='black', label='clients points ($n$=100)') plt.legend(loc='upper left', bbox_to_anchor=(1.05, 1)) ``` ## Calculating the cost matrix Calculate distance between clients and facilities. ``` cost_matrix = ntw.allneighbordistances( sourcepattern=ntw.pointpatterns["clients"], destpattern=ntw.pointpatterns["facilities"], ) ``` The expected result here is a Dijkstra distance between clients and facilities points, so we our case an array 2D 100x5. ``` cost_matrix ``` With ``LSCP.from_cost_matrix`` we model LSC problem to cover all demand points with $p$ facility points within `max_coverage` meters as service radius using cost matrix calculated previously. ``` lscp_from_cost_matrix = LSCP.from_cost_matrix(cost_matrix, MAX_COVERAGE) lscp_from_cost_matrix = lscp_from_cost_matrix.solve(solver) ``` Expected result is an instance of LSCP. ``` lscp_from_cost_matrix ``` ## Using GeoDataFrame With ``LSCP.from_geodataframe`` we model the LSC problem to cover all demand points with $p$ facility points within `max_coverage` meters as service radius using geodataframes without calculating the cost matrix previously. ``` lscp_from_geodataframe = LSCP.from_geodataframe( clients_snapped, facilities_snapped, "geometry", "geometry", MAX_COVERAGE, distance_metric="euclidean" ) lscp_from_geodataframe = lscp_from_geodataframe.solve(solver) ``` Expected result is an instance of LSCP. ``` lscp_from_geodataframe ``` ## Plotting the results The cell below describe the plotting of the results. For each method from LSCP class (from_cost_matrix, from_geodataframe) there is a plot displaying the facility site that was selected with a star colored and the points covered with the same color. 
Sometimes the demand points will be colored with not expected colors, it represents the coverage overlapping. ``` from matplotlib.patches import Patch import matplotlib.lines as mlines dv_colors = [ "darkcyan", "mediumseagreen", "cyan", "darkslategray", "lightskyblue", "limegreen", "darkgoldenrod", "peachpuff", "coral", "mediumvioletred", "blueviolet", "fuchsia", "thistle", "lavender", "saddlebrown", ] def plot_results(lscp, facility_points): arr_points = [] fac_sites = [] for i in range(FACILITY_COUNT): if lscp.fac2cli[i]: geom = client_points.iloc[lscp.fac2cli[i]]['geometry'] arr_points.append(geom) fac_sites.append(i) fig, ax = plt.subplots(figsize=(6, 6)) legend_elements = [] street.plot(ax=ax, alpha=1, color='black', zorder=1) legend_elements.append(mlines.Line2D( [], [], color='black', label='streets', )) facility_points.plot(ax=ax, color='brown', marker="*", markersize=80, zorder=2) legend_elements.append(mlines.Line2D( [], [], color='brown', marker="*", linewidth=0, label=f'facility sites ($n$={FACILITY_COUNT})' )) for i in range(len(arr_points)): gdf = geopandas.GeoDataFrame(arr_points[i]) label = f"coverage_points by y{fac_sites[i]}" legend_elements.append(Patch(facecolor=dv_colors[i], edgecolor="k", label=label)) gdf.plot(ax=ax, zorder=3, alpha=0.7, edgecolor="k", color=dv_colors[i], label=label) facility_points.iloc[[fac_sites[i]]].plot(ax=ax, marker="*", markersize=200 * 3.0, alpha=0.8, zorder=4, edgecolor="k", facecolor=dv_colors[i]) legend_elements.append(mlines.Line2D( [], [], color=dv_colors[i], marker="*", ms=20 / 2, markeredgecolor="k", linewidth=0, alpha=0.8, label=f"y{fac_sites[i]} facility selected", )) plt.title("LSCP", fontweight="bold") plt.legend(handles = legend_elements, loc='upper left', bbox_to_anchor=(1.05, 1)) ``` ### LSCP built from cost matrix ``` lscp_from_cost_matrix.facility_client_array() plot_results(lscp_from_cost_matrix, facility_points) ``` ### LSCP built from geodataframe ``` lscp_from_geodataframe.facility_client_array() 
plot_results(lscp_from_geodataframe, facility_points) ``` You may notice that the models are different. This result is expected, as the distance between facility and demand points is calculated with different metrics. The cost matrix is calculated with Dijkstra (network) distance, while the distance using the geodataframe is calculated with Euclidean distance. But why does it need just one facility point to cover all of those demand points? It can be explained by the nature of the problem. The problem was configured in a synthetic manner: the street network is created from a 10x10 lattice and the max_coverage parameter is 8 meters, so this result is not surprising at all. You can change the max_coverage parameter to 2 meters and you will obtain a different result, but be aware of how many points will be covered. ## References - [Church, R. L., & Murray, A. T. (2018). Location covering models: History, applications and advancements (1st edition 2018). Springer](https://www.springer.com/gb/book/9783319998459) - [Toregas, C., Swain, R., ReVelle, C., &amp; Bergman, L. (1971). The location of emergency service facilities. Operations Research, 19(6), 1363–1373.](https://pubsonline.informs.org/doi/abs/10.1287/opre.19.6.1363)
github_jupyter
``` !pip3 install tqdm from post_processing import * from const import ROOT from const import * ``` vert_path = os.path.join(".", 'vertical_hamming') vert_path1 = os.path.join(".", 'vertical_hamming_res') vert_file_list = glob.glob(os.path.join(vert_path, '*.png'))+ glob.glob(os.path.join(vert_path1, '*.png')) df = pd.read_csv("0861_34.csv") df = df.sort_values(by=['id']) df3 = pd.read_csv("0861_50.csv") df3 = df3.sort_values(by=['id']) df4 = pd.read_csv("0863.csv") df4 = df4.sort_values(by=['id']) df2 = pd.read_csv("10folds_ne.csv") len(vert_file_list) ``` df58 = pd.read_csv("final/optimized_stable_all_sigmoids.csv") df256 = pd.read_csv("256_arith_mean.csv") df2 = df256.copy() for i in tqdm(range(len(df2))): df2.loc[df2['id'] == df256.iloc[i]['id'],'rle_mask']=df58['rle_mask'][i] df2.to_csv("final/ocnet256_resnet256_0450350101.csv",index=False) vert_path = os.path.join(".", 'vertical_hamming') vert_path1 = os.path.join(".", 'vertical_hamming_res') vert_file_list = glob.glob(os.path.join(vert_path, '*.png'))+ glob.glob(os.path.join(vert_path1, '*.png')) df = pd.read_csv("pb_0865.csv") df = df.sort_values(by=['id']) #df2 = pd.read_csv("256_arith_mean.csv") ct=0 for i in tqdm(range(len(df))): #print(row[1]["rle_mask"] is float) try: s=0 if not(type(df.iloc[i]["rle_mask"]) is float): #print("s") s=s+1 #print(s) if s>0: ct=ct+1 df2.loc[df2['id'] == df.iloc[i]['id'],'rle_mask']=' '.join(map(str, "")) except ValueError: a=1 print("setting {} masks to empty".format(ct)) ct=0 for i in tqdm(range(len(df2))): #print(row[1]["rle_mask"] is float) try: s=0 if type(df2.iloc[i]["rle_mask"]) is float or df2.iloc[i]["rle_mask"] == '' : #print("s") s=s+1 #print(s) if s>0: ct=ct+1 #df2.loc[df2['id'] == df.iloc[i]['id'],'rle_mask']=' '.join(map(str, "")) except ValueError: a=1 print("{} empty masks in total".format(ct)) """ 100%|██████████| 18000/18000 [00:43<00:00, 410.06it/s] 7%|▋ | 1255/18000 [00:00<00:02, 6269.62it/s] setting 7614 masks to empty 100%|██████████| 18000/18000 
[00:02<00:00, 6322.42it/s] 7619 empty masks in total """ vert_path = os.path.join(".", 'vertical_hamming') vert_path1 = os.path.join(".", 'vertical_hamming_res') vert_file_list = glob.glob(os.path.join(vert_path, '*.png'))+ glob.glob(os.path.join(vert_path1, '*.png')) df = pd.read_csv("0871.csv") df = df.sort_values(by=['id']) #df2 = pd.read_csv("256_arith_mean.csv") ct=0 for i in tqdm(range(len(df))): #print(row[1]["rle_mask"] is float) try: s=0 if type(df.iloc[i]["rle_mask"]) is float: #print("s") s=s+1 #print(s) if s>0: ct=ct+1 df2.loc[df2['id'] == df.iloc[i]['id'],'rle_mask']=' '.join(map(str, "")) except ValueError: a=1 print("setting {} masks to empty".format(ct)) ct=0 for i in tqdm(range(len(df2))): #print(row[1]["rle_mask"] is float) try: s=0 if type(df2.iloc[i]["rle_mask"]) is float or df2.iloc[i]["rle_mask"] == '' : #print("s") s=s+1 #print(s) if s>0: ct=ct+1 #df2.loc[df2['id'] == df.iloc[i]['id'],'rle_mask']=' '.join(map(str, "")) except ValueError: a=1 print("{} empty masks in total".format(ct)) """ 100%|██████████| 18000/18000 [00:43<00:00, 410.06it/s] 7%|▋ | 1255/18000 [00:00<00:02, 6269.62it/s] setting 7614 masks to empty 100%|██████████| 18000/18000 [00:02<00:00, 6322.42it/s] 7619 empty masks in total """ df2=df2.sort_values(by='id') #df2.to_csv("final/test.csv",index=False) df2.to_csv("final/ocnet256_resnet256_0450350101_0865empty.csv",index=False) ct=0 #df2.to_csv("test.csv",index=False) df=pd.read_csv("final/probe.csv") #df=pd.read_csv("final/optimized_ocnet256_resnet256_0450350101_0871empty_leak4++_stage2+++_stage3++.csv") for i in tqdm(range(len(df))): #print(row[1]["rle_mask"] is float) try: s=0 if type(df.iloc[i]["rle_mask"]) is float : #print("s") s=s+1 #print(s) if s>0: ct=ct+1 #df2.loc[df2['id'] == df.iloc[i]['id'],'rle_mask']=' '.join(map(str, "")) except ValueError: a=1 print("{} empty masks in total".format(ct)) #df.to_csv("128_256_bc34_leak4.csv",index=False) ct=0 for i in range(len(df)): #print(row[1]["rle_mask"] is float) try: s=0 if 
type(df.iloc[i]["rle_mask"]) is float: #print("s") s=s+1 if type(df3.iloc[i]["rle_mask"]) is float: s=s+1 if type(df4.iloc[i]["rle_mask"]) is float: s=s+1 #print(s) if s>1: ct=ct+1 df2.loc[df2['id'] == df4.iloc[i]['id'],'rle_mask']=' '.join(map(str, "")) except ValueError: a=1 ct type(df.loc[3]["rle_mask"]) is float df = pd.read_csv("0871.csv") df = df.sort_values(by=['id']) i=0 for row in df.iterrows(): #print(row[1]["rle_mask"] is float) try: if type(row[1]["rle_mask"]) is float: #print(1) i=i+1 df.loc[df['id'] == row[1]['id'],'rle_mask']=' '.join(map(str, "1 2")) else: df.loc[df['id'] == row[1]['id'],'rle_mask']=' '.join(map(str, "")) except ValueError: a=1 i #df.to_csv("binary_prediction_0871.csv",index=False) df2.to_csv("80_stable_models_0871.csv",index=False) ROOT='/Users/alexanderliao/data' df=df2 for cand in vert_file_list: mask=cand[cand.find("/",2)+1:cand.find(".png_",2)] needs_treatment=cand[cand.find(".png_",2)+5:len(cand)-4] img = Image.open(ROOT+'/train/masks/'+mask+'.png').convert('LA') arr= np.array(img)[:,:,0]/255 new_code=rle_encoding(arr) #img.show() df.loc[df['id'] == needs_treatment,'rle_mask']=' '.join(map(str, new_code)) df.to_csv("ohem_ne_majvote_vert_corrected.csv",index=False) cand[cand.find("/",2)+1:cand.find(".png_",2)] cand[cand.find(".png_",2)+5:len(cand)-4] cand.find(".png_",2) a=rle_encoding(arr) arr.shape df.loc[df['id'] == 'b3680c53d9','rle_mask']=' '.join(map(str, a)) df.to_csv("submission_0861_vert_corrected.csv",index=False) ' '.join(map(str, a)) masks=[] for cand in vert_file_list: masks.append(cand[cand.find("/",2)+1:cand.find(".png_",2)]+".png") bad_masks =[ '1eaf42beee.png' ,'33887a0ae7.png' ,'33dfce3a76.png' ,'3975043a11.png' ,'39cd06da7d.png' ,'483b35d589.png' ,'49336bb17b.png' ,'4ef0559016.png' ,'4fbda008c7.png' ,'4fdc882e4b.png' ,'50d3073821.png' ,'53e17edd83.png' ,'5b217529e7.png' ,'5f98029612.png' ,'608567ed23.png' ,'62aad7556c.png' ,'62d30854d7.png' ,'6460ce2df7.png' ,'6bc4c91c27.png' ,'7845115d01.png' 
def diff(first, second):
    """Return the items of *first* that do not appear in *second*.

    Order and duplicates in *first* are preserved; *second* is treated
    purely as a membership set.
    """
    exclude = set(second)  # O(1) membership tests instead of list scans
    return [item for item in first if item not in exclude]
github_jupyter
# Character Sequence to Sequence In this notebook, we'll build a model that takes in a sequence of letters, and outputs a sorted version of that sequence. We'll do that using what we've learned so far about Sequence to Sequence models. This notebook was updated to work with TensorFlow 1.1 and builds on the work of Dave Currie. Check out Dave's post [Text Summarization with Amazon Reviews](https://medium.com/towards-data-science/text-summarization-with-amazon-reviews-41801c2210b). <img src="images/sequence-to-sequence.jpg"/> ## Dataset The dataset lives in the /data/ folder. At the moment, it is made up of the following files: * **letters_source.txt**: The list of input letter sequences. Each sequence is its own line. * **letters_target.txt**: The list of target sequences we'll use in the training process. Each sequence here is a response to the input sequence in letters_source.txt with the same line number. ``` import numpy as np import time import helper source_path = 'data/letters_source.txt' target_path = 'data/letters_target.txt' source_sentences = helper.load_data(source_path) target_sentences = helper.load_data(target_path) ``` Let's start by examining the current state of the dataset. `source_sentences` contains the entire input sequence file as text delimited by newline symbols. ``` source_sentences[:50].split('\n') ``` `target_sentences` contains the entire output sequence file as text delimited by newline symbols. Each line corresponds to the line from `source_sentences`. `target_sentences` contains a sorted characters of the line. 
def extract_character_vocab(data):
    """Build integer<->character lookup tables for *data*.

    The four special tokens always occupy ids 0-3, followed by every
    distinct character found in the newline-separated text (set order,
    so non-special ids are not guaranteed stable across runs).

    Returns (int_to_vocab, vocab_to_int).
    """
    specials = ['<PAD>', '<UNK>', '<GO>', '<EOS>']
    unique_chars = {ch for line in data.split('\n') for ch in line}
    vocabulary = specials + list(unique_chars)
    int_to_vocab = dict(enumerate(vocabulary))
    vocab_to_int = {token: idx for idx, token in int_to_vocab.items()}
    return int_to_vocab, vocab_to_int
def get_model_inputs():
    """Declare the graph's feed placeholders.

    The tensor names are fixed strings so they can be looked up again
    after the graph is restored from a checkpoint.

    Returns (input_data, targets, lr, target_sequence_length,
    max_target_sequence_length, source_sequence_length).
    """
    # Token-id matrices, shape (batch, time) — both dims dynamic.
    input_data = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    # Scalar learning rate fed at train time.
    lr = tf.placeholder(tf.float32, name='learning_rate')
    # Per-example (unpadded) sequence lengths.
    target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
    source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
    # Longest target in the current batch; caps dynamic decoding.
    max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len')
    return input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length
def encoding_layer(input_data, rnn_size, num_layers, source_sequence_length,
                   source_vocab_size, encoding_embedding_size):
    """Embed the source ids and run them through a stacked-LSTM encoder.

    Returns (enc_output, enc_state); the caller keeps only enc_state and
    hands it to the decoder as its initial state.
    """
    # Learn an embedding table and look the input ids up in one call.
    enc_embed_input = tf.contrib.layers.embed_sequence(
        input_data, source_vocab_size, encoding_embedding_size)

    def _lstm_cell(size):
        # Fixed seed keeps the weight initialisation reproducible.
        return tf.contrib.rnn.LSTMCell(
            size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))

    stacked_cells = tf.contrib.rnn.MultiRNNCell(
        [_lstm_cell(rnn_size) for _ in range(num_layers)])
    enc_output, enc_state = tf.nn.dynamic_rnn(
        stacked_cells, enc_embed_input,
        sequence_length=source_sequence_length, dtype=tf.float32)
    return enc_output, enc_state
# Process the input we'll feed to the decoder
def process_decoder_input(target_data, vocab_to_int, batch_size):
    """Shift each target row right by one step for teacher forcing.

    Drops the last token of every row and prepends the <GO> id, so the
    decoder never sees the final target token as an input.
    """
    # Everything except the last time step of every row in the batch.
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    go_column = tf.fill([batch_size, 1], vocab_to_int['<GO>'])
    return tf.concat([go_column, trimmed], 1)
def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size,
                   target_sequence_length, max_target_sequence_length, enc_state, dec_input):
    """Build the training and inference decoders (shared weights).

    Returns (training_decoder_output, inference_decoder_output), the
    first element of each tf.contrib.seq2seq.dynamic_decode result.

    NOTE(review): reads the module-level ``batch_size`` when tiling the
    inference start tokens — confirm it matches the batch actually fed.
    """
    # 1. Decoder embedding: map target ids to dense vectors.
    target_vocab_size = len(target_letter_to_int)
    dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)

    # 2. Stacked LSTM decoder cell (fixed seed for reproducible init).
    def make_cell(rnn_size):
        return tf.contrib.rnn.LSTMCell(
            rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))

    dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])

    # 3. Dense layer translating each decoder step into vocab logits.
    output_layer = Dense(target_vocab_size,
                         kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))

    # 4. Training decoder: fed the ground-truth targets at each step.
    with tf.variable_scope("decode"):
        training_helper = tf.contrib.seq2seq.TrainingHelper(
            inputs=dec_embed_input,
            sequence_length=target_sequence_length,
            time_major=False)
        training_decoder = tf.contrib.seq2seq.BasicDecoder(
            dec_cell, training_helper, enc_state, output_layer)
        training_decoder_output = tf.contrib.seq2seq.dynamic_decode(
            training_decoder, impute_finished=True,
            maximum_iterations=max_target_sequence_length)[0]

    # 5. Inference decoder: greedily feeds back its own predictions and
    #    reuses the variables created by the training decoder above.
    with tf.variable_scope("decode", reuse=True):
        start_tokens = tf.tile(
            tf.constant([target_letter_to_int['<GO>']], dtype=tf.int32),
            [batch_size], name='start_tokens')
        inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
            dec_embeddings, start_tokens, target_letter_to_int['<EOS>'])
        inference_decoder = tf.contrib.seq2seq.BasicDecoder(
            dec_cell, inference_helper, enc_state, output_layer)
        inference_decoder_output = tf.contrib.seq2seq.dynamic_decode(
            inference_decoder, impute_finished=True,
            maximum_iterations=max_target_sequence_length)[0]

    return training_decoder_output, inference_decoder_output


def seq2seq_model(input_data, targets, lr, target_sequence_length,
                  max_target_sequence_length, source_sequence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers):
    """Hook the encoder and decoder together into one graph.

    BUG FIX: the original ignored its ``enc_embedding_size`` /
    ``dec_embedding_size`` parameters and read the module-level
    ``encoding_embedding_size`` / ``decoding_embedding_size`` globals
    instead; the parameters are now honoured. The existing call site
    passes those same globals, so behaviour there is unchanged.

    NOTE(review): still reads module-level ``target_letter_to_int`` and
    ``batch_size``; threading them through would change the signature.
    """
    # Encoder: keep only the final state to seed the decoder.
    _, enc_state = encoding_layer(input_data, rnn_size, num_layers,
                                  source_sequence_length, source_vocab_size,
                                  enc_embedding_size)
    # Shift targets right and prepend <GO> for teacher forcing.
    dec_input = process_decoder_input(targets, target_letter_to_int, batch_size)
    training_decoder_output, inference_decoder_output = decoding_layer(
        target_letter_to_int, dec_embedding_size, num_layers, rnn_size,
        target_sequence_length, max_target_sequence_length, enc_state, dec_input)
    return training_decoder_output, inference_decoder_output
``` # Build the graph train_graph = tf.Graph() # Set the graph to default to ensure that it is ready for training with train_graph.as_default(): # Load the model inputs input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_model_inputs() # Create the training and inference logits training_decoder_output, inference_decoder_output = seq2seq_model(input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length, len(source_letter_to_int), len(target_letter_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers) # Create tensors for the training logits and inference logits training_logits = tf.identity(training_decoder_output.rnn_output, 'logits') inference_logits = tf.identity(inference_decoder_output.sample_id, name='predictions') # Create the weights for sequence_loss masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks') with tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( training_logits, targets, masks) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) ``` ## Get Batches There's little processing involved when we retreive the batches. 
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad sentences with <PAD> so that each sentence of a batch has the same length.

    Raises ValueError on an empty batch (max of empty sequence).
    """
    max_sentence = max(len(sentence) for sentence in sentence_batch)
    return [sentence + [pad_int] * (max_sentence - len(sentence))
            for sentence in sentence_batch]


def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
    """Yield (padded targets, padded sources, target lengths, source lengths).

    A trailing partial batch (len(sources) % batch_size leftovers) is
    dropped, matching the training loop's fixed-batch assumption.
    """
    for batch_i in range(0, len(sources) // batch_size):
        start_i = batch_i * batch_size
        sources_batch = sources[start_i:start_i + batch_size]
        targets_batch = targets[start_i:start_i + batch_size]
        pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
        pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
        # Lengths are measured AFTER padding, as the loss mask expects.
        pad_targets_lengths = [len(target) for target in pad_targets_batch]
        pad_source_lengths = [len(source) for source in pad_sources_batch]
        yield pad_targets_batch, pad_sources_batch, pad_targets_lengths, pad_source_lengths
``` # Split data to training and validation sets train_source = source_letter_ids[batch_size:] train_target = target_letter_ids[batch_size:] valid_source = source_letter_ids[:batch_size] valid_target = target_letter_ids[:batch_size] (valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size, source_letter_to_int['<PAD>'], target_letter_to_int['<PAD>'])) display_step = 20 # Check training loss after every 20 batches checkpoint = "best_model.ckpt" with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(1, epochs+1): for batch_i, (targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate( get_batches(train_target, train_source, batch_size, source_letter_to_int['<PAD>'], target_letter_to_int['<PAD>'])): # Training step _, loss = sess.run( [train_op, cost], {input_data: sources_batch, targets: targets_batch, lr: learning_rate, target_sequence_length: targets_lengths, source_sequence_length: sources_lengths}) # Debug message updating us on the status of the training if batch_i % display_step == 0 and batch_i > 0: # Calculate validation cost validation_loss = sess.run( [cost], {input_data: valid_sources_batch, targets: valid_targets_batch, lr: learning_rate, target_sequence_length: valid_targets_lengths, source_sequence_length: valid_sources_lengths}) print('Epoch {:>3}/{} Batch {:>4}/{} - Loss: {:>6.3f} - Validation loss: {:>6.3f}' .format(epoch_i, epochs, batch_i, len(train_source) // batch_size, loss, validation_loss[0])) # Save Model saver = tf.train.Saver() saver.save(sess, checkpoint) print('Model Trained and Saved') ``` ## Prediction ``` def source_to_seq(text): '''Prepare the text for the model''' sequence_length = 7 return [source_letter_to_int.get(word, source_letter_to_int['<UNK>']) for word in text]+ [source_letter_to_int['<PAD>']]*(sequence_length-len(text)) input_sentence = 'hello' text = 
source_to_seq(input_sentence) checkpoint = "./best_model.ckpt" loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(checkpoint + '.meta') loader.restore(sess, checkpoint) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('predictions:0') source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0') target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0') #Multiply by batch_size to match the model's input parameters answer_logits = sess.run(logits, {input_data: [text]*batch_size, target_sequence_length: [len(text)]*batch_size, source_sequence_length: [len(text)]*batch_size})[0] pad = source_letter_to_int["<PAD>"] print('Original Text:', input_sentence) print('\nSource') print(' Word Ids: {}'.format([i for i in text])) print(' Input Words: {}'.format(" ".join([source_int_to_letter[i] for i in text]))) print('\nTarget') print(' Word Ids: {}'.format([i for i in answer_logits if i != pad])) print(' Response Words: {}'.format(" ".join([target_int_to_letter[i] for i in answer_logits if i != pad]))) ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. # Create Image In this notebook, we show the following steps for deploying a web service using AzureML: - Create an image - Test image locally ``` import pandas as pd from utilities import text_to_json, get_auth from azureml.core.model import Model from azureml.core.workspace import Workspace from azureml.core.conda_dependencies import CondaDependencies from dotenv import set_key, get_key, find_dotenv env_path = find_dotenv(raise_error_if_not_found=True) ``` AML will use the following information to create an image, provision a cluster and deploy a service. Replace the values in the following cell with your information. ``` image_name = get_key(env_path, 'image_name') ``` ## Get workspace Load existing workspace from the config file. ``` ws = Workspace.from_config(auth=get_auth(env_path)) print(ws.name, ws.resource_group, ws.location, sep="\n") ``` ## Load model ``` model_name = 'question_match_model' model_version = int(get_key(env_path, 'model_version')) model = Model(ws, name=model_name, version=model_version) print(model.name, model.version) ``` ## Create an image We will now modify the `score.py` created in the previous notebook for the `init()` function to use the model we registered to the workspace earlier. 
``` %%writefile score.py import sys import pandas as pd import json from duplicate_model import DuplicateModel import logging import timeit as t from azureml.core.model import Model sys.path.append('./scripts/') def init(): logger = logging.getLogger("scoring_script") global model model_name = 'question_match_model' model_path = Model.get_model_path(model_name) questions_path = './data_folder/questions.tsv' start = t.default_timer() model = DuplicateModel(model_path, questions_path) end = t.default_timer() loadTimeMsg = "Model loading time: {0} ms".format(round((end-start)*1000, 2)) logger.info(loadTimeMsg) def run(body): logger = logging.getLogger("scoring_script") json_load_text = json.loads(body) text_to_score = json_load_text['input'] start = t.default_timer() resp = model.score(text_to_score) end = t.default_timer() logger.info("Prediction took {0} ms".format(round((end-start)*1000, 2))) return(json.dumps(resp)) ``` Let's specifiy the conda and pip dependencies for the image. ``` conda_pack = ["scikit-learn==0.19.1", "pandas==0.23.3"] requirements = ["lightgbm==2.1.2", "azureml-defaults==1.0.10"] lgbmenv = CondaDependencies.create(conda_packages=conda_pack, pip_packages=requirements) with open("lgbmenv.yml", "w") as f: f.write(lgbmenv.serialize_to_string()) from azureml.core.image import ContainerImage image_config = ContainerImage.image_configuration( execution_script="score.py", runtime="python", conda_file="lgbmenv.yml", description="Image with lightgbm model", tags={"area": "text", "type": "lightgbm"}, dependencies=[ "./data_folder/questions.tsv", "./duplicate_model.py", "./scripts/item_selector.py", ], ) image = ContainerImage.create( name=image_name, # this is the model object models=[model], image_config=image_config, workspace=ws, ) %%time image.wait_for_creation(show_output = True) print(image.name, image.version) image_version = str(image.version) set_key(env_path, "image_version", image_version) ``` You can find the logs of image creation in the 
following location. ``` image.image_build_log_uri ``` ## Test image locally Now, let's use one of the duplicate questions to test our image. ``` dupes_test_path = './data_folder/dupes_test.tsv' dupes_test = pd.read_csv(dupes_test_path, sep='\t', encoding='latin1') text_to_score = dupes_test.iloc[0,4] text_to_score jsontext = text_to_json(text_to_score) %%time image.run(input_data=jsontext) ``` ## Conclusion We have created a docker Image using AzureML and registred this image on Azure Container Registry (ACR). This docker image encapsulates a trained machine learning model and scoring scripts. In the next step, we can take this image and deploy it on the compute target of your choice: Azure Kubernetes Service (AKS) Cluster or Azure IoT Edge.
github_jupyter
# Weight Initialization In this lesson, you'll learn how to find good initial weights for a neural network. Having good initial weights can place the neural network close to the optimal solution. This allows the neural network to come to the best solution quicker. ## Testing Weights ### Dataset To see how different weights perform, we'll test on the same dataset and neural network. Let's go over the dataset and neural network. We'll be using the [MNIST dataset](https://en.wikipedia.org/wiki/MNIST_database) to demonstrate the different initial weights. As a reminder, the MNIST dataset contains images of handwritten numbers, 0-9, with normalized input (0.0 - 1.0). Run the cell below to download and load the MNIST dataset. ``` %matplotlib inline import tensorflow as tf import helper from tensorflow.examples.tutorials.mnist import input_data print('Getting MNIST Dataset...') mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) print('Data Extracted.') ``` ### Neural Network <img style="float: left" src="images/neural_network.png"/> For the neural network, we'll test on a 3 layer neural network with ReLU activations and an Adam optimizer. The lessons you learn apply to other neural networks, including different activations and optimizers. ``` # Save the shapes of weights for each layer layer_1_weight_shape = (mnist.train.images.shape[1], 256) layer_2_weight_shape = (256, 128) layer_3_weight_shape = (128, mnist.train.labels.shape[1]) ``` ## Initialize Weights Let's start looking at some initial weights. ### All Zeros or Ones If you follow the principle of [Occam's razor](https://en.wikipedia.org/wiki/Occam's_razor), you might think setting all the weights to 0 or 1 would be the best solution. This is not the case. With every weight the same, all the neurons at each layer are producing the same output. This makes it hard to decide which weights to adjust. Let's compare the loss with all ones and all zero weights using `helper.compare_init_weights`. 
This function will run two different initial weights on the neural network above for 2 epochs. It will plot the loss for the first 100 batches and print out stats after the 2 epochs (~860 batches). We plot the first 100 batches to better judge which weights performed better at the start. Run the cell below to see the difference between weights of all zeros against all ones. ``` all_zero_weights = [ tf.Variable(tf.zeros(layer_1_weight_shape)), tf.Variable(tf.zeros(layer_2_weight_shape)), tf.Variable(tf.zeros(layer_3_weight_shape)) ] all_one_weights = [ tf.Variable(tf.ones(layer_1_weight_shape)), tf.Variable(tf.ones(layer_2_weight_shape)), tf.Variable(tf.ones(layer_3_weight_shape)) ] helper.compare_init_weights( mnist, 'All Zeros vs All Ones', [ (all_zero_weights, 'All Zeros'), (all_one_weights, 'All Ones')]) ``` As you can see the accuracy is close to guessing for both zeros and ones, around 10%. The neural network is having a hard time determining which weights need to be changed, since the neurons have the same output for each layer. To avoid neurons with the same output, let's use unique weights. We can also randomly select these weights to avoid being stuck in a local minimum for each run. A good solution for getting these random weights is to sample from a uniform distribution. ### Uniform Distribution A [uniform distribution](https://en.wikipedia.org/wiki/Uniform_distribution_(continuous%29) has the equal probability of picking any number from a set of numbers. We'll be picking from a continous distribution, so the chance of picking the same number is low. We'll use TensorFlow's `tf.random_uniform` function to pick random numbers from a uniform distribution. >#### [`tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)`](https://www.tensorflow.org/api_docs/python/tf/random_uniform) >Outputs random values from a uniform distribution. >The generated values follow a uniform distribution in the range [minval, maxval). 
The lower bound minval is included in the range, while the upper bound maxval is excluded. >- **shape:** A 1-D integer Tensor or Python array. The shape of the output tensor. - **minval:** A 0-D Tensor or Python value of type dtype. The lower bound on the range of random values to generate. Defaults to 0. - **maxval:** A 0-D Tensor or Python value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point. - **dtype:** The type of the output: float32, float64, int32, or int64. - **seed:** A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior. - **name:** A name for the operation (optional). We can visualize the uniform distribution by using a histogram. Let's map the values from `tf.random_uniform([1000], -3, 3)` to a histogram using the `helper.hist_dist` function. This will be `1000` random float values from `-3` to `3`, excluding the value `3`. ``` helper.hist_dist('Random Uniform (minval=-3, maxval=3)', tf.random_uniform([1000], -3, 3)) ``` The histogram used 500 buckets for the 1000 values. Since the chance for any single bucket is the same, there should be around 2 values for each bucket. That's exactly what we see with the histogram. Some buckets have more and some have less, but they trend around 2. Now that you understand the `tf.random_uniform` function, let's apply it to some initial weights. ### Baseline Let's see how well the neural network trains using the default values for `tf.random_uniform`, where `minval=0.0` and `maxval=1.0`. 
``` # Default for tf.random_uniform is minval=0 and maxval=1 basline_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape)), tf.Variable(tf.random_uniform(layer_2_weight_shape)), tf.Variable(tf.random_uniform(layer_3_weight_shape)) ] helper.compare_init_weights( mnist, 'Baseline', [(basline_weights, 'tf.random_uniform [0, 1)')]) ``` The loss graph is showing the neural network is learning, which it didn't with all zeros or all ones. We're headed in the right direction. ### General rule for setting weights The general rule for setting the weights in a neural network is to be close to zero without being too small. A good practice is to start your weights in the range of $[-y, y]$ where $y=1/\sqrt{n}$ ($n$ is the number of inputs to a given neuron). To see if this holds true, let's first center our range over zero. This will give us the range [-1, 1). ``` uniform_neg1to1_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -1, 1)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -1, 1)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -1, 1)) ] helper.compare_init_weights( mnist, '[0, 1) vs [-1, 1)', [ (basline_weights, 'tf.random_uniform [0, 1)'), (uniform_neg1to1_weights, 'tf.random_uniform [-1, 1)')]) ``` We're going in the right direction: the accuracy and loss are better with [-1, 1). We still want smaller weights. How far can we go before it's too small? ### Too small Let's compare [-0.1, 0.1), [-0.01, 0.01), and [-0.001, 0.001) to see how small is too small. We'll also set `plot_n_batches=None` to show all the batches in the plot. 
``` uniform_neg01to01_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.1, 0.1)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.1, 0.1)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.1, 0.1)) ] uniform_neg001to001_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.01, 0.01)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.01, 0.01)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.01, 0.01)) ] uniform_neg0001to0001_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.001, 0.001)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.001, 0.001)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.001, 0.001)) ] helper.compare_init_weights( mnist, '[-1, 1) vs [-0.1, 0.1) vs [-0.01, 0.01) vs [-0.001, 0.001)', [ (uniform_neg1to1_weights, '[-1, 1)'), (uniform_neg01to01_weights, '[-0.1, 0.1)'), (uniform_neg001to001_weights, '[-0.01, 0.01)'), (uniform_neg0001to0001_weights, '[-0.001, 0.001)')], plot_n_batches=None) ``` Looks like anything [-0.01, 0.01) or smaller is too small. Let's compare this to our typical rule of using the range $y=1/\sqrt{n}$. ``` import numpy as np general_rule_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -1/np.sqrt(layer_1_weight_shape[0]), 1/np.sqrt(layer_1_weight_shape[0]))), tf.Variable(tf.random_uniform(layer_2_weight_shape, -1/np.sqrt(layer_2_weight_shape[0]), 1/np.sqrt(layer_2_weight_shape[0]))), tf.Variable(tf.random_uniform(layer_3_weight_shape, -1/np.sqrt(layer_3_weight_shape[0]), 1/np.sqrt(layer_3_weight_shape[0]))) ] helper.compare_init_weights( mnist, '[-0.1, 0.1) vs General Rule', [ (uniform_neg01to01_weights, '[-0.1, 0.1)'), (general_rule_weights, 'General Rule')], plot_n_batches=None) ``` The range we found and $y=1/\sqrt{n}$ are really close. Since the uniform distribution has the same chance to pick anything in the range, what if we used a distribution that had a higher chance of picking numbers closer to 0. 
Let's look at the normal distribution. ### Normal Distribution Unlike the uniform distribution, the [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) has a higher likelihood of picking numbers close to its mean. To visualize it, let's plot values from TensorFlow's `tf.random_normal` function to a histogram. >[tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)](https://www.tensorflow.org/api_docs/python/tf/random_normal) >Outputs random values from a normal distribution. >- **shape:** A 1-D integer Tensor or Python array. The shape of the output tensor. - **mean:** A 0-D Tensor or Python value of type dtype. The mean of the normal distribution. - **stddev:** A 0-D Tensor or Python value of type dtype. The standard deviation of the normal distribution. - **dtype:** The type of the output. - **seed:** A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior. - **name:** A name for the operation (optional). ``` helper.hist_dist('Random Normal (mean=0.0, stddev=1.0)', tf.random_normal([1000])) ``` Let's compare the normal distribution against the previous uniform distribution. ``` normal_01_weights = [ tf.Variable(tf.random_normal(layer_1_weight_shape, stddev=0.1)), tf.Variable(tf.random_normal(layer_2_weight_shape, stddev=0.1)), tf.Variable(tf.random_normal(layer_3_weight_shape, stddev=0.1)) ] helper.compare_init_weights( mnist, 'Uniform [-0.1, 0.1) vs Normal stddev 0.1', [ (uniform_neg01to01_weights, 'Uniform [-0.1, 0.1)'), (normal_01_weights, 'Normal stddev 0.1')]) ``` The normal distribution gave a slight improvement in accuracy and loss. Let's move closer to 0 and drop picked numbers that are `x` number of standard deviations away. This distribution is called the [Truncated Normal Distribution](https://en.wikipedia.org/wiki/Truncated_normal_distribution%29). 
### Truncated Normal Distribution >[tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)](https://www.tensorflow.org/api_docs/python/tf/truncated_normal) >Outputs random values from a truncated normal distribution. >The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. >- **shape:** A 1-D integer Tensor or Python array. The shape of the output tensor. - **mean:** A 0-D Tensor or Python value of type dtype. The mean of the truncated normal distribution. - **stddev:** A 0-D Tensor or Python value of type dtype. The standard deviation of the truncated normal distribution. - **dtype:** The type of the output. - **seed:** A Python integer. Used to create a random seed for the distribution. See tf.set_random_seed for behavior. - **name:** A name for the operation (optional). ``` helper.hist_dist('Truncated Normal (mean=0.0, stddev=1.0)', tf.truncated_normal([1000])) ``` Again, let's compare these results against the previous distribution. ``` trunc_normal_01_weights = [ tf.Variable(tf.truncated_normal(layer_1_weight_shape, stddev=0.1)), tf.Variable(tf.truncated_normal(layer_2_weight_shape, stddev=0.1)), tf.Variable(tf.truncated_normal(layer_3_weight_shape, stddev=0.1)) ] helper.compare_init_weights( mnist, 'Normal vs Truncated Normal', [ (normal_01_weights, 'Normal'), (trunc_normal_01_weights, 'Truncated Normal')]) ``` There's no difference between the two, but that's because the neural network we're using is too small. A larger neural network will pick more points on the normal distribution, increasing the likelihood its choices are larger than 2 standard deviations. We've come a long way from the first set of weights we tested. Let's see the difference between the weights we used then and now. 
``` helper.compare_init_weights( mnist, 'Baseline vs Truncated Normal', [ (basline_weights, 'Baseline'), (trunc_normal_01_weights, 'Truncated Normal')]) ``` That's a huge difference. You can barely see the truncated normal line. However, this is not the end of your learning path. We've provided more resources for initializing weights in the classroom!
github_jupyter
``` #import urllib, urllib3 #from bs4 import BeautifulSoup #import requests #import time #import io #import numpy as np ##import nltk ##from nltk.corpus import wordnet as wn #import pandas as pd #from sklearn.feature_extraction.text import CountVectorizer #import gensim #from gensim.models.ldamodel import LdaModel ``` Downloading abstracts and from computer science papers using [arXiv.org](https://arxiv.org) API and group them by years: ``` import pandas as pd df = pd.read_pickle("arxiv_data_cs_all.pickle.bz2") print(len(df)) df[:2] df.id[:2][0] import re def clean_id(s): if s: s = re.sub('.*(abs\/)', '', s) s = re.sub('.*(cs\/)', '', s) s = re.sub('.*(astro-ph\/)', '', s) s = re.sub('.*(math\/)', '', s) s = re.sub('.*(org\/)', '', s) s = re.sub('.*(dyn\/)', '', s) s = re.sub('.*(lg\/)', '', s) s = re.sub('.*(mat\/)', '', s) s = re.sub('.*(qc\/)', '', s) s = re.sub('.*(ph\/)', '', s) s = re.sub('.*(qc\/)', '', s) s = re.sub('.*(nlin\/)', '', s) s = re.sub('.*(th\/)', '', s) s = re.sub('.*(bio\/)', '', s) s = re.sub('.*(lat\/)', '', s) s = re.sub('\..*$', '', s) return s.strip() return s df_ids_y_m = df.id.map(clean_id) s_y_m = list(map(lambda x: x[0:4], df_ids_y_m)) papers_y_m = pd.DataFrame(columns=['Year', 'Month', 'Papers']) for i, item in enumerate(set(s_y_m)): if s_y_m.count(item) > 1: papers_y_m.loc[i, 'Year'] = item[0:2] papers_y_m.loc[i, 'Month'] = item[2:4] papers_y_m.loc[i, 'Papers'] = s_y_m.count(item) papers_y_m.sort_values('Year', inplace=True) papers_y_m.sort_values('Month', inplace=True, ascending=False) papers_y_m.reset_index(inplace=True, drop=True) papers_y_m['Year'] = (papers_y_m['Year'].astype(int).where(papers_y_m['Year'].astype(int) < 50, papers_y_m['Year'].astype(int)-100) + 2000) papers_y_m import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(20,10)) plt.bar(papers_y_m['Year'], papers_y_m['Papers'], width=0.9) #plt.axis('off') ax = plt.gca() ax.set_title("Number of papers on Machine Learning \n (arXiv:cs) until Jun 09, 
2019", size=25) ax.set_xlim(1990.1, 2019.6) rects = ax.patches labels = [papers_y_m.loc[i, 'Papers'] for i in range(len(rects))] for rect, label in zip(rects, labels): height = rect.get_height() rect.set_y = rect.get_y() + 10 ax.text(rect.get_x() + rect.get_width(), height + 15, label, ha='center', va='bottom', color='black') ax.tick_params(axis ='x', which='minor', length=0) ax.tick_params(axis ='both', which='major', length=0) ax.set_yticklabels = [''] ax.axes.get_yaxis().set_visible(False) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) plt.savefig("arxiv_data_cs_all_y_m.png", format="png", transparent=True,rasterized=True,dpi=300) plt.show() s_y = list(map(lambda x: x[0:2], df_ids_y_m.str.slice(stop=2))) papers_y = pd.DataFrame(columns=['Year', 'Papers']) for i, item in enumerate(set(s_y)): if s_y.count(item) > 1: papers_y.loc[i, 'Year'] = item papers_y.loc[i, 'Papers'] = s_y.count(item) papers_y['Year'] = (papers_y['Year'].astype(int).where(papers_y['Year'].astype(int) < 50, papers_y['Year'].astype(int)-100) + 2000) papers_y.sort_values('Year', inplace=True) papers_y.reset_index(inplace=True, drop=True) papers_y piece_to_find = "he" df[s_y.index(piece_to_find):s_y.index(piece_to_find)+1] import numpy as np import matplotlib.pyplot as plt %matplotlib inline plt.figure(figsize=(20,10)) plt.bar(papers_y['Year'], papers_y['Papers'], width=0.9) #plt.axis('off') ax = plt.gca() ax.set_title("Number of papers per Year on Machine Learning \n (arXiv:cs) until Jun 09, 2019", size=25) max_value = papers_y['Year'].max() min_value = papers_y['Year'].min() number_of_steps = 1 l = np.arange(min_value, max_value+1, number_of_steps) ax.set(xticks=l, xticklabels=l) rects = ax.patches labels = [papers_y.loc[i, 'Papers'] for i in range(len(rects))] for rect, label in zip(rects, labels): height = rect.get_height() rect.set_y(rect.get_y()+10) ax.text(rect.get_x() + 
rect.get_width()/2, height + 15, str(label), ha='center', va='bottom', color='black') ax.tick_params(axis ='x', which='minor', length=0) ax.tick_params(axis ='both', which='major', length=0) ax.set_yticklabels = [''] ax.axes.get_yaxis().set_visible(False) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.spines['left'].set_visible(False) plt.savefig("arxiv_data_cs_all_y.png", format="png", transparent=True,rasterized=True,dpi=300) plt.show() filename = "arxiv_data_cs_all_stats_years.json" papers_y.to_json(filename) filename = "arxiv_data_cs_all_stats_years_months.json" papers_y_m.to_json(filename) s_y_m = list(map(lambda x: x[0:4], df_ids_y_m)) papers_ym = pd.DataFrame(columns=['Year', 'Month', 'Papers']) for i, item in enumerate(set(s_y_m)): if s_y_m.count(item) > 1: papers_ym.loc[i, 'Year'] = item[0:2] papers_ym.loc[i, 'Month'] = item[2:4] papers_ym.loc[i, 'Papers'] = s_y_m.count(item) papers_y_m.sort_values('Year', inplace=True) papers_y_m.sort_values('Month', inplace=True, ascending=False) papers_y_m.reset_index(inplace=True, drop=True) papers_ym['Year'] = (papers_ym['Year'].astype(int).where(papers_ym['Year'].astype(int) < 50, papers_ym['Year'].astype(int)-100) + 2000) papers_ym def build_bf_cat_y_m(cat, df_cat_id): s_cat = list(map(lambda x: x[0:4], df_cat_id.map(clean_id))) papers_cat = pd.DataFrame(columns=['Year', 'Month', cat]) for i, item in enumerate(set(s_cat)): if s_cat.count(item) > 1: papers_cat.loc[i, 'Year'] = item[0:2] papers_cat.loc[i, 'Month'] = item[2:4] papers_cat.loc[i, cat] = s_cat.count(item) papers_cat.sort_values('Year', inplace=True) papers_cat.sort_values('Month', inplace=True, ascending=False) papers_cat.reset_index(inplace=True, drop=True) papers_cat['Year'] = (papers_cat['Year'].astype(int).where(papers_cat['Year'].astype(int) < 50, papers_cat['Year'].astype(int)-100) + 2000) return papers_cat for cat in set(list(df.primary_category)): df_cat_id = 
df["id"].where(df['primary_category'] == cat).dropna() papers_df = build_bf_cat_y_m(cat, df_cat_id) papers_ym = pd.merge(papers_ym, papers_df, on=['Year','Month'], how="outer") papers_ym.fillna(0, inplace=True) papers_ym filename = "arxiv_data_cs_all_stats_primary_cats_years_months.json" papers_ym.to_json(filename) s_y = list(map(lambda x: x[0:2], df_ids_y_m.str.slice(stop=2))) papers_y = pd.DataFrame(columns=['Year', 'Papers']) for i, item in enumerate(set(s_y)): if s_y.count(item) > 1: papers_y.loc[i, 'Year'] = item papers_y.loc[i, 'Papers'] = s_y.count(item) papers_y['Year'] = (papers_y['Year'].astype(int).where(papers_y['Year'].astype(int) < 50, papers_y['Year'].astype(int)-100) + 2000) papers_y.sort_values('Year', inplace=True) papers_y.reset_index(inplace=True, drop=True) papers_y def build_bf_cat_y(cat, df_cat_id): s_cat = list(map(lambda x: x[0:2], df_cat_id.map(clean_id))) papers_cat = pd.DataFrame(columns=['Year', cat]) for i, item in enumerate(set(s_cat)): if s_cat.count(item) > 1: papers_cat.loc[i, 'Year'] = item papers_cat.loc[i, cat] = s_cat.count(item) papers_cat['Year'] = (papers_cat['Year'].astype(int).where(papers_cat['Year'].astype(int) < 50, papers_cat['Year'].astype(int)-100) + 2000) papers_cat.sort_values('Year', inplace=True) papers_cat.reset_index(inplace=True, drop=True) return papers_cat for cat in set(list(df.primary_category)): df_cat_id = df["id"].where(df['primary_category'] == cat).dropna() papers_df = build_bf_cat_y(cat, df_cat_id) papers_y = pd.merge(papers_y, papers_df, on='Year', how="outer") papers_y.fillna(0, inplace=True) papers_y filename = "arxiv_data_cs_all_stats_primary_cats_years.json" papers_y.to_json(filename) ```
github_jupyter
# Interpretable forecasting with N-Beats ``` import os import warnings warnings.filterwarnings("ignore") os.chdir("../../..") import pandas as pd import torch import pytorch_lightning as pl from pytorch_lightning.callbacks import EarlyStopping from pytorch_forecasting import TimeSeriesDataSet, NBeats, Baseline from pytorch_forecasting.data import NaNLabelEncoder from pytorch_forecasting.data.examples import generate_ar_data from pytorch_forecasting.metrics import SMAPE ``` ## Load data ``` data = generate_ar_data(seasonality=10.0, timesteps=400, n_series=100) data["static"] = 2 data["date"] = pd.Timestamp("2020-01-01") + pd.to_timedelta(data.time_idx, "D") data.head() # create dataset and dataloaders max_encoder_length = 60 max_prediction_length = 20 training_cutoff = data["time_idx"].max() - max_prediction_length context_length = max_encoder_length prediction_length = max_prediction_length training = TimeSeriesDataSet( data[lambda x: x.time_idx <= training_cutoff], time_idx="time_idx", target="value", categorical_encoders={"series": NaNLabelEncoder().fit(data.series)}, group_ids=["series"], # only unknown variable is "value" - and N-Beats can also not take any additional variables time_varying_unknown_reals=["value"], max_encoder_length=context_length, max_prediction_length=prediction_length, ) validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff+1) batch_size = 128 train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0) val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0) ``` ## Calculate baseline error ``` # calculate baseline absolute error actuals = torch.cat([y for x, y in iter(val_dataloader)]) baseline_predictions = Baseline().predict(val_dataloader) SMAPE()(baseline_predictions, actuals) ``` ## Train network Find optimal learning rate ``` trainer = pl.Trainer(gpus=0, gradient_clip_val=0.1) net = NBeats.from_dataset(training, 
learning_rate=3e-2, weight_decay=1e-2) # find optimal learning rate res = trainer.lr_find(net, train_dataloader=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2) print(f"suggested learning rate: {res.suggestion()}") fig = res.plot(show=True, suggest=True) fig.show() net.hparams.learning_rate = res.suggestion() ``` Fit model ``` early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=10, verbose=False, mode="min") trainer = pl.Trainer( max_epochs=100, gpus=0, weights_summary="top", gradient_clip_val=0.1, early_stop_callback=early_stop_callback, limit_train_batches=30, ) net = NBeats.from_dataset(training, learning_rate=1.2e-2, log_interval=10, log_val_interval=1, weight_decay=1e-2) trainer.fit( net, train_dataloader=train_dataloader, val_dataloaders=val_dataloader, ) ``` ### Evaluate Results ``` # best_model_path = trainer.checkpoint_callback.best_model_path best_model_path = "/Users/beitnerjan/Documents/Github/temporal_fusion_transformer_pytorch/lightning_logs/version_212/checkpoints/epoch=19.ckpt" best_model = NBeats.load_from_checkpoint(best_model_path) print(best_model_path) ``` We calculate the error which is approximately half of the baseline error ``` actuals = torch.cat([y for x, y in iter(val_dataloader)]) predictions = best_model.predict(val_dataloader) (actuals - predictions).abs().mean() raw_predictions, x = best_model.predict(val_dataloader, mode="raw", return_x=True) for idx in range(10): best_model.plot_prediction(x, raw_predictions, idx=idx, add_loss_to_title=True); ``` ## Interpret model ``` for idx in range(10): best_model.plot_interpretation(x, raw_predictions, idx=idx) ; ```
github_jupyter
# Create a QComponent - Advanced ``` from qiskit_metal import draw, Dict from qiskit_metal.toolbox_metal import math_and_overrides from qiskit_metal.qlibrary.core import QComponent import qiskit_metal as metal design = metal.designs.DesignPlanar() ``` ## Qubits and Junctions The vast majority of junction management is actually under the QRenderers. The only information that a component designer needs to provide, is a linestring and width which indicates the location and orientation of a given junction. We can see this from a couple extracted lines of code from `TransmonPocket` `...` `rect_jj = draw.LineString([(0, -pad_gap / 2), (0, +pad_gap / 2)])` `...` `self.add_qgeometry('junction', dict(rect_jj=rect_jj), width=p.inductor_width)` In this case, the linestring is drawn between the two charge islands of the `TransmonPocket`. Much more of the junctions options are from renderer options added when the QRenderers are initiated. These are covered more in the renderer tutorials and sessions. It should be noted, currently multiple junctions in a component will receive the same renderer options. This is fine if, say, making a symmetric SQUID, though if trying to have asymmetry, (or, say fluxonium), a manner to handled multiple junction renderer options in a component is required. ``` from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket ?TransmonPocket ``` ## Exteriors, Interiors, and MultiPolygons As was shown in 3.1, there is a great amount of flexibility already present in Metal for what a component can be, though as it is still in development, there are some limitations with respect to if renderers can accurately render a given shape, say, a multi-faceted polygon where some facets are composed of splines. What capabilities are currently missing and would be beneficial to be added are all part of the development process. 
Currently, a poly can be generated with interior cut outs, such as the smiley face previously, ``` face = draw.shapely.geometry.Point(0, 0).buffer(1) eye = draw.shapely.geometry.Point(0, 0).buffer(0.2) eye_l = draw.translate(eye, -0.4, 0.4) eye_r = draw.translate(eye, 0.4, 0.4) smile = draw.shapely.geometry.Point(0, 0).buffer(0.8) cut_sq = draw.shapely.geometry.box(-1, -0.3, 1, 1) smile = draw.subtract(smile, cut_sq) face = draw.subtract(face, smile) face = draw.subtract(face, eye_r) face = draw.subtract(face, eye_l) face ``` This differs from qgeometries which have `subtract=True`, as that specifically sets that geometry to be "etched" from the ground plane. The polygon face is composed of an exterior; ``` face.exterior ``` and interiors, such as; ``` face.interiors[0] ``` A renderer must recognize the difference between these shapes, as the current QRenderers do. This allows for the component designer to generate complex shapes, without having to worry about how to add the qgeometries in any particular manner. This is also true with MultiPolygons. ``` big_square = draw.rectangle(10,10,0,0) cut_rectangle = draw.rectangle(12,1,0,0) multi_poly = draw.subtract(big_square, cut_rectangle) multi_poly type(multi_poly) ``` The MultiPolygon can still just be passed to add_qgeometry as one would with a regular polygon. It is broken up behind the scenes so two separate rectangles (with the appropriate coordinates) are added to the poly qgeometry table. This is handled by the add_qgeometry method of QGeometryTables. ``` ?metal.qgeometries.QGeometryTables.add_qgeometry ``` This method also handles rounding of coordinates to try and avoid any numerical errors. It is called by `metal.qlibrary.core.QComponent.add_qgeometry` and should not be called directly. ## QComponent Inheritance As is the case with python classes, one can extend a given component by creating a qcomponent which inherits said class, making it a parent/child relationship. 
While python does support multiple inheritances, Metal may run into some bugs, so it is best to keep inheritances as single paths of heritage. A good example is `TransmonPocketCL`, which adds a "charge line" the a "standard" `TransmonPocket`. As can be seen in the below code, none of the charge islands or other connection pads are present, but will still be generated via the `super().make()` line in the `make()` method. ``` import numpy as np from qiskit_metal import draw, Dict from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket class TransmonPocketCL(TransmonPocket): # pylint: disable=invalid-name """ The base `TransmonPocketCL` class Inherits `TransmonPocket` class Description: Create a standard pocket transmon qubit for a ground plane, with two pads connected by a junction (see drawing below). Connector lines can be added using the `connection_pads` dictionary. Each connector line has a name and a list of default properties. This is a child of TransmonPocket, see TransmonPocket for the variables and description of that class. :: _________________ | | |_______________| ^ ________x________ | N | | | |_______________| .. image:: Component_Qubit_Transmon_Pocket_CL.png Charge Line: * make_CL (bool): If a chargeline should be included. * cl_gap (string): The cpw dielectric gap of the charge line. * cl_width (string): The cpw width of the charge line. * cl_length (string): The length of the charge line 'arm' coupling the the qubit pocket. Measured from the base of the 90 degree bend. * cl_ground_gap (string): How much ground is present between the charge line and the qubit pocket. * cl_pocket_edge (string): What side of the pocket the charge line is. -180 to +180 from the 'west edge', will round to the nearest 90. 
* cl_off_center (string): Distance from the center axis the qubit pocket is referenced to """ component_metadata = Dict(short_name='Q', _qgeometry_table_poly='True') """Component metadata""" default_options = Dict( make_CL=True, cl_gap='6um', # the cpw dielectric gap of the charge line cl_width='10um', # the cpw trace width of the charge line # the length of the charge line 'arm' coupling the the qubit pocket. cl_length='20um', # Measured from the base of the 90 degree bend cl_ground_gap= '6um', # how much ground between the charge line and the qubit pocket # -180 to +180 from the 'left edge', will round to the nearest 90. cl_pocket_edge='0', cl_off_center= '100um', # distance from the center axis the qubit pocket is built on ) """Default drawing options""" def make(self): """Define the way the options are turned into QGeometry.""" super().make() if self.options.make_CL == True: self.make_charge_line() ##################################################################### def make_charge_line(self): """Creates the charge line if the user has charge line option to TRUE """ # Grab option values name = 'Charge_Line' p = self.p cl_arm = draw.box(0, 0, -p.cl_width, p.cl_length) cl_cpw = draw.box(0, 0, -8 * p.cl_width, p.cl_width) cl_metal = draw.cascaded_union([cl_arm, cl_cpw]) cl_etcher = draw.buffer(cl_metal, p.cl_gap) port_line = draw.LineString([(-8 * p.cl_width, 0), (-8 * p.cl_width, p.cl_width)]) polys = [cl_metal, cl_etcher, port_line] # Move the charge line to the side user requested cl_rotate = 0 if (abs(p.cl_pocket_edge) > 135) or (abs(p.cl_pocket_edge) < 45): polys = draw.translate( polys, -(p.pocket_width / 2 + p.cl_ground_gap + p.cl_gap), -(p.pad_gap + p.pad_height) / 2) if (abs(p.cl_pocket_edge) > 135): p.cl_rotate = 180 else: polys = draw.translate( polys, -(p.pocket_height / 2 + p.cl_groundGap + p.cl_gap), -(p.pad_width) / 2) cl_rotate = 90 if (p.cl_pocket_edge < 0): cl_rotate = -90 # Rotate it to the pockets orientation polys = draw.rotate(polys, 
p.orientation + cl_rotate, origin=(0, 0)) # Move to the final position polys = draw.translate(polys, p.pos_x, p.pos_y) [cl_metal, cl_etcher, port_line] = polys # Generating pins points = list(draw.shapely.geometry.shape(port_line).coords) self.add_pin(name, points, p.cl_width) # TODO: chip # Adding to element table self.add_qgeometry('poly', dict(cl_metal=cl_metal)) self.add_qgeometry('poly', dict(cl_etcher=cl_etcher), subtract=True) ``` We can see this is the case by generating a TransmonPocketCL in the GUI. ``` gui = metal.MetalGUI(design) my_transmon_cl = TransmonPocketCL(design,'my_transmon_cl',options=dict(connection_pads=dict(a=dict(),b=dict(loc_W=-1)))) gui.rebuild() gui.autoscale() gui.screenshot() my_transmon_cl.options ``` We can see that `my_transmon_cl` inherited the appropriate options from `TransmonPocket`, and even got the junction renderer options since its parent class does declare `_qgeometry_table_junction='True'` ``` gui.main_window.close() ```
github_jupyter