code
stringlengths
2.5k
150k
kind
stringclasses
1 value
## Finding entity classes in embeddings In this notebook we're going to use embeddings to find entity classes and how they correlate with other things ``` %matplotlib inline from sklearn import svm from keras.utils import get_file import os import gensim import numpy as np import random import requests import geopandas as gpd from IPython.core.pylabtools import figsize figsize(12, 8) import pycountry import csv ``` as before, let's load up the model ``` MODEL = 'GoogleNews-vectors-negative300.bin' path = get_file(MODEL + '.gz', 'https://s3.amazonaws.com/dl4j-distribution/%s.gz' % MODEL) unzipped = os.path.join('generated', MODEL) if not os.path.isfile(unzipped): with open(unzipped, 'wb') as fout: zcat = subprocess.Popen(['zcat'], stdin=open(path), stdout=fout ) zcat.wait() ``` Most similar to a bunch of countries are some other countries! ``` model = gensim.models.KeyedVectors.load_word2vec_format(unzipped, binary=True) model.most_similar(positive=['Germany']) model.most_similar(positive=['Annita_Kirsten']) ``` No we'll create a training set with countries and non countries and get a support vector machine to learn the difference. 
``` countries = list(csv.DictReader(open('data/countries.csv'))) countries[:10] positive = [x['name'] for x in random.sample(countries, 40)] negative = random.sample(model.vocab.keys(), 5000) negative[:4] labelled = [(p, 1) for p in positive] + [(n, 0) for n in negative] random.shuffle(labelled) X = np.asarray([model[w] for w, l in labelled]) y = np.asarray([l for w, l in labelled]) X.shape, y.shape TRAINING_FRACTION = 0.3 cut_off = int(TRAINING_FRACTION * len(labelled)) clf = svm.SVC(kernel='linear') clf.fit(X[:cut_off], y[:cut_off]) ``` We did alright, 99.9% precision: ``` res = clf.predict(X[cut_off:]) missed = [country for (pred, truth, country) in zip(res, y[cut_off:], labelled[cut_off:]) if pred != truth] 100 - 100 * float(len(missed)) / len(res), missed all_predictions = clf.predict(model.syn0) res = [] for word, pred in zip(model.index2word, all_predictions): if pred: res.append(word) if len(res) == 150: break random.sample(res, 10) country_to_idx = {country['name']: idx for idx, country in enumerate(countries)} country_vecs = np.asarray([model[c['name']] for c in countries]) country_vecs.shape ``` Quick sanity check to see what is similar to Canada: ``` dists = np.dot(country_vecs, country_vecs[country_to_idx['Canada']]) for idx in reversed(np.argsort(dists)[-10:]): print(countries[idx]['name'], dists[idx]) ``` Ranking countries for a specific term: ``` def rank_countries(term, topn=10, field='name'): if not term in model: return [] vec = model[term] dists = np.dot(country_vecs, vec) return [(countries[idx][field], float(dists[idx])) for idx in reversed(np.argsort(dists)[-topn:])] rank_countries('cricket') ``` Now let's visualize this on a world map: ``` world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) world.head() ``` We can now plot some maps! 
``` def map_term(term): d = {k.upper(): v for k, v in rank_countries(term, topn=0, field='cc3')} world[term] = world['iso_a3'].map(d) world[term] /= world[term].max() world.dropna().plot(term, cmap='OrRd') map_term('coffee') map_term('cricket') map_term('China') ```
github_jupyter
# Denoising Autoencoder Sticking with the MNIST dataset, let's add noise to our data and see if we can define and train an autoencoder to _de_-noise the images. <img src='notebook_ims/autoencoder_denoise.png' width=70%/> Let's get started by importing our libraries and getting the dataset. ``` import torch import numpy as np from torchvision import datasets import torchvision.transforms as transforms # convert data to torch.FloatTensor transform = transforms.ToTensor() # load the training and test datasets train_data = datasets.MNIST(root='data', train=True, download=True, transform=transform) test_data = datasets.MNIST(root='data', train=False, download=True, transform=transform) # Create training and test dataloaders num_workers = 0 # how many samples per batch to load batch_size = 20 # prepare data loaders train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers) ``` ### Visualize the Data ``` import matplotlib.pyplot as plt %matplotlib inline # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # get one image from the batch img = np.squeeze(images[0]) fig = plt.figure(figsize = (5,5)) ax = fig.add_subplot(111) ax.imshow(img, cmap='gray') ``` --- # Denoising As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. >**We'll use noisy images as input and the original, clean images as targets.** Below is an example of some of the noisy images I generated and the associated, denoised images. 
<img src='notebook_ims/denoising.png' /> Since this is a harder problem for the network, we'll want to use _deeper_ convolutional layers here; layers with more feature maps. You might also consider adding additional layers. I suggest starting with a depth of 32 for the convolutional layers in the encoder, and the same depths going backward through the decoder. #### TODO: Build the network for the denoising autoencoder. Add deeper and/or additional layers compared to the model above. ``` import torch.nn as nn import torch.nn.functional as F # define the NN architecture class ConvDenoiser(nn.Module): def __init__(self): super(ConvDenoiser, self).__init__() ## encoder layers ## # conv layer (depth from 1 --> 32), 3x3 kernels self.conv1 = nn.Conv2d(1, 32, 3, padding=1) # conv layer (depth from 32 --> 16), 3x3 kernels self.conv2 = nn.Conv2d(32, 16, 3, padding=1) # conv layer (depth from 16 --> 8), 3x3 kernels self.conv3 = nn.Conv2d(16, 8, 3, padding=1) # pooling layer to reduce x-y dims by two; kernel and stride of 2 self.pool = nn.MaxPool2d(2, 2) ## decoder layers ## # transpose layer, a kernel of 2 and a stride of 2 will increase the spatial dims by 2 self.t_conv1 = nn.ConvTranspose2d(8, 8, 3, stride=2) # kernel_size=3 to get to a 7x7 image output # two more transpose layers with a kernel of 2 self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2) self.t_conv3 = nn.ConvTranspose2d(16, 32, 2, stride=2) # one, final, normal conv layer to decrease the depth self.conv_out = nn.Conv2d(32, 1, 3, padding=1) def forward(self, x): ## encode ## # add hidden layers with relu activation function # and maxpooling after x = F.relu(self.conv1(x)) x = self.pool(x) # add second hidden layer x = F.relu(self.conv2(x)) x = self.pool(x) # add third hidden layer x = F.relu(self.conv3(x)) x = self.pool(x) # compressed representation ## decode ## # add transpose conv layers, with relu activation function x = F.relu(self.t_conv1(x)) x = F.relu(self.t_conv2(x)) x = F.relu(self.t_conv3(x)) # 
transpose again, output should have a sigmoid applied x = F.sigmoid(self.conv_out(x)) return x # initialize the NN model = ConvDenoiser() print(model) ``` --- ## Training We are only concerned with the training images, which we can get from the `train_loader`. >In this case, we are actually **adding some noise** to these images and we'll feed these `noisy_imgs` to our model. The model will produce reconstructed images based on the noisy input. But, we want it to produce _normal_ un-noisy images, and so, when we calculate the loss, we will still compare the reconstructed outputs to the original images! Because we're comparing pixel values in input and output images, it will be best to use a loss that is meant for a regression task. Regression is all about comparing quantities rather than probabilistic values. So, in this case, I'll use `MSELoss`. And compare output images and input images as follows: ``` loss = criterion(outputs, images) ``` ``` # specify loss function criterion = nn.MSELoss() # specify loss function optimizer = torch.optim.Adam(model.parameters(), lr=0.001) # number of epochs to train the model n_epochs = 20 # for adding noise to images noise_factor=0.5 for epoch in range(1, n_epochs+1): # monitor training loss train_loss = 0.0 ################### # train the model # ################### for data in train_loader: # _ stands in for labels, here # no need to flatten images images, _ = data ## add random noise to the input images noisy_imgs = images + noise_factor * torch.randn(*images.shape) # Clip the images to be between 0 and 1 noisy_imgs = np.clip(noisy_imgs, 0., 1.) 
# clear the gradients of all optimized variables optimizer.zero_grad() ## forward pass: compute predicted outputs by passing *noisy* images to the model outputs = model(noisy_imgs) # calculate the loss # the "target" is still the original, not-noisy images loss = criterion(outputs, images) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update running training loss train_loss += loss.item()*images.size(0) # print avg training statistics train_loss = train_loss/len(train_loader) print('Epoch: {} \tTraining Loss: {:.6f}'.format( epoch, train_loss )) ``` ## Checking out the results Here I'm adding noise to the test images and passing them through the autoencoder. It does a suprising great job of removing the noise, even though it's sometimes difficult to tell what the original number is. ``` # obtain one batch of test images dataiter = iter(test_loader) images, labels = dataiter.next() # add noise to the test images noisy_imgs = images + noise_factor * torch.randn(*images.shape) noisy_imgs = np.clip(noisy_imgs, 0., 1.) # get sample outputs output = model(noisy_imgs) # prep images for display noisy_imgs = noisy_imgs.numpy() # output is resized into a batch of iages output = output.view(batch_size, 1, 28, 28) # use detach when it's an output that requires_grad output = output.detach().numpy() # plot the first ten input images and then reconstructed images fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(25,4)) # input images on top row, reconstructions on bottom for noisy_imgs, row in zip([noisy_imgs, output], axes): for img, ax in zip(noisy_imgs, row): ax.imshow(np.squeeze(img), cmap='gray') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ```
github_jupyter
<a href="https://colab.research.google.com/github/neurologic/NeurophysiologyModules/blob/main/Crawdad_Extracell_Intracell_Simultaneous.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Arthropod skeletal muscle compared to vertebrate skeletal muscle: > Arthropod muscles are innervated by relatively few excitatory motor neurons (sometimes only one). Arthropod motor neurons innervate each muscle fiber at multiple points (multiterminal innervation). More than one motor neuron may innervate one muscle fiber (polyneuronal innervation). Inhibitory motor neurons may innervate muscle fibers (and sometimes the terminals of the excitatory motor nerve endings). The tonic superficial flexor does not have “all-or-none” propagated action potentials, but instead has graded electrical responses dependent upon the level of the excitation and inhibition. The degree of depolarization determines the amount of Ca2+ that enters the cell through voltage-gated channels; the amount of Ca2+ entry in turn determines the strength of muscle contraction. Unlike the superficial flexor, fast phasic crayfish muscles may fire Ca2+-based action potentials. ``` #@markdown Sketch of skeletal muscle innervation #@markdown in arthoropods versus vertebrates. 
from IPython import display from base64 import b64decode base64_data = "iVBORw0KGgoAAAANSUhEUgAAAxYAAAGkCAMAAACb/UPCAAAAA3NCSVQICAjb4U/gAAADAFBMVEUAAADj5+dcYmT0BAT/7+8dKCGqqqqVlZXMzMx8gYMNDg/sbG1ESkyGKiyc066Hi409PT3srq/CwsJpaWpNVFaTx6Td0NFihG0bHh/HWlrwjY7bDQ1/rI7r6+u7u7uFhoYuMzSnpaaRlpdOaldudHUGBwff399BPUF1n4PGODmwsrLpy8zX19cWGBmeIiP/zMxcXV0oLC3zdHT7JSZITU//Hx9FRUX3LS3un6BYd2KcoKH8HByqq63///8xQjfeX2CJuZliZGTIamtzeHr/p6c7T0LGxsqOjo5skngnNSzURkbz8/PyIiJVVVX1xcXgMjJNTU3hEhL/39//T085OTnxgoPzCAi8vr9VOz3/n5/4mJjo2drrvL0YGhufn5//Zmb/FBRvcXGxtbYfIiNFXUwMDAxxd3lVWVrQIiIzMzM1Oz3Y2dr8FxfGycrUl5enqqtISUkEBATO0NESExT/Pz9hZmg9REb3OT1JUFL2UVElJyj/fX24KyyeoaL5OTpJQEKBhojn6Oj/Dw+BgoP4Q0T/X191dXYjHyCVmZtwVlfv7+8xNzhQUVH7DAx3fH3DFRb7EBBaYGL/MzNsamooKCgUHBhBR0nb3t/gwsNVXF2PkZHnCAloTU8JCQqKi4x1Ojz/hoZUWlyvr6+3GRq2srI3KCgzMzMMEAxla232MjJVVVk5P0HpGhpAQUEjJCQ9RUkQEBDT0tLa3Nw+QkMtLS0cHBzKvL32VlfMzMz/WVn/b2+jpaY5OT18fHwUFBRRWFn/AABtbW0zMzPzNTX/v7/0ZWX/j4/05OT/CAi9vr+ysrYYGBjmmJnyEhL7IyP/r68gICDvmJmFioszMzNBSEpYXmD/Jyd4eXpiNzlhYWGtsLE5QEJpbnD/Pj5RTU19gob5ICG9wcJydXaKio6WlppZWVk0ODmTk5NITlA5PT0kJChFTE39LS35NjYZGxuqra61tbVeZGb/AwMrMDJ2d3hmZmZqbG3vkZFwcXIICAj/MzOZmZn3SEj/DAw9QUUIDAxepZFcAAAACXBIWXMAABcRAAAXEQHKJvM/AAAAHHRFWHRTb2Z0d2FyZQBBZG9iZSBGaXJld29ya3MgQ1M1cbXjNgAAIABJREFUeJztfX9sXdd934NiKyTnSYoaTdkmPNuULGg1OoeqLVtKnyauEyyN5ebZNDRPLluzojqF5UjJ1UObVG6fOEctI3jYEvWt7OTNLgoo6yyIHDWjDmw5QOwl8DZ7QlpPaQsshJWxaRq6Kx3PkAvs/r7n5z2fc+45994nv88ftkhePr73vfdzvuf7+f44tUYXXXTBoFb2G+iii+qhS4suuuDQpUUXXXDo0qKLLjh0adFFFxy6tOiiCw5dWnTRBYcuLbrogkOXFl10waFLiy664NClRRddcOjSoosuOHRp0UUXHLq06KILDoa0OHPm08RXd545c6eVd9OFM3z6DIMNjcY7Z85sLPt9VRNdWnxE0KWFDrq0+IigSwsddGnx0QJ1q7q0kKHzabHwzFBpf7vzUDQtTtUd/wE36Hha1JebJ7u8gFEsLYa2NZsdyQvLtLjzS/629e3iSNLfXNPffG6hsL/X6eBp8a4fdTy7Pf2m//VD76SXbG+8+5D3nXe9r7a/7d/defSPLTzX7F/TvGrrvRcIu7RI4rqHZvO/NQDeanS03RpbXu7yAgRHi7ejOxa5jfmH4oh8Pr5ke3TJnY2Ni9S1KtSXl8da7d5O5IVVWjyb6hxvW3hvSvirUcvDZJcXKFhafCm+YYvBQrZ9MbmDi/PRJc8m34h/uAH6U54jn/TvTifywiotFs8sBv+/c8OZxdzvTI1gNWpFvOjILWzxYGlx5oy/XXo2
+r/v7hf9fwTbpviSDd5vzG/w/n/m7e3htcA2KnTkrZAXHRf92aTFdm/xsfGeQMSrUcCLNZ0Z2hUOjhbhE+7tk571/jfvPftRlPHpcK/kXbIYfGfjmfCSRsNzMO8q/07syAPs6The2KTFrL95eqeYqIJcjXy0u7yAwNIifNL9h97f9j6b/nQ+/Fl6iXdzZ+PfUkoqG5eXp9Kb05rqNF7YokWwfkSx2dvvSH7LIhZONte3SHi86Hf/ZzseLC2iL7aHtPhS/Oh72BDc4ncS1xDtqiA5/lTzyiR1d6aanRX9WaFFrH/PxjLGmS9tl/yiJTCrUcCLCx0Y2hnjFsPfk+QtIlrQFSIbqEuSW66kxdDV5oU2c3c6TC00pMUGSo14NrHdOzExFp3yYqW5PNni0ImShylefeT1h01+rwBaDJ1s9vI3p7PUQkNaPER42+ArIh30zqJrhfZqcw27GkW82NZZW1hzvOrh+y/do/17KlqwCqI+LRaWm6wjj3jRQdGfIS2eJU3j2ZSx5p2otm0C8WoUYH2nhXbGeDXE6aff1NtOZdPiWS5Vp02LenP5kPjmdJJaaEiLd+N0j49PR77hnUj8bvgZDGe0WHiuuUfCig6UPEzxagqt7VQ2LTZ6y1m0C5gPv6NLi/7mmnnZzekgtdC0aXWDL2P7xJh9x/9nsIeajYtr5r/kbhO1McnhyXjRQVtYc7z+fYIYr37/8+h2KpsW/nY4SOdtf+dMkrfQoIUo2KZ50SFqoSkt5qngLHISRPGHq5C7zkp/HC86KrTLgVsee+Q0wYz7nn4T+S0FLebT4o8wX6FFi4ztbcSLox2iihiPOPCrAWIKJEnPtMLGUcGyLNgm0FmSRz68+dJ9BDNOP/KYMtBQ0IK4rc8yl6hpIQu2SXSIWphj8sed74QpCjJ5927gMB5ylM8b2tbsVbEi4MVHqOfsHno79YuKQENFi6iw/Mw78+wlSlp421tJsE3zohPUwk4aiNM6SdTZZPHiSseEdlbgbaeoQMNAt7UA5fY2Qn8nqCIdRAvER4foIMnDFt58mtxO3fcSFGjYBLC9jdAJamHn0KK+LMpsS3lxquz3WzjueYncTmlnNHJh6Blke5vwovLtlB1DC9RHJ7zoiNDOMh5+/RfJ7RQQgtvBEFu4qeBF5VWRTqEF7qMj9DZ/rez3XAqYQMOwdEoPC8+h29sIk8t/Vm1edAYthq5q+OgA7QvNb31kJ+W8+TSZ0fi+a2YsZGdYRXfnj5v3Vjr66whaKNNEHCavNPs7sFfSHr5OZTScMqMuLGfOvDtrmr3VVgs7gRatk5o+2t+8jnWG5OEQdAjuTLZdyaiCEmPMb5aptlrYAbTAhdkY66Mub48XrbLffal4mMr13eeCGdpBX6s/FE8qrRZWnxYnNITZAF5YEXd5d1qvpAPQ4tQ3/uTrdl9eP+g7GlcTVrmdsvK00N65+mFF+kXlpcACQItT3/iTf2DtlQ2CvjWElFvdAqmq06Ku66PHaFnE48WJsj9DBUAz4//+m9etJDQ8VshbX4Q4RPfg9zYrqhZWnBbaO9d+NuvXUb2SLsEkNP7wP+QWp7TTFa09rOuvajtltWlxlZoFpUa7l2+DqbbkUSge/v2fJZnxsy/lCjT00xW9/CJXUbWwyrTwk3haZp9fI/oFjxcrZX+UyuDhnyHzGa/+1dPG9SEL2lKI8O5Us52SpcX2S9dGC5oLqIJ2PDcpUXI/ogVSMjz8EuUzlC0aYuhLIcviQKSSaiFLiwdrHhYfrECYOqSbxJuS3qjOHCbvEA9//jTFjPu0t1N13STelLRJqYpqIUuLs7UQbz3xfCnvJ4E2K9Zn3SiPF1XcwpaIe36G9hl6hejaAiE5R5tFBdspWVrUCJx90PHIzAzoprY9h5AZnX+kC6QkePPpv6KYgZfb9muyQiSFEKieWsjS4ptPLJLMWHzim6W8LV2VQxzOkei8YfIF4JbHfpEmBjZaJ3vsjcHdqZxaKFCitj94lmTG
pbPFx+C6Kocs2CZRUSmwbDz8+fsYZig7XnUFwskryrtTNbVQLNDOPn/tLcppvFxoDK7LijFo5kQlJY8q4OtPM8TwAo0M3VaXFWOI56+YWijPW5x4mdpOFSjcbtRkxRTYz3qogpJHNXDL66zLkAcauqyQC4Q0KqUWZqbzZkevXSreaegq4oLcqQRVlAKrAt5l+N1LXKChnUzC706V1EJllvsEHYOff+M6fCyzGTS1v/ZRjdLm6kkeFQIfZbzK9WgMgaO6krujEAgpVEgtRIo/to+epZzGXSMPuntDuqxYozVzYr5jhgOXAk6YCpiRTrfVTSapBUIKU83fqkjbGFoTxQi3tTcG3TgNTVYAIgeN79zWnHLyxm8SpHspMqdxOmSGLiu0787R5pVq8EKjVJB1GudHblh/O7qs0C3inFq+9w+qFNpVEA8nxPjDP6HFqf+qywr+gMNMePvhv6iIWqhZQXvnhnHKaVy+ZtVpaLICFTkS+E3eHTNMvjTcklRM/f7r1GEB/+rvH3Z4d4LGvYqohfqF5fODlylmfLDBWqShzQq9crW4ybtSUmAlkRDj9OeZoVMfvo8yYyqjCkqEsdC3VKOd0qzf4sbIecZp2Kj10mQFLv2FSJu8KyR5VBW3vBSLtF9nj9HAmKF7d5K2ykqohebHvjBO4/yxGznTfdqs0Js5QSZbO2KYfMlIYoyX/K/+0a/8thYzNO9OOg8kUAtL50Wu7rwHR+5inEYO/6fHCk3pj23y7vgCqSLq/r8eybXfvyfQoB5/7TMEM158wOLdoeaBVKFwMG/T6vw11mncbuY0tFmhl1Y6yhR9dsAw+UwEJZzO6/4fC8OK0/8s0qAoZpyWMkP37jDzQCpQIGWjl/vBYx9Q1Lhrw1rt19Bjhbe6aEl/83zSb6wakocpCqrGuSWaFvI7iblpZrz2uIW7w80DKb+d0tKIg43X3qCYUXtjSev3NVmhKYgL6847u0AqMfRb19xup94MHcaHRDBBMeMzj96d8+4Ig/OSeWFv8sfsjWO0PDW+AZantFmhWWErvN57mbK3sOagEqtOt1MPh2Ns76eCbIoZT1EBuObdkYUh5aqFdgfinGAijdpbtyO/pseKKU1W9MrSG5Plh3bmYOr+HW6ntv1OyAvGeg+8mOYzTr/4CcO7M3lFNpiw1HZK63OiZl9+i2bGpYdUMfjC8hUdVmidFkYOauZ/1sm8CKpxqCXIjTp1tdn7fqg9cfZ74ENiM7Xz7vDuaCXxslqUylQLnYxP27iB3k4d//JoxtV6vXhTetkNQbBNoN3xBbV0G+WlDV+z3SsWdB2FvHiUN+Dh958ishkP6N6dPZlLXIntlM6mCrIx+CvvSfa/eqzYo1tLmC2KVHmYPAq2jfK6zekyUS9eyAuR7NS6e2caZvz3v/sbNusUylMLXQ7bfPcYne07P3Inf5EeKzRLCoBqtbKlQCtgtlN3jegr5GIkHao7g9haYsNPEGFGGmWo4C1JqpxfaWqh4xm0s9cpyaRWu3iNdhpDmqzQKinoR2oJe5vbOjvhHWL2ebrB+OLbFl50JX1wg82S9JE//P79CTHufx+6OZNIzq+sAqkCRjP/ny8zMfiG1GkMnWwCMzti9OqVFGTP7EpQ1WHy+jix6zhp6NytYqRCeHeWuwgu+DsfS4SpnepiQjC7UZJaWMzE8o1M8VTt4tcCp6HX7qXHCrwup+MLpAi8Oz1OL0Hvmr8WrZu/5j/wbOqOvjupyzj9YsaVoc3BXUI5amFhg/xPDNPqVO3S9LsNh6zwlhn4pJ5qDpM3xfwgpXYcNy1tPkHHcf/Tf9x/QbZBigKFx19Mg4wsYuzBW2VKUQuLPN/ixI8wPqN2froHNI5uUaZeBcLUbZ+8iXjh4e2L1Hbq4hf00+ALy8skKx6NnvZfVtydw6kwJSeGlnLSPtr8NQcmykTBx748f5YhRu3Js1+FbKM5Q0Iv17q++UudXCAlxNoN1CL0ynW9NDijED6QeIHXxHeH
WITSvZSYGIAERWFy+UrRamHhpyFtZ7Pgnpu/4+UJhWXaekWZerlWPxH+neoNk8+PeVoHvOuh5+HtFKsQ/myateMfde7ufCJO8omCb92yc2+J++Oi1cIyDgl7/gWWGLXaG+/Vsx5cPVboZTfCZlbvv51cCCLD9msXKTu/gB3OMHSySRVlfCJlBZ/sFt2dlBhsNIKM0Sbhj6UovJ2ynLPzTlzjiVH74KE5idPQY0VbL7sxFZXldHiBlByzM/SIry8DVYWsFkLSYid2dxJiPEUlxzVnGMUlbQWrhWUdKfnNH4s2UHQH05cHVwSm0WOFXhSyPtlveb94qiRruMbsx49RZr6saNK4yqp4GbSQ352EGMRvaE7JScdSFNtOWRItvIDu4yPhLbp+/UnqlvVNz9HylDYrNI5Qp+pry++VdIjZ0e/Szjnj2BJ+KPnjBC3oXVHm3XkgUqXujx2G5gyjKcK1TBVZCFIOLYZO+mvGTF9wf4ZbEy8zifCBwTTS0GOF3s51kqmvvSkKpKSYHWWCukVxoFEXuNuvJKz4BhVEq+7OztNkQKJZ0kYfuFdkgVQptIiT2z0Dwd2Z8f45MUdvgGt9w2GkocsK5ACYGGNccqO3+cxNk/AWYXZ0mjbzW09wgYawKexxsbNQ3527o56MFw/71sXHlwvGUhTYTlkKLa7GtpwYDu5NGE/0zHyOvmW1gZm6Hiv00hWiav+bf7La7IPMAnSJDjQWloUr+gPfMGKF/5uhw7j/bxzVLVNgO2WKUwvLoAW5dQ14cTn+qmewj2HG+KeWcEvqNe6JPfrNVCAlA587SgONlmxlObzzK/ff/xqVtADXrMNh6P1PflXnyIVDot1wYWphCbSgt66BUyce/dER5o4l2ykltFrDpIKVx4tqDJN3im9yEnmY0fD2t/DKgnvyoMzw1Z/SGOwsUayKUguLpwVTgTbRR7oLH7zLqNWmAWZoxXNeaC4TrD4iR0/Oji6yVl588MQ2fM+qs78Nu/vYKQlySO9lQe2UhdOCqUBrteb8GzJKfWtiiSeGkhla8Vzm6Z+dPUFKA4Ks6gcjUImajwsaUd/kv/+YrJ5KAL9oSnovC1ELi6bF0HOcc+yjd1EhIvG29so4yAytsvPs1vqKDJMvAtycFg/HFx9EbKhjcM+g/yLwF1BHq6Jvrwi1sGhaMLU2Pvyoe4T78BNLUWTxIDUafXqUu9KHXlGmcrtViWHyBWGUJ0at9tagatOqw4pDfhgf7KM+g12d7YYKaKcsmBZcVYGHr/m3gf/wvZ8N71dfvVVfGkjvWN8wX1SodbAkUh1y0xZIiZAQg2rse+V6Vi+MDiui8PlFPkWecXX2Ja55USwtVgTGrAfFz8P8R+9tzYQ88G9PzwzBjAFmM6WV3MDa9ryXXCnUNKUiJsaPfYEqNzi/S1ChFkCPFaFvPnwaibrXI+UhztspC6XFxuYa7hPWoyBieoL54L4twx8OhN/qmUl3U32DxFKmxwqwbe+mLpDi8XLYmdE3NzFIbaqObxDFc3sMWBEN1cnu8UbP8XatFhZJC9FQzXqiOY3QHzu0ZfjjJCCvE3HGcEwMLVbgFZylD5MvFtsjbzznffTV71GBxuIMs53SSRAR1wbDQ7JOi9GofnasFhZIi7A+kEYPocQS+6ixxJYhL4jbMjrMEEOrOBCaHBXD48VNn/BOUG/+fF/Ci1br2TuY2TpEQKfDin7y2vuj2igZdM7xnnTaTlkgLUSZogHS9nPER05sOer/ZJD8nYl0MzU8oTfLAJwclV5+8xeCRFjwHt+ocjPW+j7+XYoZ48Oj4XZqTIMVlOZ3OOx8fU1GDL0OJadqYXG06BeI0UuUs+7rScxDuBW/GOQy83tJhUjfExrFgbqt9f7q9RHhxdDy8nxcudmX+ubRF+hZLX4rzCSbj80A1Sd5+H9ENbg/LuaF7jnebYeFg4XRot48yn8yJpk9GH5c+kkP3AWnydbjvdST34bNqNla79+nuY8IL+LhjoFV
qShv9CG6f/Li534PfnhpverPk9p0YbJbY3RUAO92XnGmFhZFC2G5cp1mRW06+rh0y4T/kxneLD1R50CfOMHHQbe1Pjwu5qNRIJWmkwao3WyAibld9G26PIiZnGYF2eAncBfap6wvN/e4UwsLooUo3PZA1z4N+D6B15X8DZOwunwlGmoxJ/ohb0adBqVQFPHv06GPQIEUUdPcw1Vu+piYG6CZgVQ1M7mNRwlacDUgbb1ejMCTH2q5K5AqiBbP8DUfIeoro0sBVlZ6ImMyrJjwR0eOiG7C5PLAOMoL3Y1rWmF78xcOLpALdRDuCbxBzxJz/luN7blnwGb8dmYPD9HpxSAOfnOkFhZDi1Pwp+bSpxPhOjXA88KPzOu0eCKFZmt9vBhFf+fmLgQZem6ZsE1Q6M/VHASoD3/AMGOAzWik4G4kSQsmeaF7YnHsycO/4yL6K4QWJ5oXwA/MpU8nYu/N8SLUq8Kf9ylG2epuXOkKhPmbu0BqG+3JBz17npeZ8SzTCu4HGsK5d3x1CBlb0Klu3aiP7pVxUiBVBC1asKQ3xVaHTKR7WoYXiYob3Ci+ApeEXms9fwrlTV04eIrR5/zKzdobomfdL1MTbKZEIbioZurHE1bQ450P6Z4nzaQ3XLRTFkELuA9yklvUyQ5WyrOnuY2QObKatpZ+OJeO7Epf4uYtkNrIePK5KKbmeZEkt9npIUEITjFDWEn4eDQk4dW/ooQo3ahvPTdf2IFaWAAt+tF5ZpPLy0wAQKf75qhLE9sEMu+09FV1wzlh455HrZuTFy2mTm0uedBZXkwSErvAZfhj72J/LqmvfTz0F39ObaE0oz7vRvCe375a6J4WG0V5POEnXsMuGz10uq8v2UbRefAgByXTCzWnOksb927SwkHGk48S1qZ5Mc9sdea4URS1uH1SXnX++M6dO+njWvv1oj7BlJzg27bbKZ3Tgl2O5OCf3xnG6vFOiWZFq8f/mUSM0qk+8yEPzm/KAinGk1PrEBXMeasLu1rUh2sCTM/NaOxZNYvUpI17ttVC57SAA4tefq81ypg8Wr/aV5jX9D26eJyU3qFI2YXNRQ+TLwCsJ6c9wCDxE+E8g54kHTtOzhH+FDjASPuM0IwwxLJa6JoWcGDRL7LQyvBIItCOxMPMxXnwQdGLTuqJHJ5ryXq3N91kNWlgESGVvXsl5WTpjJbFh6iWe4QZulpIpsxuVy10TAs4sJiCUxs8K1b8XPeTgvugKf1lTsnxUewwefdgPfkSQ4tE3svoxkvL/Ie/PajFDM0TFzKn5LQsq4VuaQEHFpPiwafiT8+wYk6wFQ6gKf0ppuT4GLupCkE4T16n9aWk4mZM0GycIvEYfXOt9T/3JMoMTS1kXln/bLOd0i0t0MCiDS/rUlbwvNCU/qBM+M1UIHVC4Mnrc0sxRuvph1YsbgkxFv9oTZsaRlG7LJuso8kKKBNujxdOaXEKDCy4WvKsT85YhwjLaV5oTaRVuugYBQ6Td4whtPagfUW9ZvVEqtTxd4KvKGa8ck1QmqNZBgU6fmtqoUtawKVQ3LOecSWzHaUkRTKnp8cKtYuOMXmzFIJsQ5eiNbLqZworERMGwy97qDjjyetMclCzDApuwd9jiRcOaQEvR/h4FV7QoyXFtFtpj95hqxp36SYpkFpB1wGBcC7GYLQ2xT67PvgD4tZ8cJYINDSbXzSyG5bUQoe0eAZcjrIDOhK8iMtIin1p+YEOK8a0FCu/+dWd1QrCAmr0KXTNal/5vYvsXvbGIjUlYWApdBp6urmeYmWnQModLVbAUiRlQEd8Yu5WSiRFveSp3mkx3o7r3l8qvRDklny/PnSSLT+T4BC6vvghdHS4FRXjMZN1/AND9ViB729DjN32yfy8cEaLhWVsOeIroWQQRQtMHjy6IXrJU4Mu4mIOWcjCq4+8/nCOX+8HN43zcFwehtBhuQ5T5n87c5DG+U+9LKpbr6+srERC2EgI/58vf/YO3RZ8C2qhM1qIm7d5HEV3
9XzVuY+euaXpyIiDS6OxYTRYgY53TBA07vU2t5Wb8PZLUb//0j2Gv41mWQWVUBLEqkm4q2UrcSZm2LHofSNLcx4NgpZl797xBbnMxRmNA+w77v1O/gIpV7RAtdl+tOqbrzqXQY8VeqnWpIu4gGHymYhaF04//abBdmoI3bfCEmFq8zlyM0ugZ/CNzEdfCckRDjTCxr38aqEjWqDa7Biq4bJ1zXJosWJe63B7sou45AIpogNUfzuFiiFwuE1qITOCbVQIQfuSFi4rR1nELfi51UI3tBh6DtuSzqPLFp7w02JFni5i58PkM/H69wliaG6nYDEEV6vIxW1a7C58TDxxXvHoh/vh4SDCGHzr0g++y49MkiOZB5JbLXRDi34oA6SxdYVzopqs0JsdRc4DCXqKyywEueWxR04TzPC2U+AvtlAxBN22MkNpe+SzQzyDf3yY44FHgpmVeCAS8fejAtuelaXpmBxZs/LIeSB52ymd0AKN6DS2ruBWR4sVBl3E1HNSfoHUmy/dRzqNRx5DtlPbwLXoAuigOYXdz+v1ia8MhrWko7WpY0oY0M3G9fj4XSkv2LNzcxVIuaAFGtHBW9f16IWarNDrImbngbgeJo/hHmY79bpqO8VO+pBhPXgdH/UFzfWC8ahpvmKFPY1B8LLsBiGqRxTMXggwxXn+PGqhC1qAEZ1YcRV9YrRpQ4sV+l3E/GMy6XBoNg5mO3XfS1nbqQVYDDHVcCfCghxuG0Vl8dLGPjExRFFfdNDAsKgml58Hkqud0gEtNmIRXfuK2dZVDi1WaHYRj4m7XytTIEVvp04//ZhMtwXT27AYwnWzJqO9GF5wue25y3JiiBPhE5ckgbdwHkgetdA+LdAt1FEsLMeLQ/agTsWHZhfxHlkY4vHilHUTmuGez1PbKbFui+aTUDGE62YlBt5JBnulSI4pYYkxJZbjk1NNGF7My2YeGbdT2qcFuIXaA6qEcIeSTiW57gEwGfUhlZqs9vBjj2QHGugWCtU4+OJnMjdBxBeSOqiV5HKyXUlSpJYGJH3UPipDZjdVC63TAtxCoZo4nLDQYoVe9Rml/PGo1gSpW958mg40vk7+FNxCoWIIX7xJzzAiGvxki1vcwFTrS0gkuZVkWegA9SYyFk5DtdA2LcAtFBxYoBquJiu0+iVVJOptPlOtiSBfZwKNJAQHt1DzYGc934HPDLyLn97MBr+EGANhDlCihaxQrzyYfF9xkLdZO6VtWoBbKDSwEI7JEUCHFZz0lw3pyK4UFZysRuu2px8JQnB0CwUGFoL9rbjQX7UOrcRBg7+Tkm1YmWrCKBiRBNsEjNRCy7QAt1BoYIGOyYG13pZ2wQeU86vkBCkm0HjksYfBLdR6zKeI9rfsHMh6dKHK4ivRQ395RaaFrDCvvBS/B+W6aaIW2qUFuIWaBMsP0DE5eHWtdsHHeoxwLobJWwATaPztv3m3+qPAGQvh/naG6CLuC4tesT3rTLT9+pTkMe9haBFEItgSZ6AW2qVFP7SFaq/BnmJUhNLp9tLsl4SbMap79OSbT5OBxv3MaGQeaMZCvr+d8Bsp0lpB4ZxOwW9FopTg3KsA9eGRJHAZmQ6KacfAJa6t3TZmlRaiyUMCrMfshIpQDlmh0YxRfoGUHPdQIfhnXstkxgUssIBbAnrhcxSinZSsvIODRrOxrlpokxZgOXkuJ80DL8PVZYXetHPrw+St4uHXfwpjRj+mXcNJVo28afu3XtHhhVazsaZaaJMWpyB5CXXS4JgcHbVVkjyVQHPauRf3N0svHJRjaPnfPfoUxQzuFODwQ0BOAO7Ah6s8wzs510cIWNlX92Lz7mIcbZ7UsJZFWoDyH+ikD4H5Ph1WaBWS65adexvYkvu7MxEo54ff/5CUbV98gPsQYD4JjBc0Jm5H61s9s0yWulqrTmFML31hkRaY/LcHc9LoyAm4E0N3zuAezSOLvR3XKXu2tI4T8Q4/mxm9WD4JLTrXMHns9aOjcxXTznWPLO7X
LI6yR4sV6AEFiz7QgEFj56rHCt0pOVMVaL3IABX2HX7gxdMkM9Ld1BhmTjQ61EgnpXvhkBfZR+fqnuZzVNeRW6MF1gqJarNguI2P6dRjhe7G1R8EUlkdygcX9lHMiCNwMOxDw22NdBIZIU4EglTWOAO9GDGo3tG0lzVaPAMt76A2Cz7ugjGDGZdqsEJz4+r/VIj5AAAgAElEQVSFFRWrimIgDPt4ZmBhHxpu46czMLpJ0N13WX615hRIzbAigC1aYFUfhzDvC+604EGQ+qzQmpJjsBoVjCckyzbNjH/4T6GIARx4p3E6A6smLmW6C822St2wIoAlWgw9h/jV9hUojgZHTmj4aC1W6E7JMVmNikXWdHKKGfc/qq4OQevZ8KCY19j9LrxFydV6bZX6YUUAS7TAUhagzoGNnNDw0Zqs0JuSs6fyB+oNKcI+mhnvH868GBXOcYmQY0VUBiLs2W7rtVXOGzpyO7RoQfI0qHNgedY2dhqJDy1W6A31929TBYtnaVxVLzMPkKrth+9nfF5QOMfTeDwrksN1eV5oRn3Gx3XbocW2JrCdaWMCBlhsA4901mOFZjhXqZZVCU5AT9J/+ev/XCza0gCHR8HDWgQ7qLQZfFp9seJdmNapWaFFHVrfj0IWBUVCvABtsomen9HSTW6Yr0ZF4iS0vq9Znj/8/v1EAL5TFGaAFVO4GMI/6OTUwUH6R3pFan53mGm1vw1aDC1fAd7kGPYgY3k8ePKaXnUg2F2RvItKVwdGwNKs0WHEdz9KMIMPMw5hnnwSPRVDwAr6xBJqhqBeEs+vJi91fBrUZQFuobDOMHh0lB4rNGdH9Vc8hxdAFW9HZkqf98df+0zKDHozBXpy5GjW6EqOFRN0MzhZBKJZ/pxrUpEFWixA+0hsC4WVFeDnimmyQieca5d+9AsEIN5uBVso4itSmiI3U2CVJy6G8BWHbNdrmr3Q00JyHhRtgRbbkPQBtoXCliN8NXLJig4IthtovN3P+miqoPDDB+KroMACl2YFASJzRmhaYq5d/pzLkeenxUbEWOAWCluO4NUIPytGX/qrxPRZNaB4W9hkcfej6Wbq9Gt3w4FFnuOkPcylralxM7gPPS1kfd7W+ty0wPLb2BYKW47g1Uij/EB3otpYtQtmE2Dxtqx+83FiM/Xhf8PiaPw46Qzv3LOyskIlLXQLPfOWqOWmBZTfPgRWTCHLEbwa6YjchQnixQKLt7ktVIrD76cdfb/994C5IXjYp7Nn1S1py729zUsLKL+N1UJhCVS0+MAlKzogsx0C0ggVfap3v5a6DGmWLzEkHPbhCT/N5hcr29u8tHgGyW+vh4IBaKMFHxStkQbXZIUFH10QMI3wglIyIVzG/RmFIf5roWGfzvqvdUT0ISvb25y0OIHsjrBycqgyU+egaDQNrjl9s0MkKB+QRgh1Ef/bX/lYqtjKawnRZlZNVugd/GZje5uTFtAInDWQGgJtg2EXoGFLvULyTpGgGqBGOA/ZfX1zKnUZp1+UBBlw7QE6LtKH5hFXdra3+WgBCR0ZER0BqOULPtxeo5tVr6QgZ5qoUIAaIWD3MMv6+ItpkCGaMwW3buukkzSPuLKUYc1FC6gYah7Sl6CiD1A51+pm1SspKPnQYS2cQpwglGZNsk6Hdya5jKe46Bs+sFgnnaTDCv2ZmlLkogUkdFxAgnIo/IDPc8NLpnRZ0QlVUBGGlhGNEDIpWU2eVtmyxEAPLNYZA6nFCou9w3loAYmzU1ASHFFwYXPi2rkmKzpGmPUBFUOtRy5igvJPPCUkBhpu68h+OqywGvTloQUizrahonOomxU9GEnjUddlRYcIsz4WkCcKdNLsjjQNMp5Kgm84D6EhnGuxwmrdQQ5aQOJsL7IcQd2sqMyh4aO1WGFx51oEoIZJSCMU2fPuhBgvHo5Maf+UaB1WjNkN+nLQAqlCO4R8MmiDi8ocGj5ajxXVOWkYwQlkS7Mnh5CYEOP0zpbGqGYNiVCvOsTunAlz
WtQRm0LLEZLetn4GpSYr5qtyLn2Ey4PbM3+OJJSglIW8NCQhxmc+oXPyJ3RdS5cVloM+Y1oMPQcEDVDKYgrZjKFnUGpMnLiC19dWr2m7Vqu9cX1e+uM68phCKYus6ah3x00Z//hXsXySxlBaHVbYl0KMaYFUzs4jGiF02i2ax8szcSID1TvqKGxIOH/2XeFPoYQSVNasWNhiVepj3JEAEjOiA+/0WGHetC0zr+HvQao4tBwhrUdoHi/PxIkMWI7nbCBp1Tm+QSDAnEIcISKKq0+BeT+qr/0we+qaDzgA0TtDSfcAMMi8hr+HqOLQcoRUqqEzu/JMnMhAFbsryMbO4xu+Sv+whaxZ/YgFgAHzh1+LIgzVWZUa4zd1WOGkctOQFi1ks4IsR1DYB+ZP80ycyEAlD91epFuej488P5v+8CrggKENLhQb7vlf/ymUpLJrzjV6vPXOUFpxYF5DWiCZPGg5QrZQ4AFKeI9357Oi0ZgdvTZOU2NxNGJGC1lrkQ0udJCeP15+Z+gwdmZdh0uzemcoOZFCzGiBZPKg5QgpDQHH+musRhqJ1koXfJx4+XMMM2Z8ZiCZPGiDK9hCBUdur/Sk3wiV80+EEcaL8tfCpVkNVjir8jejxRPA9qgXWI6Q0hA0Y5Fz4oT02moXfMyODh+nmHH5a+8izhXZ4NJbqJ65wZH0z4wMRkM5IuX8cFhB+JrstfAOCx1WOKvyN6IF0t8C5beRRB6YsbAycUJwbQcUfJx47xIdaOz6tupzIRtccgtVH7xcY9E32EMmncLkniS+wMM+jbN8HMrmRrRAUqhIfhsp9oeSfa4mTnQEK3zMDr5BM+PYXE/G52ojG9xkC9WzxHMixPC3CScQ8OK0WI+CRxNppDZcJpNMaIGkUJFyG6QWCnS+uCSuN3GiQ1jhY/v1u+hn9uKg9NR3pIAzvoMrw2JKBLhrgDB7wIunxH8PDPv0qp/dyeYGtEDKPqB6cqTYH3zc0ePTtVSOziqZ9eOMt9jHdnpG5DSQDW6knK8Q8cTA4Fw01Gxlbjie/TdNjDkL4gvBzBw47NPo23NbeGBAC6QZEnnikWJ/bIA5PnFCo2+vgyZ8pBj9FLegXx4cZT/aBWCDGyjnBCmm55iziUajnw2kxLtbrEbpjPZCy9Qcp1j1aYGUfcxbyvaBJ7PCGyONvj1HeSLXOPW9cY4YrNNAVHH/mnpCihGWEwG+GnqMvnryHT/hfZq9DB7tpZFOcl14oE8LpN4G6d8GxBDwZFZY/NPYubrKEzmGt2ZFBzIeZyKN48cejD8aooovX5lIYorhuviio81roT9KOPO3fHfxKGdJ0OjwTth9ilWbFki9zSEoSad+HUybhcU/XCXsVFaEa9ZosIqPf2GkxuAtX1L11iO1VXubP4yCh74lmaTlz7tbCa6Kz36MGjB+nFKj4AYYfOCd+8IDbVogNYLIIq8e8YgeK4YOeNQ4m7VTWRGtWT0D4cMqUFafnD4FrEeHvhgH78JTgAOEMXk94MVw8J3X4kbWXyCqaeFwG++UKaAcR5cWSI3gHkjAVToUcP4NeBiJjo/uVFYka1Z4hu+S96/RaT7S+NwNlQHiw06nM5If0d4o5IUf1T+eDFcjwm60ckdDOC+iSE2XFlfVUUMbKIpF9rfYGTDgecW+j0YPsOhYVqQ1guERdMFD3bPUxzPjrhe+Kf/89cjHDEjzHi2iMiQ4v8g/4+61lBbfSEyJhtu4cF5I6aYmLZDiTGSoGpBPwupm4ZFq/Trnp3cmK8g1a8V/WAejL+YGeGLUPnhBUiIyMx4FFVlGIkLDwWgb9ZWUFq/Gc3JQxVWnZKqI0k1NWgAF5chwTSCfhI0MhmUOvIS/c1lBrVl+vN2XfLXChd8+jh/7OPfx4ytHsopHqOLaicuhZyKOLn41CrrBtJOGRFhQmb8eLZADCo8C4iwQk2NbKFTm0Opm7VBW0GvWx/1nlQgixMTwAo0vkM//ShSK9M1kW4kqrg0803TrRYIW
4Q+wc3P1SneKKfPXowVwYAJSWAA0fWFbKHSkGt7N2upcVlBrVihG3UVmHFaikOEH32WYMT4yE3RRzAzHwpXCVbC99QHj3nsgZcW/DL4Nb3DhNN5kc7mYMn+WFrPXRmeFF/pAZnIBhQVAgxImYKA7Uo2ERa+98b5Fg1yz6nGYPUd+uDhqWBkdvlSTYzwzqmjxadaHgl8b+Aq9h4I3uDqFhAU11bO0eN77fC88eEJ88Tb1I48UFgD9kkBrPe57NRIWHVUzS4Ncs3pS8YlSk2ZeSchS/9orElZ8TuEquEPf4nT47/7viBXvR7bEfAAshhQ4loilRZjOr731xPP8tYizuKL2m0C/JHZSDGp3fNBgB7OCXLMmCOWJqFjyS9XCDHhIlp7bmZ5XD+c/91lgn0yFDGnh+e/+tV/wSPHLoQyFbnBhMaTIYV0sLQjnenaUmecItKoimTx1vI2dFIPaHZdmO5kV5JpFdUgMpJ/PV0PqAxRZnmWZcfy7wrJAAswWaob45en02+gGV6egrbixRCwtnqBK9hefILZTgLNAsnRAiSB0UgxqT1ya7WRWkGvWCv2gJ5FCqIaEroQgyxe4CpGBGUlxYAC6pqpO/WYqYKEbXFSaLXawI69EbX/wLPlBLyUxODChHMjkAfE2pEKhgQWeKNrTyawg1yym3iPJXkRqSFoZEqPn+nmWGYIujdig9DJDZwr74rgEzFjAHRbehUWOsBMKtLPPX6OUisWXT/itqkAVk3phVue34bM/gavw1aiq86BAkBtcJkMR02IsvoFBZUgfFVhPzAhGGHCdRwFoNYTxTDHb0IwFGvYVnU2S5i1OvEwNrrt07bu3KVddoKQDSGtAiTzU7nC/V2ezgprFUp8mq6BGYi0qDQlG/e8PMgaYE80x4LdTjBpSZ34h3EWhGQs07Cs8x5qVzpsdPUs5jYGlrC2nHymrP6R6IAh02B5qd1gSH+toVvCzWFYipOv9FLEwB2NCuELAuTdqAlymJiVwbTKjJAdHotgCK1HAyzwvFJ1jVWW5v/kE5TT6dp2Vl14CZR9TyscUO2wPtDtc7D+5/FwnswLY4LavJHaN9ds59porA6KOV++uDyfbKXGbTI9HQGLJBGv9NQ4WO1WwQYHij+2M00jbH2kA+yPgmUcmqqF2h3vrq3d+hR6AwV2pAJhmNea4SyaWoh8tstXoYTs4pIZAo2s1ag9KUAjBmqjnP0evI5ev86lQoOxDPRAEmo0K2h3urZ/vcFYgziJVQwjtaJS+xF9EeqKAffrb3CDBgaWfQNQQdDoq2lXZX4JCCNKi3pzqmaGFvw9GaKcBNHCr03TYAd2Y3eGanILFP/sABnel0vkguTsiF7dYMJkJHcVAvVUfZBs1PviPWa1J8etAjzs6xGiqua14i4K0iAw/+hDtNH4wmK7+wHRNdZpuPWJS6KLqin/WAUx5TKVzWlElsnqTyR44chhBJrxnhmVG37AsoxECOk0ab1ItRyHEaEEY/tD1H1BWGh95J3r7SvKrN0iTiK1AbRYOt492OCsQZ5FK58zOKA0vyGB6KeWFkBnijEYA6DRpndqQUhRCjBaM4WcWP6DM9JYXaVxR3xv1BgkpnEXtjobbHVxKHgJxFskSweYZkiqmMWpZCysKk8opnhn8lMEImEaI1yiUE/VBtBAY/kfP0nXJd116T/UR1eIspHNAUpWf3cYmX5cR0NkF5CwSYzDPd7IhYkqfwzEHRPltz8xFhBngyVXgwTteFFlO1AfRQmL41WP0iSNvXc9K96nFWWRkCDrYHzxuz9+6ujaxY2g5Cw9zSyMxhtP8LFfAGcq4ZEy+pvlpLhHOMmMSKtxBz5P2oj7BMbJFAKFFhuG/zTiN83c8K9t1qssIET8AneKNyxyHOju57QNwFmqpQ7AiRZW2ye0MBvvXh7npOhQzsL0Rep50eVEfQotsw0/MLNJO44ORrwouU4uz0PlJ6PAoTObwtq6dzgrAWQC5IFFCKeTFcPRVQhzB
QLbpOHDH2sfQ4VElRn0ALQDDP/HTT9KGuvz/+M+oep6RlIXd4VGlbV0tAnEWhn1fYUN49MgTZaA9fLltqNqCaVZwg1tmoT9AC7Xh/RTqxMwd9Izs8xt+lLhE7QqQ49ywmvMWmMcrb+tqD5CzUC4lkmq2lTS8YO7fCu8yLg/WoQ58dINbakmzmhaA4eOw4dvX6Ujj+Fu3x5coJzFD5xVjWyh0aFdvpycsGn41lNpoSmchXbOC/EWg4XKuXDTDc/znFf2uPsANbkkJiwhqWmDOIsbE20x1ct8u31TqFQs5Ph3bQqGBxfqOl2ahaijAWciJE+S7V8RBQ9q6RGwTprNT4PAGt11umZqSFhrOIsaNEXo7dXzxhjJuQEoEsS0UGlhMNZ8pwsBuAZTOqotyxuTE6Qm2R1LLJ61LRMHtZemBGOHbAdN9TclQpmKgpIXaWYjitYnrTAx+6b3s6UPQkRiQRcHAYrLzpVnIWQBFOVlDjIJt1Jy87TLeSo18nOjLyXAZmFZVekGOihaA4WU2u8EKtxveybCWlSMxWnBgMb+83CrEwE4BjChSrzeZQ4yCvu83MvSSiagi940vniQGo1+eEUcZYMZifdkFOSpaqL101vGRE9NMXPapHwqdBpLfxrZQ4CGUHV9LHgAYUTSlXG8U1QeBu/hs1kMQTp6qHf8J75+Dyf3uGxbcaTBjUf7+VkGLjYizyNaYbmcOix4XOA3k+HRoC4Vmisp20naAOAvlHlhRfRC4izuyX4IsuJ0jzizmejOwjEW5IlQABS3UhgfmGqy/4xgTg7/1Q8rHIvntjLiQAJgp6i/bSVuBJWeh2NYEmySF8LoStOFEhSI9qcsYoYmBbYNLFqECZNMCMLzKWUQhec+nme758RfSEhGg3RUrJwfrN0tp+LIPK85C6agDMYqbhsDg54O7m1Spp1EGSQyw1r9kESpANi2sOIuYOHWWGce/97XgB9iUc8ANgHavgJO2ASvOIisyjKw1XqOGy4qwpxl2uqYcSNPgCTHaWBK8t7lStmUVtLDkLFLirAyzUx1fOftVZAQONPkAtPvNUAnlAzhWAXEWKpNd+J5yF+WrIUF/02Ximz3pcfdh8I3NgZyqRJI1kxZqwwPOgq5qnuCPNzze90NVzQA0+QCdv3mh8yuhfFhxFpPK+zfWfNu/SZnJ68CVL3GXJcQITqfEig8qkk7KooUVZ8Gv8/Vhlhi12hs/kjmwsB8ZagBqs+sLn8XlBlachboT48qVtp/KHsy4ZCy4xYFkxWy2EmIM1LHgsF2RSv8sWthxFoIXmRAeFb1L2jcPFSyD2uxY6Zq4HVhxFup6KT/P6j/bA/JL2lGWPJCs2B8mxPgipBFWIdz2kUELS85C+CJCYtRq3xNPuVWW3/rAerxvknDbW7PUk00BZ6FaSQL5dkb0vKeI5xPV6aA7RnzE60WgurYS4baPDFpYcRbSoCAe63j8x+hi9PFhzmlAsusY1B7cXtMUj1ktAFbv24La9DacRSDfBm0X0k1uOsTIv0x0GmU0j60ve7B3y3/Hf1nSvXkBpgVgeLWzyNJeoyld5+cmXv4y/Sbp0ejQ5AOwbvZo86dLMHoIq7S4asNZKIWMSL713700c5HKf/79HBRdMkHMY8vA5L1/ejzLgA6B00JteMRZZN6amWQa0cTcMfp9EkOyod0RVl27p/m9MowewiYrWjachTpfFEXkfswtO5R4T1qa6acqRoQX9Ydmz+ZF+0p5nhymBWB4xFlk35p6UH7eFxCAY0ZtJDhzBJp8gKW3J5ufLcHkMWzSwo6zUF0Rb7L8xX5YfAnpyv3L+kSloJPNC3V6HpsIR5uyI5HdA6ZFAc7CM+ptgSXiJYbvnb88OIqkLLDq2vZykzsjrkBYZIUlZ6G6Ija9rzGJ3QDhyifCvLboyff3WT3MfB0OpXpylBZWnIXyOOLe5o8GRCCKBrhBRMdfUJ2Ii26hLjQvCSxSGCzSohhnkYyB9NURMS1SV56cmsHz
Imw9Ci8YlP2xcj05SgsbzkJZ1eFHdIGsR9pcdLrh9Fxmbx+2hepv/lzh5iZhjxXFOIv09gWiofCa1JWno0BYXsRZp3AfJbmRnicvK9z2AdLCirNQttwFc1iWeGONjvBvfGBGygy0QemLRRubhj1a2HAW6kq0tLQgUGjFl8R3mDw14zLt3ROpKjjJUhKjlOzJQVrYcRaK3HRYFzLhv60Z5kcrAmJImQE2KJW6HNUs0qKlTvqrnYVyzSJun4wWslMzqBIQons7iMkl76ZcT47RoqUuL0KchUJYjXKsI6whA6xEe9UN1Dxg8yHZJS9HNYu0OKVWrAFnoaIW0Ygho0U6xIjZ91JNFukfWmF+RlxUsifHaKE2vAVnEcvmsoAuPqxqok6drsAexoNtocpejmr2aDGkHjSHOAvFHSYbMSS0SKtAJYfW+6CK/f07ym4MWkHGokyJsAbSAjC8BWcRVZiFKVBR4fJEmMjwq9Soc0foSUTQFkq5HJ07sG7dvluZb7YbjcTFrCbWuSGoAEZgixbFOAti/uZEMOuGF17TeLuHkQ/TlDg1ACe47BX+hcrMWISAaFGQswhWNOkh0R56wxU+jNIoZgyvpC8DbKGUy9H4pnUedtDf3OWZYjX+YpWwzyr76xAssaIYZ0EkUcMTYGp97B0i8tvezZkeCW9P38gIcb49Vftcl9RGlZqxCIHQohBnESkh0kOiW8GtmQt+FLuSnqV0DzsyR76MAsrlaMe6AAepb95o1Bvt+IvVRnS+1qUbjcYuE9NbokUxziKtra0nnoC+Q1CpGln7nLwQw4vJe//UxJxWgdACcRbKk4RVhg/lvwnCA3C88MdEBrwgeiGJc0cu+78AFUxNqZajgyEr1m0mv3lptnFsthHvmBJaeNuJxloT09thRVHOIvbBE0Q0TT3OyBAjUg0hNloUL8osak4A0AJyFiqTqJ1F8EfIbTrrXMMkayCIU4xJJxFdXoHKyeeVlZmRs1i3j/zme41Zz2HEDCBoMez9wAB2aHFK3ahowVmkYQOplPcRMqC63ZVRQ0jZhMxsrC+xqDkBQAu14dVmVY7lD4/fGaXeG90BFu+yLnM/IbrnX7kXKCdXLke3BpTw/0MG3XUvhvDCi+gkRYIW3jdNTG+HFuqZwOpoS+ks0tM/Z6iPQAiGSGsYqYYsUS+UJvXGmneYGNMyAFqoDa8+Bk/lTqJdGCN3U9pd/EeCG8Pm8ZLuvnGB3se9208pjLI7YIT/n/3pN481Gsd8MSqKryviLYAB8soR5YCziB+BCUZi0lI6yIl3PYwx4p1Be/mPSs6yBlDTQm14wFmoYo8wZGcPiSazF8lLBFlwXr9NiDGiKCQEij58R3Eg2EkRwcXtDf9WrsYUoGKLGyamt0IL9ZqF9GcrVrW0xFOWkGhnTTmPryE7w2QvdLT0LGsANS2KcRbhxlR2SHSL1M39i0RNMHHb62VVc4tqOQr8xN7AZ2xJvzsb+ImLjcZ7wZcJLXbVAzeiDxusAJwF1p+dfUHyCEzQ/rwv9hbIKBbqmEomsxHHkRXQZgMoabHQvKJIj1lwFskzv7IUnxI9vURVPBFzdHxaCAvM+j8bNrtmNrf0qlNFPh821Wp7fXYkUchwlMqrR3Qg8xbvGZneBi2eUQa6FpwFdcHo0mByindSeYOc6MbMJ5oQvdB8BbTZAGpvsXFZcbZyfmcBNNylG+SwNlPgLvx7E0aEGbwYa3Lj2jhsCfZQtXNU5mJtapHANyS0mF018hV2aDF0VWU6G87CghKJnMIEiCFFAQi5F7J5YcFZqFWMVAqJRSfeXwSFamGOSNr0hUR043Gs7We6j0TfvEgY5Hb/G0RsYQgbtPBrmzN5YdtZSP6IWpxFjj1SiyFFAUnnebzI2DnacBaqW5fubtPEBsuLKGUR8kLWgI9EdMHm6VytRsXcSahdu9GYvVSrDi08XlzIWIbzOwt1qhYRZ5FtVul1symgmqiF5+SRnTrnDzgLlXtNIjpS7WZq
D2ItJKvpC4rodsd5vK1EzN0OfUQtEGr9WKIytGicaq6R2s+Cs1BvkJCjRpACzjVl182mwArLh05KeaHsz7bgLJKlhpb1qAgi1ULkTV9YRLcldhL7fbcRfi9N4/lBd6jUVoUWjbqcFzacBTC4Qhk1INUH6yuzhYK78zxeSJ5dZWWB0lmoY7HeaKlh5EEy103+FWnT1wUkoktCi1CoDfPcNwgSvBeUBlaIFh4vJGqhuiIDcBa5B1dgaY1DlUhvR0BHHHi8EFpYXYamchbq/Ghyc+eYN080dpGjtkeZn8XYA7UeHYxDi1otzF/UgipBQoSd9dN3VaJFoy5RRZTPtAVngVTOAmkNIJ9UIHSmCooMlL8MTb2QJBHdDPPm00ef3t760QUfdIOi+Na0RNDfTm31/7FKlXes+imMStFCohaqn2kLzkItuSBRu7eFKrv1iAROC58X3AOcv2ZZa2DwIJkbvZw++YyT9ndRPC2gLVSoPx0I/3kg/acD2KSFWC3M7yzU0ivyyKtPzajWFkqLFh4vuNDOhrNQWYweI7gSg7yEdtIT/hHHF9mXwbZQYbZia/jPrXT5h2VYpYXHC259seAs1A90r1piApSqam2h9GjRWGF5UYSzmFLngegVKzoundGi0LoCMre9N5WiHMAuLQRqYRHOAmizQOLt/kptoTRpwUmBBTiLttqd0Gta0gs5zFyD1RWQlVCEFOUAlmnh84JaQGw4C6VICGTygHi7Qom8EHq08HhBhnZq75jfWSjHejFvg+gIGKauAQfgEBE3IUW5gG1asGqhDWehur/qK6Dgo0KJvBCatKALB/Mniyz0UzLBB1kJmLYk4d0tO8gwe0saZ9iHdVrQhYNt5eNowVkA9X9H1cFHdWqhYujSgpQC81cWWGi+Z/zJIPXpkjw43t2yj2TCDpdSlH1aUCq60nIWnAXQkwecoF65LZQBLXxeRI9hbmehnl4DeGCqCI1p+orz4GNwd8s4VU2+lR8WZQ8uaJGqhWo3a8FZAHEfcBzJhaptoUxo0ViIJI/8zkIdNwDyH5VFX2I+XuguNOQ/Isddo6qi7MMJLRK1sAhnAZR97FHriKhyXiQMaBFLgYU4C6X8R2uIbB48LKTVyKAeCTvzIjNAiUMAABe4SURBVNAksQwXpGjEaiHiLBQCEXB/lc4CqAypTEceCRNahIWD6k1jfmdxVJ0eZW7e3HQac48Mh85iUiODupvaNtFbKstwQIkAQeEg4CwUz7SFmnSmf1uMo1XpyCNhRIvG0LZmL6D/Za8U6sUG6PpCBrHoyH9+kL07/ZJs0LMN+4SIsHF5+TtKZ6FUO/LLjPSYcwnwsK9ImNHCD+2Uj6xq56l2FuqYD0n2acl/DA92OFRobbMhxcLyberjIXM7C8ATqGtHqlb1EcGUFo2f/LOfVDyzCsMjzkLpCYBk3/y9GmcTsrumzQ6lKMtcILFw222KwCG/swBEQuAGVqtwNoExLUSFgxRUhrfiLIBkn9be9SA1BIfJeVuGXSbQmPykwrr5nQUgEqrFWZ2wr0iY0yKrVzKwSV5nARReAoNY9PautBDltljQKg9YZLQZ+7DhLJRxH6DfVjBlESAHLXxeyLeO+Z0FstYA5Ztae9fdzKbJZbGgTRbwkLcZB7a14CyUYYNav61iyiJAHlowhYM0cjsLdWEI0t6iWbHMCFFhsaAjhdYiB0SQtRlDtlU6C2BFUuu37eUKpiwC5KJF44R0spoFZ2Fh6LB2uQ0nyO5zJ0XZY4AE8slq+Z2FekUCQvLeasxhFiAfLeQTB4twFg7KbTjnwLkPe7D2+EshajMObat46pXOAigAVIfkh4DBpyUhJy1EvZI+VC11NpwF0LY3pbl35Ys92GDDImw9/BmQqIWqpx5wFsoVCZkrXMmURYC8tEgKB5lPrDCb8qFXqxhAJk/7DBG+NNChQmvp0c8E12bsQ/nUA85CuX1Vt+3tqVyXRYrctBBKgSqzKXdIgIoB9EJq54r4QvKD
7hRaOw++AiIVvRhnoZwrXMUSwRj5aeHzgt1oqsymXOjVmyyoIke3vYWfgONQobXy2CvBq4WFOAt13FfdeLtmhRZB4aCW2QBnoXzmoYoc3VyRoEnVnUJr46EHwJ1PUoSzUMd9h5p/6cSqdmCDFpwUaMFZqE8CB2oztXNFgpEG7hTa/E88BkYtzO8s1OUHQNxX4Xi7ZosWHi+OprYswlkASVb92sxzAtfgTqHN/byjSNuMfVxo5nUW6g2Seg8s1QiTd91ejTdZqz3+16u7oq+SK24Mi1/CAizRotFPhHYFOAtA/uvXP/ZcFF+7U2gNLW0AUi1UpqeVzkLd4qJe1trLv6e2Sjs4O2HXbPz12uAC8tDCVSc3pmaPFoTkoTK8DWehlv/m79UfJ8EWCvpwp9CaWtoAhFqocrN2nIVqWVsvjbeDUxJ8DLeD4deXZhs9vle4eDt7OtulG8m11mGNFmnhoMrwVpwFUFyr3wspGvThTqHN96TrISkcVBa+Kqs61OUH6rKPDI0wfdR3BYd3rjba0der4Vm31IHoa53cGZu0iKVAleHtOAu1bm5QWMAcUB/AnUKb6znXRawWqtYsdQmgOppWl31kaISEB+jxN0k3ktPZarMN320QtBimzlewCYu0iCSPApwF0MBtJHQIVSdnCm2ep9wAgVqodBZKKQNxFuqzwuUaIUGLekgL5hwRghaeP/H+ezGINm63up+ySQufF4eKcBbqEdi6xVAhhCNnnSm0eSxtAl8tVD316kda7SzU41qyNELWWwx7oTZ18DnrLY7FIblNXcoqLfzCQdXeVBmwIc5CnQKXCR1ZEO+XnCm0uSxtgqvNPyjAWahFwj1ZGiEVW/hS1A3/ra+uJid6UrHFjeCET+9nF8ODPm3BLi0arZO/+ccKoyn2P5CzUF1hdlyCOLp2ptDms7QJ6k1FHZkNZwEM0skqhkposRorsu9F7iBiRkKLXfUgJp91IdNapkVj6C+ylxOl0dTOQp0rMhFnazIt1plCm9PSJshsM25ZchbqQTpZxVDEu+2Jrzu2ujb4RrBNIvMW/jmfPY3ZVevVVbZpkdkracVZAMW1JuJsTeYXnE05yGtpE2S1GRfkLBQFnMlbXUs7gWM+HfwgI6HF7GoQcwyH/7ab8bZOi8bQMxnGteIs1FkNs64vcRThbA5tbkubQN5m3LLiLNT1UkezCzjlKbqL7UCr5Y643RW6ktnqhtwh5D3EVpyFOqthOGVFojm5UmgtWNoA0jZjO85CqagcUgwoYmjxHpGaCAkhOvl5NXAhFnnhghYeLy6IjVOIszConA0hORLM1RxaG5Y2gKTNuAVM3bJx/ucFRUKJocUx4ms5LfwL274sZQtOaEEVDhIoxlmYTjW9VeIWXM2htWJpA4jbjBHTAs5CPblCsWaxm6h6ox0F1JfagebE0CLhzdrq00IycVBV0wy4AvUcwUxVPAuy6idXc2jtWNoAkomDSuMjzkI5ueKPlFahaXGsESlNq+2QIKy38HgT1oQEupQlOKKFzwtuC6ust1GvV+oqNLNMng9R/awPV4fWW7K0AQRtxgU5C3W1Pxdyp4XlPUHigqVFkuUWOItzR3bvOLB1r74u6YoWjTof2imFDsRZqKrQ+o1bhGUH5blSaG1Z2gAitbAIZ9FeVk6PFyhRYdZibeQMuNji0qpPjLV8wH3Q3/4G2LFf8944owUveSiFDsRZqLQSw0yeD1H9bGBdRwqtNUubgFMLLTgLdauq+ZplgN3rCOzTI4Y7WgSFg6RNrDgLVRWaYSbPh+wQblcnhdmztAmoNmPE+GqRSVl+oD+KxRzjO9bR2KHTHeCQFowUWIyzyDFPQqLPhj/Q9cIALFraBLRaCDiL/CdM9hY3tn98S0SG3Zu3xMTQkBNd0oKWPApxFjnOSxDNNwjhSKG1aWkTUGqhcoqjusdFOc9ussCZs5sDIhwI9r7ntm4KebEF3go7pQXJi0KchSqFmgV5dyo/VM0KrFraBGTh
oDIssHDCZIFnvBxZR7n48a0hLzYJdwMCuKUFUTio1JDsOAvz4UMyfVYuUeWEXUubIC0cVGpIFo4jzurJs4xbaVZ4OBeFGmDnjGNaNIYiyUO5lqgPNlc7C+OyDx/yh58f2GwFli1tgkQtLMZZFDYwbYsglDgS7qS2QJKMa1rEUqByLVF3UaidRa7DbGX6LH/OpCVYt7QBIrWwCGdh1kdshGALxS1xt4ax9yZEVHRPi+CQBQuquNpZTJmWfQSQ92w7UmjtW9oAoVpYhLMo7gDucd8vbOLD6/HNsCJVAC0ap5pr5vKnUAFnYVr2EUCqz7pSaB1Y2gCtk82pYpxFnjVLC0F8Lax53h9upHYoXX8RtPBCu28V4CwMpmsSyJoHJUv05YMLSxtg6GTzyrLCtDacRa41S4qD+7fuZe5a4Cz2iS9HN1KF0KJx6jc/ma1DWXAW5jWCoX0z4mp52JEHTixtgKE/aJ7MNq0FZ5FvzZLg1mhXtGk3uWPamuH3k42UQpEqhhaiwkEKVpxFrnqbrEkGbhRaN5Y2wNAFhXHzOwsnRw2TRU+7k31R4Cwy7taRKLWXWQtSEC2yeiVbdpzFvcriTKWNZcaUpzTywJGlTSBvM/ahPlYVcBbWawTHt6wjkeyLtqoEklv3hb+R1XFZFC18XshrA8p3FplT0tyMZ3ZlaRNI24x9KMcEqp2FeV2zDDEr9iUlgVH0ty/D7Ue/ekDpMAqjRUYPsQ1nkaOgPERWx7a8XCoPnFnaBJI248C0wJJUvLMIH+2g6GlvxIwg/NuPyIaRIrVuq0ySKo4Wsl7Jll+cqbC7+s7kPaAwOzeRFcQZw52lTSBuMw5Nm9dZOCgop4ueDu5LeLED2vDGtSCbJAQqkBY+L4Ql++rZQmpn0bwjn5mze42cKLQOLW0CUZtxaNrczsL+oarnNtFOIdoXbQ4dO3KroloQSX9SkbSQTRwEBtGpnUXO4szsuicnCq1LS5tAohbacBZ32LbdZu7pD2WpzbszlzcSscMQEqNQWoglD7UqDjiLvKfZZmuwThRat5Y2gFAttOIsbBeUB76dGTuxPxGl0CaAvftiYmxliVQwLUS8qIKzUPiDrS4UWseWNgDbZhyaNr+zsH4C9w5RJJjwAq/TifuTPCrtp6LvomkRFA5SVrPiLHK3fWUf7uJEoXVtaQPwamElncW5dcJFLM55a7zSeEqMdTu2pkQrnBaNFYYXFpyFYtpvpoWP7N595JyqSNbJAXrOLW0ATi1UjqsDnIX1VtXNkgBi8zoxX7JAEiOVG4unBSMFWnAWphPKa0n9wOa9ikgtmzVmcG9pAzC8UJcAluAsxmUPf5ji016+9qf58vgel0ALjxdX0tBOeeCt2lkY9win9QObFM7XxQF6BVjaALRaqHzolbxxMNfgiHSR8nXbfQaveG53FH3H3yiDFo2NqeSBhNPOnAVdVZMlNbk4QK8ISxtgiFBFEGehKFBwMNdgCy9DxdhrfJvOHTmwKX3VUmhBSIFAOK12FoZtX+EOakusX2d5AxcH6BViaROkvACchaKaLccGV4Yg4JbV6ezOEwLemvxuObTweTGFPfSAszDsET4Yq3kHN1HhlgguDtArxtImiNVCtbNQVrM5cBbBHkraX2fHp5dEi8bCc4FBy3QWWxKN+9ZNiojbiUJbkKVNcCrkhXpGkar0Oc/grhQHKQ+wRSNjZ4qyaBFKHoizUIyXMnYWQfbnQPrvfVkXu1Boi7K0CUK1UDk9E3AWeecaHNwcRMNbjsT+4Zxexs4MpdHC50W/OoWqPM/C2PC+tTfFtj6gWoAcKLSFWdoEvloIHOKidBY5h+CcSwcsb4qqwLP3UJZQHi18XjSPKuzublTdfirKHt+kOB7PgUJbnKVNUF9e/mTpzmIvmWpbty9w1wfkOpQ9lEiLRuNkc1u2WR2OqttCOgtf2cveIjlQaIu0tAEWbmv+usL4rp3F/nU0gu6ILB3KGkqlxcZ/3ezNrPxw5yxuZSVZ
xTPvQKEt0tIm2HZbRpuxD9fOImLF5oO1c/ujzdT+8Gwq23U4HEqlhaBwkALqLFaT17sRHxW163b/y/pq+BV/wWbNHaqDGtpiLW2ASXmbcQDHziKcrxw3XEf7qf27HUjlPEqmBVc4SAF1FqvEC4ZEWBt/ObtLeEEwNUWnpsyBQluckU0hbzP24dhZjIcKVLJ0nQtrEvZp3jgzlE2LjB5ixFn8ZmD45JTBSzfCEwk9Gtzu90m+19OYvSi6YL+usuRgykHRljaArM04gOp4pJzO4gDNCrKCzbU8WwFaEIcssACcxX8OPgRx+Ga9sdb772x8RvOlduN20QUHtF2xKg+uj8ItbQBJm3FofEXpcz5nEbhner5ywgvX8mwVaNE4IZmspj7x4sLyC8GHIJ764cZsrXax0Yh76lcbPYILxlU1UDzsTzko3tImkE5WU/XJ5HQW+wTrUMQL5/JsJWghmzioPM7tUPMUR4td3kfwaUEf7sxesF9V7MHD/pSDEixtAgkv1M5iOY+zCJJ2nDoYVunYLmUWoAq0SAoHaQCHLgyJvUXNCylWyU/JXmCQErI/5aAMS5tArBaqncWpHLYJj6jgN0v77e9lhagELRoLAskDOHThVIOjRb1xw/vvsdmGr86+F3+XucBgD+VgDm0pljaBSC0EIouhHLbZKousD1gXBIWoBi1EUiDiLFha7Ko3Gsf8f1yMFNr6e6ILTFJC9k8KK8fSJhCohWpncbVhbhr5ERXj+5xXz/qoCC18XtBjsSFnkdAiReohVn2X0ahf4i/YbJASsn9SWKH2zQeqzTg0vtJZtHLQImOQ7EH3lR+16tCiMbSNDu3UzqI5xNFidvUY9elWPeewlr/AKCVkXTAvx85m2MioIqoh5pOes8hBi6yp47v1pBIzVIYWjOQBOAvP8HxswWI10GrpC241itt22FZoy7O0AWi1UNkn0+s5C3Na7C0mZ5eBCtHC48XRZMOqHGJ+1De8hBbtRqpDBVotfYFZxf4B21JUiZY2AKUWqvpk5oM1y9gyB8QyVIGoEi0IKVC5dw0NL6HFWuJrAS3MHvCtthNJZVraAFGbcWh8xFkY0+JcQcmJDFSKFqnkoRw1GBpeQotdjcRdrAZ5DPoCA3m2Fnl2MyOLUaqlDZCqhZizMKbFVv1kq21UixY+L+Y1nIUstljrRdp+BfmxGyFBqAsOmlXsG/6aHOVa2gB+m7GGszCmxT77vS26qBgtosJB1FlIQ+6ksDx0G9QFpq0TthXaskxsjqhwEHQWprS4tfSAu3q0CAoHYWchV6J2BVmL2dWL/AW+pGSSErLdzl2qmQ3hq4Xzqg78eM0ytMvuYopkM1E5WviSx1+gziKihR58q5ukhGwXC5ZrZkN4vJhT1Psna5ahXYppNMpG9WjRWLhXNfkgMbwJLYxjBNvFgqUa2RhXm99SNIcla5aZWcyySpZRQVo02r+lyOUlhjehhXFX9hHLUlSpNjbHr3/ruUxfnq5ZZmbZbb9rXh9VpIWih5gwvAktjNNy2Yex6qNMC+dBRptxuGadiC40M0sV9lDVpAVfOMgYfiG+0IAWm0wjZ9vFgmUaOBfkbcYtv6dyW3ydkVUqsYeqKC0aQ8/ItfF2MzG8AS1uNX+4NxnG6hKUaN6ckLUZ++hPnIUZLQqZpalERWmR0UNMGt6AFtkHcGfC8mTBEo2bF5I2Y9pZmNGiiHnkalSWFlThoMzwBrTYbK4nWZ4sWJ5p80PcZsysWSZGGTeVz+2iurRo9ItDO9LwBrTYZ77kW+5bLc+yFiBqMw7WrCfSa0yMYjB8wgUqTAux5EEZXp8W4zkiOstSVGl2tQKxWkitWSZGMWmcdIAq08LnBbeFpQyvT4u9OSI6y1JUaWa1A5FaSK9ZJkbJ4cxtotK0EEiBtOH1aZHrEDxjbVeIsoxqC2ybcbBm1YkLDGxyrhLybNVpwUse/c2N5M+1aZGrsMluVVRZNrUHTi288hz5YwObVEOerTwtfF4c
khtenxa5hA67DXolWdQmGLVwinIWJrSw3hlsiKrTwuMFGdoxhtemRb5eohw5DwFKMqhV0BMHmTXLwCZ2t6nmqDwtGi1S8mAMr02LfOV+ds9bLceclkGqheyaZWhgyyd3GqH6tCClQNbw2rTI6aSt9o2VY03biNqMRWuWvklsFykbowNoQRyywBo+pcU6HDmc9Bab8mEptrSPRC3k1izis+5Q3pUEVQgtOoIWSeEgZ3gjWuRw0jkqR2Ic9BFsxEoxpQPEhYPcmkV8ag1aVCBr0SG0iKVAzvBGtMih/5k6+YN7t27dsSM55CqkVhl2dIJQRR/zZwJTIAygQYsKZC06hhYeLy60ecOb0CKPwqovZB08snvHJu493Fy0CNVCf4A8DcIMGrSoQNaic2jRONVcwxs+pcVBGLmUJP++oTH3uf27t0hu/U1GC18tXM+vWYQtbsXvT57bYw0dQwsvtOMNbzT5Iw/QmHt87+Z9GSvizUYLXxXh1yzH98IlOocWjTpv+MJpsRnZhZ07wu8ZtuzYvHXrXm8xTPcIJZjQHYZOnuK+5/ROuEUH0aLR4r9VNC3Uee5zR5id047dRw4Kq9GLN6BLDPFrlpM7UAw6iRYCFE0LRRp2fP8BykXs3p8RyZRtO+dwYP+i0KWFHrKKdm7dTGhO+zbvVWgqZdvOOawbvzh0aaEHefXIfiKg2HIEaOIr23bOYdn0RaJLCz1IEnrntm7S40StS4sqo0sLPQinex3cnO6dtsLN3mXbzjksm75IdGmhCX4WJLF72qyTjCrbds5h1/CFoksLTQSTg9Ng+tzuTamj0KtbKNt2zmHb9AWiSwtNBGVRUf3HOcJR7NDuwyjbds5h2/QFoksLXQRFHVu2bt26mcjbbTaotCrbds5h3/aFoUsLXeznCjs24WE2ibJt5xy2LV8gurTQBlvcYdrFWrbtnMOq1YtFlxbaGCcKPNAchQhl2845LNq8aHRpYYBzRzbv8LBVVd6RjbJt5xy2zF0CurQoDWXbzjnKNnAOdGlRGsq2nXOUbeAc6NKiNJRtO+co28A50KVFaSjbds5RtoFzoEuL0lC27ZyjbAPnQJcWpaFs2zlH2QbOgS4tSkPZtnOOsg2cA11alIaybeccZRs4B7q0KA1l2845yjZwDnQ4Lbroogh0adFFFxy6tOiiCw5dWnTRBYcuLbrogkOXFl10waFLiy664NClRRddcOjSoosuOHRp0UUXHP4/Q5iYCo3Y6bUAAAAASUVORK5CYII=" display.Image(b64decode(base64_data)) ``` # Arthropod skeletal muscle compared to dendritic integration at cortical synapses: > As in human brains, glutamate is an excitatory transmitter and GABA is an inhibitory transmitter. In addition, crayfish neuromuscular synapses show the types of synaptic plasticity thought to be involved in vertebrate learning and memory and in altering the efficacy of central synaptic transmission in normal and pathological brain network function. 
The multiterminal, polyneuronal, and inhibitory innervation of crustacean muscle, the use of glutamate and GABA as transmitters, and the extensive synaptic plasticity make the crayfish neuromuscular junction a good simplified model for the complex mix of synaptic interactions that occur in our own brains. ``` #@markdown Sketch of skeletal muscle innervation #@markdown in arthoropods versus dendritic innervation in neurons. from IPython import display from base64 import b64decode base64_data = "iVBORw0KGgoAAAANSUhEUgAAA0gAAAGkCAMAAAA8IPU5AAAAA3NCSVQICAjb4U/gAAADAFBMVEUAAADl5eV1dXXwAAAnNSz/7+/tfX2ysrKc065PTE36MjINDg+Hi43j4+PMzMzr6+vAAACjpaZGTE5eZGbsrq98gYOZmZnyEhI/RUf0ZWV4eXkbHh+AAACTx6TCwsI5P0FnbW5WXF7v7+/Y2doICAicn6HvkZHpy8x/rI5scXPqS0vOTk5+hIZFXUz2BwcxNzjo2dqAQEAgAAAoKChihG2eoaL///91n4OqqqoYGBi6urqSkpLrvL3Hx8dOalfX19fzdHToiYk9REb/mZl6LzD/399paWlSWFr4Q0T7JSb/wsJNTU1BSEpYd2KJuZl9fX26HR4zMzNcYmTwjY4fIiNsknj2Vld9HR3/v7/f39/O0NGOjo66vL05OTlVVVUsLS07T0LYiotwAACmqquNXl7z8/NJT1H/r69wICCvr6/mmJn/bW2yEhKRlpdhYWGmenv5Njb/TEzb3t8SExMZGxxQAAD/X1/n5+cMDAzwICBVVVn7EBCcHBwEBARfRUfGycpxdXXZGxvKvL2io6PEp6i2travIiP/AAA7QUPS0tIxQjePkZLRo6QjHyCVlZWfn59mZmaJioszMzPdDg+9vr+BgoMQAABtbW15OTniEhKtX2HBYWGwAACgREUkJSVZWVr/Cgr8HBw9PT3Gxsr3LS3GGBz3SEja3NywkJCxlZaqra5FRUVxd3iytLX7CwvSExT/zMwUGhbiYmL7OztBQUH/QUH/Dw8MEAyWNjaSJieRcnNQUVGgAAD/f3+Fiot5fn/9Fxfhs7MzAADxgoNITlBmMzPNLS3/Hx/MAABsampJSUlBPUEzMzMIDAzMzMxdXl41Oz1UWlz/T09hZ2nMzMzh0dH/j4/GGBn/dHTzo6MUFBQ5OT22srLpCQr8GRmPLy/YqKj/n58cHBwjJyj/KSnvz8/5OTru3t4QEBDhIiMqLC2mpqb2UVEdKCHun6D/8/OQAACvQ0P3DAz/qKj/MzOGhob7ICDysrL4WFj8LCxBRUlgZmggICCkFBQuMzTwwMA9tXtWAAAACXBIWXMAABcRAAAXEQHKJvM/AAAAHHRFWHRTb2Z0d2FyZQBBZG9iZSBGaXJld29ya3MgQ1M1cbXjNgAAIABJREFUeJztvX90HFeW39fqISPNkgA82mHb6gm5XDRJSc5qrVhSSwTVstwOwV3XiFgykkw2gAIdpsV0GLS9zVk2c04YxN7IdIvRgHMMLBw7oSScPUMbJDeUR8RJwpCzXiGhxX+wYk64zpwTJ6TtWQS9lJMTI14eMePU+1X1XtWreq+6X/1A433/kIju6ur69el733333pcpamlpda1M0gegpdUL0iBpaSmQBkkrZo212+1cHqiR9KEolAZJK2blDUvZbHbYaCV9KAqlQdKKWfnhAag+o530oSiUBkkrZrUNBNKABklLq3Npk
LS0FMgGqZ5P+lAUSoOkFbNskLIaJC2tjtU2+jRIWlrdqmWUEEgjZtKHolAaJK2YZYOUM5I+FIXSIGnFLA2SlpYCUSBVkz4WdVII0o4P3WoXix9+uKzuG7SClXcu/Rc5mQ8kcndskEq9lCOkQeoh5emLPy/xlCZyd6pGToMUJA1S0mJA+nB+UPiBZO6OBklO8x/OO39okGKUBVIf/mfOIimX5LEEiIBUMGpJH4o6rX+Qqj++FPE3rBtRIAH/4JtJHkuACEg9lWy37kG69GX5PU0SEg1ScYK+DamSBklOHpAGgZ/xTeeitcDfH/5gh71Jvjj4A/AKcOrb85Z3n5P+sqGlpd1LS5okKAakeWyRwFDVemMeXv82uM4fTuTw4AT9zPWBO/D+F+Am5eI4zLmsBklGbpBaE2jwS0xTjoyGf0A2yeNNvjlY/ILdVqTJ8uGp5tTS0pDKE1i3crl2OfgPCyR4Ua2/+r5Jx4GKNEgkThHHiDZPQBruoWS76EH65jy5eTn4yg+cuFIeb/IF2ST3A/ZGC1R9vnyl0Ww2pw6UNUlFGiTgBOAhkgUHtEKDxcF559KjDW2QHOViOEwCUi9lrUYPknWzLCduh/VjOAFeAEHyL6wXoDP3Ptkk3wcJm4ceCNj2C4kvsoZHk02oxmFNUtEV/v4CR78/JNcZuALffB//AwFDgfRFC134GAZWGiQ5uUFCTIC7BW7tF/aP3g7sSNhOnvXLOQF/KFtSESdreHS12bRJmlR3ButVDEiIGXh5sXn/pm2wctgbcEBCd4DcpIgP0wZpLvLvik0xgITvzDK8jYPUb94X6D17k5w9bJKJOL0Chke2GjvLt5SdwXoVOyFrj5G8bOzwgIQ3+SYdrohKo+ZA72WtRg8S+QM58C0blmKx/eGHO+Am2PzkiA/C7oKr6o/LOxtNWts0SVSwoQ0GRjAu6vHWcnAkyoJEHIDlOECyS2Q3Bkj/otNd+swjobvsyiNq05vk8K2XAKn5dXlr06Vt5R/3UDZxJ2LC319gp5qOxLW/YAM9NkhkEw1Sp/IH6cjt0yc62mUMIFXeKx93c9Rsbi1/vbFJYkBq4eiOcwcGYfR7Ipdre1y7pEDqnfsVANI5S3fv7w+9SxFIOdf24UEaKi9NeTlqNo9vcJIYkMDIB/zfuQNOnOf9dIDUS1mrApAsPbp3LdwuA0Ea9M75hQbpVvlwg8cRJGkjJzkwIO1wWyRqePoDxu2LG6SaUdiIIFm6fTKMkxcIErhVOfzKF+iVkCBVny9v8+HIImlpIyfeMSAtu8dIO+iJBzLzkARIPVnZ5w/S5tO3HZTOLd69v1lyl8EgvY8nZGFcaZnZRAqkS1+TWViupjZy4h0F0vtU1M6xSPBfLRgl1yCpVWD4+9rNRxRL567fkxowBYNET3ZM9DGbyIB06b0lTpiBJWmHz2d7Xq55JDQT7twBOkMIXeCEQeqlNvqieaT99+/SLN0+KR4wCUByku1wMXQYkPzCDAxJGzZdKM/hiLoDJHv4w/kdE2QuPFGQein9W2ZC9sTpRdrJE0XFRSDhMorlnHsTMUiT5cMtAUcbOfGOAmmeVErQ80h98MJ/0YZbvl9MDqShjQmSpWsnGSdPfsCkUtVb7mwGX5JeSeDwtGTVi5V98ilC++9d72DApFDVr8vbJDBCJG34dKE0ywbJHE36UJQpVK7d5vuMk/foZsgZpq506evybjmOmjrxLt2yQeqhOorQSasnTtJRcZnggxoJw3UekjZ0kkOqpUFCYgdMwuCDEkmE69wk/bwmKaXSINnafy9elhZkwnWMpg48vbET71KsuR4ske28HmkzM8O0eDrKQJ5/dp2fri4t/VL562Z0h6TVuewS2bXeWSGpq8K+zewMU1RB8eot2XCdrcnyganmcd3xLp2yQeqhgqSuK2SjZ0k+7E3UuIImnKY2dApreqVB4uuIK5CnlqXm17wiviBNHSbls1NLSxWlB6OlQhokX11jWPrd0+omay9xi
2GDZA2P7E9s4MS7FGu2rkHyF8vS4q+rmWC6tBQy7I2GR7ZaGzbxLsWyS2StfyR9LKqktIsQy9LHv36iaycv9PRRY5srH69xuLyg4ty01KkXa81Vt+P6C/+MTsjrNiNviOldJyHLALkDEzrxLnXSIEnp//mXH9Ms/bPOs4gWwk4fWUMi74CqoRPvUiYNkqz+4o8Yu9Rh5kPoadjjPo6gJild0iCF0InrjF3qoLHXrYAmJ1xt9c0j2rbB+3SlTKUebNoQGUiW7jMFTGHLLsKmMzSuoDVeuNqtSUqR7FrzijGW9LGoUpQgFYubmdzWcyEae4VOZ2gd9jYxprTRe0emSr3YtCFakCztZ0Li5yQbe1WDm255xQ0zsCTpdKG0SIPUmU6cPueSKCpeDZsW5BdmoDSjE+/SIg1Sp/K4eNaAKSAqHpqjSZn5pg3dOzJV0iB1oWtMlnjggOlSWI7c2Qy+JOl0oVSo2oNthGIDCVQC3najxK0HDJum2vBmM/iRpBPv0iENUpc64hktnfPUMF16L1x6nUWHdHehjds7Ml1yQMolfSiqFC9IxeL+mx4PDwyYnOBDaI5E4To3Sc/HfMZaXvVg95O4QWI8vI+9LIXl6HjIMovWgfKV2E9ZyyUNkhqdICkPH/9Leth0++S1sOVHx0Omh08dKP/POvEucWmQVOkIQWnxHzNLx/xvf+O/CMPFtpBprZb9ump96Hmd5JCserAfV0IgUSg9OsI2yfvkmdflOQqX1roV2a9JnS6UsPIaJIWyUTq52dUkT44l+bA33t7OatWJdwlLg6RWJ/AIaRFMzG7+838QiqVGiLA30BSV1Xq8/KVOckhQGwqkyujR6L/+3qJtlC4tLf31PT+UZilc2JvtLQT+0ol3CWpDgbQjk3k8NxP192/GU7SP/iGK1724511qounR58/6crQ0E4YjtreQTrxLVhsNJEsry9OD0R7BEeTf/fLfth/0r2i79GjPixwuQk4fNbzhPYskneSQlPLDGw4koPlonbzNJxEye5yn/EXGx/vhVx6OXPZFxBEvLDF1QKcLJaWcsYFAqmQoRevknfgdN0kulhbfZYZLx8NNH/kMp3TiXWKyu5+M1JM+FFUKiNoNTi+vUCxF6ORd+k//lpckwBI1v/SJM1wKOQ173G84ZZE0GdEZaQXKBql3ehYLwt8zc49pwxSNk1f9uvzXfwpp8fhwz35OsYRdvG0BTU44Cij6a+zU6UKJaOOBZOno6Dzj5F1+X/EhoHrYd6ELxwnSvf7MJ46L98yzIJ0hBEaeFsasdMe7RLQhQSp6nLzMS7MKnTxSVw6HRI+4j/tX7zpm6a/9O6E4EmU/bCv/WCc5xK6NChKQy8l7UFfU2s/uz/DiI84wiYgeLn3yJi8gzpXErO1WnS4Uv+wOkRsQpKLHydt1Q4WT97z9qL8OZmIXfSF59hlnpvZdubzWqzKztjrxLn61NjZIReDk7aVZOntotMvvv0X1r/s8wCRB/fm/ZqP0U09cgoeI1GyTRZJOcohXGiRLg3MvZRgnb66LARPbl/gT31ESVOtw+e87ZumTIOSApKPkx3XiXczSIGHlxnfRLJ2q7+js2139vf8IAPJH/ul10At0RkuBKDWuyBcr6cS7mKVBcrTjBuPk7br8rfD7GCpfoR7nF//qOU+uEPu0X0X/+rYdxFv83Jej4Fbgnn0/3eFPgVYnahg1DZKjo3MP2AHT98M5eUOs7/VH9gCIF0qYotNUn31zMdgqTR0IV2Qx+d33dLpQjCJNGzRIRKMvnWUGTGsV6Y9WWI5ed+aKfk7AkaUX3yTTtJ9wwg6O9ZLT1vLhX9SJdzGKgLRmJn0kqqSgQvb9eXbANFyT+tilpSVmDPOmA9I5TwzczRHQHoLST90WTKKlPq3GzvKVhuULvtL9tdCSEwFpI2R/h1HFOMMMmO6I81s9Dex+SoH0bQmOaJSeYcg77rtyH1eWGwhC8HrR5hilQfJV5QITfMhMBOe3Vr92o
/FDf5B8OLL0OR4rfUJ9Inxy+FXyQU1STNIgBenoCBN8CCpi4izc4u/a+XMExkpuoxSyR9dWKjncIkknOcQiDZJAR9d+wrC0slzjOnk/9gbVnnU4+hVpjsDnsCl7BEdKIXt0weGR86detDkmaZDEqsyxPl7mxrTHyeMutPwM4ehjZkpWwJGlr9BQaXFP6B5dU4ddK2zqxLt4pEGSUmX5DMvSxCgTFR/iWw08z/oxE4QTc2T5d5jBN0NOH7EtughJzSguiRYjDZKsasssStaAyWZpyC8a8O13/9+Pf44tkpDhyNJXKOjwax4wgrSbl9SqF22OQxokeQ1OT7hYsgZM4I1LS/JRNUmOrJESysD70/IY+UX3pnQKa/QiIJndVg+kRpG2LK6whbWQpelCiBWQpDmy3Lt3A/L0OPKPSlhfKp+eodWRCEh66UtJec1SJvOTCwXJhz0ER5ZP9mvcuVy/XR/wj0roRZsjlwYpvGbcoyVLe288KfGwW0YjBEflwzDr9ROpOvSrgcl4Ld3xLmJpkDrR0Tm3h2dp12ffEnMkHzrYCgY8cEbpTYmtuWEG9qsXYrgwG1cYpIoxlvSRqFI8y7oMjmCUztK54mcP/UAVR6jp1ovBXR+orUXhDp14F60wSCVDUQud5BXX+kiDxMG7vMzkir800q+EIxQ4+AqYJN9iP7LnnRLJDw2deBehqhqkznX0BkJntf9bnzEs7b0wxnmSd3bAUbMJguABXR+AWu5sBt+dapKiEik1zxkDSR+KKsW5Yt/7KN9hi8XNkzeY3IeVG9Oex1jqaXdx1NwDTFLgxvILlOnEu8jkgJT0kShTnCDd+oU7NknNZv9Hp5jow+Xz/fRDLJ16ykwIwczXfzdg6zALK+3WJEUkAlLvFMjGCdIr1gM/vQUw8wBPJPUPs/mtq7NjnXBEdTlB9eq/69v0brd/R32OdAprRGobfT2WIRQjSDvKO61ncwySNG4/q/357zEsPbgwDR7gK/5Pt4cjylN7/WOcQO6T4BCy5q/5LZ14F4lI7+/6bNJHokyxgXRp6QB8hhFJs9TT2j/7HYalLfOrvyj7uLMcvUg44ic4gFZ3YTACXbqe1iRFoFGz1+ZjYwPJKS2HJG1hs4T6s64apjuzPlHxII5Qz2NebSDZOESruybsoTKje0dGIbwYc8GQ65OzHhQXSFRJ7CwgJet+ascuuGqYVrO8qHgQR82fo6rVPRtPhZmYAtoGe6joxLsIVB/ptWmkuEBaoI3BODBJnCd32mRRQgMmeY6ajwJAmvIW8QUKBAOhgzmlE++UC8/HtntnGikmkCrlwzQwABIuIoXz4y6WtpjTfrni3tyH3/YH6WqYRPImNEQkO1wv2qxapGNxD00jxQNS9T22FyRoNnTDB47fNx+4WMrcOc9jiZND9IzD0a+y70iu8EJtT62spBPvFIusM5adS/pI1CkWkL52lUOsQjx4dFwBcHhcvExm3BN84OXiUY2I2Pj37pBh761si8nGFU2SSpHo93DvTCPFAtKkq4YOc7LqJWkbHkoVZj1mKbPKsMTPabXDdmzQLuT0katHF9qDJkmdZus9F/2OA6QdrulV2954SNpNzfMseM1S5oGd3uqXG74HzSQ946Ig1PQRbmHsJul5neSgSjhoV9AghRGZifVwRCc4QB1nQhLN/uwWP5b8ayxe3PPmm58zffGkqiYocXp0AU3qdCFV6sEiihhAcjf5Pk9jcYF+Z8rbXej8qhcli6WrIaaEGpJVE0S+xbM68U6VSKxhSIMUQrfYZ76ftTILzjstboB6wY6H06OmXXeEk7VYoeoDm4HDqePlL3WSgwqRlNVein5HDpK7qapr5OM4d76dTsbsj8xf5o6XghSy9Wpw5/CruuOdEuWHe225vmLkIDXd7pp7wtV+44r/E99PUNqydt7cEoalkOkMQT268O40Sd3LWNMghZV7BokdIlGDpG2BCaU2SuP9zS/37pJlKVRfPFGPLrxDneTQrVpkJ
eZ9GiRZuWeQLBWms9nsuKUL2ey0PTN0XBRZGyOm7DNQHkjbpbOH/Pp6heRIZsFMy2ZpkrpU2yj0WuPvYsQguWeQfDUjMWO6gKMNj+Hsk4slXl+vMFXlTdlZW51417XyeDq2pzKEIgWp+t4BuYSClmtdZh/heaUtONTHsJSZWHMlEYVLrwNFf1IHC8LpEV6z3lfV2Degx0ih9Hx5RvxgNkO0Ju5/CUFzHv9dOH+HHnA9oCuYjodKCwpR9NfYqdOFulHJGNIghdKC7ExoQMCO1Uz5z7AkgV5ETG3tGRNniofjSL5HF5BOvOtGozhjdcOA9C+63PMlNuHHX5OyxmDK8gDHHrhIsuzSR2xr8dULCyE5mllakrOdWNvK23SSQ6cyRwhINaOR9MGokz9IR26fPtHFjqtfL7XEj6Slq7C7kIRQEK4w7iHJskvLrrS8lZ99i1elUViwNJtlNbvtZ98NVaxk+YHf1elCHcoOfm+YXLsjII367v39He54UtJZmlqSjEg0DuAgHJpTcs8fjRm7WJYyq2Z2YQGgY8FyZ3ycl7bnaNw8L9VupYmKZ3XiXafK4fygjZP9fQSX9jy6ea2D/cpGvhuHJWPUlhkgs6WQpFXvJmzoIbweXJBhCRXPWiTpJIdOVM8O2OqdhS9lQLIU3smTjnxLBxro5FNoXDx9iJqgRZ63IDCUxMmw23Dx7HGdeNeJGo5n11sTSf4gbT59m+olYjl5m0Ps9seSkW/pQMM2OkeiAFvj8Tccmz8rgOXBOKW9KxfZ4dUF/m6x7N5COvGuM406nh0I2/WOfxwY/r52k25wde7RPVknb0iSjxnZQIOryBVm7PFbdR0v/+IPXMOh1fHxO9Y4adoaLrk2xsnhhYXzF+zPcCrgne2XKJ4tknYouAMbS07MDhYklZI+HmUSzSPtv3+XZun2SRknz5Pz7SPJjAZOM3DgwZn8LcFXjzlJD6vneZvZKDj+ot0KzJ8kpreQ7h3ZgUq0ZzfQZ+SSPiBlkpmQPcE4eYunhU7e85KZCrIZDVc9s0Jg/b8VTmjAnj+i2qc84HbzanJ63fWjaMU4f3N3byGdeBdeoybl2VmDpHrSB6RMkpkNLifv7r2gqLhsSsO24OofW94I+RgMdW/xRAaYedgFO4i3JctDiZftfR5ashWeFeP0FgIkvaLkNmwQVUkpElYPtVqVTxHaf49x8h6d9BswWY6dDB7i0gksbw36GHHcXA+8O5+h3/bwOCjxkx9qaHuv2zjF7f2ge0eGUg13ayCq9E4X/VC5dptPnF6kB0z8qLikY8fpdcKVN6V1zImzMdEDDhoFuxPRlln2HX7VhL1rN0kzfrW2OvEuhOr1AVb1ngmAh05aPXJSMGCSdOykZ2I9qzIXqLmiLeJ87/Nk8we0/drm9dPYXbMkBRT9WST1ThA3WrWMtgukfT3j23WS/b3/XsCASdax2yY5E+tdlZlp++AkOPjnqdoojdsGzKdnJB03pyd8A4v+9KLNsmImkbBv1ytZQh2WUWxmo+KPHJYkHTvZAZJ3O1fXB/K8eyN79GcIShfgUMmvZ2SW2bNt7MDmQV6oTryTkzvUADTcK3G7LuqRXAMmFHyQdOxkB0gcPFx9iLaQHQZPSZ3Hg58t0/5VfP3snomx44cZaFkkNVXdkB5W2xVqQHG7HskA766w7xozYLp9+oSkYyc7QOKlhnNBEvc5scMOdyp+PSNZg0QiGTItvY7rRZslVB/2cDTQZ/RI4mrXFbLXmAHTL//aXxbS0QRTm1IDJC5v/fQCSltQlqlUvyA83Zo5+zOfL591gQQnfGV6C4ED0CSJNMZkNRCt9Ui+nYpSc3aGafGHe14UPHZXJWssPAE7Iligt7BAUhv43Y69Io2IeInjQOfvOIyumtAgTbqzGXxkoVxRcCl7WXmTw9FAqUemkhT1bNh8/y49YApmqSVZy+cN2PEl3TzF9t64i5zxD2GnZMm6TrwTqIXXoHCr3hvhBoXNT06c/mWGp
Wf9nrmdagN7YfrkT/0CqqMNSvFm9iy/IkxLJ94Fyhv77qlwg8ouQjvKf+eZT+ig+OdcliYlA3uS/UtCcbS0NHZHmqSpw5LJgPhAdBfWAA1wYt8o3GDOJn1sKqQQpOqXwGN7XcTSlFwNUmNJsnmKbOprk8QkZiVJCtejC/xA6GVf/DVKGhVzwg29kN2gEKRJ0lTh9Tcf+bPUOCAHyGHJGtsQ61qS2B6a0uWWM1EK2fG4sVMvjhkgX4MEeqD0QgRcHUiX6FDcs5/7sbStLFjxgWwmZ2cmw3NESJoN3Dpcx2NQa6sLKgLEm4wlGjF74BdIHUjuPnZ8lq7KPfjyGUSStersXBMkaUtQz6Ct4RZCP67LzgNVNbO+HA2UeiHhThlICxwTwrIEYuINuci3bKAhMMHOtUvGU8sKnDt+criv1A+PBtXuLmkFGaSBgWEz6ePrXqpA8ssNcrH0X1+WGfnIjqOkm0t6cx9gnrdfvCHkQuiNK+qHR8sry7XegSnQIIHexWNJH2HXUgXSj/3nhliW3v1K/GTulBtHhVhIzLPpbwZkOIRYmgLuO4qFXh6D47sx3SMsBRuknpiUVQTSjuBHj2Fp8d1vBz+ZkhNN0qWBHI7GUOURt8PQVLiF0GeiWA6zQjKVeoIlgUHqCZOkBqTql8K+WgxLnzzzuv+Wsl315Z93L0c+TR/QxqE42h1J4vcIlTu7/lkSGaReMElqQHpFyhf7K3/jTwrTHsJk4slOxPpzxCEp5MKzUZXH1uYeUywtr+vETqFBAnlC690kKQGpKWVDQEoDk/fwwz28zSQzULdKxwM8aBToinJX89Vw07CRNhGqzE04h7kyt36Ty3NCgzTQZ66jhZlbs3mjPupa20kJSM+XZYJsh1Eo7qt3F6nhksfF2ypnaDy9V33lNTEmXXS0hQndHS8fDsHR1IHygorr56ujoxRLE6Pr08UbMIQGCbQBXz+pq20ju2/EcNlQFSDtkAoOTDqE7PkhNVxiXTzJUiXZiSYeRwts+R49mxRuob+rcczCHh2hfLzlmci/T71GjYoYpPVkktpg8c7CMLveoAKQULKq8IFm3L8X6dAD5eJJDpCmZDNaeUMe9ypKzjvh0hl2x5WkWplz1vacWHeRh4Asu3VqknImB30FIMlFGg67H/1nqeHS4jPYLMkNkOQD37yaPxdIzvIw8lV8aOsYewfVlp3R0sjR2L5WhWZ9077Xq0nKI1eV7cnXPUhykYZJ3tDn29Rw6afALEkOkHxL0N3i1ioVLlDBhi3O0mIh8shh8kO8HVYHRx0Xb3kdBR5axj4ZjtaTSZpDIJWYUVL3IElFGlo+tL245xFllv5zuQGSbAl6UM3fGGz6wO41BEetJLrnzzhmaX7dDJbyJr8wdh2bJFIzz+Tadg1SRSqdZqc/bbSL99/8hxL7ks0MD1U727gShqOkGjRQZmmdoDTm06mBa5LWx7Jj9uqdzBK4XYMkzmlognF5oA35yoniffKmqAWRfMBOen3akN0ZEl0/tja/rlCq1yUNEjBJ6yO9oUSmxbK0Ce0WpFdkHtaWsG3ks286Zsk7t8TuSzZgF8JXa4TqzrA72RXNHQ8v/Si1ub3sfE3SukhvgNFv1SBVpTqrSrUN+st/4AQe/DPE5VtvheMoRHpd8k3zj9oo7Up3CK9qcpqr+mt9ZNzlyTmZOerVLkG6JdNZ4apMcKBxeOkfPmMH8T7hZg815RexCFODHoojazCVggWRHJQyd1KchyeRHERrfSSB22FIhcGGSzJPq1xVLOpi7ATxFrmDpd2yeBwvy60u0wzLURTFR52IQunxSEonaQeMkTAcrQ+T1DKG0MEWmB6x3YH0vMx4ZZuc1cKR72+/G4DSjCweIXJ9QnE0laLedRRKmYupzMPLy83Fri+T1CaNLkvMxFdXIEkl2c2EtVrP2h6eG6WWYO0W53mXXDWmGZajdPU4ab1EZWgcmk7bcKkkH/q2TVL6GwrNkSFSjmn/3xVIUkl2B
2Qe/iuM1Xrx8094KEkHGkLUFIXiKMmwN18ze+lsp4nRVLFUl52LdTSU+oZCA/bynSOMH9oNSFKh70mZjXZ7ZnX3cFCSDTQ0DkTEUbJhb75GttAoZR6np24pVOibKJt2k1Szc9nZVstdgFRdkkiy88sNYjfiBdHtuMPiHvIcSzfVl+qd0gzJ0dbEw95cHb3BJuGmhaUBcV0sR6nvcTdHFmavsOvRdAGSVOg7IDeI3ohrQb79U1KWDrqlSGc0SKe0huNoW2pbEtceZzwsJe/jzcqUIfFMUqo7gTueXZttWe4P0o7x1wJ32ZTJMJWaQvJPIPqKOHg/fFa21518Smu4XLxtaZg+8tHgnJuk5MdL0lnfLlXS3Qk8ZwciXctIB4CUyZx9KSAhVybrW2oKaSoopk3GSov/t6S/Jj8RG4qjKJszKFBtxYtSZj7JIsAOIg1I6V6cwhzBh1kwcswbgSDBGYo5/t2QyvreKuP9eUr+WL2Jg+H/p+/CZbTkm4GH5Cg100d8Dc5zSEqw/VBHkQb8hKZ4vaQxMhtreXZs9xMhSJYe8DzuryWi2lMy3h+35I/Wi++yQYfgb5SdQAq1PlnqOSqyrfDo4VIi2XidRRqISUqHUbrsAAAgAElEQVRvhZ8davAkYciAZOlU1nU7hmRGIjslhjUy6459+7dxXriwxEI6NTzcOn/rgKNicQa5dxc9LN2I3yzlO4s0QKW4wq9l29mKew1pf5D+nMvtXqnTUdUvD4gfwOMysAkcO6Sd/93voPBdsHsXoomxfGgP0Jm+6SOejqLeXZexl7fLuYFxm6VSh5EGpPQWnTvlvp6RXFD4uzIywbJ0xmZJZi62sSQBm9CxQxtNPotmlRYDa5Xk6dgm3yZ/KnXpDL4aRNl3y9MYoUOmw1KcnbyqIcr5uCYppbmrzsLsfZ7YomAeaXB6eRfD0i4DJJtV35MY0stEGniOHeymMObZ6E0hSfKB7xC1Sscj6e0t1L279/Z38DFMEomHr9SmnSDExLTyo/RRzh6Sd6a05q7W7RGS12hKTMhWRh6wLI2/9ooUIxKRBnYU1X/eHLe/5sGdWbSkHplB+moxmCTpXg7hOEomLei2daq3T5/YHPZzo4ik4gx2JpYHjzpd8VbiqbdohK2e8KheT+Pcd80eIXGGcXKZDYPfH2dYypzKBq0bCSUTaaCnYqdNllegVdDj/oq9yDMiyWecNCMdsJNvGw44SuSW7ie1JNfvXQv3yWlMEoniTVjeuGOWVuJIeZgzRdUT1pUNfD+VuatUuS+nKF4+Rej9y2dZlj5aCHoEZXIanBy7sQts7qVjl85TCa0vwoHST7n7mpItsQjRNjwxjor3zzm6ffJEmI8ikuYsTwIZpRXg0VWWbbO0HDVKNTuLxoeiJ/qAnghiKY25q6N2uS9vEBcq1+5bq+xTvuvyrJ9hahyQyGkgOXbnXfaO0Zb3nA+8CG0Sbz5JPmAXouYvMY6K+2/SqxyeWzwdgiVE0rSTOTQHXh10eohHi9KAoE+DhdETzv98VEpfohCV88Tr0hIyaZWuysQm48I07xmclMjoweHx85RLt3rh/AJkc2wha9NFrWH0OmzowNmXdOutEBwlm+69+cTJ2wxLR2Q/ifgBAVacOXQDDY2mHZQiHCsJppCe6HtisAo0+EQQSWuuxIHEVTXtSGSfOed9P3T29yBnBv2OxzC1JEosUHjcwWiLOc2ujlyYxe9RC0Z8zjdJk7Lh7BC1s8l3Cyruv3eX8fEkA3nw124FwIKnliYwODNksBRd2EFQFmvRUyUKIqmQtlnZWaePyz7ePFcHZRQ/vzfjlcswXZGoZQVVsdM2Rne4hm3nWTdJIIn1Xfdm0sOeELWz28q3kuYIaPOJ006D9HPX70t9COIzD/6Fp5ZWyASgg1I0wXDqh5unJsURIMl/nJRzpw4kq5rz+1AweIyHB6lSnlxAjz87w5TJ7L1BguIyfRpmylvHiPe2xScI2Fgqj7tIAk1Z/4lrN
WdpM9MKxVH3l1+RaJYWZczSIHTp0DBjlCXJQWkiiina2eAGXE/0DVAgDfYFQJeq9g0tarG0LDc9PTxIoEdx4QK8FYcufM/F0q7xHHgKD4sjaI0D360Ta5Yt+GwEjBZaXy+LXnjxr6LH6Rl2V5J4NA7LZKNDpYkjIJqlj39dOL8EV0VfQUEFNFBySLLnmCIYKgmqkJp9zSqtZoBJKqUoC7xB2Vmf0HxokIZQTs95/HhPmxm3Ls7JJNlNTuwl1sgPI8tj22p/FYy1v/ir5GGivbsrkqVKIRJV08YR0H16vCRKfBixnTuLKjdJdthhRbH7VBVUIfX1DTIgBZqkfalpql+dM+0ASp/JnywODRLJVkULSI5RIQHaMH0k+uUf+wne1B8jMM0EDRs0f3Ct1zedJ8nx7qQDDdJdWlPJUREExek43qNAlqADh8dBlcdukoojeF7phlKjJMoNchmkYJPUZ6bEuavOUac14pNRGxakIfthhIZiHP5zwb2aJND38gFP6jQeX40HJUjYrRzg7u9Y//hd5zGyTdJV2VI++cSglHIEdOL6OTmWjkJ2MCaDEx6ScBRCqVFqCXKDnnAZJGCSAkLgQ+lw7iyOatQx5fhbhQSpSpVP3MEmCaifm5nwkzzf3OAhVuYBN1JH5OQPFR5g5456hn4bvye56myIbsdp5sjSNQYlf5agczeC/8AkMTOxM9i/UzZSEjl2lmdXdeuJoE/sS0PyKsNRwcexCw0SXT5RAOzY4bRClpvk85O61+aQmPcFf6+uybZygH7kKgPSI/yWbNNI+YnYdHNkaf/JjxmW7t7nxh4eUyYJkzTBMkOl46mQMOmbCdkhDQSB1FdPvqUQw9FA1rdUKhxI1ffoPiUGGA05f/qglNl1+Tfp55TkAz0ITNVzVfzBkMb5JvUA/Qp56uXGPfITsannyNLmkwxJ5xZPc1Jba7RJwhHxeXYTko6nxL0TOXau2DcBKcC3G6gYnByCWNVgONrnn0wbDiSmfAKNcyYos2KjdNHdae3xjd9cAHk/s3cIbMHmyF3xB63froU/cp4dlN0gWzohPxG7HjiytP80i9K5R16zNE+bJBy7cz2YJB1vxP3Z8BI6dpwhkqVAkAbaCefctUyTsrI17lQsUiiQmN6q5zERqzQR/TjssDozy29rg3UmcHTU9FT8FVCQ77+3TdKvYj7k7EyYlNbnu7768ejIIxdKi6ddo6UZFhE4t5RxJzTgdLzlro9HXM3HGSJZIAXTN5LoMKlmmFTiYMV3gFQMCRJtkM7bUDAkNf/mLvJiYdpgCy9snb0hMEfuVg4Fknb+32KSfvf1UHxI16Anl+/dge6hOdrFf2KzdJ1NEgd+wWPnT5gWvuIeEOF0vG5DDkLHzgLpCQ5IgdEG6zPDZmLZq9VZY5g6uj4z6FDCgEQbpDFqPDTOPIm7TQqvsRtnPBjt/ae/KHygXY6dU77x7/+K9bx8jNsJyQ6QpGvQ1xVHln+HAniLf8FJe7hNJ+NBcqjxD/TjJtx7wYHwia5IEjt2nFkkNJMU/KFKYrNJLZNJ0+gbDpwgDgPSpGOQCswkbNZ+EmFG93kGr/6PTrn9OnPM/QS75HLs6HKl883XSVMu2QFSiGX+1hVHlm4ifO5vvn/b9vBuOoOlFZfTBt1t7/gdDZQ8hIWRTJsGLkiBYTugkjGXxE2pjhp1Om3Q4ijQyQwBUpN6ui+waNghbsQaMlcOXjM33PmtnpIJVqxjl2U+aX+X7ADpqvwyf+uNI2ukhGyRZYeOnPaiBAhZobZGoTtvsqpdn96pJBy7gSYnaAdACq46B0P8BEJ3Y5Y5YhBfE2SjhwCJXn3CxQWZTSJlSIgk2u5Muzp7ZXhVTESsY9fPfoxYOtkB0pTsjO3UOuTIcu9QzAGEv/ffXHShBOML9AMAQ+KPvU5clyTJOHYdg5RA6K6VZ80R4EhwDPIgMatPuJnAL9vLxZ6nH3mkMW96a2b1As/Jczl2WdeH8Ee2yg2QpFPDp
5bea3Z5/RPRZkjSIpxIcjw8jNJjNx7Qi+MEu+369I40K9N/q2OQxE+xWlkYmWx1Yl9WeATyIN2iV5+YZro33MGmpeUMRuj8IaJ+Dko8J8/Vf8gNEvqyq5K1fLKrjq2jPpAuIZIeYXfOQQmEHQA3j+mNB+EMH6dpA8xy8IT05FQy1sQccRMb5ECKlSSIEWtfrfGRsKuRNEhN94h9bAHLcdCuOKzBOYzHbkS4KLmdPPdySYUsRe0WXEkrm2K3TaaTa3Md9SXmCJF0mvxpo3T7CHLlGGzgK/OefeD69I5Cd4KqWOd57CRqZ5MUj9s9NufBaKAiiDNASYN0S7wcElUXi6Pjq+5R0NTvr3owcjl53HUwm/2QWQdMn0X+3AqxXOa65ahYvAbHRk5zFBul6/u9DhtcKZNXHDvh4/UJJaiKVQGSRVIMsbtGzjTqbowGSqYpMSksC5LHIHG0066LtWeZtrhI2lluLeBg9oo7/PDAnJZGZFJuZkh2lZf0r38UrBPQAFEv2Cj9f7/ljiEc9TNJR2FIL7xzNybbMb+TCVlbNWMu2pnZgdqcYYx4V3XKGXWZL5YFScogkYe735mtZdMeUNfI8/jttfPuOqYtd85LLWEhsxZME7ZokFvlZZ1zVCzCJNab9Cs47eHcN37DTQ0cDfFiub5eX6AGzGFJFjpJEaJIMs3oFqlo5eqGMdz2dojtyxp5KVMoCZKMQTpADFKBdt+Y0B3uGlnAAJnNwrTpThn/3oiwG7LserKyJRbbygvd3IUUaDPAZpF9CU/WnvsTf5bdFk4mcXmZ9/P6giS/FBIvaTWwso9VxYymkXGrnTf4FA0MDEl/pyRIEgbJMSXsbO2ss8mkHbGeRviMA3vlZWnVd4aJPPhSkTjZQMM6SfgO1H08LUtrP27y8I1/g319xI8XX68vQO3gPna0ePFvqaAdFrAOiuuTGrVZ0zDqazWuXSxk5f1JOZCkDBKpnXVNoG6xnbsGlavXv0p7fl6WHnBnmJCuykUQ5DOI1j9HaPmKR+4Xj9wmQQf6VX+TtBx6lNQwQqxxyRkkScYasHKGqa4yfmBsFEHk0/K/L2eEMIFyIEkZJGJt3PM+dgHfNqYZFp3b2gzDktRS6fKBhuPlrzu9FWnSPZLfwIj4d4v36FfhrCyvATg0SWHyG6rilSfoR9M7SJIeIiFVhtUYpWpp1BoUmSNtX7e0L2casyG+SgokCYNELc+34LJIxE2bcu3lPEsSYMmdksdlaadUc7qGZKBhZl0mBnm1GfByz/v6//gnEEp3qbo/yAs3gQ2YpBXeGz7KhVq83FsiG1wgy5P1fOe6vGGt3Jw1KMrmAsZ2hTXTyIeKbUiBJGGQJqmEnf7snfHx8S2ZLdZ/TWe4s9O9F0TSHeeF1tLTnhlbD0vu+VofyeEGEhp6gqNiEQyIrntfns/8e99ARolqwn/Djxc4jy7vPcnkqtLy+HaywW/6GR8xzHbH9wxHFvYFpTT11YYNYzZkiFAGJCmDJIxHz3gBQCRdoB7+qWbBu8YLwxJ/vtYjyYmmxjpOaHAJhhu8L1vM/Nn/EhklJzxe8+XFk5wXpKopkatKyx23CxGzo1TKgly48FcIQVRfGwpsuFIbsbYJv3sZkKQMkvD3/wCnjTEiiZSdk7XJ+rOelpMOS3IpDZLN7hqHy4o66CSv/Wx2AxEI0c3jkdJ1270DvNzg7cWTnBck2ZQGW02XSQrqoi9AyZgNU4NeLWGI/CILSIW2tWNztJMOrxIgqTFIx7nJ2pAkHNej1yZb8CblPYBOopxjJ5uJd2W9T8TSWuQOkiBIxWsofHebRCNguIGXV+dNzvNXTTalwRE7Sgo/QnIe+X0meOJlXLxWDQQWLHcuGKK+obU6sEUd9kmWAEmJQaKCEYwgMON4J/TsEK8T8ur5MbkiPcmZ2G3lyc6uWip13Z3cAIVAKm6+66SEFzmFSkSD4A25OdmGYGk+/uNKOXeBnb/FAj6YkW8HwNQotWcBQ2Y2V
wruST60bxjsrNZ5FpIYJAmD1BI7Ur6orRLnzruTBc6KmN85z9uJS1vlZmKP98QEkq3T3GgDBgnHx23UfMdC4A2pzFW6s7y8mg5JFkedOXaO0PNv5nPtUssJDrRarVp7NA8QMurZ3FBwgL5QgzuZ69QUYYlBkjBI24QGyX8FP5jf+qDACeo1+WUXDy6IUogkS5Wu9kjgm+hmMEikKv00/MN3LATShKRKu2dDRb5tWSQNYL+ua46A+kq5EQCCW2Z2JFcTDeGszwLc8jkpHzFQQpCkDJLwwQ1ADc7fZv1WQbdROkUli48HmiXJAdLU0pc9xREE6bbnVQckUpUOQw5wLMQLtMxl5LKERsMPkJCacEVzsLK5Co6w+kqlWo5oqFQSzhL3ldprgL/6bFtNKqwQJCmDJNqkFUQjGAttqfiugm6j9J/NOq29tgSYJbkBUuNAzwS+sY5w498USMXNqD8KKKaFYyFe2emIHEhjckWxXAGUlGIUToUhZIdMa3ylLnVPBJKUQRJuwnPbbMF2AXcC8lDJWGm+PEvlEd3x6R0uOUA6XN6h7CKmQ2KQcLkFJAkYeF4AHGwvbsw1ZsjWTqRJwAxl4chJKUNQIpCUGCTOXCwtwMnZwHDFLOLnO9Y/qRqmB+c5Lb1m5AZI679ywiMZkNC07blH+30HSXDJWdFXrTuOKqUcQsjIj9ZaUbj0gmumyCAFLykLJ5Nmg7Zo9lNrMlPztd5lMxtL4uVrm72S8c2KD9Kyy/Igkhb3w0ESZyZpRgKk3PrhqFBq57LDGKF2K7pVYgTX7JY4Y0FskIRFrwCMO8Gb4JxynE40bQfG3YMluRS7q72R8c2KD9K4O5yNgneP/hdw9TgTRmKQqqPGSPo5qjgEWY5cNFaIVvA1a4oniCQMkm8cgQhCIghqt34GwSHpRNQSgSZlleQSH3ouYAfFBQl6anuZ8BxqlfLot/jRBiFIA3NdxBmiV6VUs7w4ExI0l8+1WzH14A8G6RU1BknUyfGXwM0TzLReKcPCzgc2NU7qg+PgTUklPjQOr+OAXQMFnCw3xf1bwAOJLBTLIelv/RZ3wqgmAKlkdjZ/FLH6SqVcbgQDZOZn22MRunE8BYJUFafQuYuMOI+tT3IQvcUpoW93tbwVlbBnqRfPk+YQW2YJITI1SOs6w27WzOXwT+7cKJPS8o8BHuwSScvEavNI+kfcOHdw+LuaM+od5DNEpkppKJfLZusG9uFGLScumWVgAkGSMEiBgW2oSWGDhW1leMODNkFVsXDGiXndziJaBcFwuS7Gk+s6w27UQI9QYSgHacpj04S6RNrJdFAjdoCTWrkP6Brc9n/gxbkDQSrV0zA8KgHr4+Bj8TPbbrciHwUFKwgkCYM0I2GQRDuxjNoYuNdBa8puhUhPc1xAG6ULBbnUoKvrZk0+rtoG9UhVaiBjGeSJDdhrnTvl5jMZSiwcKHb3b3LuPgiLj/O/eiBv1CV6fEeryj6cEDSXz7ctBy66Fl3hFASSIoMk2glo9e122lwik0Pc8B5ZJP3B78ukBq33QEPbcKW/oMzlv004OneXbDnILuTLZqIikn7Ds3u4MPBZXl74wKhheLqQJqIC/PkwZ7vI1VavAJCq7ykwSP7ZqkQwyW48eJB0GE8OXfD4dlB4vjazLPiq5joPNAC1uGP90r9lg2THG0YZjlzOHcpx+Mb/4dr7NN7YsywFwGgtRJ+TqFWojZipgikAJCUGSZwYDoPjIAD+wHcTe72kBZ84ud1xUrg27bby3kwyUrBwOJTJ7X/1pgMSCUHccB2Aa9Loh2Db/5hdCn3a3pglqWVhNBKuGrZQKkUdlai0AUzG+GN3/6nYRF0if5DUGCThFqhrCkxu8N2JbdUKYDPuWGoaJbSuCkjaXfZp4h+9VIGU49Z3UyDBWfzcWKPCunbu6qPN/8gpqsCapjZ3sBuo1UNZo0JtHw5DC1r1qBCB6aJ7fdVYRF07f5BeEbczlTFIgi0aqJcJNDV+0QaqT
QPYbJq70dc/gacWTNJU+feTuN5QqkCqcntt/yubox+CEAQckOez2eERrBlv+fjvwe5CVJSvskIdLvYEYWv5elt6bFTZV0fZOLVWq9XO5Q3O8g6qhUMun108dTbmeyoF0nui6R8lBmkrdv3AYfmARFUqFcCVGudttLs8iZKIuO9iNZbK7s558UkVSMUxI8t5NH9EQPrX6G+S6Dw32y7xxxGZ30BZd/YL7PIgN4rVEmgAV98nbVQKgKI5tsBnLG+YMczgFtCPx+XVvXHeYfpq+t2uIfGcjNggXRFtYdcEgsPih+2onihjKD437rU6U8Cwubt7cQ54xf+iRC1lIIHca6+j9Yd4HmkP/WKllkOzLTDdzDXVn8n8X+footpp1wF/BDqGyLtmsBvcXM2bT9DKh5h8glNESO1ScKMF7xEM7QO/HXcOxTZooq+m3936UoFBEm9hkwbCdnyQnAlde9Ulr/+GUhoQSb7zUZPlt2O6wDypA8kiyeR0rt/zox/96Jl/zns4a0765iiYuhzAIGXgMOke3uuI64DNfQEN4CyDt0YzVloL6AZXM4YleCy0R8gUq6PhbK4WaphVysFB02cX98bg51Hn6AeSEoO0U5SxM1Peiv8FQOLaEmcppAK16pIHERTW46wBTX/Zn4n+0vpLIUjFxlyYcQt+TkttOxkNIJW3jun3AEi/8/faUGvfYY53/KjPjio1+MNPodzXtggI6gbXMkWNUvraAPX5iw9O2a7ZrlOnLl6cvwNjFmvtMNYJDxIt0xRxCEICJLFBEmfZCer5mnShEohfcwlwcKXjbSxzDmwwDM43SdYAKe6xKCOVIFlGqW6Yax2lGRRKFlEWUnBI+Z8Akv6AhNgumB8ZZr2ezWa//yQXo1J7XxZv64AB+mTXa8Gz3I3g1cgKlj27s+rjkJ16sPoZaQgkb5z6SiiF6vKhB9E5etQZ+oAkYZDEad+Cej6mUAncVh5IfqsuMaE7J1cVWi2TfzRJDpAyqkEqFkuzhsUSf2EfKYGDehKOrP6V1HOJQ4G52qxhtu03hrJSfbJLQcu/5AzjkMB27Np78RA0TsNrgh51tArIeBqXVx9EYpuoE/QBSWyQxAE5sUGiCpV8QHKWcHGtukTP3k5SXRpApxRO7kPSA6SMepCKxerYrAEbwneWcQAO6kkYNv8kcDswxDJRs5CxhuWm1Y2s/YW1umwbbv8FySrDxryk0SDGyczuE7bacvZPaDqkPD5OnR8fpHgM0m7qW3xAciINvqsusZ0l4YQUt5VDogOkTBQgAbXAVA1oJRqeJnBQTw78MSBpD3+LyhDuGTc3S6q0qznDJB5lX64u9Okc5X1WUqqZxmPRtWO069TFQ5ehrzci7+tVyCzxZ6uP1eFEnR4fpK+FjQ8UGCS6UAkG5E55AKAYOe86BydRiAl6/CZ4z/DsKOkBUiYqkIBaqDGvNSTPiRu6uUH6w0WeSbIDfXVkhohKph3KBitx1UM0sh/gO3c143JHY5gtD+xAhPRZF4h/CqwTFdboWNTpcUGqiAu2FRgkKi8cU+IJa9OMLJjjJNwwPk51amCqy/12dCXhAVImSpCAqq0atE0wYNyWebJg++8ncXKRbZJKQ6RQGyDkGvtUZ416ycEoH67JLze3qWYY3fzCnXqMfD0ju086rgc7CuHo5Z35QxcvXjxlqSOaqbPjgvS8AoPk0znVEVWoZFsbFwDCfTRd6yX57Wh3+Z92cbfUKFqQkCyc2qjntcXTvlwpaBzxJDgo6/9/+AkwSaDVAQmO50f5yRCWE7avz8EobCVQlZNuWzHmPuv6yp4lvp4ZAifYHWWfUx3o1R2kz8bHLdj49os6Ox5ISgwSbzkk1y6IQaLWymQBEHZNabLrJVHuHzPTNPV0cil2tuIACasBeJpDj8OFz8Zfmjhkff/E+PgdKmiNQAKdfv8uMEn/u4E6QYz5FpoO5I1hDGbNGht1UFCXM9wjmr66OTCv6Pru6ggnqJKloRyjLCXSW
vzy6gOX9aROjgeSCoMkbMHl7KKftqp0UdKkROU4bbTG6B1RM02Nwwmm2NmKESSsaqvUXnOd+RfoOVnLZuGUG3xAfhnUU4haHbRNEnYr1UONjRwNeNoPjRitoiqQkGicQo0YhayhKbR5ZrEh6uQ4IMVlkMgu2MVbnAkicU2ga4VztkLCCettTawGiVb8IAHNuI7iIvpxNfN58AC/1AL8wPX8vGv90WrMkSnYQtYw5ReZZTXrWimzZOSKikFCsnECoYgQs7hCnEBWLhVjpM6NA1IsBslJw3OF45xJIHFNILuNa0f2TNPV8s8iuFehlQxIg64gi1Nm5Cw7sXnRXZjkUrVNpmD79hlG5yshl1zVvVmzGg1ISBZOOBQBMvZCZsD6aShLhRmpc/OCJNGlWMIgiSZ0nTQ8vwkiccaraxv3smR41dnG0t9MOvINlQxIroJzqpEdtRASWKRicTP/80V6CrZmGvlu2sWZI8zvu9EuRgkS0tlTJFAOzVPIVW85spxc4t9Rp+YFSdw2X8YgCUY31CSTK3vf7gApDJ/DpVmobVwWiYQbUhD5hkoIJKbifILq2zDhHNM1V4UfI2cK1vLq6t0tazdq0GYBGqTIQcLaYnl7yDyZonimSJVhQhJ1ah6QquUl0fJCMn0YBBvQlPTPZsex7mTtBSaEzmHT0zKvf/aCsyMy1kpD5BsqAZA237x97tyj/8A+BKZNJHiBDHZu092HWDlTsDmzC68O78yg8mwLYIQUG0hYlnnCoycT2qeO/L2+Op78oi+n52QvvbcUbE7EneqEBkk8QdQQskgnffurlYbIN1T8IOGekef+I3wEDEfMqsugpdAibxfOFKz1M5zvumNPlY7b7TOglxgvSEhnHZ6MbDYX2kCVcMSBOjVOsOHSl8EciDvVCSEQxvSkQt/igiiwzRnBRY1L8YNk94z8S/AA2LbFMJxHPL1rPnE7Zwo213msjla+7jyNdTRCSwIkolN7L67i8ZMF1D7LQskFzCtSIBWrXwc9xQoMkritvvhLZJ2/hHO+HcUO0jW7H8qfBN+/yrY/YRcUA3G7m+4dOFOwheHuggy2qPaWBQORmSRIWGdpoCyi1oCN8g+al/aZBpqeo86MmyJkkeT/jKowSEK3TSL0LW7OD5y/pHO+HcUO0k2nQ9dvZTJnXO+yPb7vnvOuiN62q2Atw9RWc0wtZ5DUNpCnmAKQbJ069fjiIZsowFR2BE5g10DyAyiIzGXhtCwOgFNnxs/+tkjyC8zFYpDE1bdO/6EgpSKlAStJkH7Pu4ILeH6dVnf3PIMkZwq2b8SYU9XPtOpUJY2YzoGkTmdPWaOoi6vjn91xqMIZeJ+tPrAfKurM/ErNb/k9yrEYJInRj8xasZPl7wRdrXgVO0gnHZCsr3f3tXvMHBJcW4maSQIxb1wFW6kbo+q6pdftxNX6LHollSC5tQUkiHvrmKgT8+0iZJHECwgoMEjiOIK4ttZpBx6gNDl2YUG6fu76yfvXxNv56sh1h6P/irOAOTgkqokxG21o1Y2RAnHrzI4S63w0a2KO+oi3uC5A8qq+AGIAABtwSURBVBF9PX1P+Vb5MOdRFa92dGAp+H2JsY04qNfcTZWX+ylNjl1YkBYRAtdP3zyyX7y1W/vBBJKtb/yed7VYGLSjrBTY7gT+d3XUqYJdU+fWQbXJlGzJwHO7PQ9ScYFHkpACYSxN7BseF0PSWBKvcZkqxy4kSPvP0bp+9+aJ4KRSSptPnLzNfBpw5Fk6DOaTUH9fd8J2JdNYI1WwwyrdOrhzUt1HYg0bAKTikJckcZxAZE3EvqHMXKxEVC9djl1IkE6c4+j69ZsWUAH+3pETN+/SEJ3+izfvnvxLYNVlz/Ll7oX5AEjXge0Dq4mR6UlreKRi8ohWw8BDr5yBX9kAIFkkudOFhM+4AoMkMRcrk9CaLscurGt35N7pRzyYoBavX7d8Plonr193b754GvqEcL7IvRKFO2hX3P+r8DMnwRSs3fR+y
DS7S63jiYTtsnXqSNarqPMKXtW8ssSSJDZIolRTCYMkMRcrkdCaMseuo6jdtfs3r9/2YCSlu/dRDA6uMLHiXYqCDdptJt/yP1HNhWtGPYJVvEjYLpvHL2wIkIqXWJKEBkkYbxMbJIn5IYmchlbKHLsuwt9HLJz8rRNHt0/fJ5HsQbjCxKh3p+Blx21zppz+ij1/v8+Yi2KN0DwGaXhjgcSmsMZhkFriTFSZnIadKXPsup5H2m/xdFoI1KPrN0/QUT64XDxnkXLYQsjJvbtt7+BNwtGaMRvJWrujOP6NUr+LGwak4iUq8S4OgyQuGpSxWbvTk2NHpGpC9tqRI/esUdHd64xugjiEJ1AOF5jgOHYo+u386ZD4I5sjjhlTIbIqu510tFFAolJYxWHp7g2SRHKQhM1KT/GEowTqkVDNJC/wBkIQK86fDkh/HDFHdtrqxgPJSWEVDvGFBklsSySSgyQ2uZKa4glH8YOEOOISAdZ0oTy+X7dB2hMxR8UWnkjagCAVq89DMxFmlRa+xLZEIjlIYpOraamKpRU7SKhbgzfybWkaDCBX7Lrz2r9NOFr8w4g52tAg4RTW7g2SeBpVIq4tzh9KS7sTVnGDBOMMfI7m0BFNoMETKDv6B5ijfx01R8UG7iS0MUGySLryfgwGSaJH8W7xdO3WlLQ7YRUvSIPo+eRytEwOCdokVHb0v4J44B//88g5smdkNyhIFklPPx29QZKxNsIku5l09LFzK1aQauinhO/XOcc0Mdiao1Y7inp8BLXBQSq+UhY14hYFAcQGSTxPJRP6Ppz4Ci5cxQjSILY57mI+KKb36iF68T2cAxctRxsepOLu8uFAUITd7hR08bK+RFjOl7rcIKz4QBpF5mhlmvfmILuq1w1XP6qaked9SqHmRjY4SJwUVhcn3RokieGPOPSdxikkqLhAmsakTFS4b7OdVzNbWI5K0eQF0cI5QhsYJE8Kq4uTbg2SRObPTHmraJM0TiFBhQOpNSjehqNBglFmzmcH7ud2B81RxTSj5kiDVISJd74kdW+QxMW3EvXlqZxCggoH0tuZU0YtLEyVZRKunPAUIBHV2IjmOM1R37AZQb63SxgkM4f/3oggWST5eV8qDJIw80eco9Q4kM5IQyYsSOgzey+P+hLhVmXOHv2sBMULKsOHLj5+sAraOme/z9ijgRFDZXsGH2GQNlgZhVuX/HpHqjBIoliEROnsZPknSV9cP4UCiR7JfO9yfoaTd0prZvSGY2hWRvwtWSNXN4zhNhPwLpB4Q87OyI5SbVODVPTtwhqLQRL7fumrQnIUCiTv0/WTx5eXZ0fdAYTKzPTIjQl6u8ejvhiVRr0UDQyN2CvxRR6wg8Lp3xsdJEASZ8AvMkhiTCQMkhi19FUhOQrn2uXG/WMmZ1eQgr/v8fz8jZFRYssGxmYNwxhxUVRZA8svI5D6Ygg0AGGQ1nB/yA0LUrH6Y6/1aYgcNyEmEutciuehrqavCslR6PB3ZeSQip+FlRujQ7k5wzDXauycUQG4ebMWYAiurNHB+sodCIO0oZqf+MjbhVXEidiYiCkRz8WmONKQ6XAeqTL90UsKaDp1I+fqCV9oDxvGXG2gOIoNUk5Ve2+RNEiObpWvMGFoISdigyQu1rsinItNcaQh082E7ODM9LLxeG9XPxLjFTdF9RxYX2IUr1hUiWeAVLRBqm2gvnb+mmQ73qkwSCJKxB24Wk+nN9KQUZPZMJgbHR1dvjGPdHl5eXnE0vQMT6P1y9+hulVv2cFSBB/jKuFoYNhUsmyLhMZQr9US8SQ3NEhs70gVBklYYC6uVPLLaXB+anOm/Vo/+Dt/2XU9GvnoCjDiLaNojZowtnD0ySwO6kGS4Pr2mCKLoznCUU55J0j/I0OVfd21LObc08tr4M+hvO8GEYg6rQ5BAiTZNiQegySqVJrxy2mgfRZ0mS/bEeLX3NejEVm4Ik6QGhYuI3ZsofIRPICXaiBGN1drUFvhyHd8jp23RLZLk
Mg9fY38OXiZv0EUor6kU5CoFNZYDJK4Usk30pAvDqF/rOSKRXCVVwaL/eBn6m3rN2wNXQ9smswG2Va9YgSpZdbbTIRux0vgCA6Zs2OUAzdmmrWBuB07ZSC57qkFzhrwJi70Fwff5m0QiajT6hgkp3ekKGlbhUESJwft9q2eyDtwDEETlC827LeKKxkKpMzlYvFQRBc9RpBMk8aoUlsb/gz8yHzGbNQ26iQA0Y7PsXNAIh0iuwOJ3NPB4gX050oD/jh6NohE1Gl1DhJIYYWWRpS4o8QgiZKDGku+1RPUNTXhYt45ZIeABotmhgYp0x+ZIxAjSGNg0gis1ZjLjQyDZebybXCCZylcqnkjS2jrM7kVgBGJgJRVBBK8p2+jX0T0Vj9ng2hEnVYXIOEUVlFZqwqDJO6rH9Cngbqml2E2aM7tv1EgDUGQLgxZp9evFqk4x0itWbJSYz3fHmvY5eX2MTjDowFQXB7PVCzSAF6PYq2bpS/d9/Rtt//muemR3FPqtLoBqdgEiXcxGCRxpVIroE+D58fJuoOvMQ6c2yLl8Pn1q7zoMXcRarQskZwfu00DbuJgDY/sRZEHSlEXl7tkzwCjP5VYJOu+DeYDN8jhb1d6T6mz6gokkHj3S0KDJIJAxiCJ+0r6z/8z7nIuQy5qPm8H6Jgx0tvgP9AsFYsqI6cJdFolmmaPojpqDMO0IOTbZWOMNABhkGrdrGruuaeHQCB2KH/Bb4No7il1Vt2BZJH09HvBj7iwoYmMQRL3lQyIWtvX9PIQCSVcwPFvzJINUh4GxPORONXJgVShD6NWHJgz9qFHGQbthuLKDSLCIJEZ2e5Asu/p2zj+PXSBu0E095Q6qy5B4qawMhKGCVQYpMAkO3pKwf7FOpRHF950XY/+FeT6qZ/FSwwktt3JyhCJeleG4f/q8SR9O8IYV3AVYdfzSI4VysPfx6EVzgbR3FPqS7oFiZfCSkuBQZLpmrJX5qIP5tnQ9iHwziH6eryG/Ox+9G+1WQ6JgTTCHscDvJRYBfFUi6MqlhFZIQlbwu5Act/T/FART3G4NojknlJn1T1IFkk7/SdLFRikbeWg5kVN4Pr9QvBF951kfRtNOnim7NBPW/E1lZc9MZDcT2of5qiODFJdvAe1IiCZo9zDk1LAPUVzg94Norin1FkpAMmdwkpLiUESbTEZ3KLYfU0vUO4yeo8z930oD6Olii44UGIgLbPHMY45MhIySDZIeCJJCUgNavoP3k0eaervKXVWKkDiLX+OpcQgCbYQ1Ze7r+khihtfkIDWQAhPmZILNkwvz8+jn5pTe43vHwUPccE0kjJIxTwamw2MoK9WAtJr1N++IAGpvafUWSkBCZDEdb9iMUjbBJ3sPNd0qNjAJmwF/ZK5QCJ35VCPgIRVzRsjOJmhbxgXmCdgkOw1+/BEkhKQLjupqSg+50ea2ntKnZUakIpD/N6RCgySsJ5vprwafLYekA6BISgM7TQQUi6QrLuSs14+1G/n5KlQ0iBRyQyAI5SON2yKP6haBCTr/+BPNWOk13BU7lAOIcUhLYJ7Sp2VIpDcy58jCVcfF5sbcT3fzj8lqMX2WnmnjKL/bXQ9WNcuh98d5CSwPnzhjTfeeGFT+IueMEhjpllyOGqjQqRS3HNIQAQkPJGkKNhgl1Eg0+TZIIff5d3TTdYtfePYp+EPgzorVSDZKazMMy6qfRAbJGGz78C5WJ+Lbr0Gr/trF8j1cI2RTPD2ICdWeuzgU0jbXw573ZMFKYeTGRBHY6Oo48lI3HNIQGQV2RJyK1VF7S7DsNwgnmP3buB3T8+8sR3f04MvhO12TZ2VMpA4XViFxXgyBklUzxdnw5Mzrz5FKSRKSYJkDY/W+iiOquYIDDnEnGWHRNK/8URSwqXmDw9St3T7G+FQos5KHUje3pEqDJJoF8cD52LV6uH2p1iFuuwJgtSYM+xVkABHxZoBc1b3GfFm2SE5IOXAn8mCdMx1S7d/EObT1FkpBAmQRJffx
WGQGgcC52KVinD06quv2s5AiLFSciC1qFzvAuCoWEexbzO+AnP6cAhIqLQvUZAwR9ute0pQejXEryN1VipBAiRRaMRhkCbjM0hnDlJWaNM7xChJfz4xkMYM0+7FVTEBR63kYt9FpyAJz8gmCdImhNEx8O8zZPwbwihRZ6UUpGKVSryLxSD518UqF/zJ2v6Q/PkQ/4Q9JztSSgqkmjHc53BkAnbsUEMyR0TC8Ki0L0GQzkDP4jnbBBGU3pLdAXVSakGiU1h3luMwSLGtX/4Cy5GlY8jB2y7p3iUE0ijLEagBSjTUUHRAQjOyCYL0KsuRRdZb4X4dqZNSDZJFEkoXEtobcQtiCZsWW0vITyE1D5nXzrwTxr1LBqRRO5vB5sjy9YbQhGj0q4px5YAEgu/JgQR/Gw+yQ6JNB8O4d9RJKQep+AoiSWhvxN2+JQZZsS1zCX+8XnC/+sJ2+fFpIiDZnVThkAivxzeLVigajj/NDmkOZ60OwRnZxEA6w/lttF59OcSvI3VS6kFCKaxCgyRcwELGIPk3alAsOCp91fv6w+ek3bskQGI5mkMcVdGLhViWFeOJpH+XkgXpLR9ejsn/OlInFQFIFkkHpoQGSaIPg8ggXYlvNaSDvB8vIPkfsARAGjOyNkf77PXKbc8uiUkkoHSA9Cl07HjvyP86UicVBUjFoaWnhQZJog+DwCAJs1U705lNlly/RscCYjnHJKcf4gep5cQZ+taMWZINRDy7OJvZMZqt4zkt2JcyDpA+BffU9Rr8BeSzIv3rSJ1UJCAVL323/H4wBQoM0s4/FUFy0CYS1T5GvwoM0nY/UnCSiegHLHaQqk7D1b5hJ0KXuGdnZ62iHKHIQTrzAo5qv0PfoU99nHUk8usoiN5RJxUNSMWp95YC13tVYpDU97unk+kOHrNf/iD454lE794KNEqxgzRKMghgOaz9cinhmF3cIG2ikuledbzzAIME9FAqekedVEQgFS99GVjTp8Igqc9WdSXT2d7aq0EGCegFbMZ4oyiiuEEasAMNbcOkWqmO4sWQk4rZxQySK5mO/ByeCTRIGefX8Z2g+06dVFQg+S1/jqTAIEWwXqwnKRXPvz4U+8skhzjAKMUNUg5XK/SNGHN0WKEOh/p9yXl2xTGjEhtIhCP71uJfxzeCDRIQnttACUR8UScVGUjuFFZGCgyS+vIJKpnuU+xWo4sIvQCBt0x+wA76+gJxg4RjY6U6i0yV5NmVYj4eR2wdRaQgbXK89Icv027DQYFBAsLRu4CREnVS0YHkSmGl1Vg6LOBIaJCO+67i0rHeoYxQ5swb6CIew17Ay8KPf0CSw338u0RAKowYJktMCT3Fa4bPx2JQfCCxyXSfvmP7GcfwvRUIZwz5OhrUSUUIkn/vSNGKSlIGSd3lRvqA5ihjO3rH0ABIYsaVGCWfir/4Xbtaac0wcq4a2DZawrU+G/PhULJBggcRJUgv0xxlyFjWuslw1CuxAxKp8Kn4o04qUpD8SBL2RJEwSKrLJ5BjR/NyBll2eC2fk9rHBwcDUIobpEbeMIxRz6QrmkWqxLm0mFtsHUWEIEHHjkmmQ7+Oz0mMeoneeCoAJeqkogXJTmF1UdC9QVJez/eG99oikrbLeQHoE+SyP/WqZ6wU/4Rso8XpyICGTgkGv4tO1mrUIL3q9SUQSdslRr1En5Ipke0ve5x26pwiBqm4wCEplQZpu/vHK2PbJEvSybGfEv/uqYNvsdc96XZcWAikBIPfxdhA2sQb3NqhWVGogdqPPRP13AssfdQ5RQ0Spwtr9wZJ0Oy7E73BtTuEJHGowdEmZ1L34MtUj6eUgIQe4qRKkehjiBwkOBDy2B1C0rEQezrmTOo+9xaVP0adU+QgUcufY3VvkCIoMAdXipPBiMqQngrVEcOpQ3+K3mlKQILTSEMJBr+L9sIuEYPkNxBCU0uBE+ycDz1H3VP7d5U6p+hBKu5ge0eqMEjKC8x9w6HwbsjEdxh9+pY9AWh/NiUgQ
dduzUign53rGCIHyXf6742wTgbUppeTBsnVhfXAkoAjCYOkvMD8HV9cAGLSNfyUPnh5O+uKpwSkURD+NhMMfhdjAgmOevm4vBPayUB7fAGbJbvAkzqnOEACXVhtK3RV2PIxAYMEU4F9cHmZX4gkoQ/eeo7yLVICUstYS6p9kK1YQDrmDdkRnTkY3snAnzwGfh/tvVLnFAtIdAqrMLKdhEF6ISAceuY5bvWXnM5ssiFMCUiWScqaSXQqphQLSO/wR71QD8N7do4+dYwZdU7xgOSksIp7ECdgkDLAZPvOuT70tGroSGkBqdg2zURDDfGA9Cnjg7l1rEMnwyXqnGICCZA0mSaDxJif4IuuSKkBKXmRpcYUg8TE4Y4FOBnKRJ1TXCDhFFYJgyRoh9d4umuD9ME7YBy6/R3bRAd5dsqkQbJFCpLUgXTmhVddk+DvBDkZqkSdU2wgocQ7oUGaEjV76NogURWTpHX3q3FcdA2SLeUgveBUkuGahzNxOBkJgWSR9Hx5q8AgiboPNZ7usieknQ2HZgSAOxDPRdcg2VIMErvaznZ4J2Euv5qBUICoc4oTpOKt75a5S806ErbD69YgvfwUK5BjH89F1yDZUguSkxFJzZe+HBCzUyfqnGIFqfr0d/2WP4/JICGOtr+x6dNNOPng4MOYLroGyZZSkDBHB489fPjByzZJBzucRw8n6pxiBalYfZJXVhGfQUKFXbifBe6YfhBWInUxryApDZItpSC9SicbfIqoehnGYTtIXggp6pziBYmTwhqnQdpkW34k1Jz2uXguugbJlkqQ0Jj3mP03MkowoBR9Y3jqnOIGqVjhLX8ej0FCRbC07XH6BkV/0TVIthSC9NDFEdV/S77gqGNR5xQ7SCDxzo8koUFa6sogveW9vISkyIPfGiRKCkF6zhtxJSTJL6bYsahzih8kJoU1ToME/WZ3GcoHsV10DZItdSDBQe87rhdxyCHE+r6dijqnBEAqXvLpHSlaMamx9PPdnPWr3LHQC3FddA2SLQJS112EYKmEt0QPrVLaxX5lRZ1TEiD5dGEVtl+dLL/SxUlv4v14ZYJKkdRKg2SLgNR1X7s3vI4dEOSLc6uVizqnREACJHlTHETtVxtLz890cdLwZ4qTUAeuehwXXYNkSxVIEBje8HbTU+F6MnQq6pySAalYfd4zHpIwSJUuQIIGiTtFtymei65BsqUKJP8G3m9Fn4QMRJ1TQiBxekdKGKRiFyAFrCjxViwXXYNkSxFI0CDxg9xnnoshDpsOkCySrjQYTsQGqQuQPnVPIVE6E31aQ0aDREkRSEG9pB9Gnx+USQlIri6sMgapC5BkVpSIVhokW4pA8mmhFqOoc0oQJKZ3pHCBCmCQOgcptjiOvzRIttSA9IE7pyF+UeeUJEiAJJLKIOp211j6utgFSAEtZeKSBsmWGpDe8R/1xiXqnBIFiUphFbVfnSwPFbsA6bnEvQANkiMlIJ3xDcPGJ+qckgXJTmEVtl898CXYvFOQYuluIpAGydZsXQFIsTTaEIg6p4RBIimsIoN0HBqkjkF6KwUXXYNkC7fjqsA+lZ2C9Fw8ucaBos4paZAskixjJGmQOgbpYOKhBg0SJbK8rQEWW+8QpDQ4GakCqdj8unxc0iB1CtLD5OM7GiRKKkB6I54qsmBR55Q8SCDxTtIgdQrSW2m46BokWypAei4FTkbKQCpWD3z3lpRB6hSkNHh2GiRHCkD6NA1ORtpAKlZ/HFzSRwxShyClwrPTIDky17oG6YU0OBmpA8l3+XOXQeoQpHRcdA2SLbz0ZTcgpcKzSyFI7hRWvkHqEKR0XHQNkq3uQYqnOa5Q1DmlBaTipG/Hu+PlV8hGHYGUkouuQbKFQcrBBTg7AimWtSbEos4pNSBxlj/H2vmevSxWRyCl5KJrkGzZIIE/OgLp5RTMxmZSChIgidena8YxSJ2BlJKLrkEiahlD3YK0Pfk8OyDqpFIEkk8XVsogdQZSSi66BomoZZS6B
Olh8sn8UNRJpQkk1/LnXoPUEUgPuV244pcGiah7kGBag/I7FF7USaUKJJDC6q6T3blELRzcCUgvpOSia5CIugfp1VTEYVMMEkph9TVIHYH0TjxtoIXSIBGVMEhrHYOUjjhsmkGCKay+BqkjkLbH05FYKA0SEdOxuBOQYGu1yFeGkxB1UmkDydWFdap8i36zA5BSc9E1SERdg/RGTM1xhaJOKnUg4eXPsbaVm/R7HYD0QlouugaJqGuQ0jJESjdIxaqTeNdiDVInIL2TlouuQSLqGqS0DJFSDhKVwuoySAxIT8krDUMkDZKtfN0HpE0h7mkavPXUg2SRtLPBMUidgpSCqTsNkiNcjoRWdekQpFR46+kHqfgKTLxzG6ROQUrsQtPSIBERkGAToQ5BSoW3vg5AgimsHoPUIUhpmEXSIDlSAVIqvPX1ABIg6Ur5kutFGqRN0ko+8xtIg0RUH/EB6Yz8PU28ThOKOqnUglQcWio/736tm4XGkpYGiQhXUXhBWneiTiq9IBUvvVdxv6RB6gVpkGJW0/OKBqkXZFeal8BfGqQEpEHqAVWZlg0apCSkQeoBkSoKDVJy0iD1gDRIyUuD1APSICUvDVIPyCmQhaVmGqQEpEHqAbWNAlVprkFKQhqkHhCpotAgJScNUg9Ig5S8NEg9IALSPg1SYtIg9YBmmbo+DVIS0iD1gEgVhQYpOWmQekAapOSlQeoBaZCSlwapB0Tq+sxR+KcGKQFpkHpAbDmSBikJaZB6QBqk5KVB6gFpkJKXBmn9i9T1FYwa/FuDlIA0SOtfrioKDVIS0iCtf2mQUiAN0vpXy2iXgNoapOSkQVr/KhlEDfi3BklLqwO1ai2spI9EqTRIWloKpEHS0lIgDZKWlgJpkLS0FEiDpKWlQBokLS0F0iBpaSmQBklLS4E0SFpaCvT/A6eIrPWauOezAAAAAElFTkSuQmCC" display.Image(b64decode(base64_data)) ``` # With the data you collected in lab you will use this analysis notebook to investigate synaptic connectivity and physiology. > This analysis notebook is a different interface than you have been used to working with so far. But it is necessary to explore more complex datasets. As your physiology experimental skillset expands, so must your analytic skillsets. <a id="one"></a> ## Part I. About Jupyter Notebooks Jupyter notebooks are a way to combine executable code, code outputs, and text into one connected file. They run using a <b>'kernel'</b>, which is the thing that executes your code. It is what connects the notebook (as you see it) with the part of your computer, or the DataHub computers, that runs code. ### Menu Options & Shortcuts There are also a large number of useful keyboard shortcuts. Some common oens can be found at: https://towardsdatascience.com/jypyter-notebook-shortcuts-bf0101a98330. 
### Types of Cells Jupyter Notebooks have two types of cells, a <b>Markdown</b> (like this one) and <b>Code</b>. Most of the time you won't need to run the Markdown cells, just read through them. However, when we get to a code cell, you need to tell Jupyter to run the lines of code that it contains. Code cells will be read by the kernel, which will run whatever it recognizes as code within the cell. <span style="color:blue">When you're in <b>Command mode</b>, cells are outlined in blue</span>. <span style="color:green">When you're in <b>Edit mode</b>, blocks are either popped out (shadowed) or outlined in green</span>. <div class="alert alert-success"><b>Task:</b> Run the cell below to import necessary packages and set up the coding environment.</div> ``` # In Python, anything with a "#" in front of it is code annotation, # and is not read by the computer. # You can run a cell (this box) by pressing ctrl-enter or shift-enter. # You can also run a cell by clicking the play button in the menu bar # at the top of the page (single right arrow, not double). # Click in this cell and then press shift and enter simultaneously. # This print function below allows us to generate a message. print('Nice work!') # No need to edit anything in this code cell ################################# import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy import ndimage from scipy.signal import find_peaks from copy import deepcopy import math from sklearn.decomposition import PCA from sklearn.cluster import KMeans from pathlib import Path import matplotlib.pyplot as plt import csv from scipy.signal import hilbert,medfilt,resample from sklearn.decomposition import PCA import scipy import seaborn as sns colors = ['cyan','gray','red','green','blue','purple','orange'] merge_cluster_list = [[]] %matplotlib widget ``` <a id="two"></a> ## Part II. 
Import raw data ('.bin' file) ### Edit the code cell below with the appropriate information, then play/execute the cell - **filepath** is the path to your ".bin" data file that has simultaneous recording of muscle and nerve. - filepath needs to be in quotation marks. - ***if you are on a windows operating system computer, you need an "r" before the first quote of the filepath*** - **number_channels** = the number of inputs to the analog to digital converter were recorded. - **nerve_channel** and **muscle_channel** which analog input channel was the nerve amplifier hooked up to? This is "nerve_channel" and the same logic applies to "muscle_channel" - **sampling_rate** is the sampling rate that you recorded data at ### You will also get a plot of your raw data from both channels (nerve in blue and muscle in green). - You can interact with the plot by zooming in and panning. <br> - You can make the plot bigger or smaller by dragging its bottom right corner (gray triangle). Note that when it gets smaller the axis labels might disappear. - You can save the current plot view at any time by hitting the "save" icon - it will save to your Downloads folder. <br> <div class="alert alert-success"><b>Task:</b> Run the cell below after editing the variables to match your data parameters.</div> ``` filepath = "your file path" number_channels = nerve_channel = muscle_channel = sampling_rate = # No need to edit below this line ################################# filepath = Path(filepath) y_data = np.fromfile(Path(filepath), dtype = np.float64) y_data = y_data.reshape(-1,number_channels) nerve = y_data[:,nerve_channel] - np.median(y_data[:,nerve_channel],0) time = np.linspace(0,np.shape(y_data)[0]/sampling_rate,np.shape(y_data)[0]) muscle = y_data[:,muscle_channel] hfig,ax = plt.subplots(1) ax.plot(time, nerve, color = 'blue') ax.plot(time, muscle, color = 'green') ax.set_ylabel('Volts (recorded)') ax.set_xlabel('seconds') ``` <a id="three"></a> ## Part III. 
Detect spiking events in the raw signal from the motor nerve.
> If you ever need to restore your "df" dataframe, start here again and re-run the next two cells below (for example if you merge clusters you did not mean to.

### Edit the code cell below with the appropriate information, then play/execute the cell
- **spike_detection_threshold** is the Voltage value that peaks need to cross to be counted/detected as spikes. </br>
- **polarity** controls whether you are detecting spikes based on the positive peaks (polarity = 1) or negative peaks (polarity = -1) </br>
 - what this does is multiply the nerve voltage trace by the value of polarity before detecting peaks based on spike threshold.

### You will also get a plot of the histogram (distribution) of peak heights for all peaks (putative spikes) detected (peaks larger than the threshold you set).

<div class="alert alert-success"><b>Task:</b> Run the cell below after editing the variables as needed based on your raw data signal.</div>

```
spike_detection_threshold =
polarity =

# No need to edit below this line
#################################
# Detect putative spikes as peaks in the (polarity-corrected) nerve trace.
# - height / prominence: both set to the user threshold, so only clear peaks count
# - distance: 0.00075 s * sampling_rate samples, i.e. a 0.75 ms minimum
#   separation between detected peaks (an enforced refractory period)
peaks,props = find_peaks(polarity * nerve,height=spike_detection_threshold, 
         prominence = spike_detection_threshold, distance=(0.00075*sampling_rate))
peaks_t = peaks/sampling_rate  # convert peak sample indices to times in seconds
# Per-spike feature table (used later for waveform clustering):
df = pd.DataFrame({
        'height': props['peak_heights'],  # absolute peak height (V)
        'r_prom' : -nerve[peaks]+nerve[props['right_bases']],  # drop from peak to right base
        'l_prom' : -nerve[peaks]+nerve[props['left_bases']]    # drop from peak to left base
#         'widths' : props['widths']/fs
    })

n,bins = np.histogram(df['height'],bins = 100) # calculate the histogram
bins = bins[1:]  # keep right bin edges so `bins` lines up with the counts in `n`
hfig,ax = plt.subplots(1)
ax.step(bins,n)
ax.set_ylabel('count')
ax.set_xlabel('Volts')
```

<a id="four"></a>
## Part IV. Cluster peaks by waveform shape into putative neuron classes.

### Edit the code cell below with the appropriate information, then play/execute the cell
- **number_of_clusters** is the number of clusters you want the algorithm to make. 
Look at both your raw data and the histogram to decide what number you think this should be. *You will be able to combine clusters later, so it is better to over-estimate here*. <br>

<div class="alert alert-success"><b>Task:</b> Run the cell below after editing the variables as needed to control the clustering analysis.</div>

```
number_of_clusters =

# No need to edit below this line
#################################
df_normalized=(df - df.mean()) / df.std() #normalize data in dataframe for PCA
pca = PCA(n_components=df.shape[1])
pca.fit(df_normalized)
X_pca=pca.transform(df_normalized)

kmeans = KMeans(n_clusters=number_of_clusters).fit(X_pca[:,0:2])

df['peaks_t'] = peaks_t
df['cluster'] = kmeans.labels_
```

<a id="five"></a>
## Part V. Plot the results of clustering to determine how many neurons you recorded.

### You will get a plot of the raw voltage trace recorded from the nerve.
- This plot incorporates your "polarity" to show you what happens when this value changes (with respect to the peak finding algorithm).
- The overlaid scatter plot shows the height of each peak at the time of the peak.
- The scatter is colored according to which cluster the spike was assigned.

### You will get a plot of the mean spike waveform associated with each cluster.
- you can change **windur** to change the amount of time before and after each spike to plot.

### With these two plots, you can determine how many distinguishable (unique) neurons you think there actually are in your recording. 
<div class="alert alert-success"><b>Task:</b> Run the cell below to plot the clustering results.</div> ``` windur = 0.002 # No need to edit below this line ################################# hfig,ax = plt.subplots(1) ax.plot(time, polarity * nerve, color = 'blue') ax.plot(time, muscle, color = 'green') for i,k in enumerate(np.unique(df['cluster'])): df_ = df[df['cluster']==k] ax.scatter(df_['peaks_t'],df_['height'],color = colors[i],zorder = 3) ax.set_ylabel('Voltage recorded (V)') ax.set_xlabel('seconds') winsamps = int(windur * sampling_rate) x = np.linspace(-windur,windur,winsamps*2)*1000 hfig,ax = plt.subplots(1) ax.set_ylabel('Volts recorded') ax.set_xlabel('milliseconds') for k in np.unique(df['cluster']): spkt = df.loc[df['cluster']==k]['peaks_t'].values spkt = spkt[(spkt>windur) & (spkt<(len((muscle)/sampling_rate)-windur))] print(str(len(spkt)) + " spikes in cluster number " + str(k)) spkwav = np.asarray([nerve[(int(t*sampling_rate)-winsamps):(int(t*sampling_rate)+winsamps)] for t in spkt]) wav_u = np.mean(spkwav,0) wav_std = np.std(spkwav,0) ax.plot(x,wav_u,linewidth = 3,color = colors[k]) ``` ## If there are multiple spike clusters you want to merge into a single cell class, *edit and run* the cell below. - **merge_cluster_list** = a list of the clusters (identified by numbers associated with the following colors). > cyan = 0, > gray = 1, > red = 2, > green = 3, > blue = 4, > purple = 5, > orange = 6 - **For example**, the folowing list would merge clusters 0 and 2 together and 1 and 3 together: <br> **merge_cluster_list = [[0,2],[1,3]]** - For each merge group, the first cluster number listed will be the re-asigned cluster number for that group (for example, in this case you would end up with a cluster number 0 and a cluster number 1). ## After running the cell below, go back up and re-plot the mean waveform for your new clusters. 
``` merge_cluster_list = # No need to edit below this line ################################# for k_group in merge_cluster_list: for k in k_group: df.loc[df['cluster']==k,'cluster'] = k_group[0] print('you now have the following clusters: ' + str(np.unique(df['cluster']))) ``` <a id="six"></a> ## Part VI. Analyze the post-synaptic activity associated with pre-synaptic spikes. ### Edit the code cell below with the appropriate information, then play/execute the cell. - **k** is the cluster number (according to the following colors list) </br> > cyan = 0, > gray = 1, > red = 2, > green = 3, > blue = 4, > purple = 5, > orange = 6 ### With this plot, you can determine which neurons have a synapse close enough to your electrode to detect the psp. - The mean and standard deviation are also plotted in addition to every spike-triggered membrane potential overlaid. <div class="alert alert-success"><b>Task:</b> Run the cell below to plot the post-synaptic potentials triggered by spikes from cluster *k*.</div> ``` k = # No need to edit below this line ################################# windur = 0.1 winsamps = int(windur * sampling_rate) x = np.linspace(0,windur,winsamps)*1000 # colors = ['brown','black','red','green','blue','purple','orange'] hfig,ax = plt.subplots(1) ax.set_ylabel('Volts recorded') ax.set_xlabel('milliseconds') spkt = df.loc[df['cluster']==k]['peaks_t'].values spkt = spkt[(spkt<((len(muscle)/sampling_rate)-windur))] synwav = np.asarray([muscle[(int(t*sampling_rate)):(int(t*sampling_rate)+winsamps)] - muscle[int(t*sampling_rate)] for t in spkt]) wav_u = np.mean(synwav,0) wav_std = np.std(synwav,0) ax.plot(x,synwav.T,linewidth = 0.5, alpha = 0.5,color = colors[k]); ax.plot(x,wav_u,linewidth = 3,color = 'black') ax.fill_between(x, wav_u-wav_std, wav_u+wav_std, alpha = 0.25, color = 'black',zorder=3); ``` <div class="alert alert-success"><b>Task:</b> Run the cell below to plot the mean post-synaptic potentials triggered by spikes from each cluster overlaid.</div> 
You can edit the value of windur in the first line of the code cell to change the amount of time after the spike that is plotted. ``` windur = 0.1 # No need to edit below this line ################################# winsamps = int(windur * sampling_rate) x = np.linspace(0,windur,winsamps)*1000 hfig,ax = plt.subplots(1) ax.set_ylabel('Volts recorded') ax.set_xlabel('milliseconds') for k in np.unique(df['cluster']): spkt = df.loc[df['cluster']==k]['peaks_t'].values spkt = spkt[(spkt<((len(muscle)/sampling_rate)-windur))] synwav = np.asarray([muscle[(int(t*sampling_rate)):(int(t*sampling_rate)+winsamps)] for t in spkt]) wav_u = np.mean(synwav,0) wav_std = np.std(synwav,0) ax.plot(x,wav_u,linewidth = 3,color = colors[k]) # ax.fill_between(x, wav_u-wav_std, wav_u+wav_std, alpha = 0.25, color = colors[k]) ``` ### Edit the code cell below with the appropriate information, depending on which spike cluster you want to use to plot the average spike waveform and the average spike-triggered post-synaptic potential. Then play/execute the cell. - **k** is the cluster number (according to the following colors list) </br> > cyan = 0, > gray = 1, > red = 2, > green = 3, > blue = 4, > purple = 5, > orange = 6 - **offset** is the amount of time plotted before the spike time. - **windur** is the amount of time plotted after the spike time. ### With this plot, you can determine the average delay between the spike and the post-synaptic resposne for each neuron. 
<div class="alert alert-success"><b>Task:</b> Run the cell below to plot the average pre- and post-synaptic potentials triggered by spikes from cluster *k*.</div> ``` k = # Optional parameters to change: offset (time before spike to plot) and windur (time after spike to plot) offset = 0.002 windur = 0.1 # No need to edit below this line ################################# winsamps = int(windur * sampling_rate) hfig,ax = plt.subplots(1) ax.set_ylabel('Volts recorded') ax.set_xlabel('milliseconds') x = np.linspace(-offset,windur,(winsamps + int(offset*sampling_rate)))*1000 spkt = df.loc[df['cluster']==k]['peaks_t'].values spkt = spkt[(spkt>offset)&(spkt<((len(muscle)/sampling_rate)-windur))] spkwav = np.asarray([nerve[(int(t*sampling_rate)-int(offset*sampling_rate)):(int(t*sampling_rate)+winsamps)] for t in spkt if (int(t*sampling_rate)+winsamps < len(muscle))]) synwav = np.asarray([muscle[(int(t*sampling_rate)-int(offset*sampling_rate)):(int(t*sampling_rate)+winsamps)] for t in spkt if (int(t*sampling_rate)+winsamps < len(muscle))]) spk_u = np.mean(spkwav,0) spk_std = np.std(spkwav,0) syn_u = np.mean(synwav,0) syn_std = np.std(synwav,0) ax.plot(x,spk_u,linewidth = 1,color = colors[k]) ax.plot(x,syn_u,linewidth = 1,color = colors[k]) ``` <div class="alert alert-success"><b>Task:</b> Celebrate your new analysis skills by running the cell below.</div> ``` from IPython.display import HTML HTML('<img src="https://media.giphy.com/media/l0MYt5jPR6QX5pnqM/giphy.gif">') ``` <a id="six"></a> ## Take Home: Answer the following questions in a separate document to turn in. > *When asked to report amplitudes of events (spikes or PSPs), correct for the amplifier gain (1000x for the extracellular amplifier and 10x for the intracellular amplifier).* ### Figure 1. *Synaptic connections between pre- and post-synaptic cells* Only compare intracellular recordings using the same motor nerve. - How many motor neurons did you record spikes from? 
- Which of these neurons evoked synaptic potentials that you recorded intracellularly from the muscle cell?
- Make note of different intracellular recording locations as needed.
- We know that all of the 6 motor neurons in Nerve 3 synapse on the superficial flexor muscle. Were your results for each intracellular recording location consistent with this? If not, how do you explain the inconsistency? Did you see responses to different motor neurons at different intracellular recording locations?

### Figure 2. *Synaptic **dynamics** - the timecourse of synaptic connectivity*
Only compare intracellular recordings using the same motor nerve.
- How many motor neurons did you record spikes from? *(only do this if it is different than in Figure 1, otherwise refer to Figure 1)*
- For each panel of the figure (each PSP), annotate the following:
 - the delay between the pre-synaptic spike time and the psp
 - the psp rise time (the time from the onset of the psp to the peak of the psp)
 - the psp amplitude
- Make it clear which panels are from the same intracellular recording and which are from different.

### Figure 3. *Stereotypy of Pre to Post synaptic transformations - Does synaptic summation affect post-synaptic potential amplitude?*
Because you will *"normalize"* your data, you can combine data using different motor nerves.
- Use recordings with distinct PSPs associated with a distinct neuron (identified by waveform amplitude).
- Quantitative procedure for each intracellular recording:<br>
 - Measure the amplitude of 5 solitary PSPs associated with that pre-synaptic neuron.
 - Find examples of synaptic summation. Measure the amplitude of the second PSP (from PSP onset, not from resting membrane potential). Measure the latency between the onset of the first PSP and the onset of the second PSP. If there are more than two PSPs in a row, compare sequentially.
- Analysis:
 - Within each intracellular recording, divide all amplitudes by the mean solitary PSP amplitude. 
This enables you to plot data from different recordings together.
- Visualization:
 - Use a program such as Google Sheets or Excel to make a plot of PSP amplitude versus latency (the solitary PSPs will have a latency of 100ms).

### Figure 4. *Pre to post-synaptic transformations*
In any of your recordings *from the same motor nerve*, did you see PSPs in response to different neurons? (If not, then you won't have a figure - instead you will just answer the second bullet point.)
- Did either psp onset slope correlate with spike amplitude?
 - To answer this question (are they correlated) use a spreadsheet program to make a scatter plot of psp onset slope (rise time / amplitude) against spike amplitude.
- Use what you have learned in your neuroscience courses (and what we have learned this semester in lab) to name two reasons why:
 - the PSP amplitude could be different in response to different pre-synaptic neurons
 - the PSP amplitude could be different at different intracellular recording locations (different muscle cells or different locations along the same muscle cell)
github_jupyter
## Running Facebook Prophet model for forecasting Statistics Norway data via Statbank API Inspired by [`Eurostat/Prophet`](https://github.com/eurostat/prophet) and [`stats_to_pandas`](https://github.com/hmelberg/stats-to-pandas). Result of internal hackathon at SSB. --------- Facebook has open sourced [`Prophet`](https://facebookincubator.github.io/prophet/), a forecasting project available in `Python` and `R`. At its core, the `Prophet` procedure is an **additive regression model** with four main components (based on [`Stan`](http://mc-stan.org) Bayesian approach): 1. a piecewise linear (or logistic) growth curve trend: `Prophet` automatically detects changes in trends by selecting changepoints from the data, 2. a yearly seasonal component modeled using Fourier series, 3. a weekly seasonal component using dummy variables, 4. a user-provided list of important holidays. We (**make no assumption whatsoever here**, beyond the obvious seasonality of the data) use the features 1. and 2. of the model to **build forecast estimates of Statistics Norway timeseries using [*PX-API*](https://www.ssb.no/en/omssb/tjenester-og-verktoy/api/px-api) **: <img src=""> We suppose here that all required packages have been already install (see `Prophet` original webpage for `Prophet`'s dependencies). Let us first import everything we need: ``` import pandas as pd import numpy as np # needed for display in notebook: %matplotlib notebook from matplotlib import pyplot as plt import APIdata as apid #separate file import warnings warnings.filterwarnings('ignore') ``` ## Testing datasets from API: ``` statbank = apid.API_to_data(language='en') statbank.search('macro*') ``` Select table_id from the list above, and type between ' ' ``` statbank = apid.API_to_data(language='en') tablenr = '09190' #måned eksempel: 11721, uke eksempel: 03024, kvartal eksempel: 09190, år eksempel 05803 box_info = statbank.select(tablenr) box_info ``` Run next cell when info is saved. 
```
[df, label] = statbank.read_box(box_info)
df
```
The table contains exactly the data we need, let us store it into a `pandas.DataFrame` object as desired. As already mentioned, the input to `Prophet` is always a `pandas.DataFrame` object, and it must contain two columns: `ds` and `y`.
```
[df, f, periods] = statbank.prepare_dataframe(df=df)
df.head()
type(df)

df.sort_values('ds', inplace=True)
ds_last = df['ds'].values[-1]

xlabel = "Time"
ylabel = "Value"

plt.plot(df['ds'], df['y'], 'k.')
plt.plot(df['ds'], df['y'], ls='-', c='#0072B2')
plt.grid(True, which='major', c='gray', ls='-', lw=1, alpha=0.2)
plt.xlabel(xlabel, fontsize=10); #plt.ylabel(ylabel, fontsize=10)
plt.suptitle(" {}, Table: {} (last: {})".format(label, tablenr, ds_last), fontsize=14, y=1.03)
plt.legend()
plt.show()
```
Considering the trend observed in the data, we define the regression model for `Prophet` by instantiating a new `Prophet` model as follows:
Growth can be set to 'linear' or 'logistic'. Possible to add parameter: seasonality_mode='additive' (default) or 'multiplicative'. Possible to set Markov chain Monte Carlo (MCMC), e.g. mcmc_samples=300
```
from fbprophet import Prophet
nyears=4

m = Prophet(growth = 'linear', weekly_seasonality=False, yearly_seasonality=True, daily_seasonality=False)
```
We then call its `fit` method and pass in the historical dataframe built earlier:
We extend the data into the future by a specified number of periods using the `make_future_dataframe` method. 
Say that we want to predict the time-series over the next 4 years: ``` m.fit(df) future = m.make_future_dataframe(periods=periods*nyears, freq=f) fcst = m.predict(future) ``` Let us plot the forecast estimates calculated by the `Prophet` model: ``` m.plot(fcst, uncertainty=True) xlabel = "Time" ylabel = "Value" plt.axvline(pd.to_datetime(ds_last), color='r', linestyle='--', lw=2) plt.xlabel(xlabel, fontsize=10); plt.ylabel(ylabel, fontsize=10) plt.suptitle(" {} {} forecast data ({} years)".format(tablenr, label, nyears), fontsize=14, y=1.05) plt.legend() plt.show() plt.savefig("{}_{}y.svg".format(tablenr, nyears)) ``` `Prophet` also provides the components (overall trend and yearly profile) of the time-series: ``` fig = m.plot_components(fcst, uncertainty=True) fig.suptitle("Forecast components", fontsize=16, y=1.02) plt.show() result = pd.concat([fcst[['ds', 'yhat', 'yhat_lower', 'yhat_upper']], df['y']], axis=1) result result.tail() result.to_csv("{}_{}y.csv".format(tablenr, nyears), sep = ';', decimal = ',') # result.to_excel("{}_{}y.xlsx".format(tablenr, nyears)) ``` **-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------**
github_jupyter
# Implementing a Neural Network In this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset. ``` # A bit of setup import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.neural_net import TwoLayerNet %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) ``` We will use the class `TwoLayerNet` in the file `cs231n/classifiers/neural_net.py` to represent instances of our network. The network parameters are stored in the instance variable `self.params` where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation. ``` # Create a small net and some toy data to check your implementations. # Note that we set the random seed for repeatable experiments. input_size = 4 hidden_size = 10 num_classes = 3 num_inputs = 5 def init_toy_model(): np.random.seed(0) return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1) def init_toy_data(): np.random.seed(1) X = 10 * np.random.randn(num_inputs, input_size) y = np.array([0, 1, 2, 2, 1]) return X, y net = init_toy_model() X, y = init_toy_data() ``` # Forward pass: compute scores Open the file `cs231n/classifiers/neural_net.py` and look at the method `TwoLayerNet.loss`. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters. 
Implement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs. ``` scores = net.loss(X) print 'Your scores:' print scores print print 'correct scores:' correct_scores = np.asarray([ [-0.81233741, -1.27654624, -0.70335995], [-0.17129677, -1.18803311, -0.47310444], [-0.51590475, -1.01354314, -0.8504215 ], [-0.15419291, -0.48629638, -0.52901952], [-0.00618733, -0.12435261, -0.15226949]]) print correct_scores print # The difference should be very small. We get < 1e-7 print 'Difference between your scores and correct scores:' print np.sum(np.abs(scores - correct_scores)) ``` # Forward pass: compute loss In the same function, implement the second part that computes the data and regularizaion loss. ``` loss, _ = net.loss(X, y, reg=0.1) correct_loss = 1.30378789133 # should be very small, we get < 1e-12 print 'Difference between your loss and correct loss:' print np.sum(np.abs(loss - correct_loss)) ``` # Backward pass Implement the rest of the function. This will compute the gradient of the loss with respect to the variables `W1`, `b1`, `W2`, and `b2`. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check: ``` from cs231n.gradient_check import eval_numerical_gradient # Use numeric gradient checking to check your implementation of the backward pass. # If your implementation is correct, the difference between the numeric and # analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2. 
loss, grads = net.loss(X, y, reg=0.1) # these should all be less than 1e-8 or so for param_name in grads: f = lambda W: net.loss(X, y, reg=0.1)[0] param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False) print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])) ``` # Train the network To train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function `TwoLayerNet.train` and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement `TwoLayerNet.predict`, as the training process periodically performs prediction to keep track of accuracy over time while the network trains. Once you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2. ``` net = init_toy_model() stats = net.train(X, y, X, y, learning_rate=1e-1, reg=1e-5, num_iters=100, verbose=False) print 'Final training loss: ', stats['loss_history'][-1] # plot the loss history plt.plot(stats['loss_history']) plt.xlabel('iteration') plt.ylabel('training loss') plt.title('Training Loss history') plt.show() ``` # Load the data Now that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset. ``` from cs231n.data_utils import load_CIFAR10 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the two-layer neural net classifier. These are the same steps as we used for the SVM, but condensed to a single function. 
""" # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # Subsample the data mask = range(num_training, num_training + num_validation) X_val = X_train[mask] y_val = y_train[mask] mask = range(num_training) X_train = X_train[mask] y_train = y_train[mask] mask = range(num_test) X_test = X_test[mask] y_test = y_test[mask] # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis=0) X_train -= mean_image X_val -= mean_image X_test -= mean_image # Reshape data to rows X_train = X_train.reshape(num_training, -1) X_val = X_val.reshape(num_validation, -1) X_test = X_test.reshape(num_test, -1) return X_train, y_train, X_val, y_val, X_test, y_test # Invoke the above function to get our data. X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data() print 'Train data shape: ', X_train.shape print 'Train labels shape: ', y_train.shape print 'Validation data shape: ', X_val.shape print 'Validation labels shape: ', y_val.shape print 'Test data shape: ', X_test.shape print 'Test labels shape: ', y_test.shape ``` # Train a network To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate. ``` input_size = 32 * 32 * 3 hidden_size = 50 num_classes = 10 net = TwoLayerNet(input_size, hidden_size, num_classes) # Train the network stats = net.train(X_train, y_train, X_val, y_val, num_iters=1000, batch_size=200, learning_rate=1e-4, learning_rate_decay=0.95, reg=0.5, verbose=True) # Predict on the validation set val_acc = (net.predict(X_val) == y_val).mean() print 'Validation accuracy: ', val_acc ``` # Debug the training With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good. 
One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization. Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized. ``` # Plot the loss function and train / validation accuracies plt.subplot(2, 1, 1) plt.plot(stats['loss_history']) plt.title('Loss history') plt.xlabel('Iteration') plt.ylabel('Loss') plt.subplot(2, 1, 2) plt.plot(stats['train_acc_history'], label='train') plt.plot(stats['val_acc_history'], label='val') plt.title('Classification accuracy history') plt.xlabel('Epoch') plt.ylabel('Clasification accuracy') plt.show() from cs231n.vis_utils import visualize_grid # Visualize the weights of the network def show_net_weights(net): W1 = net.params['W1'] W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2) plt.imshow(visualize_grid(W1, padding=3).astype('uint8')) plt.gca().axis('off') plt.show() show_net_weights(net) ``` # Tune your hyperparameters **What's wrong?**. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy. **Tuning**. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. 
Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, number of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value. **Approximate results**. You should aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set. **Experiment**: Your goal in this exercise is to get as good of a result on CIFAR-10 as you can, with a fully-connected Neural Network. For every 1% above 52% on the Test set we will award you with one extra bonus point. Feel free to implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.). ``` best_net = None # store the best model into this ################################################################################# # TODO: Tune hyperparameters using the validation set. Store your best trained # # model in best_net. # # # # To help debug your network, it may help to use visualizations similar to the # # ones we used above; these visualizations will have significant qualitative # # differences from the ones we saw above for the poorly tuned network. # # # # Tweaking hyperparameters by hand can be fun, but you might find it useful to # # write code to sweep through possible combinations of hyperparameters # # automatically like we did on the previous exercises. 
# ################################################################################# learning_rates = [50e-5, 52e-5, 54e-5, 56e-5, 58e-5, 60e-5] regs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] best_lr = -1 best_reg = -1 best_acc = -1 for lr in learning_rates: for reg in regs: net = TwoLayerNet(input_size, hidden_size, num_classes) # Train the network stats = net.train(X_train, y_train, X_val, y_val, num_iters=1000, batch_size=200, learning_rate=lr, learning_rate_decay=0.95, reg=reg, verbose=False) # Predict on the validation set val_acc = (net.predict(X_val) == y_val).mean() print 'Validation accuracy: ', val_acc if val_acc > best_acc: best_acc = val_acc best_lr = lr best_reg = reg best_net = net print "" ################################################################################# # END OF YOUR CODE # ################################################################################# # visualize the weights of the best network show_net_weights(best_net) ``` # Run on the test set When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%. **We will give you extra bonus point for every 1% of accuracy above 52%.** ``` test_acc = (best_net.predict(X_test) == y_test).mean() print 'Test accuracy: ', test_acc ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Masking and padding in Keras <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/beta/guide/keras/masking_and_padding"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/keras/masking_and_padding.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/keras/masking_and_padding.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/guide/keras/masking_and_padding.ipynb"> <img src="https://www.tensorflow.org/images/download_logo_32px.png" /> Download notebook</a> </td> </table> ## Setup ``` from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow as tf from tensorflow.keras import layers ``` ## Padding sequence data When processing sequence data, it is very common for individual samples to have different lengths. 
Consider the following example (text tokenized as words): ``` [ ["The", "weather", "will", "be", "nice", "tomorrow"], ["How", "are", "you", "doing", "today"], ["Hello", "world", "!"] ] ``` After vocabulary lookup, the data might be vectorized as integers, e.g.: ``` [ [83, 91, 1, 645, 1253, 927], [73, 8, 3215, 55, 927], [71, 1331, 4231] ] ``` The data is a 2D list where individual samples have length 6, 5, and 3 respectively. Since the input data for a deep learning model must be a single tensor (of shape e.g. `(batch_size, 6, vocab_size)` in this case), samples that are shorter than the longest item need to be padded with some placeholder value (alternatively, one might also truncate long samples before padding short samples). Keras provides an API to easily truncate and pad sequences to a common length: `tf.keras.preprocessing.sequence.pad_sequences`. ``` raw_inputs = [ [83, 91, 1, 645, 1253, 927], [73, 8, 3215, 55, 927], [711, 632, 71] ] # By default, this will pad using 0s; it is configurable via the # "value" parameter. # Note that you could "pre" padding (at the beginning) or # "post" padding (at the end). # We recommend using "post" padding when working with RNN layers # (in order to be able to use the # CuDNN implementation of the layers). padded_inputs = tf.keras.preprocessing.sequence.pad_sequences(raw_inputs, padding='post') print(padded_inputs) ``` ## Masking Now that all samples have a uniform length, the model must be informed that some part of the data is actually padding and should be ignored. That mechanism is <b>masking</b>. There are three ways to introduce input masks in Keras models: - Add a `keras.layers.Masking` layer. - Configure a `keras.layers.Embedding` layer with `mask_zero=True`. - Pass a `mask` argument manually when calling layers that support this argument (e.g. RNN layers). 
## Mask-generating layers: `Embedding` and `Masking` Under the hood, these layers will create a mask tensor (2D tensor with shape `(batch, sequence_length)`), and attach it to the tensor output returned by the `Masking` or `Embedding` layer. ``` embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True) masked_output = embedding(padded_inputs) print(masked_output._keras_mask) masking_layer = layers.Masking() # Simulate the embedding lookup by expanding the 2D input to 3D, # with embedding dimension of 10. unmasked_embedding = tf.cast( tf.tile(tf.expand_dims(padded_inputs, axis=-1), [1, 1, 10]), tf.float32) masked_embedding = masking_layer(unmasked_embedding) print(masked_embedding._keras_mask) ``` As you can see from the printed result, the mask is a 2D boolean tensor with shape `(batch_size, sequence_length)`, where each individual `False` entry indicates that the corresponding timestep should be ignored during processing. ## Mask propagation in the Functional API and Sequential API When using the Functional API or the Sequential API, a mask generated by an `Embedding` or `Masking` layer will be propagated through the network for any layer that is capable of using them (for example, RNN layers). Keras will automatically fetch the mask corresponding to an input and pass it to any layer that knows how to use it. Note that in the `call` method of a subclassed model or layer, masks aren't automatically propagated, so you will need to manually pass a `mask` argument to any layer that needs one. See the section below for details. 
For instance, in the following Sequential model, the `LSTM` layer will automatically receive a mask, which means it will ignore padded values: ``` model = tf.keras.Sequential([ layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True), layers.LSTM(32), ]) ``` This is also the case for the following Functional API model: ``` inputs = tf.keras.Input(shape=(None,), dtype='int32') x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs) outputs = layers.LSTM(32)(x) model = tf.keras.Model(inputs, outputs) ``` ## Passing mask tensors directly to layers Layers that can handle masks (such as the `LSTM` layer) have a `mask` argument in their `__call__` method. Meanwhile, layers that produce a mask (e.g. `Embedding`) expose a `compute_mask(input, previous_mask)` method which you can call. Thus, you can do something like this: ``` class MyLayer(layers.Layer): def __init__(self, **kwargs): super(MyLayer, self).__init__(**kwargs) self.embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True) self.lstm = layers.LSTM(32) def call(self, inputs): x = self.embedding(inputs) # Note that you could also prepare a `mask` tensor manually. # It only needs to be a boolean tensor # with the right shape, i.e. (batch_size, timesteps). mask = self.embedding.compute_mask(inputs) output = self.lstm(x, mask=mask) # The layer will ignore the masked values return output layer = MyLayer() x = np.random.random((32, 10)) * 100 x = x.astype('int32') layer(x) ``` ## Supporting masking in your custom layers Sometimes you may need to write layers that generate a mask (like `Embedding`), or layers that need to modify the current mask. For instance, any layer that produces a tensor with a different time dimension than its input, such as a `Concatenate` layer that concatenates on the time dimension, will need to modify the current mask so that downstream layers will be able to properly take masked timesteps into account. 
To do this, your layer should implement the `layer.compute_mask()` method, which produces a new mask given the input and the current mask. Most layers don't modify the time dimension, so they don't need to worry about masking. The default behavior of `compute_mask()` is to just pass the current mask through in such cases. Here is an example of a `TemporalSplit` layer that needs to modify the current mask. ``` class TemporalSplit(tf.keras.layers.Layer): """Split the input tensor into 2 tensors along the time dimension.""" def call(self, inputs): # Expect the input to be 3D and mask to be 2D, split the input tensor into 2 # subtensors along the time axis (axis 1). return tf.split(inputs, 2, axis=1) def compute_mask(self, inputs, mask=None): # Also split the mask into 2 if it presents. if mask is None: return None return tf.split(mask, 2, axis=1) first_half, second_half = TemporalSplit()(masked_embedding) print(first_half._keras_mask) print(second_half._keras_mask) ``` Here is another example of a `CustomEmbedding` layer that is capable of generating a mask from input values: ``` class CustomEmbedding(tf.keras.layers.Layer): def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs): super(CustomEmbedding, self).__init__(**kwargs) self.input_dim = input_dim self.output_dim = output_dim self.mask_zero = mask_zero def build(self, input_shape): self.embeddings = self.add_weight( shape=(self.input_dim, self.output_dim), initializer='random_normal', dtype='float32') def call(self, inputs): return tf.nn.embedding_lookup(self.embeddings, inputs) def compute_mask(self, inputs, mask=None): if not self.mask_zero: return None return tf.not_equal(inputs, 0) layer = CustomEmbedding(10, 32, mask_zero=True) x = np.random.random((3, 10)) * 9 x = x.astype('int32') y = layer(x) mask = layer.compute_mask(x) print(mask) ``` ## Writing layers that need mask information Some layers are mask *consumers*: they accept a `mask` argument in `call` and use it to determine whether to skip 
certain time steps. To write such a layer, you can simply add a `mask=None` argument in your `call` signature. The mask associated with the inputs will be passed to your layer whenever it is available. ```python class MaskConsumer(tf.keras.layers.Layer): def call(self, inputs, mask=None): ... ``` ## Recap That is all you need to know about masking in Keras. To recap: - "Masking" is how layers are able to know when to skip / ignore certain timesteps in sequence inputs. - Some layers are mask-generators: `Embedding` can generate a mask from input values (if `mask_zero=True`), and so can the `Masking` layer. - Some layers are mask-consumers: they expose a `mask` argument in their `__call__` method. This is the case for RNN layers. - In the Functional API and Sequential API, mask information is propagated automatically. - When writing subclassed models or when using layers in a standalone way, pass the `mask` arguments to layers manually. - You can easily write layers that modify the current mask, that generate a new mask, or that consume the mask associated with the inputs.
github_jupyter
# MLP 104 ``` from google.colab import drive PATH='/content/drive/' drive.mount(PATH) DATAPATH=PATH+'My Drive/data/' PC_FILENAME = DATAPATH+'pcRNA.fasta' NC_FILENAME = DATAPATH+'ncRNA.fasta' import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import cross_val_score from sklearn.model_selection import RepeatedKFold from sklearn.model_selection import StratifiedKFold import tensorflow as tf from tensorflow import keras from keras.wrappers.scikit_learn import KerasRegressor from keras.models import Sequential from keras.layers import Bidirectional from keras.layers import GRU from keras.layers import Dense from keras.layers import LayerNormalization import time dt='float32' tf.keras.backend.set_floatx(dt) EPOCHS=200 SPLITS=1 K=4 VOCABULARY_SIZE=4**K+1 # e.g. K=3 => 64 DNA K-mers + 'NNN' EMBED_DIMEN=16 FILENAME='MLP104' ``` ## Load and partition sequences ``` # Assume file was preprocessed to contain one line per seq. # Prefer Pandas dataframe but df does not support append. # For conversion to tensor, must avoid python lists. def load_fasta(filename,label): DEFLINE='>' labels=[] seqs=[] lens=[] nums=[] num=0 with open (filename,'r') as infile: for line in infile: if line[0]!=DEFLINE: seq=line.rstrip() num += 1 # first seqnum is 1 seqlen=len(seq) nums.append(num) labels.append(label) seqs.append(seq) lens.append(seqlen) df1=pd.DataFrame(nums,columns=['seqnum']) df2=pd.DataFrame(labels,columns=['class']) df3=pd.DataFrame(seqs,columns=['sequence']) df4=pd.DataFrame(lens,columns=['seqlen']) df=pd.concat((df1,df2,df3,df4),axis=1) return df # Split into train/test stratified by sequence length. 
def sizebin(df): return pd.cut(df["seqlen"], bins=[0,1000,2000,4000,8000,16000,np.inf], labels=[0,1,2,3,4,5]) def make_train_test(data): bin_labels= sizebin(data) from sklearn.model_selection import StratifiedShuffleSplit splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=37863) # split(x,y) expects that y is the labels. # Trick: Instead of y, give it it the bin labels that we generated. for train_index,test_index in splitter.split(data,bin_labels): train_set = data.iloc[train_index] test_set = data.iloc[test_index] return (train_set,test_set) def separate_X_and_y(data): y= data[['class']].copy() X= data.drop(columns=['class','seqnum','seqlen']) return (X,y) def make_slice(data_set,min_len,max_len): print("original "+str(data_set.shape)) too_short = data_set[ data_set['seqlen'] < min_len ].index no_short=data_set.drop(too_short) print("no short "+str(no_short.shape)) too_long = no_short[ no_short['seqlen'] >= max_len ].index no_long_no_short=no_short.drop(too_long) print("no long, no short "+str(no_long_no_short.shape)) return no_long_no_short ``` ## Make K-mers ``` def make_kmer_table(K): npad='N'*K shorter_kmers=[''] for i in range(K): longer_kmers=[] for mer in shorter_kmers: longer_kmers.append(mer+'A') longer_kmers.append(mer+'C') longer_kmers.append(mer+'G') longer_kmers.append(mer+'T') shorter_kmers = longer_kmers all_kmers = shorter_kmers kmer_dict = {} kmer_dict[npad]=0 value=1 for mer in all_kmers: kmer_dict[mer]=value value += 1 return kmer_dict KMER_TABLE=make_kmer_table(K) def strings_to_vectors(data,uniform_len): all_seqs=[] for seq in data['sequence']: i=0 seqlen=len(seq) kmers=[] while i < seqlen-K+1 -1: # stop at minus one for spaced seed kmer=seq[i:i+2]+seq[i+3:i+5] # SPACED SEED 2/1/2 for K=4 #kmer=seq[i:i+K] i += 1 value=KMER_TABLE[kmer] kmers.append(value) pad_val=0 while i < uniform_len: kmers.append(pad_val) i += 1 all_seqs.append(kmers) pd2d=pd.DataFrame(all_seqs) return pd2d # return 2D dataframe, uniform dimensions 
def make_kmers(MAXLEN,train_set): (X_train_all,y_train_all)=separate_X_and_y(train_set) # The returned values are Pandas dataframes. # print(X_train_all.shape,y_train_all.shape) # (X_train_all,y_train_all) # y: Pandas dataframe to Python list. # y_train_all=y_train_all.values.tolist() # The sequences lengths are bounded but not uniform. X_train_all print(type(X_train_all)) print(X_train_all.shape) print(X_train_all.iloc[0]) print(len(X_train_all.iloc[0]['sequence'])) # X: List of string to List of uniform-length ordered lists of K-mers. X_train_kmers=strings_to_vectors(X_train_all,MAXLEN) # X: true 2D array (no more lists) X_train_kmers.shape print("transform...") # From pandas dataframe to numpy to list to numpy print(type(X_train_kmers)) num_seqs=len(X_train_kmers) tmp_seqs=[] for i in range(num_seqs): kmer_sequence=X_train_kmers.iloc[i] tmp_seqs.append(kmer_sequence) X_train_kmers=np.array(tmp_seqs) tmp_seqs=None print(type(X_train_kmers)) print(X_train_kmers) labels=y_train_all.to_numpy() return (X_train_kmers,labels) def make_frequencies(Xin): # Input: numpy X(numseq,seqlen) list of vectors of kmerval where val0=NNN,val1=AAA,etc. # Output: numpy X(numseq,65) list of frequencies of 0,1,etc. 
Xout=[] VOCABULARY_SIZE= 4**K + 1 # plus one for 'NNN' for seq in Xin: freqs =[0] * VOCABULARY_SIZE total = 0 for kmerval in seq: freqs[kmerval] += 1 total += 1 for c in range(VOCABULARY_SIZE): freqs[c] = freqs[c]/total Xout.append(freqs) Xnum = np.asarray(Xout) return (Xnum) ``` ## Build model ``` def build_model(maxlen,dimen): act="sigmoid" embed_layer = keras.layers.Embedding( VOCABULARY_SIZE,EMBED_DIMEN,input_length=maxlen); dense1_layer = keras.layers.Dense(64, activation=act,dtype=dt,input_dim=VOCABULARY_SIZE) dense2_layer = keras.layers.Dense(64, activation=act,dtype=dt) dense3_layer = keras.layers.Dense(64, activation=act,dtype=dt) output_layer = keras.layers.Dense(1, activation=act,dtype=dt) mlp = keras.models.Sequential() #mlp.add(embed_layer) mlp.add(dense1_layer) mlp.add(dense2_layer) mlp.add(dense3_layer) mlp.add(output_layer) bc=tf.keras.losses.BinaryCrossentropy(from_logits=False) print("COMPILE...") mlp.compile(loss=bc, optimizer="Adam",metrics=["accuracy"]) print("...COMPILED") return mlp ``` ## Cross validation ``` def do_cross_validation(X,y,eps,maxlen,dimen): model = None cv_scores = [] fold=0 splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.2, random_state=37863) for train_index,valid_index in splitter.split(X): X_train=X[train_index] # use iloc[] for dataframe y_train=y[train_index] X_valid=X[valid_index] y_valid=y[valid_index] print("BUILD MODEL") model=build_model(maxlen,dimen) print("FIT") start_time=time.time() # this is complaining about string to float history=model.fit(X_train, y_train, # batch_size=10, default=32 works nicely epochs=eps, verbose=1, # verbose=1 for ascii art, verbose=0 for none validation_data=(X_valid,y_valid) ) end_time=time.time() elapsed_time=(end_time-start_time) fold += 1 print("Fold %d, %d epochs, %d sec"%(fold,eps,elapsed_time)) pd.DataFrame(history.history).plot(figsize=(8,5)) plt.grid(True) plt.gca().set_ylim(0,1) plt.show() scores = model.evaluate(X_valid, y_valid, verbose=0) print("%s: %.2f%%" % 
(model.metrics_names[1], scores[1]*100)) # What are the other metrics_names? # Try this from Geron page 505: # np.mean(keras.losses.mean_squared_error(y_valid,y_pred)) cv_scores.append(scores[1] * 100) print() print("Validation core mean %.2f%% (+/- %.2f%%)" % (np.mean(cv_scores), np.std(cv_scores))) return model ``` ## Load ``` print("Load data from files.") nc_seq=load_fasta(NC_FILENAME,0) pc_seq=load_fasta(PC_FILENAME,1) all_seq=pd.concat((nc_seq,pc_seq),axis=0) print("Put aside the test portion.") (train_set,test_set)=make_train_test(all_seq) # Do this later when using the test data: # (X_test,y_test)=separate_X_and_y(test_set) nc_seq=None pc_seq=None all_seq=None print("Ready: train_set") train_set ``` ## Len 200-1Kb ``` MINLEN=200 MAXLEN=1000 print ("Compile the model") model=build_model(MAXLEN,EMBED_DIMEN) print ("Summarize the model") print(model.summary()) # Print this only once print("Working on full training set, slice by sequence length.") print("Slice size range [%d - %d)"%(MINLEN,MAXLEN)) subset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y print ("Sequence to Kmer") (X_train,y_train)=make_kmers(MAXLEN,subset) X_train X_train=make_frequencies(X_train) X_train print ("Cross valiation") model1 = do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN) model1.save(FILENAME+'.short.model') ``` ## Len 1Kb-2Kb ``` MINLEN=1000 MAXLEN=2000 print ("Compile the model") model=build_model(MAXLEN,EMBED_DIMEN) print ("Summarize the model") print(model.summary()) # Print this only once print("Working on full training set, slice by sequence length.") print("Slice size range [%d - %d)"%(MINLEN,MAXLEN)) subset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y print ("Sequence to Kmer") (X_train,y_train)=make_kmers(MAXLEN,subset) X_train X_train=make_frequencies(X_train) X_train print ("Cross valiation") model2 = do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN) model2.save(FILENAME+'.medium.model') ``` ## Len 2Kb-3Kb ``` 
MINLEN=2000 MAXLEN=3000 print ("Compile the model") model=build_model(MAXLEN,EMBED_DIMEN) print ("Summarize the model") print(model.summary()) # Print this only once print("Working on full training set, slice by sequence length.") print("Slice size range [%d - %d)"%(MINLEN,MAXLEN)) subset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y print ("Sequence to Kmer") (X_train,y_train)=make_kmers(MAXLEN,subset) X_train X_train=make_frequencies(X_train) X_train print ("Cross valiation") model3 = do_cross_validation(X_train,y_train,EPOCHS,MAXLEN,EMBED_DIMEN) model3.save(FILENAME+'.long.model') ```
github_jupyter
``` # Install TensorFlow !pip install tensorflow-gpu try: %tensorflow_version 2.x # Colab only. except Exception: pass import tensorflow as tf print(tf.__version__) print(tf.test.gpu_device_name()) print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU'))) #imports some required libraries import numpy as np import matplotlib.pyplot as plt from tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, MaxPooling2D from tensorflow.keras.models import Model # Load in the data fashion_mnist = tf.keras.datasets.fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 print("x_train.shape:", x_train.shape) # the data is only 2D! # convolution expects height x width x color x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) print(x_train.shape) # number of classes K = len(set(y_train)) print("number of classes:", K) # Build the model using the functional API i = Input(shape=x_train[0].shape) x = Conv2D(128, (3, 3), strides=2, activation='relu', padding='same')(i) x = Conv2D(256, (3, 3), strides=2, activation='relu', padding='same')(x) x = MaxPooling2D((3, 3))(x) x = Conv2D(512, (3, 3), strides=2, activation='relu', padding='same')(x) x = Flatten()(x) x = Dropout(0.2)(x) x = Dense(1024, activation='relu')(x) x = Dropout(0.2)(x) x = Dense(512, activation='relu')(x) x = Dropout(0.2)(x) x = Dense(K, activation='softmax')(x) model = Model(i, x) # Compile and fit # Note: make sure you are using the GPU for this! 
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=25) # Plot loss per iteration import matplotlib.pyplot as plt plt.plot(history.history['loss'], label='loss') plt.plot(history.history['val_loss'], label='val_loss') plt.legend() # Plot accuracy per iteration plt.plot(history.history['accuracy'], label='acc') plt.plot(history.history['val_accuracy'], label='val_acc') plt.legend() # Plot confusion matrix from sklearn.metrics import confusion_matrix import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.show() p_test = model.predict(x_test).argmax(axis=1) cm = confusion_matrix(y_test, p_test) plot_confusion_matrix(cm, list(range(10))) # Label mapping labels = '''T-shirt/top Trouser Pullover Dress Coat Sandal Shirt Sneaker Bag Ankle boot'''.split("\n") # Show some misclassified examples misclassified_idx = np.where(p_test != y_test)[0] i = np.random.choice(misclassified_idx) plt.imshow(x_test[i].reshape(28,28), cmap='gray') plt.title("True label: %s Predicted: %s" % (labels[y_test[i]], labels[p_test[i]])); ```
github_jupyter
``` print("hello world") 1 + (3 * 4) + 5 (1 + 3) * (4 + 5) 2**4 temperature = 72.5 print("temperature") print(temperature) type(temperature) day_of_week = 3 type(day_of_week) day = "tuesday" type(day) print(day) whos day_of_week + 1 print(day) print(temperature) day_of_week day_of_week + 1 day_of_week = 4 day_of_week day_of_week = day_of_week + 1 day_of_week day_of_week = day_of_week + 10 day_of_week day day + 10 "20" + 30 "20" + str(30) int("20") + 30 + " " + 40 # exercise # create a humidity (humidity = 0.6) # create a temperature = 75 # create a day "saturday" # try printing out humidity + temperature # try printing out day plus temperature # try printing day plus temperature plus humidity (with spaces between) # takes you through # creating variables # converting variables # adding them # printing # take 5 min (til 10:07 now) humidity = 0.6 temperature = 75 day = "saturday" print(humidity + temperature) print(day + " " + str(humidity) + " " + str(temperature)) print(day, humidity, temperature) # built in functions and help max(1, 5, 2, 6) min(1, 5, 2, 6) #cos(3.14) import math math.cos(3.14) #alias import math as m m.cos(3.14) from math import cos cos(3.14) m.pi m.e cos(m.pi) help(math) # exercise # calculate the sin of two times pi # try another method or two from the math library # (maybe calculate the natural log of euler's constant) # take 5 min (til 10:28) m.sin(round(2 * m.pi, 2)) m.log(m.e) m.log10(10) # reconvene at 10:42 # lists, loops, and conditionals temperatures = [76, 73, 71, 68, 72, 65, 75] temperatures[0] temperatures[1] temperatures[6] temperatures[-1] temperatures[-2] temperatures[0:4] temperatures[4:7] len(temperatures) temperatures[2:len(temperatures)] temperatures[:4] # exercise # create a new list called humidities # values [.6, .65, .7, .75, .65, .6, .55] # print the full list # print the length of the list # print from index 2 through 5 # take til 11:00am humidities = [.6, .65, .7, .75, .65, .6, .55] print(humidities) len(humidities) 
humidities[2:6] # enumerator print(temperatures) for t in temperatures: t = t + 10 print(t) print("all done!") # exercise - take the code in cell 110, and replace temperatures with humidities # move various print calls (either print(h) or print("all done") in and out of the loop # take 5 min to do this, resume at 11:18 # try out tuples vs lists if you have extra time # tuples humidities_tuple = (.5, .6, 7, .8) for h in humidities_tuple: print(h) humidities_tuple[2] my_list = [1, 2, 3, 4] my_tuple = (1, 2, 3, 4) my_list[2] = 10 my_list temperatures # enumerator for t in temperatures: print(t) # iterator for i in range(len(temperatures)): print(i, temperatures[i]) # a couple of common errors in loop processing, using iterator syntax with enumerator values #for t in temperatures: # print(t) # print(temperatures[t]) #for h in humidities: # print(h) # print(humidities[h]) for i in range(len(temperatures)): print(i, temperatures[i], humidities[i]) # exercise # days of the week # days = ['sunday', 'monday', ...] # add days[i] to the loop above # take 5 min, back at 11:38 days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday'] for i in range(len(days)): print(i, days[i], temperatures[i], humidities[i]) for i in range(len(day)): if temperatures[i] > 72: print("it's hot", temperatures[i]) elif temperatures[i] > 70 or humidities[i] > .6: print("it's warm", temperatures[i]) else: print("it's cold", temperatures[i]) # as an exercise on your own, I'd recommend doing this with humidities or a combination of temp and humidity day day[2] temperature = 75 temperature[1] for d in day: print(d) day[3] day[3] = 'w' day days len(day) ```
github_jupyter
# Classifying Fashion-MNIST Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world. <img src='assets/fashion-mnist-sprite.png' width=500px> In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this. First off, let's load the dataset through torchvision. ``` import torch from torchvision import datasets, transforms import helper # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) # Download and load the training data trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download and load the test data testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) ``` Here we can see one of the images. ``` image, label = next(iter(trainloader)) helper.imshow(image[0,:]); ``` ## Building the network Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. 
We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers. ``` # TODO: Define your network architecture here from torch import nn import torch.nn.functional as F class Network(nn.Module): def __init__(self): super().__init__() # Here I choose to use the same network architecture as in MNIST self.fc1 = nn.Linear(784, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, 32) self.fc5 = nn.Linear(32, 10) def forward(self, x): # make sure x is flattened x = x.view(x.shape[0], -1) x = self.fc1(x) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc3(x) x = F.relu(x) x = self.fc4(x) x = F.relu(x) x = self.fc5(x) x = F.softmax(x, dim=1) return x ``` # Train the network Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`). Then write the training code. Remember the training pass is a fairly straightforward process: * Make a forward pass through the network to get the logits * Use the logits to calculate the loss * Perform a backward pass through the network with `loss.backward()` to calculate the gradients * Take a step with the optimizer to update the weights By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4. ``` # TODO: Create the network model = Network() model # TODO: Define the criterion and optimizer criterion = nn.CrossEntropyLoss() from torch import optim optimizer = optim.SGD(model.parameters(), lr=0.03) # perform quite well # optimizer = optim.Adam(model.parameters(), lr=0.001) # perform not very well ``` **Lecture Note:** Adam optimizer uses momentum which speeds up the actual fitting process. 
Adam optimizer adjusts the learning rate for each of the individual parameter in the Network. ``` # TODO: Train the network here epochs = 5 for e in range(epochs): running_loss = 0 for images, labels in trainloader: # flatten the images into a (n_samples, features) vector # images = images.view(images.shape[0], -1) # Training pass optimizer.zero_grad() output = model(images) loss = criterion(output, labels) loss.backward() optimizer.step() running_loss += loss.item() else: print(f"Training loss: {running_loss/len(trainloader)}") %matplotlib inline %config InlineBackend.figure_format = 'retina' import helper # Test out your network! dataiter = iter(testloader) images, labels = dataiter.next() img = images[0] # Convert 2D image to 1D vector img = img.resize_(1, 784) # TODO: Calculate the class probabilities (softmax) for img ps = model.forward(img) # Plot the image and probabilities helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion') ```
github_jupyter
# Week 3 of Introduction to Biological System Design
## Introduction to Modeling Biological Processes
### Ayush Pandey

Pre-requisite: If you have installed numpy, scipy, matplotlib, and pandas already, then you are all set to run this notebook.

This notebook introduces modeling of biological processes using differential equations. Note that to model the growth of any variable $x$, we can write a differential equation:

$\frac{dx}{dt} = f(x,t)$

where the function $f(x,t)$ models the rate of change of the variable $x$. In this notebook, we will use this formalism of modeling systems (deterministic ordinary differential equations) to study transcription and translation.

# ODE Modeling with Python
## Introduction to `scipy.integrate`

For Homework 2, you implemented your own numerical integrator by using a form of the backward difference method to compute the derivative. This method is often referred to as Euler's method for integrating differential equations. The scientific computing workhorse of the Python language, `Scipy`, provides various integration algorithms. One of the best methods in the `scipy.integrate` module is called `odeint`. We will use `odeint` in this notebook and throughout the course quite often to integrate ODE models. You can look at the `odeint` documentation here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html

Let us learn how to use `odeint` by simulating a simple birth and death model:

### Growth and death model

Let us assume that a species $x$ grows at the rate $k$ and dies at a rate of $d$. We can write a one-variable ODE model for this species:

$\frac{dx}{dt} = k - d\cdot x$

To simulate this model, we can integrate this ODE over a set of time points and plot the result as $x(t)$ vs $t$ on a graph.

Define the ODE as a Python function. We can use the `*args` argument to pass multiple parameters to our ODE. Inside the function, we can unpack `args` to retrieve the parameter values.
The function defines the ODE by defining the right hand side of the differential equation. Recall that we used similar function definitions to integrate using our crude numerical integrator. ``` def growth_death_ode(x, t, *args): k, d = args return k - d*x from scipy.integrate import odeint import numpy as np # It is often helpful to use Python functions with keyword arguments, so we know # the meanings of the arguments that are passed. This is helpful in easy debugging, as well as in documenting the # code better. k = 1.0 d = 0.1 initial_values = np.array([5]) timepoints = np.linspace(0,50,100) solution = odeint(func = growth_death_ode, y0 = initial_values, t = timepoints, args = (k, d)) ``` ### Take a look at what odeint returns by running the next cell (uncomment to run) ``` # solution ``` ### Plot the simulated ODE with time: ``` import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.plot(timepoints, solution, lw = 3) ax.set_xlabel('$t$', fontsize = 18) ax.set_ylabel('$x(t)$', fontsize = 18) ax.tick_params(labelsize = 14) ``` You can compare odeint performance with your numerical integrator by running both simultaneously. ## Validate `odeint` simulation with analytical solution Since the birth-death model that we considered is a simple equation that can be integrated analytically, we can validate the numerical ODE simulation by comparing it to our analytical solution. Note that analytically solving an ODE is not possible for all kinds of ODEs, especially, as write more complicated models it may not be possible to obtain a closed form solution. 
For the model above, the analytical solution is given by: $ x(t) = \frac{k}{d}(1 - e^{-d(t - t_0)}) + x(0)e^{-d(t - t_0)}$ Let us plot this analytical solution alongside the numerical simulation: ``` def analytical_solution(t, k, d, t0, x0): return (k/d)*(1 - np.exp(-d*(t - t0))) + x0*np.exp(-d*(t - t0)) x0 = initial_values t0 = timepoints[0] fig, ax = plt.subplots() ax.plot(timepoints, solution, lw = 3, label = 'numerical', alpha = 0.9) ax.scatter(timepoints, analytical_solution(timepoints, k, d, t0, x0), c = 'r', marker = 'x', label = 'analytical') ax.set_xlabel('$t$', fontsize = 18) ax.set_ylabel('$x(t)$', fontsize = 18) ax.legend(fontsize = 14) ax.tick_params(labelsize = 14) ``` `odeint` has various options that you can explore in the documentation. For example, you can use the `rtol` and the `atol` option to set the tolerance levels of the integration algorithm. The tolerance levels decide the accuracy of the solution => lower the tolerance for error, more accurate the simulation, but also it is slower. So you have a speed-accuracy tradeoff. You can also take a look at the `infodict` that is returned when you pass in `full_output = True`. The `infodict` dictionary consists of information about the solver and the steps it took. Finally, an advanced version of `odeint` is `solve_ivp` which has multiple algorithms to integrate ODEs. However, the disadvantage is that it has slightly higher overhead and needs to be setup correctly inorder to get reliable simulations for ill-conditioned differential equations. ``` ```
github_jupyter
# Cross Validation Splitting our datasetes into train/test sets allows us to test our model on unseen examples. However, it might be the case that we got a lucky (or unlucky) split that doesn't represent the model's actual performance. To solve this problem, we'll use a technique called cross-validation, where we use the entire dataset for training and for testing and evaluate the model accordingly. There are several ways of performing cross-validation, and there are several corresponding iterators defined in scikit-learn. Each defines a `split` method, which will generate arrays of indices from the data set, each array indicating the instances to go into the training or testing set. ``` import pandas as pd import numpy as np from sklearn import datasets, svm, metrics, model_selection x, y = datasets.load_breast_cancer(return_X_y=True) # Define a function to split our dataset into train/test splits using indices def kfold_train_test_split(x, y, train_indices, test_indices): return x[train_indices], x[test_indices], y[train_indices], y[test_indices] ``` ### `KFold` `KFold` is arguably the simplest. It partitions the data into $k$ folds. It does not attempt to keep the proportions of classes. 
``` k_fold = model_selection.KFold(n_splits=10) # splits the data into 10 splits, using 9 for training and 1 for testing in each iteration # Empty array to store the scores scores = [] for train_indices, test_indices in k_fold.split(x): # Split data using our predefined function x_train, x_test, y_train, y_test = kfold_train_test_split(x, y, train_indices, test_indices) # Train model svc = svm.SVC() svc.fit(x_train, y_train) # Predict using test set y_pred = svc.predict(x_test) # Calculate scores accuracy = metrics.accuracy_score(y_test, y_pred) precision = metrics.precision_score(y_test, y_pred) recall = metrics.recall_score(y_test, y_pred) # Create scores dictionary scores_dict = {"accuracy": accuracy, "precision": precision, "recall": recall} # Append to scores array scores.append(scores_dict) # Conver scores array to dataframe scores_df = pd.DataFrame(scores) scores_df # Calculate the mean of the scores scores_df.mean() ``` ### `StratifiedKFold` `StratifiedKFold` ensures that the proportion of classes are preserved in each training/testing set. ``` stratified_k_fold = model_selection.StratifiedKFold(n_splits=10) # splits the data into 10 splits, using 9 for training and 1 for testing in each iteration # Empty array to store the scores scores = [] for train_indices, test_indices in stratified_k_fold.split(x, y): # y is needed here for stratification, similar to stratify = y. 
# Split data using our predefined function x_train, x_test, y_train, y_test = kfold_train_test_split(x, y, train_indices, test_indices) # Train model svc = svm.SVC() svc.fit(x_train, y_train) # Predict using test set y_pred = svc.predict(x_test) # Calculate scores accuracy = metrics.accuracy_score(y_test, y_pred) precision = metrics.precision_score(y_test, y_pred) recall = metrics.recall_score(y_test, y_pred) # Create scores dictionary scores_dict = {"accuracy": accuracy, "precision": precision, "recall": recall} # Append to scores array scores.append(scores_dict) # Conver scores array to dataframe scores_df = pd.DataFrame(scores) scores_df # Calculate the mean of the scores scores_df.mean() ``` ### `ShuffleSplit` `ShuffleSplit` will generate indepedent pairs of randomly shuffled training and testing sets. ``` shuffle_k_fold = model_selection.ShuffleSplit(n_splits=10, random_state=42) # splits the data into 10 splits, using 9 for training and 1 for testing in each iteration # Empty array to store the scores scores = [] for train_indices, test_indices in shuffle_k_fold.split(x): # Split data using our predefined function x_train, x_test, y_train, y_test = kfold_train_test_split(x, y, train_indices, test_indices) # Train model svc = svm.SVC() svc.fit(x_train, y_train) # Predict using test set y_pred = svc.predict(x_test) # Calculate scores accuracy = metrics.accuracy_score(y_test, y_pred) precision = metrics.precision_score(y_test, y_pred) recall = metrics.recall_score(y_test, y_pred) # Create scores dictionary scores_dict = {"accuracy": accuracy, "precision": precision, "recall": recall} # Append to scores array scores.append(scores_dict) # Conver scores array to dataframe scores_df = pd.DataFrame(scores) scores_df # Calculate the mean of the scores scores_df.mean() ``` ### `StratifiedShuffleSplit` `StratifiedShuffleSplit` will generate indepedent pairs of shuffled training and testing sets. Here, however, it will ensure the training and test sets are stratified. 
``` stratified_shuffled_k_fold = model_selection.StratifiedShuffleSplit(n_splits=10) # splits the data into 10 splits, using 9 for training and 1 for testing in each iteration # Empty array to store the scores scores = [] for train_indices, test_indices in stratified_shuffled_k_fold.split(x, y): # y is needed here for stratification, similar to stratify = y. # Split data using our predefined function x_train, x_test, y_train, y_test = kfold_train_test_split(x, y, train_indices, test_indices) # Train model svc = svm.SVC() svc.fit(x_train, y_train) # Predict using test set y_pred = svc.predict(x_test) # Calculate scores accuracy = metrics.accuracy_score(y_test, y_pred) precision = metrics.precision_score(y_test, y_pred) recall = metrics.recall_score(y_test, y_pred) # Create scores dictionary scores_dict = {"accuracy": accuracy, "precision": precision, "recall": recall} # Append to scores array scores.append(scores_dict) # Conver scores array to dataframe scores_df = pd.DataFrame(scores) scores_df # Calculate the mean of the scores scores_df.mean() ```
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Cars-File" data-toc-modified-id="Cars-File-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Cars File</a></span><ul class="toc-item"><li><span><a href="#Data-Preparation" data-toc-modified-id="Data-Preparation-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Data Preparation</a></span></li><li><span><a href="#Model-Building" data-toc-modified-id="Model-Building-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Model Building</a></span><ul class="toc-item"><li><span><a href="#Logistic-Regression" data-toc-modified-id="Logistic-Regression-1.2.1"><span class="toc-item-num">1.2.1&nbsp;&nbsp;</span>Logistic Regression</a></span></li><li><span><a href="#Decision-Trees" data-toc-modified-id="Decision-Trees-1.2.2"><span class="toc-item-num">1.2.2&nbsp;&nbsp;</span>Decision Trees</a></span></li><li><span><a href="#Random-Forest" data-toc-modified-id="Random-Forest-1.2.3"><span class="toc-item-num">1.2.3&nbsp;&nbsp;</span>Random Forest</a></span></li><li><span><a href="#Multilayer-Perceptron" data-toc-modified-id="Multilayer-Perceptron-1.2.4"><span class="toc-item-num">1.2.4&nbsp;&nbsp;</span>Multilayer Perceptron</a></span></li></ul></li><li><span><a href="#Accuracy-metrics" data-toc-modified-id="Accuracy-metrics-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Accuracy metrics</a></span></li><li><span><a href="#Results" data-toc-modified-id="Results-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Results</a></span></li></ul></li></ul></div> ## Cars File ### Data Preparation **Get all the packages and data ready** ``` # read in all packages library(data.table) library(caret) library(dummy) library(nnet) library(randomForest) library(RWeka) # set options options(warn=-1) # read data data <- fread("train.arff.csv") ``` **Check the data types** ``` # check data types dtypes <- as.matrix(sapply(data, class)) ``` **Change the data types of the 
columns from characters to factors** ``` num_levels <- as.matrix(sapply(data, function(x){length(unique(x))})) data_class_changed <- as.data.frame(sapply(data,as.factor)) target_levels <- as.matrix(sapply(data_class_changed, class)) ``` **Impute Missing values** ``` num_nas <- apply(data_class_changed, 2, function(x) sum(x == " ")) ``` <p style="color:green;"><b>Has no missing values&#8593;</b></p> **Note**: in this exercise, we wont be splitting the dataset into test and train due to shortage of time. **Check balance of data** ``` target_levels_dist <- 100*table(data_class_changed$class)/dim(data_class_changed)[1] ``` ** Rename the `class` column. `class` is a function in R** ``` colnames(data_class_changed)[7] <- "category_type" ``` ### Model Building #### Logistic Regression ``` covariates <- names(data_class_changed)[!names(data_class_changed) == "category_type"] x = (dummy(data_class_changed[covariates])) x = sapply(x, function(xx){as.numeric(xx)-1}) new_names<- names(x) y = data_class_changed["category_type"] y = y$category_type data_class_changed$category_type <- relevel(data_class_changed$category_type, ref = "unacc") model_logreg <- multinom(category_type ~ ., data = data_class_changed) predictions_logreg <- predict(model_logreg,data_class_changed) ``` #### Decision Trees ``` model_dt <- J48(category_type~., data=data_class_changed) predictions_dt <- predict(model_dt, data_class_changed) ``` #### Random Forest ``` model_rf <- randomForest(category_type~., data=data_class_changed) predictions_rf <- predict(model_rf, data_class_changed) ``` #### Multilayer Perceptron ``` model_mlp <- caret::train(x, y, method="mlp") predictions_mlp <- predict(model_mlp, x) ``` ### Accuracy metrics ``` y_true <- as.factor(data_class_changed[,"category_type"]) # build all the confusion matrices cm_logreg <- as.data.frame.matrix(table(predictions_logreg, y_true)) cm_dt <- as.data.frame.matrix(table(predictions_dt, y_true)) cm_rf <- as.data.frame.matrix(table(predictions_rf, 
y_true)) cm_mlp <- as.data.frame.matrix(table(predictions_mlp, y_true)) # rearrange cm_logreg <- cm_logreg[order(rownames(cm_logreg)),order(colnames(cm_logreg))] cm_dt <- cm_dt[order(rownames(cm_dt)),order(colnames(cm_dt))] cm_rf <- cm_rf[order(rownames(cm_rf)),order(colnames(cm_rf))] cm_mlp <- cm_mlp[order(rownames(cm_mlp)),order(colnames(cm_mlp))] # function to caclulate class wise precision, accuracy, recall classification_metrics <- function(conf_mat,model_type){ precision <- diag(as.matrix(conf_mat)) / rowSums(conf_mat) recall <- diag(as.matrix(conf_mat)) / colSums(conf_mat) accuracy <- diag(as.matrix(conf_mat)) / sum(conf_mat) df <- data.frame(accuracy,precision,recall) colnames(df) <- paste(model_type,c("accuracy","precision","recall"),sep="_") df } # get all metrics metrics_logreg <- classification_metrics(cm_logreg, "logreg") metrics_dt <- classification_metrics(cm_dt, "dt") metrics_rf <- classification_metrics(cm_rf, "rf") metrics_mlp <- classification_metrics(cm_mlp, "mlp") # combine all and rearrange metric_comparison <- cbind(metrics_logreg, metrics_dt,metrics_rf,metrics_mlp) rownames(metric_comparison) <- c("unacc", "acc", "good", "vgood") models <- c("logreg", "dt", "rf", "mlp") metrics <- c("accuracy", "recall", "precision") model_metrics <- paste(rep(models,3),rep(metrics,4),sep="_") accy <- model_metrics[grep("accuracy", model_metrics)][order(model_metrics[grep("accuracy", model_metrics)])] recl <- model_metrics[grep("recall", model_metrics)][order(model_metrics[grep("recall", model_metrics)])] prec <- model_metrics[grep("precision", model_metrics)][order(model_metrics[grep("precision", model_metrics)])] ``` ### Results The below table compares the classification metrics for each level and model. We have calculated three metrics for each model and level(levels here refers to each of the unique levels of the variable we are predicting: "unacc", "acc", "good", "vgood"). 
The metrics that we are calculating are:
<ul>
<li><i><b>Accuracy: </b></i>the fraction of all instances that are correctly classified</li>
<li><i><b>Recall: </b></i>the proportion of instances that actually belong to a class that are correctly predicted as that class (true positives out of all actual positives)</li>
<li><i><b>Precision: </b></i>the proportion of instances predicted as a class that actually belong to that class (true positives out of all positive predictions)</li>
</ul>
<img src="Precisionrecall.png" width = 300px>
```
metric_comparison[c(accy,recl,prec)]
```
github_jupyter
## **Analytic Antipodal Grasps** ``` import numpy as np from manipulation import running_as_notebook from pydrake.all import( Variable, sin, cos, Evaluate, Jacobian, atan, MathematicalProgram, Solve, eq ) import matplotlib.pyplot as plt, mpld3 if running_as_notebook: mpld3.enable_notebook() ``` ## Introduction to Symbolic Differentiation For this assignment, you will need [symbolic differentiation](https://en.wikipedia.org/wiki/Computer_algebra), supported by Drake's symbolic library. We will demonstrate how to use it with a simple function: $$T=\cos^2(x) + y^5$$ and it's Jacobian (first-order derivative), $$J = \begin{pmatrix} \frac{\partial T}{\partial x} & \frac{\partial T}{\partial y} \end{pmatrix}=\begin{pmatrix} -2\cos(x)\sin(x) & 5y^4 \end{pmatrix}$$ as well as the Hessian (second-order derivative), $$H = \begin{pmatrix} \frac{\partial^2 T}{\partial x^2} & \frac{\partial^2 T}{\partial x \partial y} \\ \frac{\partial^2 T}{\partial y \partial x} & \frac{\partial^2 T}{\partial y^2} \end{pmatrix}=\begin{pmatrix} 2 \sin^2(x) - 2\cos^2(x) & 0 \\ 0 & 20y^3 \end{pmatrix}$$ Below are some snippets of how to define symbolic variables, differentiate expressions, and evaluate them using numerical values. ``` # 1. Symbolic variables are defined x = Variable('x') y = Variable('y') # 2. Expressions can be written by composing operations on Variables. T = cos(x) ** 2.0 + y ** 5.0 print(T) # 3. Use Evaluate to query the numerical value of the expression given the variable values. # Use function for multi-dimensional quantities print(Evaluate(np.array([T]), {x: 3.0, y:5.0})) # Use method for scalar quantities print(T.Evaluate({x: 3.0, y:5.0})) # 4. Differentiate a quantity using Jacobian, or Differentiate. J = np.array([T.Differentiate(x), T.Differentiate(y)]) print(J) # Use method for scalar quantities J = T.Jacobian([x, y]) print(J) print(Evaluate(J, {x: 3.0, y:5.0})) # Use function for taking Jacobian of multi-dimensional quantities. 
H = Jacobian(J, [x, y]) print(H) print(Evaluate(H, {x: 3.0, y: 5.0})) ``` Are the symbolic values of the Jacobian and Hessian what you expect? ## The Cycloidal Gear Now we enter the main part of the problem. After graduating from MIT, you decide to work at a company producing cutting-edge [hypercycloidal gears](https://youtu.be/MBWkibie_5I?t=74). You are in charge of designing a robotic pick-and-place system for these parts. In order to reliably grasp the gears, you decide to use your knowledge of antipodal points. The mechanical design department gave you a pretty ugly parametric equation for what the shape looks like, which we won't even bother writing in latex! Instead, we provided it via the function `shape`. Given a angle in polar coordinates (parameter $t$), it returns $p(t)=[x(t),y(t)]$, a position in 2D. The below cell implements the function and shows you what the gear part looks like. ``` def shape(t): x = (10*cos(t))-(1.5*cos(t+atan(sin(-9*t)/((4/3)-cos(-9*t)))))-(0.75*cos(10*t)) y = (-10*sin(t))+(1.5*sin(t+atan(sin(-9*t)/((4/3)-cos(-9*t)))))+(0.75*sin(10*t)) return np.array([x, y]) def plot_gear(): theta = np.linspace(0, 2*np.pi, 500) gear_shape = [] for i in range(500): gear_shape.append(Evaluate(shape(theta[i])).squeeze()) gear_shape = np.array(gear_shape) plt.axis("equal") plt.plot(gear_shape[:,0], gear_shape[:,1], 'k-') plot_gear() ``` ## Grasp Energy Function How can we analytically find a pair of antipodal points given the parametric equation of a shape? We make the following claim: **Claim**: Let $p(t_1)$ and $p(t_2)$ be a pair of antipodal points given in parametric space. Then $t_1$ and $t_2$ are critical points of the following energy function: $$E=\frac{1}{2}\kappa\|p(t_1)-p(t_2)\|^2$$ that is, they satisfy $\frac{\partial E}{\partial \mathbf{t}}=[0, 0]$ where $\mathbf{t}=[t_1,t_2]$. For the subsequent problems, you may assume $\kappa=2$. **Problem 5.1.a** [2pts]: Prove the claim. 
\\ **Problem 5.1.b** [2pts]: Prove that the converse may not necessarily hold. HINT: The derivative of $p(t)$ respect to $t$ gives the tangent 'velocity' vector: $v(t)=p'(t)$ Write down your answer in a paper / pdf file, and submit to the Gradescope written submission section! ## Implementation **Problem 5.1.c** [4pts] Using this knowledge, we will write a Mathematical Program to find the antipodal points. Since we are looking for $t_1$ and $t_2$ such that the Jacobians is a zero vector, we are solving a root finding problem. Problems of this nature can still be transcribed as an instance of a Mathematical program; it simply doesn't have a cost. We will write down our problem as follows: $$\begin{aligned} \text{find} \quad & \mathbf{t} \\ \text{s.t.} \quad & \frac{\partial E}{\partial \mathbf{t}}(\mathbf{t}) = \mathbf{0} \\ \quad & 0 \leq \mathbf{t} \leq 2\pi \\ \quad & t_1 - t_2 \geq \varepsilon \end{aligned}$$ The first constraint makes sure that they are critical points of the energy function, while the last two makes sure the points are not overlapping. You will write the following outer loop to check for the validity of solutions. 1. Pick a random guess for $\mathbf{t}$ using [SetInitialGuess](https://drake.mit.edu/pydrake/pydrake.solvers.mathematicalprogram.html?highlight=setinitialguess#pydrake.solvers.mathematicalprogram.MathematicalProgram.SetInitialGuess) by uniform sampling over $[0, 2\pi]$ (use `np.random.rand(2)`). 2. Using `MathematicalProgram`, solve the above problem. Remember there is no cost in this problem, so we simply only add the constraints. 3. If the solution is not valid (i.e. problem doesn't return success), repeat 1-2 with random guesses until a valid solution is found. 4. If a valid solution $\mathbf{t}^*$ is found, return the Eigenvalues of the Hessian of $E$ at $\mathbf{t}^*$. (Use `np.linalg.eigvals`) ``` def find_antipodal_pts(shape): """ Finds antipodal points given the parametric function that describes the shape of the object. 
Args: - shape: function from parametric space t to position R2. Returns: - result: 2-dim np array that contains antipodal grasp locations parametrized by [t1, t2] - H_eig: 2-dim np array that contains eigenvalues of the Hessian. """ eps = 1e-3 # do not modify, but use it for epsilon variable above. ## Fill your code here result = np.array([0., 0.]) # modify here H_eig = np.array([0., 0.]) # modify here return result, H_eig ``` You can run the cell below to check the correctnes of your implementation. As the constraint is nonlinear, it might take some time to compute. (Typically, the solve time will still be less than 2~3 seconds). ``` def plot_antipodal_pts(pts, shape): antipodal_pts = [] for i in range(2): val = Evaluate(shape(pts[i])).squeeze() antipodal_pts.append(val) antipodal_pts = np.array(antipodal_pts) plt.scatter(antipodal_pts[:,0], antipodal_pts[:,1], color='red') plot_gear() result, H_eig = find_antipodal_pts(shape) plot_antipodal_pts(result, shape) print(H_eig) ``` ## Hessian Analysis Why did we implement the Hessian? You may remember that if the Hessian is used for the second-derivative test. For a function $f(x)$ with a critical point $x^*$, this critical point is: - A local minima if the Hessian is positive-definite (i.e. all positive eigenvalues) - A local maxima if the Hessian is negative-definite (i.e. all negative eigenvalues) - A saddle point if the Hessian has mixed positive / negative eigenvalues. **Problem 5.1.d** [2pts] Describe what grasps the local minima, maxima, and saddle points correspond to in terms of the geometry of the object. In a very simple sentence, explain why you might prefer one configuration over another. HINT: The cell below will visualize each of the cases. 
``` if (running_as_notebook): plt.subplot(1,3,1) plot_gear() plt.title("Local Minima") np.random.seed(45) while True: result, H_eig = find_antipodal_pts(shape) if ((H_eig > 0).all()): break plot_antipodal_pts(result, shape) plt.subplot(1,3,2) plot_gear() plt.title("Local Maxima") np.random.seed(4) while True: result, H_eig = find_antipodal_pts(shape) if ((H_eig < 0).all()): break plot_antipodal_pts(result, shape) plt.subplot(1,3,3) plot_gear() plt.title("Saddle Point") np.random.seed(13) while True: result, H_eig = find_antipodal_pts(shape) if ((H_eig[0] > 0) and (H_eig[1] < 0)): break plot_antipodal_pts(result, shape) ``` ## How will this notebook be Graded? If you are enrolled in the class, this notebook will be graded using [Gradescope](www.gradescope.com). You should have gotten the enrollement code on our announcement in Piazza. For submission of this assignment, you must do two things. - Download and submit the notebook `analytic_antipodal_grasps.ipynb` to Gradescope's notebook submission section, along with your notebook for the other problems. - Write down your answers to 5.1.a, 5.1.b, and 5.1.d to a separately pdf file and submit it to Gradescope's written submission section. We will evaluate the local functions in the notebook to see if the function behaves as we have expected. For this exercise, the rubric is as follows: - [2 pts] 5.1.a is answered correctly. - [2 pts] 5.1.b is answered correctly. - [4 pts] `find_antipodal_points` must be implemented correctly. - [2 pts] 5.1.d is answered correctly. ``` from manipulation.exercises.clutter.test_analytic_grasp import TestAnalyticGrasp from manipulation.exercises.grader import Grader Grader.grade_output([TestAnalyticGrasp], [locals()], 'results.json') Grader.print_test_results('results.json') ```
github_jupyter
# FloPy ### A quick demo of how to control the ASCII format of numeric arrays written by FloPy load and run the Freyberg model ``` import sys import os import platform import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt # run installed version of flopy or add local path try: import flopy except: fpth = os.path.abspath(os.path.join('..', '..')) sys.path.append(fpth) import flopy #Set name of MODFLOW exe # assumes executable is in users path statement version = 'mf2005' exe_name = 'mf2005' if platform.system() == 'Windows': exe_name = 'mf2005.exe' mfexe = exe_name #Set the paths loadpth = os.path.join('..', 'data', 'freyberg') modelpth = os.path.join('data') #make sure modelpth directory exists if not os.path.exists(modelpth): os.makedirs(modelpth) print(sys.version) print('numpy version: {}'.format(np.__version__)) print('matplotlib version: {}'.format(mpl.__version__)) print('flopy version: {}'.format(flopy.__version__)) ml = flopy.modflow.Modflow.load('freyberg.nam', model_ws=loadpth, exe_name=exe_name, version=version) ml.model_ws = modelpth ml.write_input() success, buff = ml.run_model() if not success: print ('Something bad happened.') files = ['freyberg.hds', 'freyberg.cbc'] for f in files: if os.path.isfile(os.path.join(modelpth, f)): msg = 'Output file located: {}'.format(f) print (msg) else: errmsg = 'Error. Output file cannot be found: {}'.format(f) print (errmsg) ``` Each ``Util2d`` instance now has a ```.format``` attribute, which is an ```ArrayFormat``` instance: ``` print(ml.lpf.hk[0].format) ``` The ```ArrayFormat``` class exposes each of the attributes seen in the ```ArrayFormat.___str___()``` call. 
```ArrayFormat``` also exposes ``.fortran``, ``.py`` and ``.numpy`` atrributes, which are the respective format descriptors: ``` print(ml.dis.botm[0].format.fortran) print(ml.dis.botm[0].format.py) print(ml.dis.botm[0].format.numpy) ``` #### (re)-setting ```.format``` We can reset the format using a standard fortran type format descriptor ``` ml.dis.botm[0].format.fortran = "(6f10.4)" print(ml.dis.botm[0].format.fortran) print(ml.dis.botm[0].format.py) print(ml.dis.botm[0].format.numpy) ml.write_input() success, buff = ml.run_model() ``` Let's load the model we just wrote and check that the desired ```botm[0].format``` was used: ``` ml1 = flopy.modflow.Modflow.load("freyberg.nam",model_ws=modelpth) print(ml1.dis.botm[0].format) ``` We can also reset individual format components (we can also generate some warnings): ``` ml.dis.botm[0].format.width = 9 ml.dis.botm[0].format.decimal = 1 print(ml1.dis.botm[0].format) ``` We can also select ``free`` format. Note that setting to free format resets the format attributes to the default, max precision: ``` ml.dis.botm[0].format.free = True print(ml1.dis.botm[0].format) ml.write_input() success, buff = ml.run_model() ml1 = flopy.modflow.Modflow.load("freyberg.nam",model_ws=modelpth) print(ml1.dis.botm[0].format) ```
github_jupyter
### Simple Residual model in Keras This notebook is simply for testing a resnet-50 inspired model built in Keras on a numerical signs dataset. ``` import keras import numpy as np import matplotlib.pyplot as plt from keras import layers from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D,ZeroPadding1D, Conv1D, Add from keras.layers import MaxPooling2D, Dropout, AveragePooling2D from keras.models import Model from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import warnings warnings.filterwarnings('ignore') # Using a signs dataset, with images of numerical signs from 0-9 X = np.load("../data/sign-digits/X.npy") y = np.load("../data/sign-digits/y.npy") X.shape = (2062, 64, 64, 1) X = shuffle(X,random_state=0) y = shuffle(y,random_state=0) print(X.shape) print(y.shape) X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.1) print(X_train.shape) print(X_test.shape) # Block corresponding with no change in size def identity(X, f, filters): """ filters: filters for each of the conv2D f: size of filter to use in mid block """ F1,F2,F3 = filters X_earlier = X # Block 1 X = Conv2D(F1, kernel_size=(1,1), strides=(1,1),padding="valid",kernel_initializer=keras.initializers.glorot_normal())(X) X = BatchNormalization(axis=3)(X) X = Activation("relu")(X) # Block 2 X = Conv2D(F2, kernel_size=(f,f), strides=(1,1),padding="same",kernel_initializer=keras.initializers.glorot_normal())(X) X = BatchNormalization(axis=3)(X) X = Activation("relu")(X) # Block 3 X = Conv2D(F3, kernel_size=(1,1), strides=(1,1),padding="valid",kernel_initializer=keras.initializers.glorot_normal())(X) X = BatchNormalization(axis=3)(X) X = Add()([X,X_earlier]) # Add earlier activation X = Activation("relu")(X) return X # Block corresponding with a change in size def conv_resid(X, f, filters,s): """ filters: filters for each of the conv2D s: stride size to resize the output """ F1,F2,F3 = filters X_earlier = X # 
Block 1 X = Conv2D(F1, kernel_size=(1,1), strides=(s,s),padding="valid",kernel_initializer=keras.initializers.glorot_normal())(X) X = BatchNormalization(axis=3)(X) X = Activation("relu")(X) # Block 2 X = Conv2D(F2, kernel_size=(f,f), strides=(1,1),padding="same",kernel_initializer=keras.initializers.glorot_normal())(X) X = BatchNormalization(axis=3)(X) X = Activation("relu")(X) # Block 3 X = Conv2D(F3, kernel_size=(1,1), strides=(1,1),padding="valid",kernel_initializer=keras.initializers.glorot_normal())(X) X = BatchNormalization(axis=3)(X) # Resize earlier activation (X_earlier) X_earlier = Conv2D(F3, kernel_size=(1,1), strides=(s,s),padding="valid",kernel_initializer=keras.initializers.glorot_normal())(X_earlier) X_earlier = BatchNormalization(axis=3)(X_earlier) # Add earlier activation X = Add()([X,X_earlier]) X = Activation("relu")(X) return X # The Input shape for this model will be 64x64x1 def model(input_shape): X_input = Input(input_shape) X = ZeroPadding2D(padding=(3,3))(X_input) X = Conv2D(64,kernel_size=(7,7),padding="valid",kernel_initializer=keras.initializers.glorot_uniform())(X) X = BatchNormalization(axis=3)(X) X = Activation(("relu"))(X) X = MaxPooling2D((3,3),strides=(2,2))(X) # indentity block 1 X = conv_resid(X, 3, [64,64,256], 1) X = identity(X, 3, [64,64,256]) X = identity(X, 3, [64,64,256]) # Identity block 2 X = conv_resid(X, 3, [128,128,512], 2) X = identity(X, 3, [128,128,512]) X = identity(X, 3, [128,128,512]) X = identity(X, 3, [128,128,512]) # Identity block 3 X = conv_resid(X, 3, [256, 256, 1024], 2) X = identity(X, 3, [256, 256, 1024]) X = identity(X, 3, [256, 256, 1024]) X = identity(X, 3, [256, 256, 1024]) X = identity(X, 3, [256, 256, 1024]) X = identity(X, 3, [256, 256, 1024]) # Identity block 4 X = conv_resid(X, 3, [512, 512, 2048], 2) X = identity(X, 3, [512, 512, 2048]) X = identity(X, 3, [512, 512, 2048]) X = AveragePooling2D((2,2), name="avg_pool")(X) # Flatten final layer X = Flatten()(X) X = Dense(10, 
activation="softmax",name="dense02",kernel_initializer = keras.initializers.glorot_normal())(X) model = Model(inputs=X_input, outputs=X, name="resnet") return model resid_classi = model(X_train[0].shape) resid_classi.compile(optimizer="adam", loss="categorical_crossentropy", metrics=['accuracy']) resid_classi.fit(X_train, y_train,epochs=10,batch_size=10, validation_data=[X_test,y_test]) ```
github_jupyter
```
# Coder_Hussam Qassim
#
# Recurrent neural network classifier for MNIST: each 28x28 image is fed to
# the RNN as a sequence of 28 time steps of 28 pixels each.
# NOTE(review): this uses the TF 1.x contrib API (tf.contrib.rnn,
# tf.placeholder, sessions), which does not exist in TensorFlow 2.x.

# Import the necessary libraries
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

# Define the RNN parameters
n_steps = 28    # time steps per example (one image row per step)
n_inputs = 28   # inputs per time step (pixels per row)
n_neurons = 150 # hidden units in the recurrent cell
n_outputs = 10  # output classes (digits 0-9)

# Create the input and label placeholders
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])

# Build the graph: one basic RNN cell unrolled over the sequence; the final
# hidden state feeds a fully connected layer that produces the class logits
with tf.name_scope("RNN"):
    basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
    outputs, states = tf.nn.dynamic_rnn(basic_cell, x, dtype=tf.float32)
    logits = fully_connected(states, n_outputs, activation_fn=None)

# Create the cost function (softmax cross-entropy on integer class labels)
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")

# Create the optimizer
learning_rate = 0.001
with tf.name_scope("train"):
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    training_op = optimizer.minimize(loss)

# Evaluate the NN: fraction of examples whose true class is the top logit
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

# Initialize the variables
init = tf.global_variables_initializer()

# Initialize the saver to save the model
saver = tf.train.Saver()

# Fetch the data; test images are reshaped to (batch, n_steps, n_inputs)
mnist = input_data.read_data_sets("data/")
x_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels

# Define the number of epochs and the size of the batch
n_epochs = 100
batch_size = 150

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            x_batch, y_batch = mnist.train.next_batch(batch_size)
            x_batch = x_batch.reshape((-1, n_steps, n_inputs))
            sess.run(training_op, feed_dict={x: x_batch, y: y_batch})
        # NOTE: train accuracy is measured on the last mini-batch of the
        # epoch only, not on the full training set
        acc_train = accuracy.eval(feed_dict={x: x_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={x: x_test, y: y_test })
        print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
    save_path = saver.save(sess, "data/my_model_final.ckpt")

# Using the Neural Network
with tf.Session() as sess:
    saver.restore(sess, "data/my_model_final.ckpt")
    x_new_scaled = [...] # some new images (scaled from 0 to 1) -- placeholder:
                         # must be replaced with a real (batch, 28, 28) array
                         # before this cell can run
    z = logits.eval(feed_dict={x: x_new_scaled})
    y_pred = np.argmax(z, axis=1)
```
github_jupyter
``` # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` <table align="left"> <td> <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/main/ai-platform/notebooks/unofficial/AI_Platform_Custom_Container_Prediction_sklearn.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> ## Overview This tutorial walks through building a custom container to serve a scikit-learn model on AI Platform Predictions. You will use the FastAPI Python web server framework to create a prediction and health endpoint. You will also cover incorporating a pre-processor from training into your online serving. ### Dataset This tutorial uses R.A. Fisher's Iris dataset, a small dataset that is popular for trying out machine learning techniques. Each instance has four numerical features, which are different measurements of a flower, and a target label that marks it as one of three types of iris: Iris setosa, Iris versicolour, or Iris virginica. This tutorial uses [the copy of the Iris dataset included in the scikit-learn library](https://scikit-learn.org/stable/datasets/index.html#iris-dataset). ### Objective The goal is to: - Train a model that uses a flower's measurements as input to predict what type of iris it is. 
- Save the model and its serialized pre-processor - Build a FastAPI server to handle predictions and health checks - Build a custom container with model artifacts - Upload and deploy custom container to AI Platform Prediction This tutorial focuses more on deploying this model with AI Platform than on the design of the model itself. ### Costs This tutorial uses billable components of Google Cloud: * AI Platform Learn about [AI Platform (Classic) pricing](https://cloud.google.com/ai-platform/pricing), and use the [Pricing Calculator](https://cloud.google.com/products/calculator/) to generate a cost estimate based on your projected usage. ### Set up your local development environment **If you are using Colab or AI Platform Notebooks**, your environment already meets all the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements. You need the following: * Docker * Git * Google Cloud SDK (gcloud) * Python 3 * virtualenv * Jupyter notebook running in a virtual environment with Python 3 The Google Cloud guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions: 1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/) 1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python) 1. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment. 1. To install Jupyter, run `pip install jupyter` on the command-line in a terminal shell. 1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell. 1. Open this notebook in the Jupyter Notebook Dashboard. 
### Install additional packages Install additional package dependencies not installed in your notebook environment, such as NumPy, Scikit-learn, FastAPI, Uvicorn, and joblib. Use the latest major GA version of each package. ``` %%writefile requirements.txt joblib~=1.0 numpy~=1.20 scikit-learn~=0.24 google-cloud-storage>=1.26.0,<2.0.0dev # Required in Docker serving container %pip install -U -r requirements.txt # For local FastAPI development and running %pip install -U "uvicorn[standard]>=0.12.0,<0.14.0" fastapi~=0.63 # AI Platform (Classic) client library %pip install -U google-api-python-client ``` ### Restart the kernel After you install the additional packages, you need to restart the notebook kernel so it can find the packages. ``` # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project). 1. [Enable the AI Platform (Classic) API and Compute Engine API](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component). 1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk). 1. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` or `%` as shell commands, and it interpolates Python variables with `$` or `{}` into these commands. 
#### Set your project ID **If you don't know your project ID**, you may be able to get your project ID using `gcloud`. ``` # Get your Google Cloud project ID from gcloud shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null try: PROJECT_ID = shell_output[0] except IndexError: PROJECT_ID = None print("Project ID:", PROJECT_ID) ``` Otherwise, set your project ID here. ``` if PROJECT_ID == "" or PROJECT_ID is None: PROJECT_ID = "[your-project-id]" # @param {type:"string"} ``` ### Authenticate your Google Cloud account **If you are using AI Platform Notebooks**, your environment is already authenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. **Otherwise**, follow these steps: 1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey). 2. Click **Create service account**. 3. In the **Service account name** field, enter a name, and click **Create**. 4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform" into the filter box, and select **AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. 5. Click *Create*. A JSON file that contains your key downloads to your local environment. 6. Enter the path to your service account key as the `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell. ``` import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. 
# If on AI Platform, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING") and not os.getenv( "GOOGLE_APPLICATION_CREDENTIALS" ): %env GOOGLE_APPLICATION_CREDENTIALS '' ``` ### Configure project and resource names ``` REGION = "us-central1" # @param {type:"string"} MODEL_ARTIFACT_DIR = "custom-container-prediction-model" # @param {type:"string"} REPOSITORY = "custom-container-prediction-sklearn" # @param {type:"string"} IMAGE = "sklearn-fastapi-server" # @param {type:"string"} MODEL_NAME = "sklearn_custom_container" # @param {type:"string"} VERSION_NAME = "v1" # @param {type:"string"} ``` `REGION` - Used for operations throughout the rest of this notebook. Make sure to [choose a region where Cloud AI Platform services are available](https://cloud.google.com/ai-platform-unified/docs/general/locations#feature-availability). You may not use a Multi-Regional Storage bucket for training with AI Platform. `MODEL_ARTIFACT_DIR` - Folder directory path to your model artifacts within a Cloud Storage bucket, for example: "my-models/fraud-detection/trial-4" `REPOSITORY` - Name of the Artifact Repository to create or use. `IMAGE` - Name of the container image that will be pushed. `MODEL_NAME` - Name of AI Platform Model. `VERSION_NAME` - Name of AI Platform Model version. ### Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** To update your model artifacts without re-building the container, you must upload your model artifacts and any custom code to Cloud Storage. Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. 
```
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
```

**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.

```
! gsutil mb -l $REGION $BUCKET_NAME
```

Finally, validate access to your Cloud Storage bucket by examining its contents:

```
! gsutil ls -al $BUCKET_NAME
```

## Write your pre-processor

Scaling training data so each numerical feature column has a mean of 0 and a standard deviation of 1 [can improve your model](https://developers.google.com/machine-learning/crash-course/representation/cleaning-data).

Create `preprocess.py`, which contains a class to do this scaling:

```
%mkdir app

%%writefile app/preprocess.py
import numpy as np


class MySimpleScaler(object):
    """Standardize feature columns to zero mean and unit variance.

    Column means and standard deviations are computed lazily on the first
    call to ``preprocess`` (i.e. during training) and reused on every later
    call, so a pickled instance re-applies the training-time scaling when it
    is loaded again at serving time.
    """

    def __init__(self):
        # Fitted statistics; populated on the first preprocess() call.
        self._means = None
        self._stds = None

    def preprocess(self, data):
        """Return ``data`` scaled column-wise by the stored mean and std.

        Args:
            data: 2-D numeric array of shape (n_samples, n_features).

        Raises:
            ValueError: if any column's standard deviation is 0 when the
                statistics are first computed (scaling would divide by zero).
        """
        if self._means is None:  # during training only
            self._means = np.mean(data, axis=0)

        if self._stds is None:  # during training only
            self._stds = np.std(data, axis=0)
            if not self._stds.all():
                raise ValueError("At least one column has standard deviation of 0.")

        return (data - self._means) / self._stds
```

## Train and store model with pre-processor

Next, use `preprocess.MySimpleScaler` to preprocess the iris data, then train a model using scikit-learn.
At the end, export your trained model as a joblib (`.joblib`) file and export your `MySimpleScaler` instance as a pickle (`.pkl`) file: ``` %cd app/ import pickle import joblib from preprocess import MySimpleScaler from sklearn.datasets import load_iris from sklearn.ensemble import RandomForestClassifier iris = load_iris() scaler = MySimpleScaler() X = scaler.preprocess(iris.data) y = iris.target model = RandomForestClassifier() model.fit(X, y) joblib.dump(model, "model.joblib") with open("preprocessor.pkl", "wb") as f: pickle.dump(scaler, f) ``` ### Upload model artifacts and custom code to Cloud Storage Before you can deploy your model for serving, AI Platform needs access to the following files in Cloud Storage: * `model.joblib` (model artifact) * `preprocessor.pkl` (model artifact) Run the following commands to upload your files: ``` !gsutil cp model.joblib preprocessor.pkl {BUCKET_NAME}/{MODEL_ARTIFACT_DIR}/ %cd .. ``` ## Build a FastAPI server ``` %%writefile app/main.py from fastapi import FastAPI, Request import joblib import json import numpy as np import pickle import os from google.cloud import storage from preprocess import MySimpleScaler from sklearn.datasets import load_iris app = FastAPI() gcs_client = storage.Client() with open("preprocessor.pkl", 'wb') as preprocessor_f, open("model.joblib", 'wb') as model_f: gcs_client.download_blob_to_file( f"{os.environ['AIP_STORAGE_URI']}/preprocessor.pkl", preprocessor_f ) gcs_client.download_blob_to_file( f"{os.environ['AIP_STORAGE_URI']}/model.joblib", model_f ) with open("preprocessor.pkl", "rb") as f: preprocessor = pickle.load(f) _class_names = load_iris().target_names _model = joblib.load("model.joblib") _preprocessor = preprocessor @app.get(os.environ['AIP_HEALTH_ROUTE'], status_code=200) def health(): return {} @app.post(os.environ['AIP_PREDICT_ROUTE']) async def predict(request: Request): body = await request.json() instances = body["instances"] inputs = np.asarray(instances) preprocessed_inputs = 
_preprocessor.preprocess(inputs) outputs = _model.predict(preprocessed_inputs) return {"predictions": [_class_names[class_num] for class_num in outputs]} ``` ### Add pre-start script FastAPI will execute this script before starting up the server. The `PORT` environment variable is set to equal `AIP_HTTP_PORT` in order to run FastAPI on same the port expected by AI Platform Prediction. ``` %%writefile app/prestart.sh #!/bin/bash export PORT=$AIP_HTTP_PORT ``` ### Store test instances to use later To learn more about formatting input instances in JSON, [read the documentation.](https://cloud.google.com/ai-platform-unified/docs/predictions/online-predictions-custom-models#request-body-details) ``` %%writefile instances.json { "instances": [ [6.7, 3.1, 4.7, 1.5], [4.6, 3.1, 1.5, 0.2] ] } ``` ## Build and push container to Artifact Registry ### Build your container Optionally copy in your credentials to run the container locally. ``` # NOTE: Copy in credentials to run locally, this step can be skipped for deployment %cp $GOOGLE_APPLICATION_CREDENTIALS app/credentials.json ``` Write the Dockerfile, using `tiangolo/uvicorn-gunicorn-fastapi` as a base image. This will automatically run FastAPI for you using Gunicorn and Uvicorn. Visit [the FastAPI docs to read more about deploying FastAPI with Docker](https://fastapi.tiangolo.com/deployment/docker/). ``` %%writefile Dockerfile FROM tiangolo/uvicorn-gunicorn-fastapi:python3.7 COPY ./app /app COPY requirements.txt requirements.txt RUN pip install -r requirements.txt ``` Build the image and tag the Artifact Registry path that you will push to. ``` !docker build \ --tag={REGION}-docker.pkg.dev/{PROJECT_ID}/{REPOSITORY}/{IMAGE} \ . ``` ### Run and test the container locally (optional) Run the container locally in detached mode and provide the environment variables that the container requires. These env vars will be provided to the container by AI Platform Prediction once deployed. 
Test the `/health` and `/predict` routes, then stop the running image. ``` !docker rm local-iris !docker run -d -p 80:8080 \ --name=local-iris \ -e AIP_HTTP_PORT=8080 \ -e AIP_HEALTH_ROUTE=/health \ -e AIP_PREDICT_ROUTE=/predict \ -e AIP_STORAGE_URI={BUCKET_NAME}/{MODEL_ARTIFACT_DIR} \ -e GOOGLE_APPLICATION_CREDENTIALS=credentials.json \ {REGION}-docker.pkg.dev/{PROJECT_ID}/{REPOSITORY}/{IMAGE} !curl localhost/health !curl -X POST \ -d @instances.json \ -H "Content-Type: application/json; charset=utf-8" \ localhost/predict !docker stop local-iris ``` ### Push the container to artifact registry Configure Docker to access Artifact Registry. Then push your container image to your Artifact Registry repository. ``` !gcloud beta artifacts repositories create {REPOSITORY} \ --repository-format=docker \ --location=$REGION !gcloud auth configure-docker {REGION}-docker.pkg.dev !docker push {REGION}-docker.pkg.dev/{PROJECT_ID}/{REPOSITORY}/{IMAGE} ``` ## Deploy to AI Platform (Classic) Use gcloud CLI to create your model and model version. ### Create the model ``` !gcloud beta ai-platform models create $MODEL_NAME \ --region=$REGION \ --enable-logging \ --enable-console-logging ``` ### Create the model version After this step completes, the model is deployed and ready for online prediction. 
``` !echo "deploymentUri: {BUCKET_NAME}/{MODEL_ARTIFACT_DIR}" > config.yaml !gcloud beta ai-platform versions create $VERSION_NAME \ --region=$REGION \ --model=$MODEL_NAME \ --machine-type=n1-standard-4 \ --config=config.yaml \ --image={REGION}-docker.pkg.dev/{PROJECT_ID}/{REPOSITORY}/{IMAGE} ``` ## Send predictions ### Using REST ``` !curl -X POST \ -H "Authorization: Bearer $(gcloud auth print-access-token)" \ -H "Content-Type: application/json; charset=utf-8" \ -d @instances.json \ https://{REGION}-ml.googleapis.com/v1/projects/{PROJECT_ID}/models/{MODEL_NAME}/versions/{VERSION_NAME}:predict ``` ### Using Python SDK ``` from google.api_core.client_options import ClientOptions from googleapiclient import discovery client_options = ClientOptions(api_endpoint=f"https://{REGION}-ml.googleapis.com") service = discovery.build("ml", "v1", client_options=client_options) response = ( service.projects() .predict( name=f"projects/{PROJECT_ID}/models/{MODEL_NAME}/versions/{VERSION_NAME}", body={"instances": [[6.7, 3.1, 4.7, 1.5], [4.6, 3.1, 1.5, 0.2]]}, ) .execute() ) if "error" in response: raise RuntimeError(response["error"]) else: print(response) ``` ### Using gcloud CLI ``` !gcloud beta ai-platform predict \ --region=$REGION \ --model=$MODEL_NAME \ --json-request=instances.json ``` ## Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. 
Otherwise, you can delete the individual resources you created in this tutorial:

```
# Delete the model version
!gcloud ai-platform versions delete $VERSION_NAME \
  --region=$REGION \
  --model=$MODEL_NAME \
  --quiet

# Delete the model
!gcloud ai-platform models delete $MODEL_NAME \
  --region=$REGION \
  --quiet

# Delete the container image from Artifact Registry
!gcloud artifacts docker images delete \
  --quiet \
  --delete-tags \
  {REGION}-docker.pkg.dev/{PROJECT_ID}/{REPOSITORY}/{IMAGE}
```
---
## Brown Datathon - Predicting house buying based on Credit Info Data provided by Citizens Bank (Public use available) ### Setting Environment ``` ## Load Basic Package print('PYTHON & PACKAGE VERSION CONTROL') print('----------') import sys #access to system parameters https://docs.python.org/3/library/sys.html print("Python version: {}". format(sys.version)) import pandas as pd #collection of functions for data processing and analysis modeled after R dataframes with SQL like features print("pandas version: {}". format(pd.__version__)) import matplotlib #collection of functions for scientific and publication-ready visualization print("matplotlib version: {}". format(matplotlib.__version__)) import numpy as np #foundational package for scientific computing print("NumPy version: {}". format(np.__version__)) import scipy as sp #collection of functions for scientific computing and advance mathematics print("SciPy version: {}". format(sp.__version__)) import IPython from IPython import display #pretty printing of dataframes in Jupyter notebook print("IPython version: {}". format(IPython.__version__)) import sklearn #collection of machine learning algorithms print("scikit-learn version: {}". format(sklearn.__version__)) #Visualization import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.pylab as pylab import seaborn as sns # from pandas.tools.plotting import scatter_matrix #misc libraries import random import time #ignore warnings import warnings warnings.filterwarnings('ignore') print('----------') ## Path setting path = r'C:\Users\ADMIN\Desktop\Brown_Datathon\citizens-home-financing-challenge' import os print('Path:', path) print('----------') print('\n'.join(os.listdir(path))) ``` ### First Loading Dataset Be careful for bigger dataset! (1.5GB couuld take about 30s through Modin) If that is the case, use sample method in read_csv()! ##### Modin can be faster. However, if doing functions below, use regular Pandas! 
- df.groupby(by='wp_type') - df.drop_duplicates() - df.describe() - df['seconds'].max() ``` # Pandas can have trouble dealing with moderately large data, here's a sampling example total_row_n = 6009259 # number of records in file sample_row_n = 60000 # sample size (can/should be changed to your preference) skip_row_list = sorted(random.sample(range(1,total_row_n+1), total_row_n-sample_row_n)) sep_df = pd.read_csv(root_path + "zip9_coded_201908_pv.csv", skiprows=skip_row_list) demo_df = pd.read_csv(root_path + "zip9_demographics_coded_pv.csv", skiprows=skip_row_list) import os os.environ["MODIN_ENGINE"] = "dask" # Modin will use Dask import modin.pandas as pd pd.__version__ # import modin.pandas as pd # %%timeit -n1 -r1 t0 = time.time() data_path = path data_file = 'zip9_coded_201909_pv.csv' data_set = pd.read_csv(data_path+'/'+data_file) print('Complete loading df!') t1 = time.time() print("Time to process {}s".format(round(t1-t0,2))) df = data_set.copy() #Basic Checking for dataset print('Dataset Shape:\n', df.shape) df.head() ``` ### New Inspection Tool: d-tale Github: https://github.com/man-group/dtale ``` import dtale d = dtale.show(df) print('Complete loading df!') d ``` ## Data Wrangling ### Basic Info about Dataset ``` print(df.info()) print("-"*10) print('Dataset Shape:\n', df.shape) print("-"*10) ``` ### Data Cleaning: NA, Empty String, Meaningless Value #### Checking ``` print('Dataset columns with null & None values:\n', df.isnull().sum()) print('Note: Please Check for possible null-related values (empty string, meaningless value...)') # print(df2.describe()) print("-"*10) ## Check for 'empty string' ## If this generate non-empty array, then dataset contains empty string in following position. # np.where(df.applymap(lambda x: x == '')) print(df[df.applymap(lambda x: x == '').any(axis=1)]) print("p.s. 
If the dataframe above show no rows, then the dataframe doesn't have any empty string.") ``` #### Data Cleaning: A variable & B variable ``` df.describe(include = 'all') df.describe().apply(lambda s:s.apply(lambda x:format(x, 'f'))) ``` ### Data Cleaning: String Manipulation ## Explonatory Analysis ### Exploratory: Target Variable ``` ## Target Variable target_variable_name = 'Survived' print('target variable:', target_variable_name) print('variable type:', type(df[target_variable_name][0])) # This is for changing the data type in some cases. # df_Regress[target_variable_name] = df_Regress[target_variable_name].replace('[^.0-9]', '', regex=True).astype(float) ## Classifier only df_Class = df target_sum = pd.DataFrame([df_Class[target_variable_name].value_counts(), round(df_Class[target_variable_name].value_counts()/sum(df_Class[target_variable_name].value_counts()), 4)], index=['Count','Percentage']).T print('Total Observations:', sum(df_Class[target_variable_name])) print(target_sum.astype({"Count": int})) fig = plt.figure(figsize=[3,5]) ax = sns.barplot(y="Count", x=['0','1'], data=target_sum) for p, i in zip(ax.patches, [0,1]): percent = target_sum['Percentage'][i] ax.annotate('{:.2f}%'. 
format(percent*100), (p.get_x()+0.4, p.get_height()-50), ha='center', size=15, color='white') ## Regression only df_Regress = df plt.figure(figsize=(10,5)) sns.distplot(df_Regress[target_variable_name]) plt.figure(figsize=(10,5)) plt.hist(x=df_Regress[target_variable_name]) # data_path data_path = r'C:\Users\ADMIN\Desktop\Brown_Datathon\citizens-home-financing-challenge' # data_path = r'C:\Users\ADMIN\Desktop\Brown_Datathon\citizens-home-financing-challenge' # tar_data_file = 'ip9_demographics_coded_pv.csv' # tar_data_set = pd.read_csv(data_path+'/'+tar_data_file) data_t1 = pd.read_csv(data_path+'/'+'zip9_demographics_coded_pv.csv') print('Complete loading df!') target_variable_name = 'homebuyers' df_Regress = data_t1 plt.figure(figsize=(10,5)) sns.distplot(df_Regress[target_variable_name]) plt.figure(figsize=(10,5)) plt.hist(x=df_Regress[target_variable_name]) df_Regress[target_variable_name].value_counts() ``` ### Exploratory: Target Variable vs Other Variable ``` path path2 =r'C:\Users\ADMIN\Desktop\Brown_Datathon' ``` ### Fast Auto Visuailization Package: AutoViz ``` n = 6009259 # number of records in file s = 60000 # sample size (can/should be changed to your preference) skip_list = sorted(random.sample(range(1,n+1),n-s)) vis_df = pd.read_csv(path +'/'+ "zip9_coded_201908_pv.csv", skiprows=skip_list, dtype={'zip5': str}) # vis_df = pd.read_csv(path2+'/'+'merge_09_df.csv', ) # vis_df sep_demo_merge11 = vis_df.merge(demo_df, how='inner', on='zip9_code', suffixes=('_sep','_demo'), validate='one_to_one') ### AutoViz from autoviz.AutoViz_Class import AutoViz_Class AV = AutoViz_Class() sep_demo_merge11.head() target_variable_name = 'homebuyers' ## sep = '/' dft = AV.AutoViz('', ',', target_variable_name, sep_demo_merge11) # Generating a whole new html page of the dataframe. Should open it through outside the notebook! 
import webbrowser dff.to_html("df_web.html") url = "http://localhost:8888/files/notebook/df_web.html" # webbrowser.open(url,new=2) ``` ### Merging Data #### Fields ``` demo_df = pd.read_csv(data_path+'/'+'zip9_demographics_coded_pv.csv') sep_df = df sep_demo_merge = sep_df.merge(demo_df, how='inner', on='zip9_code', suffixes=('_sep','_demo'), validate='one_to_one') sep_demo_merge = sep_demo_merge.drop(['Unnamed: 0', 'zip5_demo'], axis=1) sep_demo_merge.head() d = dtale.show(df) print('Complete loading df!') # sep_demo_merge.to_csv('merge_09_df.csv') data_path = r'C:\Users\ADMIN\Desktop\Brown_Datathon' # generate a smaller df to practice Tableau sep_demo_merge = pd.read_csv(data_path+'/'+'merge_09_df.csv') small_df = sep_demo_merge.sample(frac=0.05, random_state=1) print('complete') # small_df.to_csv('small_df.csv') mid_df = sep_demo_merge.sample(frac=0.2, random_state=1) mid_df.shape mid_df.to_csv('mid_df.csv') print('complete!') ``` #### Area assigned by ZIP Code ``` from uszipcode import SearchEngine small_df['district'] = [search.by_zipcode(i).values()[3] for i in small_df['zip5_sep']] small_df.head() # tt.head() # print(search.by_zipcode(tt['zip5_sep']).values()[3]) # small_df.head()['district'].str.split(', ', expand=True) # tt = small_df.head() # tt['district'].str.split(', ', expand=True) tt_1 = pd.concat([tt, tt['district'].str.split(', ', expand=True)], axis=1, join='inner') tt_1.rename(columns={0: 'small_district', 2:'state'}, inplace=True) tt_1 # small_df. 
# result = pd.concat([df1, df4], axis=1, join='inner') ``` ### New Sample: Claire ``` col_list = ['age', 'autoloan_open', 'bankcard_balance', 'bankcard_limit', 'bankcard_open', 'bankcard_trades', 'bankcard_util', 'first_homebuyers', 'homebuyers', 'homeequity_open', 'household_count', 'mortgage_open', 'mortgage1_loan_to_value', 'person_count', 'studentloan_open', 'total_homeequity_balance', 'total_homeequity_limit', 'total_homeequity_trades', 'total_mortgage_balance', 'total_mortgage_limit', 'total_mortgage_trades', 'total_revolving_balance', 'total_revolving_limit', 'total_revolving_trades', 'total_revolving_util', 'zip5_sep', 'zip9_code'] col_list1 = ['zip5','zip9_code', 'autoloan_open', 'bankcard_balance', 'bankcard_limit', 'bankcard_open', 'bankcard_trades', 'bankcard_util', 'homeequity_open', 'mortgage_open', 'mortgage1_loan_to_value', 'studentloan_open', 'total_homeequity_balance', 'total_homeequity_limit', 'total_homeequity_trades', 'total_mortgage_balance', 'total_mortgage_limit', 'total_mortgage_trades', 'total_revolving_balance', 'total_revolving_limit', 'total_revolving_trades', 'total_revolving_util'] data_set = pd.read_csv(path+'/'+'zip9_coded_201908_pv.csv', usecols=col_list1) print('complete') demo_df = pd.read_csv(path+'/'+'zip9_demographics_coded_pv.csv') print('complete!') sep_demo_merge = data_set.merge(demo_df, how='inner', on='zip9_code', suffixes=('_sep','_demo'), validate='one_to_one') sep_demo_merge = sep_demo_merge.drop(['zip5_demo'], axis=1) # check! sep_demo_merge.to_csv('merge_08_df.csv',index=False) print('complete') # check! 
path = r'C:\Users\ADMIN\Desktop\Brown_Datathon\citizens-home-financing-challenge' path2 = r'C:\Users\ADMIN\Desktop\Brown_Datathon' ## pipeline for name in ['zip9_coded_201906_pv.csv', 'zip9_coded_201907_pv.csv']: data_set = pd.read_csv(path+'/'+name, usecols=col_list1) sep_demo_merge = data_set.merge(demo_df, how='inner', on='zip9_code', suffixes=('_sep','_demo'), validate='one_to_one') sep_demo_merge = sep_demo_merge.drop(['zip5_demo'], axis=1) sep_demo_merge.to_csv('new_'+name, index=False) print('complete '+ name) data_set = pd.read_csv(path+'/'+'zip9_coded_201907_pv.csv', names=['zip5_sep','zip9_code']) ``` ## Final works!!! New Feature: Jeff ``` # path # path2 path2 =r'C:\Users\ADMIN\Desktop\Brown_Datathon' econ_df = pd.read_csv(path2+'/'+'17zpallnoagi.csv') print('complete') econ_df.head() econ_df['STATEFIPS'].value_counts(dropna=False) econ_df = econ_df.dropna() econ_df.head() # test = econ_df[econ_df['STATEFIPS'] is not np.nan()] econ = econ_df[['A18800']] econ # econ_df['STATEFIPS'] == True ## new data stardization from sklearn import preprocessing # Create the Scaler object scaler = preprocessing.StandardScaler() scaled_econ = scaler.fit_transform(np.array(econ)) scaled_econ = np.reshape(scaled_econ, (scaled_econ.shape[0],)) scaled_econ.tolist() # econ_df['ZIPCODE'].astype('int64') # econ_df # econ_df['ZIPCODE'] new_econ_df = pd.DataFrame(scaled_econ.tolist(), index = econ_df['ZIPCODE'].astype('int64')) new_econ_df = new_econ_df.reset_index() new_econ_df.columns = ['zip5_sep', 'Personal_property_taxes_amount'] new_econ_df.head() ``` ## Claire data + new econ metric ``` # from feature_selector import FeatureSelector # Features are in train and labels are in train_labels # fs = FeatureSelector(data = train, labels = train_labels) path2 data_file = 'Total_data.csv' final_df = pd.read_csv(path2+'/'+data_file) print('complete') # final_df = final_df.drop('Unnamed: 0', axis = 1) final_df.head() final_df2 = final_df.merge(new_econ_df, how='inner', on='zip5_sep', 
suffixes=('_sep','_demo'), validate='many_to_many') final_df3= final_df2[['person_count', 'age', 'mortgage_open', 'studentloan_open', 'bankcard_balance', 'total_revolving_util', 'total_revolving_trades', 'autoloan_open', 'total_homeequity_limit', 'total_homeequity_balance', 'total_mortgage_balance', 'zip5_sep', 'homeequity_open', 'Personal_property_taxes_amount','homebuyers']] # final_df3.head() # final_df2.head()['total_homeequity_balance'] final_df2.shape final_df3.shape # no_nan_df = final_df2.dropna(how='any') # no_nan_df.shape # n = 6009259 # number of records in file # s = 60000 # sample size (can/should be changed to your preference) # # final_df2 # skip_list = sorted(random.sample(range(1,n+1),n-s)) # sep_df = pd.read_csv(root_path + "zip9_coded_201908_pv.csv", skiprows=skip_list, dtype={'zip5': str}) # demo_df = pd.read_csv(root_path + "zip9_demographics_coded_pv.csv", skiprows=skip_list, dtype={'zip5': str}) # final_df2_sample = final_df2.sample(frac=0.05, random_state=1) final_df3_sample = final_df3.sample(frac=0.05, random_state=1) print('complete') ``` ## Machine Learning ``` target_variable_name = 'homebuyers' from sklearn import model_selection train_X, test_X, train_y, test_y = model_selection.train_test_split(final_df3_sample.drop(target_variable_name, axis = 1), final_df3_sample[target_variable_name], test_size=0.3, random_state = 10) # generate the train and test data suitable for this package train = train_X.copy() train[target_variable_name] = train_y test = test_X.copy() test[target_variable_name] = test_y # train_y from autoviml.Auto_ViML import Auto_ViML # final import pickle ## Run the AutoML! 
#### If Boosting_Flag = True => XGBoost, Fase=>ExtraTrees, None=>Linear Model sample_submission='' scoring_parameter = 'balanced-accuracy' m, feats, trainm, testm = Auto_ViML(train, target_variable_name, test, sample_submission, scoring_parameter=scoring_parameter, hyper_param='GS',feature_reduction=True, Boosting_Flag=True,Binning_Flag=False, Add_Poly=0, Stacking_Flag=False, Imbalanced_Flag=False, verbose=1) # p.s. This could run much more than what the package estimated! # m, feats, trainm, testm = Auto_ViML(train, target_variable_name, test, sample_submission, # scoring_parameter=scoring_parameter, # hyper_param='GS',feature_reduction=True, # Boosting_Flag=True,Binning_Flag=False, # Add_Poly=0, Stacking_Flag=False, # Imbalanced_Flag=False, # verbose=1) filename = 'finalized_model.sav' pickle.dump(m, open(filename, 'wb')) ## second time without first homebuyer sample_submission='' scoring_parameter = 'balanced-accuracy' m1, feats1, trainm1, testm1 = Auto_ViML(train, target_variable_name, test, sample_submission, scoring_parameter=scoring_parameter, hyper_param='GS',feature_reduction=True, Boosting_Flag=True,Binning_Flag=False, Add_Poly=0, Stacking_Flag=False, Imbalanced_Flag=False, verbose=1) filename = 'finalized_model2.sav' pickle.dump(m, open(filename, 'wb')) #### Regression Only ##### ## Result of each model def rmse(results, y_cv): return np.sqrt(np.mean((results - y_cv)**2, axis=0)) from autoviml.Auto_ViML import print_regression_model_stats ## Change the 'modelname' to generate different model result modelname='LassoLarsCV Regression' print('Model:', modelname) # print('RMSE:', rmse(test[target_variable_name].values,testm[target_variable_name+'_'+modelname+'_predictions'].values)) print_regression_model_stats(test[target_variable_name].values,testm[target_variable_name+'_'+modelname+'_predictions'].values) ## USE CLAIRE DATA # data_file = 'Total_data.csv' # df_new = pd.read_csv(path2+'/'+data_file) # print('complete') df_new = final_df3.fillna(0) 
print('Dataset columns with null & None values:\n', df_new.isnull().sum()) print('Note: Please Check for possible null-related values (empty string, meaningless value...)') # print(df2.describe()) print("-"*10) ## Check for 'empty string' ## If this generate non-empty array, then dataset contains empty string in following position. # np.where(df.applymap(lambda x: x == '')) # print(df[df.applymap(lambda x: x == '').any(axis=1)]) # print("p.s. If the dataframe above show no rows, then the dataframe doesn't have any empty string.") df_new.shape df_new_sample = df_new.sample(frac=0.01, random_state=1) print('complete') df_new_sample.shape from autoviml.Auto_ViML import Auto_ViML target_variable_name = 'homebuyers' from sklearn import model_selection train_X1, test_X1, train_y1, test_y1 = model_selection.train_test_split(df_new_sample.drop(target_variable_name, axis = 1), df_new_sample[target_variable_name], test_size=0.3, random_state = 10) # generate the train and test data suitable for this package train1 = train_X1.copy() train1[target_variable_name] = train_y1 test1 = test_X1.copy() test1[target_variable_name] = test_y1 ## second time without first homebuyer sample_submission='' scoring_parameter = 'balanced-accuracy' m1, feats1, trainm1, testm1 = Auto_ViML(train1, target_variable_name, test1, sample_submission, scoring_parameter=scoring_parameter, hyper_param='GS',feature_reduction=True, Boosting_Flag=True,Binning_Flag=False, Add_Poly=0, Stacking_Flag=False, Imbalanced_Flag=False, verbose=1) testm1 m1 path2 hold_out_set = pd.read_csv(path2+'/'+'zip9_coded_201909_wh.csv') # new_econ_df.column = ['zip5', 'Personal_property_taxes_amount'] # new_econ_df.head() demo_df2 = pd.read_csv(path2+'/'+'zip9_demographics_unlabeled_wh_test.csv') hold_out_set.rename(columns={'zip5': 'zip5_sep'}, inplace=True) hold_out_set2 = hold_out_set.merge(demo_df2, how='inner', on='zip9_code', suffixes=('_sep','_demo'), validate='one_to_one') hold_out_set2.head() # hold_out_set2 
hold_out_set2 = hold_out_set2.fillna(0) # hold_out_set.rename(columns={'zip5': 'zip5_sep'}, inplace=True) hold_out_set3 = hold_out_set2.merge(new_econ_df, how='inner', on='zip5_sep', suffixes=('_sep','_demo'), validate='many_to_many') hold_out_set3.head() hold_out_set4= hold_out_set3[['person_count', 'age', 'mortgage_open', 'studentloan_open', 'bankcard_balance', 'total_revolving_util', 'total_revolving_trades', 'autoloan_open', 'total_homeequity_balance', 'total_mortgage_balance', 'zip5_sep', 'homeequity_open', 'Personal_property_taxes_amount']] hold_out_set5= hold_out_set4[['person_count', 'autoloan_open', 'total_homeequity_balance', 'total_revolving_util', 'mortgage_open', 'total_mortgage_balance', 'age', 'studentloan_open', 'bankcard_balance', 'homeequity_open', 'Personal_property_taxes_amount', 'zip5_sep', 'total_revolving_trades']] # output = m1.predict(data=hold_out_set3) # list(hold_out_set4.columns) list(train_X1.columns) # output = m1.predict(data=hold_out_set) # final_df = pd.DataFrame() output = m1.predict(data=hold_out_set5) # output aaa = pd.DataFrame(output) aaa.head() aaa.to_csv('result01.csv', index = False) ```
---
``` import numpy as np a = np.array([4, 10, 12, 23, -2, -1, 0, 0, 0, -6, 3, -7]) ``` # 1. How many negative numbers are there? ``` neg_a = a[a < 0] #neg_a len(neg_a) ``` # 2. How many positive numbers are there? ``` def pos_a(a): return a[a > 0] #pos_a(a) len(pos_a(a)) ``` # 3. How many even positive numbers are there? ``` #even_a = a[a % 2 ==0] pos_even_a = pos_a([pos_a % 2 == 0]) len(pos_even_a(a)) even_pos = a[(a > 0) & (a % 2 == 0)] even_pos ``` # 4. If you were to add 3 to each data point, how many positive numbers would there be? ``` plus_3 = a + 3 #plus_3 pos_a(plus_3) len(pos_a(plus_3)) ``` # 5. If you squared each number, what would the new mean and standard deviation be? ``` a_sqrd = a * a a_sqrd a_sqrd.mean(), a_sqrd.std() ``` # 6. A common statistical operation on a dataset is **centering**. This means to adjust the data such that the mean of the data is 0. This is done by subtracting the mean from each data point. Center the data set. See this link for more on centering: https://www.theanalysisfactor.com/centering-and-standardizing-predictors/ ``` centered_a = a - a.mean() centered_a ``` # 7. Calculate the z-score for each data point. Recall that the z-score is given by: - z = (x - μ)/ σ ``` zscore_a = (a - a.mean()) / a.std() zscore_a ``` # 8. Copy the setup and exercise directions from More Numpy Practice (https://gist.github.com/ryanorsinger/c4cf5a64ec33c014ff2e56951bc8a42d) into your `numpy_exercises.py` and add your solutions. 
``` import numpy as np # Life w/o numpy to life with numpy ## Setup 1 a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # Use python's built in functionality/operators to determine the following: # Exercise 1 - Make a variable called sum_of_a to hold the sum of all the numbers in above list #a = np.array(a) #sum_of_a = a.sum() sum_of_a = sum(a) sum_of_a # Exercise 2 - Make a variable named min_of_a to hold the minimum of all the numbers in the above list #min_of_a = a.min() min_of_a = min(a) min_of_a # Exercise 3 - Make a variable named max_of_a to hold the max number of all the numbers in the above list #max_of_a = a.max() max_of_a = max(a) max_of_a # Exercise 4 - Make a variable named mean_of_a to hold the average of all the numbers in the above list #mean_of_a = a.mean() mean_of_a = sum(a) / len(a) mean_of_a # Exercise 5 - Make a variable named product_of_a to hold the product of multiplying all the numbers in the above list together #product_of_a = a.prod() def multiplication(num): product = 1 for ele in num: product *= ele return product product_of_a = multiplication(a) product_of_a # Exercise 6 - Make a variable named squares_of_a. It should hold each number in a squared like [1, 4, 9, 16, 25...] #squares_of_a = np.square(a) def squared(numbers): sqs = [] for n in numbers: sqs.append(n * n) return sqs squares_of_a = squared(a) squares_of_a # Exercise 7 - Make a variable named odds_in_a. It should hold only the odd numbers #odds_in_a = a[a % 2 != 0] def odd_numbers(digits): odd = [] for d in digits: if d % 2 != 0: odd.append(d) else: continue return odd odds_in_a = odd_numbers(a) odds_in_a # Exercise 8 - Make a variable named evens_in_a. It should hold only the evens. #evens_in_a = a[a % 2 == 0] def even_numbers(numeros): even = [] for n in numeros: if n % 2 == 0: even.append(n) else: continue return even evens_in_a = even_numbers(a) evens_in_a ## What about life in two dimensions? A list of lists is matrix, a table, # a spreadsheet, a chessboard... 
## Setup 2: Consider what it would take to find the sum, min, max, average, sum,
# product, and list of squares for this list of two lists.
b = [
    [3, 4, 5],
    [6, 7, 8],
]

# Exercise 1 - refactor the following to use numpy.
# Use sum_of_b as the variable.
# **Hint, you'll first need to make sure that the "b" variable is a numpy array**
# sum_of_b = 0
# for row in b:
#     sum_of_b += sum(row)
b = np.array(b)
sum_of_b = b.sum()
sum_of_b

# Exercise 2 - refactor the following to use numpy.
# min_of_b = min(b[0]) if min(b[0]) <= min(b[1]) else min(b[1])
min_of_b = b.min()
min_of_b

# Exercise 3 - refactor the following maximum calculation to find the answer with numpy.
# max_of_b = max(b[0]) if max(b[0]) >= max(b[1]) else max(b[1])
max_of_b = b.max()
max_of_b

# Exercise 4 - refactor the following using numpy to find the mean of b
# mean_of_b = (sum(b[0]) + sum(b[1])) / (len(b[0]) + len(b[1]))
mean_of_b = b.mean()
mean_of_b

# Exercise 5 - refactor the following using numpy for calculating the product of all numbers multiplied together.
#product_of_b = 1 #for row in b: # for number in row: # product_of_b *= number product_of_b = b.prod() product_of_b ``` # the above functions are methods that live on the array ## others like below live in the numpy library only ``` # Exercise 6 - refactor the following to use numpy to find the list of squares #squares_of_b = [] #for row in b: # for number in row: # squares_of_b.append(number**2) b = np.array(b) squares_of_b = np.square(b) #squares_of_b = b ** 2 squares_of_b np.power(b, 2) # Exercise 7 - refactor using numpy to determine the odds_in_b #odds_in_b = [] #for row in b: # for number in row: # if(number % 2 != 0): # odds_in_b.append(number) odds_in_b = b[b % 2 != 0] odds_in_b # Exercise 8 - refactor the following to use numpy to filter only the even numbers #evens_in_b = [] #for row in b: # for number in row: # if(number % 2 == 0): # evens_in_b.append(number) evens_in_b = b[b % 2 == 0] evens_in_b # Exercise 9 - print out the shape of the array b. np.shape(b) # Exercise 10 - transpose the array b. b.transpose() b.T # Exercise 11 - reshape the array b to be a single list of 6 numbers. (1 x 6) b.reshape(1, 6) b.reshape(6) # Exercise 12 - reshape the array b to be a list of 6 lists, each containing only 1 number (6 x 1) b.reshape(6, 1) ## Setup 3 c = [ [1, 2, 3], [4, 5, 6], [7, 8, 9] ] # HINT, you'll first need to make sure that the "c" variable is a # numpy array prior to using numpy array methods. # Exercise 1 - Find the min, max, sum, and product of c. c = np.array(c) c.min(), c.max(), c.sum(), c.prod() # Exercise 2 - Determine the standard deviation of c. c.std() # Exercise 3 - Determine the variance of c. c.var() # Exercise 4 - Print out the shape of the array c c.shape # Exercise 5 - Transpose c and print out transposed result. print(c.transpose()) # Exercise 6 - Get the dot product of the array c with c. np.dot(c, c) # Exercise 7 - Write the code necessary to sum up the result # of c times c transposed. 
Answer should be 261 t_c = c.transpose() prod_tc = c * t_c prod_tc.sum() t_c = c.transpose() prod_tc = np.dot(c, t_c) prod_tc.sum() # Exercise 8 - Write the code necessary to determine the product of c times c transposed. Answer should be 131681894400. prod_tc.prod() ## Setup 4 d = [ [90, 30, 45, 0, 120, 180], [45, -90, -30, 270, 90, 0], [60, 45, -45, 90, -45, 180] ] # Exercise 1 - Find the sine of all the numbers in d np.sin(d) # Exercise 2 - Find the cosine of all the numbers in d np.cos(d) # Exercise 3 - Find the tangent of all the numbers in d np.tan(d) # Exercise 4 - Find all the negative numbers in d d = np.array(d) d[d < 0] # Exercise 5 - Find all the positive numbers in d d[d > 0] # Exercise 6 - Return an array of only the unique numbers in d. np.unique(d) # Exercise 7 - Determine how many unique numbers there are in d. len(np.unique(d)) # Exercise 8 - Print out the shape of d. d.shape # Exercise 9 - Transpose and then print out the shape of d. d.T.shape # Exercise 10 - Reshape d into an array of 9 x 2 d.reshape(9, 2) ```
github_jupyter
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> _prepared by Abuzer Yakaryilmaz_ ``` # A jupyter notebook is composed by one or more cells. # This notebook is prepared for jupyter notebooks, and the menu items and command buttons may differ in jupyter lab. # There are two main cells: Code and Markdown. # A code cell is used to write and execute your codes. # A markdown cell is used to write text descriptions, notes, formulas or include graphics and images. # On a markdown cell, you can format your content by using Markdown, HTML, or LaTeX code. # During our tutorial, you are expected to write only python codes. # Interested readers may also use markdown cells, but it is not necesary to complete our tutorial. # # We explain basic usage of cells in Jupyter notebooks here # # This is the first cell in this notebook. # You can write Python code here, # and then EXECUTE/RUN it by # 1) pressing CTRL+ENTER or SHIFT+ENTER # 2) clicking "Run" on the menu # here are few lines of python code print("hello world") str="*" for i in range(10): print(str) str+="*" # after executing this cell, the outcomes will immedeately appear after this cell # you can change the range above and re-run this cell # This is the second cell in this notebook. # # By using menu item "Insert", you can add a new cell before or after the active cell. # When a cell is selected, you may delete it by using menu item "Edit". # # As you may notice, there are other editing options under "Edit", # for example, copy/cut-paste cells and split-merge cells. # ``` <b>This is the third cell.</b> This is a markdown type cell. The type of any cell is shown on the toolbar under the menu bar (right-side). You can change the type of a cell from this pulldown menu You can write Markdown, HTML, and LaTex code on this cell. 
<i>By double clicking on this cell, you can see the code of this cell.</i> <br> <u>By execucting this cell, you see the result content.</u> <b> This is the fourth cell.</b> This is also a markdown cell. LaTex is used to show mathematical expressions, formulas, etc. For example, $ x^2 + y ^ 2 = \frac{4}{9} $, $ \sum_{i=1}^n (i+2)^{3} $, or $ \left( \begin{array}{rr} 1 & 0 & -1 \\ 2 & -2 & 0 \\ 3 & -1 & -2 \end{array} \right) $. <i>By double clicking on this cell, you can see the code.</i> <br> <u>By executing/running this cell, you can see the result content.</u> <h2> Tips </h2> Showing line numbers: View $\rightarrow$ Toggle Line Numbers Command mode: CTRL+SHIFT+P <h2>Magic Commands</h2> Here we list a few built-in magic commands for Jupyter notebooks that we will use during this tutorial. These commands can be executed in code-type cells. <b><u>Write the content of a code cell</u></b> into an external file: %%writefile FILENAME.py This command should be placed in the first line of cell, and then the cell should be executed. <i> Example:</i> ``` %%writefile first.py print("hello world") str="*" for i in range(5): print(str) str+="*" ``` <b><u>Execute an external script</u></b> without loading its content into the cell: %run FILENAME.py <i>Example:</i> ``` %run first.py ``` <b><u>Load an external script</u></b> into a cell: %load FILENAME.py Once this command is executed, the content of cell is replaced with the content of file. (The previous content is deleted.) Besides, this command is placed to the first line, and then commented out. <i>Example:</i> ``` %load first.py ```
github_jupyter
# Easily export jupyter cells to python module https://github.com/fastai/course-v3/blob/master/nbs/dl2/notebook2script.py ``` ! python /tf/src/scripts/notebook2script.py visualization.ipynb %matplotlib inline ! pip install -U scikit-learn #export from exp.nb_clustering import * from exp.nb_evaluation import * import matplotlib import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import matplotlib.cm as cmx import matplotlib.patches as patches from mpl_toolkits.mplot3d import Axes3D from matplotlib.colors import LogNorm cd /tf/src/data/features ``` # Generate all the feature vectors (Skip if already done) ``` embdr = D2VEmbedder("/tf/src/data/doc2vec/model") # Generate and Save Human Features hman_dict = embdr("/tf/src/data/methods/DATA00M_[god-r]/test") with open('hman_features.pickle', 'wb') as f: pickle.dump(hman_dict, f, protocol=pickle.HIGHEST_PROTOCOL) # Generate and Save GPT-2 Pretrained Features m1_dict = embdr("/tf/src/data/samples/unconditional/m1_example") with open('m1_features.pickle', 'wb') as f: pickle.dump(m1_dict, f, protocol=pickle.HIGHEST_PROTOCOL) ``` # Read in Feature Vectors ``` models_path = "/tf/src/data/features/output_space" models_features = load_features(models_path) len(models_features[0]), len(models_features[1]) ``` # Visualize Features ``` dims = 2 models_clusters = cluster(models_features, k_range = [2, 3, 4, 5], dims = 2) def setup_data(model): feature_vectors, _, _, centroids, kmeans = model # Step size of the mesh. Decrease to increase the quality of the VQ. h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max]. # Plot the decision boundary. 
# For that, we will assign a color to each
x_min, x_max
enumerate(models_clusters): feature_vectors, _, _, kmeans = model gmm = generate_distributions(feature_vectors, kmeans.n_clusters - 1) x_min, x_max = feature_vectors[:, 0].min(), feature_vectors[:, 0].max() # Plot Data plt.subplot(gs[0, i]) delta = 10 x = np.linspace(x_min - delta, x_max + delta, 1000).reshape(1000,1) logprob = gmm.score_samples(x) pdf = np.exp(logprob) plt.plot(x, pdf, '-k') if export: if not os.path.exists('images/'): os.mkdir('images/') plt.savefig('images/1D_GMM_demonstration.png', dpi=100, format='png') plt.show() plot_gmm_1d(models_clusters) ``` ## Visualize 2D ``` dims = 2 models_clusters = cluster(models_features, k_range = [2, 3, 4, 5], dims = dims) # From http://www.itzikbs.com/gaussian-mixture-model-gmm-3d-point-cloud-classification-primer def visualize_2D_gmm(points, w, mu, stdev, id, export=True): ''' plots points and their corresponding gmm model in 2D Input: points: N X 2, sampled points w: n_gaussians, gmm weights mu: 2 X n_gaussians, gmm means stdev: 2 X n_gaussians, gmm standard deviation (assuming diagonal covariance matrix) Output: None ''' n_gaussians = mu.shape[1] N = int(np.round(points.shape[0] / n_gaussians)) # Visualize data # fig = plt.figure(figsize=(8, 8)) axes = plt.gca() plt.set_cmap('Set1') colors = cmx.Set1(np.linspace(0, 1, n_gaussians)) for i in range(n_gaussians): idx = range(i * N, (i + 1) * N) plt.scatter(points[idx, 0], points[idx, 1], alpha=0.3, c=colors[i]) for j in range(8): axes.add_patch( patches.Ellipse(mu[:, i], width=(j+1) * stdev[0, i], height=(j+1) * stdev[1, i], fill=False, color=colors[i])) plt.title('GMM ' + str(id)) plt.xlabel('X') plt.ylabel('Y') def plot_gmm_2d(models_clusters, export = True): plt.figure(figsize=(12, 8)) # Create 2x2 sub plots gs = gridspec.GridSpec(1, 2) plt.clf() for i, model in enumerate(models_clusters): feature_vectors, _, _, kmeans = model gmm = generate_distributions(feature_vectors, kmeans.n_clusters - 1) # Plot Data plt.subplot(gs[0, i]) 
visualize_2D_gmm(feature_vectors, gmm.weights_, gmm.means_.T, np.sqrt(gmm.covariances_).T, i) if export: if not os.path.exists('images/'): os.mkdir('images/') plt.savefig('images/2D_GMM_demonstration.png', dpi=100, format='png') plt.show() plot_gmm_2d(models_clusters) ``` ## Visualize 3D ``` dims = 3 models_clusters = cluster(models_features, k_range = [2, 3, 4, 5], dims = dims) # From http://www.itzikbs.com/gaussian-mixture-model-gmm-3d-point-cloud-classification-primer def plot_sphere(w=0, c=[0,0,0], r=[1, 1, 1], subdev=10, ax=None, sigma_multiplier=3): ''' plot a sphere surface Input: c: 3 elements list, sphere center r: 3 element list, sphere original scale in each axis ( allowing to draw elipsoids) subdiv: scalar, number of subdivisions (subdivision^2 points sampled on the surface) ax: optional pyplot axis object to plot the sphere in. sigma_multiplier: sphere additional scale (choosing an std value when plotting gaussians) Output: ax: pyplot axis object ''' if ax is None: fig = plt.figure() ax = fig.add_subplot(111, projection='3d') pi = np.pi cos = np.cos sin = np.sin phi, theta = np.mgrid[0.0:pi:complex(0,subdev), 0.0:2.0 * pi:complex(0,subdev)] x = sigma_multiplier*r[0] * sin(phi) * cos(theta) + c[0] y = sigma_multiplier*r[1] * sin(phi) * sin(theta) + c[1] z = sigma_multiplier*r[2] * cos(phi) + c[2] cmap = cmx.ScalarMappable() cmap.set_cmap('jet') c = cmap.to_rgba(w) ax.plot_surface(x, y, z, color=c, alpha=0.2, linewidth=1) return ax # From http://www.itzikbs.com/gaussian-mixture-model-gmm-3d-point-cloud-classification-primer def visualize_3d_gmm(points, w, mu, stdev, id, axes, export=True): ''' plots points and their corresponding gmm model in 3D Input: points: N X 3, sampled points w: n_gaussians, gmm weights mu: 3 X n_gaussians, gmm means stdev: 3 X n_gaussians, gmm standard deviation (assuming diagonal covariance matrix) Output: None ''' n_gaussians = mu.shape[1] N = int(np.round(points.shape[0] / n_gaussians)) # Visualize data plt.set_cmap('Set1') 
colors = cmx.Set1(np.linspace(0, 1, n_gaussians)) for i in range(n_gaussians): idx = range(i * N, (i + 1) * N) axes.scatter(points[idx, 0], points[idx, 1], points[idx, 2], alpha=0.3, c=colors[i]) plot_sphere(w=w[i], c=mu[:, i], r=stdev[:, i], ax=axes) plt.title('3D GMM') axes.set_xlabel('X') axes.set_ylabel('Y') axes.set_zlabel('Z') axes.view_init(35.246, 45) def plot_gmm_3d(models_clusters, export = True): # set up a figure twice as wide as it is tall fig = plt.figure(figsize=plt.figaspect(0.5)) for i, model in enumerate(models_clusters): feature_vectors, _, _, kmeans = model gmm = generate_distributions(feature_vectors, kmeans.n_clusters - 1) # Plot Data axes = fig.add_subplot(121 + i, projection='3d') visualize_3d_gmm(feature_vectors, gmm.weights_, gmm.means_.T, np.sqrt(gmm.covariances_).T, i, axes) if export: if not os.path.exists('images/'): os.mkdir('images/') plt.savefig('images/3D_GMM_demonstration.png', dpi=100, format='png') plt.show() plot_gmm_3d(models_clusters) ```
github_jupyter
# UCI Metro dataset ``` import pandas as pd import os from pathlib import Path from config import data_raw_folder, data_processed_folder from timeeval import Datasets import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (20, 10) dataset_collection_name = "Metro" source_folder = Path(data_raw_folder) / "UCI ML Repository/Metro" target_folder = Path(data_processed_folder) from pathlib import Path print(f"Looking for source datasets in {source_folder.absolute()} and\nsaving processed datasets in {target_folder.absolute()}") ``` ## Dataset transformation and pre-processing ``` train_type = "unsupervised" train_is_normal = False input_type = "multivariate" datetime_index = True dataset_type = "real" # create target directory dataset_subfolder = os.path.join(input_type, dataset_collection_name) target_subfolder = os.path.join(target_folder, dataset_subfolder) try: os.makedirs(target_subfolder) print(f"Created directories {target_subfolder}") except FileExistsError: print(f"Directories {target_subfolder} already exist") pass dm = Datasets(target_folder) # get target filenames dataset_name = "metro-traffic-volume" filename = f"{dataset_name}.test.csv" source_file = source_folder / "Metro_Interstate_Traffic_Volume.csv" path = os.path.join(dataset_subfolder, filename) target_filepath = os.path.join(target_subfolder, filename) # transform file df = pd.read_csv(source_file) df = df[["date_time", "traffic_volume", "temp", "rain_1h", "snow_1h", "clouds_all", "holiday"]].copy() df.insert(0, "timestamp", pd.to_datetime(df["date_time"])) df.loc[df["holiday"] == "None", "is_anomaly"] = 0 df.loc[~(df["holiday"] == "None"), "is_anomaly"] = 1 df["is_anomaly"] = df["is_anomaly"].astype(int) df = df.drop(columns=["date_time", "holiday"]) df.to_csv(target_filepath, index=False) print(f"Processed source dataset {source_file} -> {target_filepath}") dataset_length = len(df) # save metadata dm.add_dataset((dataset_collection_name, dataset_name), train_path = 
None, test_path = path, dataset_type = dataset_type, datetime_index = datetime_index, split_at = None, train_type = train_type, train_is_normal = train_is_normal, input_type = input_type, dataset_length = dataset_length ) dm.save() dm.refresh() dm._df.loc[slice(dataset_collection_name, dataset_collection_name)] ``` ## Experimentation ``` source_file = source_folder / "Metro_Interstate_Traffic_Volume.csv" df = pd.read_csv(source_file) df df1 = df[["date_time", "traffic_volume", "temp", "rain_1h", "snow_1h", "clouds_all", "holiday"]].copy() df1.insert(0, "timestamp", pd.to_datetime(df1["date_time"])) df1.loc[df1["holiday"] == "None", "is_anomaly"] = 0 df1.loc[~(df1["holiday"] == "None"), "is_anomaly"] = 1 df1["is_anomaly"] = df1["is_anomaly"].astype(int) df1 = df1.drop(columns=["date_time", "holiday"]) df1 df1[["traffic_volume", "temp", "rain_1h", "snow_1h", "clouds_all"]].plot() df1["is_anomaly"].plot(secondary_y=True) plt.show() ```
github_jupyter
## Practice: Dealing with Word Embeddings Today we gonna play with word embeddings: train our own little embedding, load one from gensim model zoo and use it to visualize text corpora. This whole thing is gonna happen on top of embedding dataset. __Requirements:__ `pip install --upgrade nltk gensim bokeh umap-learn` , but only if you're running locally. ``` import itertools import string import numpy as np import umap from nltk.tokenize import WordPunctTokenizer from matplotlib import pyplot as plt from IPython.display import clear_output # download the data: !wget https://www.dropbox.com/s/obaitrix9jyu84r/quora.txt?dl=1 -O ./quora.txt -nc # alternative download link: https://yadi.sk/i/BPQrUu1NaTduEw data = list(open("./quora.txt", encoding="utf-8")) data[50] ``` __Tokenization:__ a typical first step for an nlp task is to split raw data into words. The text we're working with is in raw format: with all the punctuation and smiles attached to some words, so a simple str.split won't do. Let's use __`nltk`__ - a library that handles many nlp tasks like tokenization, stemming or part-of-speech tagging. ``` tokenizer = WordPunctTokenizer() print(tokenizer.tokenize(data[50])) # TASK: lowercase everything and extract tokens with tokenizer. # data_tok should be a list of lists of tokens for each line in data. data_tok = # YOUR CODE HEER ``` Let's peek at the result: ``` ' '.join(data_tok[0]) ``` Small check that everything is alright ``` assert all(isinstance(row, (list, tuple)) for row in data_tok), "please convert each line into a list of tokens (strings)" assert all(all(isinstance(tok, str) for tok in row) for row in data_tok), "please convert each line into a list of tokens (strings)" is_latin = lambda tok: all('a' <= x.lower() <= 'z' for x in tok) assert all(map(lambda l: not is_latin(l) or l.islower(), map(' '.join, data_tok))), "please make sure to lowercase the data" ``` __Word vectors:__ as the saying goes, there's more than one way to train word embeddings. 
There's Word2Vec and GloVe with different objective functions. Then there's fasttext that uses character-level models to train word embeddings. The choice is huge, so let's start someplace small: __gensim__ is another NLP library that features many vector-based models incuding word2vec. ``` from gensim.models import Word2Vec model = Word2Vec(data_tok, size=32, # embedding vector size min_count=5, # consider words that occured at least 5 times window=5).wv # define context as a 5-word window around the target word # now you can get word vectors ! model.get_vector('anything') # or query similar words directly. Go play with it! model.most_similar('bread') ``` ### Using pre-trained model Took it a while, huh? Now imagine training life-sized (100~300D) word embeddings on gigabytes of text: wikipedia articles or twitter posts. Thankfully, nowadays you can get a pre-trained word embedding model in 2 lines of code (no sms required, promise). ``` import gensim.downloader as api model = api.load('glove-twitter-25') model.most_similar(positive=["coder", "money"], negative=["brain"]) ``` ### Visualizing word vectors One way to see if our vectors are any good is to plot them. Thing is, those vectors are in 30D+ space and we humans are more used to 2-3D. Luckily, we machine learners know about __dimensionality reduction__ methods. Let's use that to plot 1000 most frequent words ``` words = sorted(model.vocab.keys(), key=lambda word: model.vocab[word].count, reverse=True)[:1000] print(words[::100]) # for each word, compute it's vector with model word_vectors = # YOUR CODE assert isinstance(word_vectors, np.ndarray) assert word_vectors.shape == (len(words), 25) assert np.isfinite(word_vectors).all() word_vectors.shape ``` #### Linear projection: PCA The simplest linear dimensionality reduction method is __P__rincipial __C__omponent __A__nalysis. In geometric terms, PCA tries to find axes along which most of the variance occurs. The "natural" axes, if you wish. 
<img src="https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/pca_fish.png" style="width:30%"> Under the hood, it attempts to decompose object-feature matrix $X$ into two smaller matrices: $W$ and $\hat W$ minimizing _mean squared error_: $$\|(X W) \hat{W} - X\|^2_2 \to_{W, \hat{W}} \min$$ - $X \in \mathbb{R}^{n \times m}$ - object matrix (**centered**); - $W \in \mathbb{R}^{m \times d}$ - matrix of direct transformation; - $\hat{W} \in \mathbb{R}^{d \times m}$ - matrix of reverse transformation; - $n$ samples, $m$ original dimensions and $d$ target dimensions; ``` from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler pca = PCA(2) scaler = StandardScaler() # map word vectors onto 2d plane with PCA. Use good old sklearn api (fit, transform) # after that, normalize vectors to make sure they have zero mean and unit variance word_vectors_pca = # YOUR CODE # and maybe MORE OF YOUR CODE here :) assert word_vectors_pca.shape == (len(word_vectors), 2), "there must be a 2d vector for each word" assert max(abs(word_vectors_pca.mean(0))) < 1e-5, "points must be zero-centered" assert max(abs(1.0 - word_vectors_pca.std(0))) < 1e-2, "points must have unit variance" ``` #### Let's draw it! 
``` import bokeh.models as bm, bokeh.plotting as pl from bokeh.io import output_notebook output_notebook() def draw_vectors(x, y, radius=10, alpha=0.25, color='blue', width=600, height=400, show=True, **kwargs): """ draws an interactive plot for data points with auxilirary info on hover """ if isinstance(color, str): color = [color] * len(x) data_source = bm.ColumnDataSource({ 'x' : x, 'y' : y, 'color': color, **kwargs }) fig = pl.figure(active_scroll='wheel_zoom', width=width, height=height) fig.scatter('x', 'y', size=radius, color='color', alpha=alpha, source=data_source) fig.add_tools(bm.HoverTool(tooltips=[(key, "@" + key) for key in kwargs.keys()])) if show: pl.show(fig) return fig draw_vectors(word_vectors_pca[:, 0], word_vectors_pca[:, 1], token=words) # hover a mouse over there and see if you can identify the clusters ``` ### Visualizing neighbors with UMAP PCA is nice but it's strictly linear and thus only able to capture coarse high-level structure of the data. If we instead want to focus on keeping neighboring points near, we could use UMAP, which is itself an embedding method. Here you can read __[more on UMAP (ru)](https://habr.com/ru/company/newprolab/blog/350584/)__ and on __[t-SNE](https://distill.pub/2016/misread-tsne/)__, which is also an embedding. ``` embedding = umap.UMAP(n_neighbors=5).fit_transform(word_vectors) # преобразовываем draw_vectors(embedding[:, 0], embedding[:, 1], token=words) # hover a mouse over there and see if you can identify the clusters ``` ### Visualizing phrases Word embeddings can also be used to represent short phrases. The simplest way is to take __an average__ of vectors for all tokens in the phrase with some weights. This trick is useful to identify what data are you working with: find if there are any outliers, clusters or other artefacts. Let's try this new hammer on our data! ``` def get_phrase_embedding(phrase): """ Convert phrase to a vector by aggregating it's word embeddings. See description above. """ # 1. 
# lowercase phrase
``` # compute vector embedding for all lines in data data_vectors = np.vstack([get_phrase_embedding(l) for l in data]) norms = np.linalg.norm(data_vectors, axis=1) printable_set = set(string.printable) data_subset = [x for x in data if set(x).issubset(printable_set)] def find_nearest(query, k=10): """ given text line (query), return k most similar lines from data, sorted from most to least similar similarity should be measured as cosine between query and line embedding vectors hint: it's okay to use global variables: data and data_vectors. see also: np.argpartition, np.argsort """ # YOUR CODE query_vector = get_phrase_embedding(query) dists = data_vectors.dot(query_vector[:, None])[:, 0] / ((norms+1e-16)*np.linalg.norm(query_vector)) nearest_elements = dists.argsort(axis=0)[-k:][::-1] out = [data[i] for i in nearest_elements] return out# <YOUR CODE: top-k lines starting from most similar> results = find_nearest(query="How do i enter the matrix?", k=10) print(''.join(results)) assert len(results) == 10 and isinstance(results[0], str) assert results[0] == 'How do I get to the dark web?\n' # assert results[3] == 'What can I do to save the world?\n' find_nearest(query="How does Trump?", k=10) find_nearest(query="Why don't i ask a question myself?", k=10) from sklearn.cluster import DBSCAN, KMeans kmeans = KMeans(3) labels = kmeans.fit_predict(np.asarray(phrase_vectors)) plt.figure(figsize=(12, 10)) plt.scatter(phrase_vectors_2d[:,0], phrase_vectors_2d[:, 1], c=labels.astype(float)) ``` __Now what?__ * Try running TSNE instead of UMAP (it takes a long time) * Try running UMAP or TSNEon all data, not just 1000 phrases * See what other embeddings are there in the model zoo: `gensim.downloader.info()` * Take a look at [FastText](https://github.com/facebookresearch/fastText) embeddings * Optimize find_nearest with locality-sensitive hashing: use [nearpy](https://github.com/pixelogik/NearPy) or `sklearn.neighbors`.
github_jupyter
``` import numpy as np import cv2 import mediapipe as mp import tensorflow as tf import time mp_drawing = mp.solutions.drawing_utils mp_drawing_styles = mp.solutions.drawing_styles mp_hands = mp.solutions.hands # load model tflite_save_path = 'model/model.tflite' interpreter = tf.lite.Interpreter(model_path=tflite_save_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() def gesture_preprocess(landmark): """ convert landmarks for trainable data 66 features X (21): 0-20 Y (21): 21-41 Z (21): 42-62 X,Y,Z range (3): 63-65 params landmark: mediapipe landmark for 1 hand params label: str return: np.array (1,66) """ lm_x = np.array([]) lm_y = np.array([]) lm_z = np.array([]) for hlm in landmark.landmark: lm_x = np.append(lm_x, hlm.x) lm_y = np.append(lm_y, hlm.y) lm_z = np.append(lm_z, hlm.z) data_gest = [lm_x, lm_y, lm_z] x_rng, y_rng, z_rng = lm_x.max()-lm_x.min(), lm_y.max()-lm_y.min(), lm_z.max()-lm_z.min() data_gest = np.ravel([(k-k.min())/(k.max()-k.min()) for i, k in enumerate(data_gest)]) data_gest = np.append(data_gest, [x_rng, y_rng, z_rng]) return data_gest.astype('float32') def gesture_inference(data): """ inference param data: np.array return: int class """ interpreter.set_tensor(input_details[0]['index'], np.array([data])) interpreter.invoke() tflite_results = interpreter.get_tensor(output_details[0]['index']) inf_class_idx = np.argmax(np.squeeze(tflite_results)) if np.squeeze(tflite_results)[inf_class_idx] < 0.95: return 4 return inf_class_idx # For webcam input: detect_time = time.time() inf_class = {0: 'Hit', 1: 'Stand', 2: 'Split', 3: 'Reset', 4: 'None'} inf_class_idx = 4 cap = cv2.VideoCapture(0) with mp_hands.Hands( max_num_hands=1, model_complexity=1, min_detection_confidence=0.5, min_tracking_confidence=0.5) as hands: while cap.isOpened(): success, image = cap.read() if not success: print("Ignoring empty camera frame.") continue # To improve performance, optionally 
# mark the image as not writeable to
github_jupyter
### Rome2Rio #### Importing packages that are neccessary ``` import pandas as pd from pymongo import MongoClient import requests as req import json from itertools import permutations import random import time import json ``` #### All the utility functions are defined below. * They can connect to any DB , given that the URI string is given (Including authentication). * Return a MongoDB collection given a pymongo database object and a string mentioning the collection name. * Pluck - From an array of objects return a list containing the elements of a certain attribute present in the given array. ``` #Defining utility functions over here # Connect to a database. def connectToDb(uri, dbName): client = MongoClient(uri) db = client[dbName] return db #Returns a specific collection. def getACollection(db, collectionName): return db[collectionName] # Python Implementation of Underscore - Utiltiy Functions def pluck(array, property): return [x[property] for x in array] ``` #### Rome2Rio Main API call. To view the rome2rio search api documentation, [click here](https://www.rome2rio.com/documentation/1-4/search/). When the api was tested out, we were getting 401 authentication error for few api calls. Not sure as to why though. Sometimes responses comes at the first go, sometimes they don't. That is why we have a retry mechanism with a upper retry count of 50 (Too large a limit. Reduce to 10 or less, if you feel the api fetches responses within few tries.) 
``` # Rome2Rio - api call and parsing functions #Get a rome2Rio route and return it as a json def callRome2Rio(oName, dName, oPosLat, oPosLong, dPosLat, dPosLong): orgCo = str(oPosLat)+","+str(oPosLong) destCo = str(dPosLat)+","+str(dPosLong) url = 'http://free.rome2rio.com/api/1.4/json/Search?key=IRlABFW8&oName='+oName+'&dName='+dName+'&oPos='+orgCo+'&dPos='+destCo print("The url is ", url) retryCount = 0 re = req.get(url) data = {} stopProcess = False if re.status_code==200: data = re.json() else: print("Something went wrong. The status we got is ", re.status_code) retryPass = False while retryPass==False and retryCount < 100: retryCount+=1 print("Trying for the ",retryCount," time") re = req.get(url) if(re.status_code==200): retryPass = True data = re.json() if(re.status_code==444): retryPass=True print("Wrong destination name"); data = {} if(re.status_code==429): retryPass=True print("Too-Many requests per hour") stopProcess = True if(re.status_code==402): retryPass=True print("Payment Required") stopProcess = True print("Got data in ",retryCount," retry/retries") return data, stopProcess ``` #### Rome2Rio main parsing function and its corresponding helper functions. ``` #Main Parsing function call. def parse_rome2rio(data, fromCity, toCity,stagingDb): """ This is the main rome2rio parsing functions. Inputs: data -> response from rome2rio which needs to be parsed fromCity -> City object from pyt database whose name is the fromCity to rome2rio api call. toCity -> City object from pyt database whose name is the toCity to rome2rio api call. Outputs: parsedJson -> This is the parsed data notParseblePreferredRouteCount -> This is the count of routes which cannot be parsed due to missing data etc. """ routes = data["routes"] vehicles = data["vehicles"] places = data["places"] airlines = data["airlines"] #Zeroth index is always the preferred route w.r.t combination of distance, time and price. Store the rest as alternative routes. 
routeFormed = False indexToLook = 0 preferredRoute = {} notParseblePreferredRouteCount = 0 totalDurationToNotExceed = int(routes[0]["totalDuration"]) * 1.5 while routeFormed!=True: if routes[indexToLook]["totalDuration"] < totalDurationToNotExceed: preferredRoute, routeFormed = formRoute(routes[indexToLook], vehicles, places, airlines, fromCity, toCity) if routeFormed!=True: if indexToLook < len(routes): indexToLook+=1 notParseblePreferredRouteCount+=1 else: print("No route has an indicative price for ", fromCity["name"], " and ", toCity["name"], "route") notParseblePreferredRouteCount+=1 else: #Try to get the preferredRoute from city connection database. preferredRoute, routeFormed = getExistingCityConnection(fromCity, toCity, stagingDb) if routeFormed!=True: print("We can't form a viable route for", fromCity["name"], " and ", toCity["name"]) routeFormed=True alternateRoutes = [] for route in routes[1:]: routeJson, routeFormed = formRoute(route, vehicles, places, airlines, fromCity, toCity) alternateRoutes.append(routeJson) parsedJson = { "fromCity": fromCity["planningid"], "toCity": toCity["planningid"], "preferredRoute": preferredRoute, "alternateRoutes": alternateRoutes, "timestamp": time.time() } # Need to compute cost function for each preferred route return parsedJson, notParseblePreferredRouteCount #--------------------------------------------------------------------------------------------------------------------# ## Forming route def formRoute(route, vehicles, places, airlines, fromCity, toCity): """ This a part of parsing function where i take an individual route option and form my desired JSON structure. Inputs: route - Route for which i need to parse into my desired structure. (This can be a preferred route or an alternate route. Both have same structure) vehicles - Array that contains all vehicle types places - Array that contains all places within the fromCity and toCity that a route can traverse. 
This is for all routes airlines - Array that contains all airlines that ply within the 2 cities. Outputs: routeJson - My desired JSON Structure routeFormed - Boolean that returns True if desired structure is formed and False if not. """ routeFormed = False allSegments = route["segments"] try: routePrice = getPrice(route["indicativePrices"], route["name"]) except KeyError: routeFormed = False return {}, routeFormed preferredMode,flights, trains, bus, car, transfers = parseSegment(route["name"], allSegments, vehicles, places, airlines) routeJson = { "title": route["name"], "fromCity": fromCity["planningid"], "toCity": toCity["planningid"], "totalDuration": route["totalDuration"], "transitDuration": route["totalTransitDuration"], "transferDuration": route["totalTransferDuration"], "allPrice": route["indicativePrices"], "price": routePrice, "currencyCode": route["indicativePrices"][0]["currency"], "preferredMode": list(set(preferredMode)), "flights": flights, "trains": trains, "bus": bus, "car": car, "transfers": transfers } routeFormed = True return routeJson, routeFormed #---------------------------------------------------------------------------------------------------------------------# #Parsing Segment. def parseSegment(routeName, allSegments, vehicles, places, airlines): """ Given all segments within a route, parse it into my desired JSON Structure. Inputs: routeName -> String, that tells me as to the nature of mode of transport (Fly to some place, Train, rideshare etc..) allSegments -> Segment array that i need to parse vehicles - Array that contains all vehicle types places - Array that contains all places within the fromCity and toCity that a route can traverse. This is for all routes airlines - Array that contains all airlines that ply within the 2 cities. 
Outputs: preferredMode: contains an array of preferred mode of travel (that covers majority of the distance) flights: contains an array of flights in my desired format present within the route, empty if there isn't trains: contains an array of trains in my desired format present within the route, empty if there isn't. bus: contains an array of bus in my desired format present within the route, empty if there isn't. car: contains an array of flights in my desired format present within the route, empty if there isn't. transfers: contains an array of transfers in my desired format present within the route, empty if there isn't(Transfers cover for small distances and it can be in BUS, CAR or TRAIN) """ flights= [] trains=[] bus=[] cars=[] transfers=[] preferredMode=[] isAirSegment=False car_types = ["rideshare", "car", "shuttle", "taxi", "towncar"] for segment in allSegments: depPlaceKeys = list(places[segment["depPlace"]]) arrPlaceKeys = list(places[segment["arrPlace"]]) segmentKeys = list(segment.keys()) if segment["segmentKind"] == "air": #This has flight data. 
preferredMode.append("flight") for flightOption in segment["outbound"]: assert places[segment["arrPlace"]]["kind"]=="airport" flight = { "vehicleType": "FLIGHT", "depCountryCode": places[segment["depPlace"]]["countryCode"], "arrCountryCode": places[segment["arrPlace"]]["countryCode"], "noOfStops": len(flightOption["hops"])-1, "operatingDays": flightOption["operatingDays"], "indicativePrice": flightOption["indicativePrices"][0]["price"], "indicativeMaxPrice": flightOption["indicativePrices"][0]["priceHigh"], "indicativeMinPrice": flightOption["indicativePrices"][0]["priceLow"], "currencyCode": flightOption["indicativePrices"][0]["currency"], "distance": segment["distance"], "transitDuration": segment["transitDuration"], "transferDuration": segment["transferDuration"], "totalDuration": segment["transitDuration"] + segment["transferDuration"] } if "code" in depPlaceKeys: flight["depAirportCode"] = places[segment["depPlace"]]["code"] if "code" in arrPlaceKeys: flight["arrAirportCode"] = places[segment["arrPlace"]]["code"] flights.append(flight) else: #This includes surface data (either train, bus, car. 
also check for comma as it indicates multiple modes of transport) if vehicles[segment["vehicle"]]["kind"]=="bus": busSegment = { "vehicleType": "BUS", "depPlaceCountryCode": places[segment["depPlace"]]["countryCode"], "depPlaceTitle": places[segment["depPlace"]]["shortName"], "arrPlaceCountryCode": places[segment["arrPlace"]]["countryCode"], "arrPlaceTitle": places[segment["arrPlace"]]["shortName"], "distance": segment["distance"], "transitDuration": segment["transitDuration"], "transferDuration": segment["transferDuration"], "totalDuration": segment["transitDuration"] + segment["transferDuration"] } if "code" in depPlaceKeys: busSegment["depPlaceCode"] = places[segment["depPlace"]]["code"] if "code" in arrPlaceKeys: busSegment["arrPlaceCode"] = places[segment["arrPlace"]]["code"] if "indicativePrices" in segmentKeys: busSegment["allPrices"] = segment["indicativePrices"] busSegment["indicativePrice"] = segment["indicativePrices"][0]["price"] busSegment["currencyCode"] = segment["indicativePrices"][0]["currency"] #It can be a primary mode of transport or transfer. 
if "bus" in routeName.lower(): preferredMode.append("bus") bus.append(busSegment) else: transfers.append(busSegment) if vehicles[segment["vehicle"]]["kind"] == "train": trainSegment = { "vehicleType": "TRAIN", "depPlaceCountryCode": places[segment["depPlace"]]["countryCode"], "depPlaceTitle": places[segment["depPlace"]]["shortName"], "arrPlaceCountryCode": places[segment["arrPlace"]]["countryCode"], "arrPlaceTitle": places[segment["arrPlace"]]["shortName"], "distance": segment["distance"], "transitDuration": segment["transitDuration"], "transferDuration": segment["transferDuration"], "totalDuration": segment["transitDuration"] + segment["transferDuration"] } if vehicles[segment["vehicle"]]["name"]!="RER" and "indicativePrices" in segmentKeys: if "priceHigh" in list(segment["indicativePrices"][0].keys()): trainSegment["indicativeMaxPrice"] = segment["indicativePrices"][0]["priceHigh"] if "priceLow" in list(segment["indicativePrices"][0].keys()): trainSegment["indicativeMinPrice"] = segment["indicativePrices"][0]["priceLow"] if "code" in depPlaceKeys: trainSegment["depPlaceCode"] = places[segment["depPlace"]]["code"] if "code" in arrPlaceKeys: trainSegment["arrPlaceCode"] = places[segment["arrPlace"]]["code"] if "indicativePrices" in segmentKeys: trainSegment["allPrices"] = segment["indicativePrices"] trainSegment["indicativePrice"] = segment["indicativePrices"][0]["price"] trainSegment["currencyCode"] = segment["indicativePrices"][0]["currency"] if "train" in routeName.lower(): preferredMode.append("train") trains.append(trainSegment) else: transfers.append(trainSegment) if vehicles[segment["vehicle"]]["kind"] == "car": carSegment = { "vehicleType": "CAR", "depPlaceTitle": places[segment["depPlace"]]["shortName"], "arrPlaceTitle": places[segment["arrPlace"]]["shortName"], "distance": segment["distance"], "transitDuration": segment["transitDuration"], "transferDuration": segment["transferDuration"], "totalDuration": segment["transitDuration"] + 
segment["transferDuration"] } if "regionCode" in depPlaceKeys: carSegment["depPlaceCode"] = places[segment["depPlace"]]["regionCode"] if "regionCode" in arrPlaceKeys: carSegment["arrPlaceCode"] = places[segment["arrPlace"]]["regionCode"] if "indicativePrices" in segmentKeys: carSegment["allPrices"] = segment["indicativePrices"] carSegment["currencyCode"] = segment["indicativePrices"][0]["currency"] if "countryCode" in depPlaceKeys: carSegment["depPlaceCountryCode"] = places[segment["depPlace"]]["countryCode"] if "countryCode" in arrPlaceKeys: carSegment["arrPlaceCountryCode"] = places[segment["arrPlace"]]["countryCode"] if "drive" in routeName.lower(): preferredMode.append("car") cars.append(carSegment) else: transfers.append(carSegment) return preferredMode, flights, trains, bus, cars, transfers #-------------------------------------------------------------------------------------------------------------------# #Parsing price def getPrice(indicativePrice, routeName): """ Returns price object if there is a median, max and min price available , else returns indicativePrice as it is. There is no max or min price if the mode of transport is one of the car_types. Inputs: indicativePrice -> Array that contains an indicative price for the route. routeName -> Used to figure out the mode of transport. Output: A curated JSON containing the indicative price or the input indicativePrice array. 
""" car_types = ["rideshare", "car", "shuttle", "taxi", "towncar", "drive"] if routeName.lower() in car_types: return indicativePrice else: return { "indicativeMedianPrice": indicativePrice[0]["price"], "indicativeMaxPrice": indicativePrice[0]["priceHigh"], "indicativeMinPrice": indicativePrice[0]["priceLow"], "currencyCode": indicativePrice[0]["currency"] } def getExistingCityConnection(fromCity, toCity, db): cityConn = getACollection(db, 'city_connection') connection = cityConn.find_one({"fromCity": fromCity["planningid"], "toCity": toCity["planningid"]}) print(connection) if connection != None and "_id" in list(connection.keys()): return connection, True else: return {}, False def getAllEuropeanCities(db): """Returns all European cities present in the database.""" region = getACollection(db, 'searchregion') europeanCountries = region.find_one({"regionCode": "eur"}, {"countryIds": 1})["countryIds"] country = getACollection(db, 'country') europeanCountriesData = country.find({"countryId": {"$in": europeanCountries}}) countryCodes = [] for country in europeanCountriesData: countryCodes.append(country["countryCode"]) city = getACollection(db, 'city') europeanCities = city.find({"countryCode": {"$in": countryCodes}}) return europeanCities local = connectToDb("mongodb://oceanjar:wwmib3112@localhost:27017/localDb?authMechanism=SCRAM-SHA-1", "localDb") ``` ## Rome2Rio Execution Starts here ``` db=connectToDb("mongodb://oceanjardb:oceanjardbwwmib3112#@35.154.159.75:27017/oceanjar?authMechanism=MONGODB-CR", "oceanjar") europeanCities = getAllEuropeanCities(db) europeanCitiesMap = {} routeNotPresentCities = [] for city in europeanCities: europeanCitiesMap[city["planningid"]] = city isRoutePresent = checkIfRoutePresent(city, db) if isRoutePresent!=True: routeNotPresentCities.append(city) defaultResponseTemplates=[] for city1 in routeNotPresentCities and len(routeNotPresentCities) > 0: for city2 in list(europeanCitiesMap.keys()): if city1!=city2: responseTemplate1 = { 
"fromCity": europeanCitiesMap[city1], "toCity": europeanCitiesMap[city2], "response": {} } responseTemplate2 = { "fromCity": europeanCitiesMap[city2], "toCity": europeanCitiesMap[city1], "response": {} } defaultResponseTemplates.append(responseTemplate1) defaultResponseTemplates.append(responseTemplate2) print("length", len(defaultResponseTemplates)) #write_to_db(db, defaultResponseTemplates) def checkIfRoutePresent(city, db): rome2rio = getACollection(db, 'rome2rioResponses') route = rome2rio.find_one({"fromCity.planningid": city["planningid"]}) if route!=None and "routes" in "routes" not in list(route["response"].keys()): return True else: return False #Testing out for 10 cities. sample_cities = [] all_keys = list(europeanCitiesMap.keys()) for i in range(1,11): sample_cities.append(random.choice(all_keys)) def write_to_db(db, arr): r2r = getACollection(db, 'rome2rioResponses') result = r2r.insert_many(arr) try: assert len(result.inserted_ids) == len(arr) except AssertionError: print("There is a mis-match in the number of documents inserted", len(result.inserted_ids), len(arr)) return None total_count = 0 start_time = time.time() total_api_call_time = 0 parsedResponses = [] for city1 in list(europeanCitiesMap.keys())[0:5]: for city2 in list(europeanCitiesMap.keys())[0:5]: if city1!=city2: total_count+=1 originCity = europeanCitiesMap[city1] destCity = europeanCitiesMap[city2] apiStTime = time.time() r2rResponse = callRome2Rio(originCity["name"], destCity["name"], originCity["latitude"], originCity["longitude"], destCity["latitude"], destCity["longitude"]) r2rResponse["fromCityId"] = originCity["planningid"] r2rResponse["toCityId"] = destCity["planningid"] apiTime = time.time() - apiStTime total_api_call_time+=apiTime parsedResponses.append(r2rResponse) time.sleep(2) elapsed_time = time.time() - start_time print("----Total Count is ", total_count, "it is completed in ", elapsed_time ," seconds") print("-------Writing to database------------") write_to_db(local, 
parsedResponses) getExistingCityConnection({"planningid": 4}, {"planningid": 14}, db) setResponse = [] for resp in parsedResponses: resp["preferredRoute"]["preferredMode"] = list(set(resp["preferredRoute"]["preferredMode"])) for alt in resp["alternateRoutes"]: alt["preferredMode"] = list(set(alt["preferredMode"])) setResponse.append(resp) print("API time", total_api_call_time, "Parsing time ", total_parsing_time) print() # Problems to be solved # 1. Identify whether a train/bus/car segment is a transfer if the preferred route is also the same. # 2. How to reduce API time. -> (parallelization is not supported by rome2rio. Do we need to explore more on this option ?) # 3. Sho defaultResponseTemplates=[] count=0 for city1 in list(europeanCitiesMap.keys()): for city2 in list(europeanCitiesMap.keys()): if city1!=city2: count+=1 responseTemplate = { "fromCity": europeanCitiesMap[city1], "toCity": europeanCitiesMap[city2], "response": {} } defaultResponseTemplates.append(responseTemplate) print(len(defaultResponseTemplates)) write_to_db(local, defaultResponseTemplates) print(len(list(europeanCitiesMap.keys()))) for i in range(1, 10): print(i) if i==4: break import logging r2r = getACollection(local, 'rome2rioResponses') r2rAll = r2r.find() data = [] for r in r2rAll: data.append(r) write_to_db(db, data) ```
github_jupyter
# Regression Week 1: Simple Linear Regression In this notebook we will use data on house sales in King County to predict house prices using simple (one input) linear regression. You will: * Use Turi Create SArray and SFrame functions to compute important summary statistics (instead using Pandas and sklearn) * Write a function to compute the Simple Linear Regression weights using the closed form solution * Write a function to make predictions of the output given the input feature * Turn the regression around to predict the input given the output * Compare two different models for predicting house prices ### Importing the necessary libraries ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split ``` # Load house sales data Dataset is from house sales in King County, the region where the city of Seattle, WA is located. ``` dtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int} sales = pd.read_csv('kc_house_data.csv', dtype=dtype_dict, index_col=0) sales.head() sales.dtypes ``` Relation between living area of house and it's price ``` fig, ax = plt.subplots(figsize=(9, 7)) sns.scatterplot(ax = ax, data=sales, x='sqft_living', y='price', hue='floors') ``` # Split data into training and testing We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let Turi Create pick a random seed for you). 
``` # normally # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # but for the quiz I will be using the training and testing data provided train_data = pd.read_csv('kc_house_train_data.csv', dtype=dtype_dict, index_col=0) train_data.head() test_data = pd.read_csv('kc_house_test_data.csv', dtype=dtype_dict, index_col=0) test_data.head() ``` # Useful SFrame summary functions In order to make use of the closed form solution as well as take advantage of turi create's built in functions we will review some important ones. In particular: * Computing the sum of an SArray * Computing the arithmetic average (mean) of an SArray * multiplying SArrays by constants * multiplying SArrays by other SArrays (*) I will be using pandas instead and not turi create as mentioned in the course ``` # Let's compute the mean of the House Prices in King County in 2 different ways. prices = sales['price'] # extract the price column of the sales SFrame -- this is now a (pd.Series) # recall that the arithmetic average (the mean) is the sum of the prices divided by the total number of houses: sum_prices = prices.sum() num_houses = len(prices) avg_price_1 = sum_prices/num_houses avg_price_2 = prices.mean() # if you just want the average, the .mean() function print("average price via method 1: " + str(avg_price_1)) print("average price via method 2: " + str(avg_price_2)) ``` As we see we get the same answer both ways ``` # or sales['price'].mean() # Let's compute the sum of squares of price prices_squared = prices*prices sum_prices_squared = prices_squared.sum() print(f"the sum of price squared is: {sum_prices_squared}") ``` Aside: The python notation x.xxe+yy means x.xx \* 10^(yy). 
e.g 100 = 10^2 = 1*10^2 = 1e2 # Build a generic simple linear regression function Complete the following function (or write your own) to compute the simple linear regression slope and intercept: numerator = (mean of X * Y) - (mean of X)*(mean of Y) denominator = (mean of X^2) - (mean of X)*(mean of X) intercept = (mean of Y) - slope * (mean of X) ``` def simple_linear_regression(input_feature, output): x_mean = input_feature.mean() y_mean = output.mean() x_y_mean = (input_feature * output).mean() x_square_mean = np.square(input_feature).mean() slope = (x_y_mean - (x_mean * y_mean)) / (x_square_mean - (x_mean * x_mean)) intercept = y_mean - (slope*x_mean) return (intercept, slope) ``` We can test that our function works by passing it something where we know the answer. In particular we can generate a feature and then put the output exactly on a line: output = 1 + 1\*input_feature then we know both our slope and intercept should be 1 ``` test_feature = np.arange(5) test_output = 1 + 1*test_feature (test_intercept, test_slope) = simple_linear_regression(test_feature, test_output) print(f'Intercept: {test_intercept}') print(f'Slope: {test_slope}') ``` Now that we know it works let's build a regression model for predicting price based on sqft_living. Rembember that we train on train_data! ``` sqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'], train_data['price']) print(f"Intercept: {sqft_intercept}") print(f"Slope: {sqft_slope}") ``` # Predicting Values Now that we have the model parameters: intercept & slope we can make predictions. Using SArrays it's easy to multiply an SArray by a constant and add a constant value. 
Complete the following function to return the predicted output given the input_feature, slope and intercept: ``` def get_regression_predictions(input_feature, intercept, slope): # calculate the predicted values: predicted_values = slope*input_feature + intercept return predicted_values ``` Now that we can calculate a prediction given the slope and intercept let's make a prediction. Use (or alter) the following to find out the estimated price for a house with 2650 squarefeet according to the squarefeet model we estiamted above. **Quiz Question: Using your Slope and Intercept from (4), What is the predicted price for a house with 2650 sqft?** ``` my_house_sqft = 2650 estimated_price = get_regression_predictions(my_house_sqft, sqft_intercept, sqft_slope) print(f"The estimated price for a house with {my_house_sqft} squarefeet is {estimated_price:.2f}") ``` # Residual Sum of Squares Now that we have a model and can make predictions let's evaluate our model using Residual Sum of Squares (RSS). Recall that RSS is the sum of the squares of the residuals and the residuals is just a fancy word for the difference between the predicted output and the true output. Complete the following (or write your own) function to compute the RSS of a simple linear regression model given the input_feature, output, intercept and slope: ``` def get_residual_sum_of_squares(input_feature, output, intercept, slope): predictions = get_regression_predictions(input_feature, intercept, slope) diff = output - predictions squared = diff * diff RSS = squared.sum() return(RSS) ``` Let's test our get_residual_sum_of_squares function by applying it to the test model where the data lie exactly on a line. Since they lie exactly on a line the residual sum of squares should be zero! ``` print(get_residual_sum_of_squares(test_feature, test_output, test_intercept, test_slope))# should be 0.0 ``` Now use your function to calculate the RSS on training data from the squarefeet model calculated above. 
**Quiz Question: According to this function and the slope and intercept from the squarefeet model What is the RSS for the simple linear regression using squarefeet to predict prices on TRAINING data?** ``` rss_prices_on_sqft = get_residual_sum_of_squares(train_data['sqft_living'], train_data['price'], sqft_intercept, sqft_slope) print(f'The RSS of predicting Prices based on Square Feet is: {rss_prices_on_sqft}') ``` # Predict the squarefeet given price What if we want to predict the squarefoot given the price? Since we have an equation y = a + b\*x we can solve the function for x. So that if we have the intercept (a) and the slope (b) and the price (y) we can solve for the estimated squarefeet (x). Complete the following function to compute the inverse regression estimate, i.e. predict the input_feature given the output. ``` def inverse_regression_predictions(output, intercept, slope): estimated_feature = (output - intercept) / slope return estimated_feature ``` Now that we have a function to compute the squarefeet given the price from our simple regression model let's see how big we might expect a house that costs $800,000 to be. **Quiz Question: According to this function and the regression slope and intercept from (3) what is the estimated square-feet for a house costing $800,000?** ``` my_house_price = 800000 estimated_squarefeet = inverse_regression_predictions(my_house_price, sqft_intercept, sqft_slope) print(f"The estimated squarefeet for a house worth {my_house_price:.2f} is {estimated_squarefeet}") ``` # New Model: estimate prices from bedrooms We have made one model for predicting house prices using squarefeet, but there are many other features in the sales Data. Use your simple linear regression function to estimate the regression parameters from predicting Prices based on number of bedrooms. Use the training data! 
``` # Estimate the slope and intercept for predicting 'price' based on 'bedrooms' beds_intercept, beds_slope = simple_linear_regression(train_data['bedrooms'], train_data['price']) print(f'Slope: {beds_slope}, intercept: {beds_intercept}') ``` # Test your Linear Regression Algorithm Now we have two models for predicting the price of a house. How do we know which one is better? Calculate the RSS on the TEST data (remember this data wasn't involved in learning the model). Compute the RSS from predicting prices using bedrooms and from predicting prices using squarefeet. **Quiz Question: Which model (square feet or bedrooms) has lowest RSS on TEST data? Think about why this might be the case.** ``` # Compute RSS when using bedrooms on TEST data: rss_prices_on_beds = get_residual_sum_of_squares(test_data['bedrooms'], test_data['price'], beds_intercept, beds_slope) print(f'The RSS of predicting Prices based on Square Feet is: {rss_prices_on_beds}') # Compute RSS when using squarefeet on TEST data: rss_prices_on_sqft = get_residual_sum_of_squares(test_data['sqft_living'], test_data['price'], sqft_intercept, sqft_slope) print(f'The RSS of predicting Prices based on Square Feet is: {rss_prices_on_sqft}') rss_prices_on_beds > rss_prices_on_sqft rss_prices_on_sqft / test_data.shape[0] from sklearn.metrics import mean_squared_error mean_squared_error(test_data['price'], get_regression_predictions(test_data['sqft_living'], sqft_intercept, sqft_slope)) ``` ## Comparing with sklearn ``` simple_model = LinearRegression() simple_model.fit(train_data[['sqft_living']], train_data['price']) preds = simple_model.predict(test_data[['sqft_living']]) mean_squared_error(test_data['price'], preds) ```
github_jupyter
```
# Neural network for binary classification.
# Builds the data set and saves it to "meu_data_set.h5".
import numpy as np
import matplotlib.pyplot as plt
import h5py

s_p=30  # number of points (features) each input example has
s_d=80  # number of examples of each class in my training set
s_t=10  # number of examples of each class for testing
p_r = 0.7  # fraction of noise mixed into the cosines
t=np.linspace(0,8*np.pi,s_p)

# training data: first half = attenuated cosines plus noise (class 1),
# second half = pure noise (class 0)
x=np.zeros([2*s_d,s_p])
x[0:s_d,0:s_p]=(1-p_r)*np.ones([s_d,1])*np.cos(t)
x[0:s_d,0:s_p]=x[0:s_d,0:s_p]+p_r*np.random.normal(0, 0.8, [s_d,s_p])
x[s_d:2*s_d,0:s_p]=np.random.normal(0, 0.7, [s_d,s_p])
y=np.zeros([2*s_d,1])
y[0:s_d]=np.ones([s_d,1])

# test data, generated the same way
x_t=np.zeros([2*s_t,s_p])
x_t[0:s_t,0:s_p]=(1-p_r)*np.ones([s_t,1])*np.cos(t)
x_t[0:s_t,0:s_p]=x_t[0:s_t,0:s_p]+p_r*np.random.normal(0, 0.8, [s_t,s_p])
x_t[s_t:2*s_t,0:s_p]=np.random.normal(0, 0.7, [s_t,s_p])
y_t=np.zeros([2*s_t,1]);y_t[0:s_t]=np.ones([s_t,1])

# show a few training examples
plt.figure()
for nn in range(0,3):
    plt.subplot(1,3,nn+1)
    plt.plot(t,x[nn,:],'b.-',label='cos+rand')
    plt.plot(t,x[s_d+nn,:],'r.-',label='rand')
    plt.legend(loc='upper center')
plt.tight_layout()

# save the data set
with h5py.File('meu_data_set.h5', 'w') as hf:
    hf.create_dataset("tempo", data=t)
    hf.create_dataset("xtreinamento", data=x)
    hf.create_dataset("ytreinamento", data=y)
    hf.create_dataset("xteste", data=x_t)
    hf.create_dataset("yteste", data=y_t)
    hf.create_dataset("data_info",data=[s_p,s_d,s_t])
print('xtreinamento=',x.shape)

# load the data set back from "meu_data_set.h5" (version with a PSD option
# appears further below)
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import h5py
with h5py.File('meu_data_set.h5', 'r') as hf:
    print('dados do arquivo: ',list(hf.keys()))
    [s_p,s_d,s_t]=hf['data_info'][:]
    y_train = hf['ytreinamento'][:]
    y_test = hf['yteste'][:]
    x_train = hf['xtreinamento'][:]
    x_test = hf['xteste'][:]
print('numero de exemplos de treinamento:',2*s_d)
print('numero de exemplos de teste:',2*s_t)
#https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/ #cria e treina a rede neural # define a rede neural "keras model" model=tf.keras.Sequential(name='rede_IF_02') model.add(tf.keras.layers.Dense(12, input_dim=s_p, activation='relu')) model.add(tf.keras.layers.Dense(8, activation='relu')) model.add(tf.keras.layers.Dense(1, activation='sigmoid')) # compila a rede neural opt = tf.keras.optimizers.Adam(learning_rate=0.05); model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy']) print(model.summary()) # treina a rede neural com o data set history =model.fit(x_train, y_train,batch_size=2*s_d, epochs=100,verbose=0) # mostra o loss e a acuracia durante o treinamento plt.figure() plt.subplot(2,1,1) plt.plot(history.history['loss']) plt.ylabel('loss');plt.xlabel('epoch') plt.legend(['Loss'], loc='upper right') plt.subplot(2,1,2) plt.plot(history.history['accuracy']) plt.ylabel('acurácia');plt.xlabel('epoch') plt.legend(['acurácia'], loc='lower right') plt.show() #faz previsões com a rede treinada y_pred=model.predict(x_test) # calcula a accurácia do teste _, accuracy = model.evaluate(x_test, y_test) print('Accuracy: %.2f' % (accuracy*100)) #mostra os resultados esperados e os alcançados lado a lado print('data pred =',np.concatenate((y_test, np.around(y_pred)),axis=1)) # faz o gráfico do erro de previsão plt.figure() plt.plot(y_test-np.around(y_pred)) plt.title('erro de previsão: $y-y_{previsto}$') plt.show() ``` ``` var_acc=history.history['accuracy'] for n in range(0,100): if var_acc[n]>0.97: break print('n= ',n) print('acuracia(n)= ',var_acc[n]) #plota algumas curvas dos dados de treinamento plt.rcParams.update({'font.size': 12}) plt.figure() plt.plot(t,x_train[nn,:],'b.-',label='cos+rand') plt.plot(t,x_train[s_d+nn,:],'r.-',label='rand') plt.legend(loc='upper left') plt.xlabel('tempo'),plt.ylabel('valor da função') # carrega do dataset de "meu_data_set.h5" com opção de psd import numpy as np import matplotlib.pyplot as 
plt import tensorflow as tf import h5py from scipy import signal #q_dado='sinal' q_dado='psd' with h5py.File('meu_data_set.h5', 'r') as hf: print('dados do arquivo: ',list(hf.keys())) y_train = hf['ytreinamento'][:] y_test = hf['yteste'][:] [s_p,s_d,s_t]=hf['data_info'][:] if (q_dado=='psd'): _,x_train=signal.welch(hf['xtreinamento'][:],fs=s_p/4,nperseg=s_p) _,x_test=signal.welch(hf['xteste'][:],fs=s_p/4,nperseg=s_p) s_p=16 else: x_train = hf['xtreinamento'][:] x_test = hf['xteste'][:] print('x_train=',x_train.shape) print('numero de exemplos de treinamento:',2*s_d) print('numero de exemplos de teste:',2*s_t) ```
github_jupyter
# Rule Scorer Example The Rule Scorer is used to generate scores for a set of rules based on a labelled dataset. ## Requirements To run, you'll need the following: * A rule set (specifically the binary columns of the rules as applied to a dataset). * The binary target column associated with the above dataset. ---- ## Import packages ``` from iguanas.rule_scoring import RuleScorer, PerformanceScorer, ConstantScaler from iguanas.metrics.classification import Precision import pandas as pd ``` ## Read in data Let's read in some dummy rules (stored as binary columns) and the target column. ``` X_rules_train = pd.read_csv( 'dummy_data/X_rules_train.csv', index_col='eid' ) y_train = pd.read_csv( 'dummy_data/y_train.csv', index_col='eid' ).squeeze() X_rules_test = pd.read_csv( 'dummy_data/X_rules_test.csv', index_col='eid' ) y_test = pd.read_csv( 'dummy_data//y_test.csv', index_col='eid' ).squeeze() ``` ---- ## Generate scores ### Set up class parameters Now we can set our class parameters for the Rule Scorer. Here we pass an instantiated scoring class (which generates the raw scores) and an instantiated scaling class (which scales the scores to be more readable - **this is optional**). The scoring classes are located in the `rule_scoring_methods` module; the scaling classes are located in the `rule_score_scalers` module. **See the class docstrings for more information on each type of scoring/scaling class.** In this example, we'll use the `PerformanceScorer` class for scoring the rules (based on the precision score) and the `ConstantScaler` class for scaling. 
**Note that we're using the *Precision* class from the *metrics.classification* module rather than Sklearn's *precision_score* function, as the former is ~100 times faster on larger datasets.** **Please see the class docstring for more information on each parameter.** ``` precision_score = Precision() params = { 'scoring_class': PerformanceScorer(metric=precision_score.fit), 'scaling_class': ConstantScaler(limit=-100) } ``` ### Instantiate class and run fit method Once the parameters have been set, we can run the `fit` method to generate scores. ``` rs = RuleScorer(**params) rs.fit( X_rules=X_rules_train, y=y_train ) ``` ### Outputs The `fit` method does not return anything. See the `Attributes` section in the class docstring for a description of each attribute generated: ``` rs.rule_scores.head() ``` ---- ## Apply rules to a separate dataset Use the `transform` method to apply the generated rules to another dataset. ``` X_scores_test = rs.transform(X_rules=X_rules_test) ``` ### Outputs The `transform` method returns a dataframe giving the scores of the rules as applied to the dataset. ``` X_scores_test.head() ``` ---- ## Generate rule score and apply them to the training set (in one step) You can also use the `fit_transform` method to generate scores and apply them to the training set. ``` X_scores_train = rs.fit_transform( X_rules=X_rules_train, y=y_train ) ``` ### Outputs The `transform` method returns a dataframe giving the scores of the rules as applied to the dataset. See the `Attributes` section in the class docstring for a description of each attribute generated: ``` rs.rule_scores.head() X_scores_train.head() ``` ----
github_jupyter
``` import warnings warnings.simplefilter(action='ignore') import pandas as pd import numpy as np import matplotlib import statsmodels.api as sm from matplotlib import pyplot as plt from datetime import datetime from statsmodels.tsa.seasonal import seasonal_decompose from statsmodels.tsa.stattools import adfuller, acf, pacf from statsmodels.tsa.statespace.sarimax import SARIMAX from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split df = pd.read_csv('/opt/anaconda3/futuraplan.csv',index_col=0) df.set_index([pd.to_datetime(df.index)], inplace=True) guests = df['GUESTS'] vol = df['VOL'] check_av = df['AVCHECK'] #Split dataset 80/20 as train and test sets train, test = train_test_split(df,shuffle=False, test_size=0.2) #seasonal trends in data metrics res1 = sm.tsa.seasonal_decompose(guests.interpolate(), model='additive', freq=12) res2 = sm.tsa.seasonal_decompose(vol.interpolate(), model='additive', freq=12) res3 = sm.tsa.seasonal_decompose(check_av.interpolate(), model='additive', freq=12) def plotseasonal(res, axes ): res.observed.plot(ax=axes[0], legend=False) axes[0].set_ylabel('Observed') res.trend.plot(ax=axes[1], legend=False) axes[1].set_ylabel('Trend') res.seasonal.plot(ax=axes[2], legend=False) axes[2].set_ylabel('Seasonal') fig, axes = plt.subplots(ncols=3, nrows=3, sharex=True, figsize=(12,5)) axes[0,0].set_title("Guests") axes[0,1].set_title("Volume") axes[0,2].set_title("Average check") plotseasonal(res1, axes[:,0]) plotseasonal(res2, axes[:,1]) plotseasonal(res3, axes[:,2]) fig.tight_layout() plt.show() #for checking results #print(test_V) #df.head() #PACF-ACF for descision about model parameters for SARIMAX def df_test(ts): result = adfuller(ts) print('ADF Statistic: %f' % result[0]) print('p-value: %f' % result[1]) print('Critical Values:') for key, value in result[4].items(): print('\t%s: %.3f' % (key, value)) df_test(vol) lag_pacf = pacf(vol, nlags=10) #Plot PACF: plt.plot(lag_pacf, 'ok-') 
plt.axhline(y=0,linestyle='--',color='gray') plt.axhline(y=-1.96/np.sqrt(len(vol)),linestyle='--',color='gray') plt.axhline(y=1.96/np.sqrt(len(vol)),linestyle='--',color='gray') plt.title('Partial Autocorrelation Function'); lag_acf = acf(vol, nlags=40) #Plot ACF: plt.plot(lag_acf, 'ok-') plt.axhline(y=0,linestyle='--',color='gray') plt.axhline(y=-1.96/np.sqrt(len(vol)),linestyle='--',color='gray') plt.axhline(y=1.96/np.sqrt(len(vol)),linestyle='--',color='gray') plt.title('Autocorrelation Function'); model1 = SARIMAX(guests, order=(1,1,1), seasonal_order=(1,0,1,4)) model2 = SARIMAX(vol, order=(1,1,1), seasonal_order=(1,0,1,4)) model3 = SARIMAX(check_av, order=(1,1,1), seasonal_order=(1,0,0,12)) sarima_res1 = model1.fit(disp=True) sarima_res2 = model2.fit(disp=True) sarima_res3 = model3.fit(disp=True) df['FORECAST_GUESTS']= sarima_res1.predict(start="2021-07-31", end="2022-11-30", dynamic=True) df['FORECAST_VOLUME']= sarima_res2.predict(start="2021-07-31", end="2022-11-30", dynamic=True) df['FORECAST_AVCHECK']= sarima_res3.predict(start="2021-07-31", end="2022-11-30", dynamic=True) df[['VOL','FORECAST_VOLUME']].plot() ```
github_jupyter
``` from transformers import BertModel, BertTokenizer from utils import devdf_generator import pandas as pd import torch import vsm import os %load_ext autoreload %autoreload 2 VSM_HOME = os.path.join('data', 'vsmdata') DATA_HOME = os.path.join('data', 'wordrelatedness') def evaluate_pooled_bert(rel_df, layer, pool_func): if torch.cuda.is_available(): device = torch.device('cuda') else: device = torch.device('cpu') bert_weights_name = 'bert-base-uncased' # Initialize a BERT tokenizer and BERT model based on # `bert_weights_name`: tokenizer = BertTokenizer.from_pretrained(bert_weights_name) model = BertModel.from_pretrained(bert_weights_name) model = model.to(device) print(f'Model is on {model.device}') # Get the vocabulary from `rel_df`: ##### YOUR CODE HERE vocab = set(rel_df.word1.values) | set(rel_df.word2.values) # Use `vsm.create_subword_pooling_vsm` with the user's arguments: pooled_df = vsm.create_subword_pooling_vsm(vocab, tokenizer, model, layer=layer, pool_func=pool_func) # Return the results of the relatedness evalution: return vsm.word_relatedness_evaluation(rel_df, pooled_df) pooling_function = vsm.mean_pooling dev = pd.read_csv(os.path.join(DATA_HOME, "cs224u-wordrelatedness-dev.csv")) highest = devdf_generator(dev, scoring='highest') lowest = devdf_generator(dev, scoring='lowest') average = devdf_generator(dev, scoring='mean') ``` ### Series of Experiments using differnt "hyperparameters" for BERT pooling model ``` # 1. 
Same hypers, different dev datasets
scores = {}
dev_eval, dev_rho = evaluate_pooled_bert(dev, -1, pooling_function)
scores['dev'] = dev_rho
# NOTE: bind the evaluation results to *_eval names instead of reusing the
# dataset names -- the original rebound `highest`, `lowest` and `mean` to the
# returned evaluation DataFrames, shadowing the datasets that experiment 2
# below still needs to pass into evaluate_pooled_bert.
highest_eval, highest_rho = evaluate_pooled_bert(highest, -1, pooling_function)
scores['highest'] = highest_rho
lowest_eval, lowest_rho = evaluate_pooled_bert(lowest, -1, pooling_function)
scores['lowest'] = lowest_rho
mean_eval, mean_rho = evaluate_pooled_bert(average, -1, pooling_function)
scores['mean'] = mean_rho
scores

# 2. same dev set (highest), different pooling functions
pooling_scores = {}
min_eval, min_rho = evaluate_pooled_bert(highest, -1, vsm.min_pooling)
pooling_scores['min'] = min_rho
max_eval, max_rho = evaluate_pooled_bert(highest, -1, vsm.max_pooling)
pooling_scores['max'] = max_rho
mean_eval, mean_rho = evaluate_pooled_bert(highest, -1, vsm.mean_pooling)
pooling_scores['mean'] = mean_rho
# fix: this experiment holds the dataset fixed at `highest`; the original
# passed `average` to the last_pooling run, making the comparison invalid.
last_eval, last_rho = evaluate_pooled_bert(highest, -1, vsm.last_pooling)
pooling_scores['last'] = last_rho
pooling_scores
```
github_jupyter
``` from FeatureGenerator import * import ngram import pickle import pandas as pd from nltk.tokenize import sent_tokenize from helpers import * import hashlib class CountFeatureGenerator(FeatureGenerator): def __init__(self, name='countFeatureGenerator'): super(CountFeatureGenerator, self).__init__(name) def process(self, df): grams = ["unigram", "bigram", "trigram"] feat_names = ["Headline", "articleBody"] print("generate counting features") for feat_name in feat_names: for gram in grams: df["count_of_%s_%s" % (feat_name, gram)] = list(df.apply(lambda x: len(x[feat_name + "_" + gram]), axis=1)) df["count_of_unique_%s_%s" % (feat_name, gram)] = list(df.apply(lambda x: len(set(x[feat_name + "_" + gram])), axis=1)) df["ratio_of_unique_%s_%s" % (feat_name, gram)] = list(map(try_divide, df["count_of_unique_%s_%s"%(feat_name,gram)], df["count_of_%s_%s"%(feat_name,gram)])) # overlapping n-grams count for gram in grams: df["count_of_Headline_%s_in_articleBody" % gram] = list(df.apply(lambda x: sum([1. 
for w in x["Headline_" + gram] if w in set(x["articleBody_" + gram])]), axis=1)) df["ratio_of_Headline_%s_in_articleBody" % gram] = list(map(try_divide, df["count_of_Headline_%s_in_articleBody" % gram], df["count_of_Headline_%s" % gram])) # number of sentences in headline and body for feat_name in feat_names: df['len_sent_%s' % feat_name] = df[feat_name].apply(lambda x: len(sent_tokenize(x))) # dump the basic counting features into a file feat_names = [ n for n in df.columns if "count" in n or "ratio" in n or "len_sent" in n] # binary refuting features _refuting_words = [ 'fake', 'fraud', 'hoax', 'false', 'deny', 'denies', # 'refute', 'not', 'despite', 'nope', 'doubt', 'doubts', 'bogus', 'debunk', 'pranks', 'retract' ] check_words = _refuting_words for rf in check_words: fname = '%s_exist' % rf feat_names.append(fname) df[fname] = list(df['Headline'].map(lambda x: 1 if rf in x else 0)) print('BasicCountFeatures:') print(df) train = df[~df['target'].isnull()] print('train:') print(train[['Headline_unigram','Body ID', 'count_of_Headline_unigram']]) xBasicCountsTrain = train[feat_names].values outfilename_bcf_train = "train.basic.pkl" with open(outfilename_bcf_train, "wb") as outfile: pickle.dump(feat_names, outfile, -1) pickle.dump(xBasicCountsTrain, outfile, -1) print('basic counting features for training saved in %s' % outfilename_bcf_train) test = df[df['target'].isnull()] print('test:') print(test[['Headline_unigram','Body ID', 'count_of_Headline_unigram']]) if test.shape[0] > 0: # test set exists print('saving test set') xBasicCountsTest = test[feat_names].values outfilename_bcf_test = "test.basic.pkl" with open(outfilename_bcf_test, 'wb') as outfile: pickle.dump(feat_names, outfile, -1) pickle.dump(xBasicCountsTest, outfile, -1) print('basic counting features for test saved in %s' % outfilename_bcf_test) def read(self, header='train'): filename_bcf = "%s.basic.pkl" % header with open(filename_bcf, "rb") as infile: feat_names = pickle.load(infile) xBasicCounts = 
pickle.load(infile) print('feature names: ') print(feat_names) print('xBasicCounts.shape:') print(xBasicCounts.shape) np.save('counts_test', [xBasicCounts]) return [xBasicCounts] if __name__ == '__main__': cf = CountFeatureGenerator() cf.read('test') ```
github_jupyter
# Audiobooks business case ## Preprocessing exercise It makes sense to shuffle the indices prior to balancing the dataset. Using the code from the lesson (below), shuffle the indices and then balance the dataset. At the end of the course, you will have an exercise to create the same machine learning algorithm, with preprocessing done in this way. Note: This is more of a programming exercise rather than a machine learning one. Being able to complete it successfully will ensure you understand the preprocessing. Good luck! **Solution:** Scroll down to the 'Exercise Solution' section ### Extract the data from the csv ``` import numpy as np # We will use the sklearn preprocessing library, as it will be easier to standardize the data. from sklearn import preprocessing # Load the data raw_csv_data = np.loadtxt('Audiobooks_data.csv',delimiter=',') # The inputs are all columns in the csv, except for the first one [:,0] # (which is just the arbitrary customer IDs that bear no useful information), # and the last one [:,-1] (which is our targets) unscaled_inputs_all = raw_csv_data[:,1:-1] # The targets are in the last column. That's how datasets are conventionally organized. targets_all = raw_csv_data[:,-1] ``` ### EXERCISE SOLUTION We shuffle the indices before balancing (to remove any day effects, etc.) However, we still have to shuffle them AFTER we balance the dataset as otherwise, all targets that are 1s will be contained in the train_targets. This code is suboptimal, but is the easiest way to complete the exercise. Still, as we do the preprocessing only once, speed in not something we are aiming for. We record the variables in themselves, so we don't amend the code that follows. ``` # When the data was collected it was actually arranged by date # Shuffle the indices of the data, so the data is not arranged in any way when we feed it. 
# Since we will be batching, we want the data to be as randomly spread out as possible shuffled_indices = np.arange(unscaled_inputs_all.shape[0]) np.random.shuffle(shuffled_indices) # Use the shuffled indices to shuffle the inputs and targets. unscaled_inputs_all = unscaled_inputs_all[shuffled_indices] targets_all = targets_all[shuffled_indices] ``` ### Balance the dataset ``` # Count how many targets are 1 (meaning that the customer did convert) num_one_targets = int(np.sum(targets_all)) # Set a counter for targets that are 0 (meaning that the customer did not convert) zero_targets_counter = 0 # We want to create a "balanced" dataset, so we will have to remove some input/target pairs. # Declare a variable that will do that: indices_to_remove = [] # Count the number of targets that are 0. # Once there are as many 0s as 1s, mark entries where the target is 0. for i in range(targets_all.shape[0]): if targets_all[i] == 0: zero_targets_counter += 1 if zero_targets_counter > num_one_targets: indices_to_remove.append(i) # Create two new variables, one that will contain the inputs, and one that will contain the targets. # We delete all indices that we marked "to remove" in the loop above. unscaled_inputs_equal_priors = np.delete(unscaled_inputs_all, indices_to_remove, axis=0) targets_equal_priors = np.delete(targets_all, indices_to_remove, axis=0) ``` ### Standardize the inputs ``` # That's the only place we use sklearn functionality. We will take advantage of its preprocessing capabilities # It's a simple line of code, which standardizes the inputs, as we explained in one of the lectures. # At the end of the business case, you can try to run the algorithm WITHOUT this line of code. # The result will be interesting. scaled_inputs = preprocessing.scale(unscaled_inputs_equal_priors) ``` ### Shuffle the data ``` # When the data was collected it was actually arranged by date # Shuffle the indices of the data, so the data is not arranged in any way when we feed it. 
# Since we will be batching, we want the data to be as randomly spread out as possible shuffled_indices = np.arange(scaled_inputs.shape[0]) np.random.shuffle(shuffled_indices) # Use the shuffled indices to shuffle the inputs and targets. shuffled_inputs = scaled_inputs[shuffled_indices] shuffled_targets = targets_equal_priors[shuffled_indices] ``` ### Split the dataset into train, validation, and test ``` # Count the total number of samples samples_count = shuffled_inputs.shape[0] # Count the samples in each subset, assuming we want 80-10-10 distribution of training, validation, and test. # Naturally, the numbers are integers. train_samples_count = int(0.8 * samples_count) validation_samples_count = int(0.1 * samples_count) # The 'test' dataset contains all remaining data. test_samples_count = samples_count - train_samples_count - validation_samples_count # Create variables that record the inputs and targets for training # In our shuffled dataset, they are the first "train_samples_count" observations train_inputs = shuffled_inputs[:train_samples_count] train_targets = shuffled_targets[:train_samples_count] # Create variables that record the inputs and targets for validation. # They are the next "validation_samples_count" observations, folllowing the "train_samples_count" we already assigned validation_inputs = shuffled_inputs[train_samples_count:train_samples_count+validation_samples_count] validation_targets = shuffled_targets[train_samples_count:train_samples_count+validation_samples_count] # Create variables that record the inputs and targets for test. # They are everything that is remaining. test_inputs = shuffled_inputs[train_samples_count+validation_samples_count:] test_targets = shuffled_targets[train_samples_count+validation_samples_count:] # We balanced our dataset to be 50-50 (for targets 0 and 1), but the training, validation, and test were # taken from a shuffled dataset. Check if they are balanced, too. 
Note that each time you rerun this code, # you will get different values, as each time they are shuffled randomly. # Normally you preprocess ONCE, so you need not rerun this code once it is done. # If you rerun this whole sheet, the npzs will be overwritten with your newly preprocessed data. # Print the number of targets that are 1s, the total number of samples, and the proportion for training, validation, and test. print(np.sum(train_targets), train_samples_count, np.sum(train_targets) / train_samples_count) print(np.sum(validation_targets), validation_samples_count, np.sum(validation_targets) / validation_samples_count) print(np.sum(test_targets), test_samples_count, np.sum(test_targets) / test_samples_count) ``` ### Save the three datasets in *.npz ``` # Save the three datasets in *.npz. # In the next lesson, you will see that it is extremely valuable to name them in such a coherent way! np.savez('Audiobooks_data_train', inputs=train_inputs, targets=train_targets) np.savez('Audiobooks_data_validation', inputs=validation_inputs, targets=validation_targets) np.savez('Audiobooks_data_test', inputs=test_inputs, targets=test_targets) ```
github_jupyter
# Enterprise Deep Learning with TensorFlow: openSAP ## SAP Innovation Center Network ``` Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ``` ### Introduction to TensorFlow In this notebook, we will learn - Some common TensorFlow operations - Run math operations using Session - Understand how TensorFlow can run computes on different devices ``` # Import tensorflow library # Reference it as tf for ease of calling import tensorflow as tf # Let's check our TensorFlow version print("You are running TensorFlow %s" % str(tf.__version__)) # List of math ops can be found here: https://www.tensorflow.org/api_guides/python/math_ops # Let us use an InteractiveSession # Provides an easy to use session for interactive environments like a Jupyter notebook sess = tf.InteractiveSession() a = tf.constant(23, name='const_a') b = tf.constant(11, name='const_b') # Let's add the two prime numbers c = tf.add(a, b) print(c.eval()) # Let's subtract two prime numbers c = tf.subtract(a, b) print(c.eval()) # More ops for playing with TensorFlow # Let's multiply two prime numbers c = tf.multiply(a, b) # 253 # Let's divide two prime numbers c = tf.divide(23, 13) # 2 a = tf.constant(23., name='const_a') b = tf.constant(11., name='const_b') # Now, let's divide two prime numbers to see the difference c = tf.divide(23, 13) # 2.090909 # Let's get the modulus of two prime numbers c = tf.mod(a, b) # 1.0 a = tf.constant(2., name='const_a') b = tf.constant(10., name='const_b') # Let's calculate 2^10 c = tf.pow(a, b) # 1024.0 # List of control 
ops can be found here: https://www.tensorflow.org/api_guides/python/control_flow_ops # Check for a < b c = tf.less(a, b) # True # Check for a <= b c = tf.less_equal(a, b) # True # Check for a > b c = tf.greater(a, b) # False # Check for a >= b c = tf.greater_equal(a, b) # False # Some conditional check statements c = tf.logical_and(True, False) # False tf.logical_or(True, False) # True tf.logical_xor(True, False) # True # Let's create two matrices, a 3x1 and another 1x3 matrix for multiplication mat_a = tf.constant([[1., 3., 5.]], name='mat_a') mat_b = tf.constant([[7.], [11.], [13.]], name='mat_b') # Let's matrix multiply the two matrices prod_op = tf.matmul(mat_a, mat_b) print(prod_op) # Create a session object to run our matrix multiplication sess = tf.Session() # Get the result by calling run on the session # Returns an numpy ndarray object mat_mul = sess.run(prod_op) # Let's view the result print(mat_mul) # [[ 105.]] # Remember to close the session when done, releases the resources sess.close() # Easier way to handle session objects is # using the familiar 'with' block as follows with tf.Session() as sess: mat_mul = sess.run(prod_op) print(mat_mul) # [[ 105.]] # If you have multiple devices capable of computes, use it as follows # /cpu:<device_id> # [Mac] Run the following command to know how many logical cores are present on your machine: sysctl -n hw.ncpu # [Win] Run the following command to know how many logical cores are present on your machine: systeminfo | find /i "processors" with tf.Session() as sess: with tf.device("/cpu:0"): mat_mul = sess.run(prod_op) print(mat_mul) # [[ 105.]] ```
github_jupyter
**Strings** If you want to use text in Python, you have to use a **string.** A **string** is created by entering text between **two single or double quotation marks** > print("Python is fun!") > print('Always look on the bright side of life') The delimiter (" or ') used for a string doesn't affect how it behaves in any way. ``` print("Hello world!") ``` **Backslash** Some characters can't be directly included in a string. For instance, double quotes can't be directly included in a double quote string; this would cause it to end prematurely. Characters like these must be escaped by placing a **backslash** before them. Double quotes only need to be escaped in double quote strings, and the same is true for single quote strings. **For Example:** > print('Brian\'s mother: He\'s not an angel. He\'s a very naughty boy!') **Backslashes** can also be used to escape tabs, arbitrary Unicode characters, and various other things that can't be reliably printed. ``` print('Brain\'s mother: He\'s not an angel. He\'s a very naughty boy!') print('I\'m learning!') ``` **Newlines** **\n** represents a new line. It can be used in strings to create multi-line output: > print('One \nTwo \nThree') Similarly, **\t** represents a tab. ``` print("Hello \nWorld") ``` **Newlines** Newlines will be automatically added for strings that are created using **three quotes**. **For example** > print( """ this is a multiline text """ ) This makes it easier to format long, multi-line texts without the need to explicitly put **\n** for line breaks. ``` print("Hi") print("""This is great""") print("Hi \nThis \nis \ngreat") ``` **Concatenation** As with integers and floats, strings in Python can be added, using a process called **concatenation**, which can be done on any two strings. > print("Spam" + 'eggs') Even if your strings contain numbers, they are still added as strings rather than integers.
> print("2" + "2") **Adding** a **string** to a **number** produces an **error**, as even though they might look similar, they are two different entities. ``` print("Python " + "is " + "awesome. ") ``` **String Operations** Strings can also be **multipled by** **integers.** This produces a **repeated version of the original string**. The order of the string and the integer doesn't matter, but the string usually coms first. > print("spam" *3) > print(4 * '2') Strings can't be multiplied by other strings. Strings also can't be multiplied by floats, even if the floats are whole numbers. ``` print(3*'7') ``` **Variables** A **variable** allows you to store a value by assigning it to a name, which can be used to refer to the value alter in the program. For example, in game development, you would use a variable to store the points of the player. To assign a variable, use **one equals sign** > user = "James" In given example we assigned string "James" to user variable. ``` age = 42 print(age) ``` **Variable names** Certain restrictions apply in regard to the characters that may be used in Python variable names. The only characters that are allowed are - **letters,** - **numbers,** and - **underscores.** Also, they can't start with numbers. Not following these rules results in errors. > this_is_a_normal_name = 7 > 123abc = 7 SyntaxError: invalid syntax Python is a case sensitive programming language. Thus, **Lastname** and **lastname** are two **different variable** names in Python. ``` A_VARIABLE_NAME = True if A_VARIABLE_NAME == True: print("yes") ``` **Working with Variables** You can use variables to perform corresponding operations, just as you did with numbers and strings: > x = 7 > print(x) > print( x + 3) > print(x) As you can see, the variable stores its value throughout the program. ``` spam = "eggs" print(spam*3) ``` **Working with Variables** Variables can be reassigned as many times as you want, in order to change their value. 
**In Python**, variables **don't have specific types**, so you can assign a string to a variable, and later assign an integer to the same variable. > x = 123.456 > print(x) > x = "This is a string" > print(x + "!") However, it is not good practice. To avoid mistakes, try to avoid overwriting the same variable with different data types. ``` x = 5 y = 7 print(x + y) ``` **Input** Let's assume we want to take the age of the user as input. We know that the **input()** function returns a string. To convert it to a number, we can use the **int()** function. > age = int(input()) > print(age) Similarly, in order to convert a number to a string, the **str()** function is used. This can be useful if you need to use a number in string concatenation. For example: > age = 42 > print("His age is " + str(age)) ``` x = "2" y = "4" z = int(x) + int(y) print(z) ``` **Input** You can use input() multiple times to take multiple user inputs **For example:** > name = input() > age = input() > print(name + " is " + age) When the input() function executes, program flow stops until a user enters some value. ``` x = int(input()) y = int(input()) print(x + y) ``` **In-Place Operators** **In-place operators** allow you to write code like 'x = x + 3' more concisely, as ' x +=3'. The same thing is possible with other operators such as -, * , /and % as well. > x = 2 > print(x) > x += 3 > print(x) ``` x = 4 x *=3 print(x) ``` **In-place Operators** These operators can be used on types other than numbers, as well, such as **strings** > x = "spam" > print(x) > x += "eggs" > print(x) In-place operators can be used for any numerical operation( + , - , * , / , %, **, //). ``` x = "a" x *= 3 print(x) spam = "7" spam = spam + "0" eggs = int(spam) + 3 print(float(eggs)) #enter '42' as input: age = int(input()) print(age + 8) x = 5 y = x +3 y = int(str(y) + "2") print(y) x = 4 x += 5 print(x) x = 3 num = 17 print(num%x) # 3*5 =15 name = input() print("Welcome,"+ name) ```
github_jupyter
Importando as Dependências ``` import os import copy # os.chdir('corpora') from scripts.anntools import Collection from pathlib import Path import nltk nltk.download('punkt') ``` Leitura de Arquivo ``` c = Collection() for fname in Path("original/training/").rglob("*.txt"): c.load(fname) ``` Acesso a uma instância anotada ``` c.sentences[0] ``` Acesso ao texto de uma instância ``` c.sentences[0].text ``` Acesso às entidades nomeadas de uma instância ``` c.sentences[0].keyphrases ``` Acesso às relações anotadas de uma instância ``` c.sentences[0].relations ``` Pré-processando os Dados ``` def extract_keyphrases(keyphrases, text): tags = {} for keyphrase in sorted(keyphrases, key=lambda x: len(x.text)): ktext = keyphrase.text ktokens = [text[s[0]:s[1]] for s in keyphrase.spans] # casos contínuos idxs, ponteiro = [], 0 for i, token in enumerate(tokens): if token == ktokens[ponteiro]: idxs.append(i) ponteiro += 1 else: idxs, ponteiro = [], 0 if ponteiro == len(ktokens): break if len(ktokens) != len(idxs): idxs, ponteiro = [], 0 for i, token in enumerate(tokens): if token == ktokens[ponteiro]: idxs.append(i) ponteiro += 1 if ponteiro == len(ktokens): break error = False if len(ktokens) != len(idxs): error = True tags[keyphrase.id] = { 'text': ktext, 'idxs': idxs, 'tokens': [text[s[0]:s[1]] for s in keyphrase.spans], 'attributes': [attr.__repr__() for attr in keyphrase.attributes], 'spans': keyphrase.spans, 'label': keyphrase.label, 'id': keyphrase.id, 'error': error } return tags data = [] for instance in c.sentences: text = instance.text tokens = nltk.word_tokenize(text.replace('–', ' – '), language='spanish') keyphrases = extract_keyphrases(instance.keyphrases, text) relations = [] for relation in instance.relations: relations.append({ 'arg1': relation.origin, 'arg2': relation.destination, 'label': relation.label }) data.append({ 'text': text, 'tokens': tokens, 'keyphrases': keyphrases, 'relations': relations }) ``` Separando dados e salvando ``` from random import 
shuffle shuffle(data) size = int(len(data)*0.2) trainset, _set = data[size:], data[:size] size = int(len(_set)*0.5) devset, testset = _set[size:], _set[:size] import json if not os.path.exists('preprocessed'): os.mkdir('preprocessed') json.dump(trainset, open('preprocessed/trainset.json', 'w'), sort_keys=True, indent=4, separators=(',', ':')) json.dump(devset, open('preprocessed/devset.json', 'w'), sort_keys=True, indent=4, separators=(',', ':')) json.dump(testset, open('preprocessed/testset.json', 'w'), sort_keys=True, indent=4, separators=(',', ':')) for row in trainset: keyphrases = row['keyphrases'] for kid in keyphrases: keyphrase = keyphrases[kid] for i, idx in enumerate(keyphrase['idxs']): if i > 0: if keyphrase['idxs'][i-1]+1 != idx: print(keyphrase) print(row['tokens']) print() break ```
github_jupyter
# SU Deep Learning with Tensorflow: Python & NumPy Tutorial Python 3 and NumPy will be used extensively throughout this course, so it's important to be familiar with them. One can also check the website's tutorial for further preparation: https://deep-learning-su.github.io/python-numpy-tutorial/ ## Python 3 If you're unfamiliar with Python 3, here are some of the most common changes from Python 2 to look out for. ### Print is a function ``` print("Hello!") ``` Without parentheses, printing will not work. ``` print "Hello!" ``` ### Floating point division by default ``` 5 / 2 ``` To do integer division, we use two forward slashes: ``` 5 // 2 ``` ### No xrange The xrange from Python 2 is now merged into "range" for Python 3 and there is no xrange in Python 3. In Python 3, range(3) does not create a list of 3 elements as it would in Python 2, rather just creates a more memory efficient iterator. Hence, xrange in Python 3: Does not exist range in Python 3: Has very similar behavior to Python 2's xrange ``` for i in range(3): print(i) range(3) # If need be, can use the following to get a similar behavior to Python 2's range: print(list(range(3))) ``` # NumPy "NumPy is the fundamental package for scientific computing in Python. It is a Python library that provides a multidimensional array object, various derived objects (such as masked arrays and matrices), and an assortment of routines for fast operations on arrays, including mathematical, logical, shape manipulation, sorting, selecting, I/O, discrete Fourier transforms, basic linear algebra, basic statistical operations, random simulation and much more" -https://docs.scipy.org/doc/numpy-1.10.1/user/whatisnumpy.html. ``` import numpy as np ``` Let's run through an example showing how powerful NumPy is. Suppose we have two lists a and b, consisting of the first 100,000 non-negative numbers, and we want to create a new list c whose *i*th element is a[i] + 2 * b[i].
Without NumPy: ``` %%time a = [i for i in range(100000)] b = [i for i in range(100000)] %%time c = [] for i in range(len(a)): c.append(a[i] + 2 * b[i]) ``` With NumPy: ``` %%time a = np.arange(100000) b = np.arange(100000) %%time c = a + 2 * b ``` The result is 10 to 15 times faster, and we could do it in fewer lines of code (and the code itself is more intuitive)! Regular Python is much slower due to type checking and other overhead of needing to interpret code and support Python's abstractions. For example, if we are doing some addition in a loop, constantly type checking in a loop will lead to many more instructions than just performing a regular addition operation. NumPy, using optimized pre-compiled C code, is able to avoid a lot of the overhead introduced. The process we used above is **vectorization**. Vectorization refers to applying operations to arrays instead of just individual elements (i.e. no loops). Why vectorize? 1. Much faster 2. Easier to read and fewer lines of code 3. More closely assembles mathematical notation Vectorization is one of the main reasons why NumPy is so powerful. ## ndarray ndarrays, n-dimensional arrays of homogenous data type, are the fundamental datatype used in NumPy. As these arrays are of the same type and are fixed size at creation, they offer less flexibility than Python lists, but can be substantially more efficient runtime and memory-wise. (Python lists are arrays of pointers to objects, adding a layer of indirection.) The number of dimensions is the rank of the array; the shape of an array is a tuple of integers giving the size of the array along each dimension. 
``` # Can initialize ndarrays with Python lists, for example: a = np.array([1, 2, 3]) # Create a rank 1 array print(type(a)) # Prints "<class 'numpy.ndarray'>" print(a.shape) # Prints "(3,)" print(a[0], a[1], a[2]) # Prints "1 2 3" a[0] = 5 # Change an element of the array print(a) # Prints "[5, 2, 3]" b = np.array([[1, 2, 3], [4, 5, 6]]) # Create a rank 2 array print(b.shape) # Prints "(2, 3)" print(b[0, 0], b[0, 1], b[1, 0]) # Prints "1 2 4" ``` There are many other initializations that NumPy provides: ``` a = np.zeros((2, 2)) # Create an array of all zeros print(a) # Prints "[[ 0. 0.] # [ 0. 0.]]" b = np.full((2, 2), 7) # Create a constant array print(b) # Prints "[[ 7. 7.] # [ 7. 7.]]" c = np.eye(2) # Create a 2 x 2 identity matrix print(c) # Prints "[[ 1. 0.] # [ 0. 1.]]" d = np.random.random((2, 2)) # Create an array filled with random values print(d) # Might print "[[ 0.91940167 0.08143941] # [ 0.68744134 0.87236687]]" ``` How do we create a 2 by 2 matrix of ones? ``` a = np.ones((2, 2)) # Create an array of all ones print(a) # Prints "[[ 1. 1.] # [ 1. 1.]]" ``` Useful to keep track of shape; helpful for debugging and knowing dimensions will be very useful when computing gradients, among other reasons. ``` nums = np.arange(8) print(nums) print(nums.shape) nums = nums.reshape((2, 4)) print('Reshaped:\n', nums) print(nums.shape) # The -1 in reshape corresponds to an unknown dimension that numpy will figure out, # based on all other dimensions and the array size. # Can only specify one unknown dimension. # For example, sometimes we might have an unknown number of data points, and # so we can use -1 instead without worrying about the true number. nums = nums.reshape((4, -1)) print('Reshaped with -1:\n', nums) print(nums.shape) ``` NumPy supports an object-oriented paradigm, such that ndarray has a number of methods and attributes, with functions similar to ones in the outermost NumPy namespace. 
For example, we can do both: ``` nums = np.arange(8) print(nums.min()) # Prints 0 print(np.min(nums)) # Prints 0 ``` ## Array Operations/Math NumPy supports many elementwise operations: ``` x = np.array([[1, 2], [3, 4]], dtype=np.float64) y = np.array([[5, 6], [7, 8]], dtype=np.float64) # Elementwise sum; both produce the array # [[ 6.0 8.0] # [10.0 12.0]] print(x + y) print(np.add(x, y)) # Elementwise difference; both produce the array # [[-4.0 -4.0] # [-4.0 -4.0]] print(x - y) print(np.subtract(x, y)) # Elementwise product; both produce the array # [[ 5.0 12.0] # [21.0 32.0]] print(x * y) print(np.multiply(x, y)) # Elementwise square root; produces the array # [[ 1. 1.41421356] # [ 1.73205081 2. ]] print(np.sqrt(x)) ``` How do we elementwise divide between two arrays? ``` x = np.array([[1, 2], [3, 4]], dtype=np.float64) y = np.array([[5, 6], [7, 8]], dtype=np.float64) # Elementwise division; both produce the array # [[ 0.2 0.33333333] # [ 0.42857143 0.5 ]] print(x / y) print(np.divide(x, y)) ``` Note * is elementwise multiplication, not matrix multiplication. We instead use the dot function to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. 
dot is available both as a function in the numpy module and as an instance method of array objects: ``` x = np.array([[1, 2], [3, 4]]) y = np.array([[5, 6], [7, 8]]) v = np.array([9, 10]) w = np.array([11, 12]) # Inner product of vectors; both produce 219 print(v.dot(w)) print(np.dot(v, w)) # Matrix / vector product; both produce the rank 1 array [29 67] print(x.dot(v)) print(np.dot(x, v)) # Matrix / matrix product; both produce the rank 2 array # [[19 22] # [43 50]] print(x.dot(y)) print(np.dot(x, y)) ``` There are many useful functions built into NumPy, and often we're able to express them across specific axes of the ndarray: ``` x = np.array([[1, 2, 3], [4, 5, 6]]) print(np.sum(x)) # Compute sum of all elements; prints "21" print(np.sum(x, axis=0)) # Compute sum of each column; prints "[5 7 9]" print(np.sum(x, axis=1)) # Compute sum of each row; prints "[6 15]" print(np.max(x, axis=1)) # Compute max of each row; prints "[3 6]" ``` How can we compute the index of the max value of each row? Useful, to say, find the class that corresponds to the maximum score for an input image. ``` x = np.array([[1, 2, 3], [4, 5, 6]]) print(np.argmax(x, axis=1)) # Compute index of max of each row; prints "[2 2]" ``` Note the axis you apply the operation will have its dimension removed from the shape. This is useful to keep in mind when you're trying to figure out what axis corresponds to what. For example: ``` x = np.array([[1, 2, 3], [4, 5, 6]]) print(x.shape) # Has shape (2, 3) print((x.max(axis=0)).shape) # Taking the max over axis 0 has shape (3,) # corresponding to the 3 columns. 
# An array with rank 3 x = np.array([[[1, 2, 3], [4, 5, 6]], [[10, 23, 33], [43, 52, 16]] ]) print(x) print(x.shape) # Has shape (2, 2, 3) print((x.max(axis=1)).shape) # Taking the max over axis 1 has shape (2, 3) print((x.max(axis=(1, 2)))) # Can take max over multiple axes; prints [6 52] print((x.max(axis=(1, 2))).shape) # Taking the max over axes 1, 2 has shape (2,) ``` ## Indexing NumPy also provides powerful indexing schemes. ``` # Create the following rank 2 array with shape (3, 4) # [[ 1 2 3 4] # [ 5 6 7 8] # [ 9 10 11 12]] a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) print('Original:\n', a) # Can select an element as you would in a 2 dimensional Python list print('Element (0, 0) (a[0][0]):\n', a[0][0]) # Prints 1 # or as follows print('Element (0, 0) (a[0, 0]) :\n', a[0, 0]) # Prints 1 # Use slicing to pull out the subarray consisting of the first 2 rows # and columns 1 and 2; b is the following array of shape (2, 2): # [[2 3] # [6 7]] b = a[:2, 1:3] print('Sliced (a[:2, 1:3]):\n', b) # Steps are also supported in indexing. The following reverses the first row: print('Reversing the first row (a[0, ::-1]) :\n', a[0, ::-1]) # Prints [4 3 2 1] ``` Often, it's useful to select or modify one element from each row of a matrix. The following example employs **fancy indexing**, where we index into our array using an array of indices (say an array of integers or booleans): ``` # Create a new array from which we will select elements a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) print(a) # prints "array([[ 1, 2, 3], # [ 4, 5, 6], # [ 7, 8, 9], # [10, 11, 12]])" # Create an array of indices b = np.array([0, 2, 0, 1]) # Select one element from each row of a using the indices in b print(a[np.arange(4), b]) # Prints "[ 1 6 7 11]" # Mutate one element from each row of a using the indices in b a[np.arange(4), b] += 10 print(a) # prints "array([[11, 2, 3], # [ 4, 5, 16], # [17, 8, 9], # [10, 21, 12]]) ``` We can also use boolean indexing/masks. 
Suppose we want to set all elements greater than MAX to MAX: ``` MAX = 5 nums = np.array([1, 4, 10, -1, 15, 0, 5]) print(nums > MAX) # Prints [False, False, True, False, True, False, False] nums[nums > MAX] = MAX print(nums) # Prints [1, 4, 5, -1, 5, 0, 5] ``` Finally, note that the indices in fancy indexing can appear in any order and even multiple times: ``` nums = np.array([1, 4, 10, -1, 15, 0, 5]) print(nums[[1, 2, 3, 1, 0]]) # Prints [4 10 -1 4 1] ``` ## Broadcasting Many of the operations we've looked at above involved arrays of the same rank. However, many times we might have a smaller array and use that multiple times to update an array of a larger rank. For example, consider the below example of shifting the mean of each column from the elements of the corresponding column: ``` x = np.array([[1, 2, 3], [3, 5, 7]]) print(x.shape) # Prints (2, 3) col_means = x.mean(axis=0) print(col_means) # Prints [2. 3.5 5.] print(col_means.shape) # Prints (3,) # Has a smaller rank than x! mean_shifted = x - col_means print('\n', mean_shifted) print(mean_shifted.shape) # Prints (2, 3) ``` Or even just multiplying a matrix by 2: ``` x = np.array([[1, 2, 3], [3, 5, 7]]) print(x * 2) # Prints [[ 2 4 6] # [ 6 10 14]] ``` Broadcasting two arrays together follows these rules: 1. If the arrays do not have the same rank, prepend the shape of the lower rank array with 1s until both shapes have the same length. 2. The two arrays are said to be compatible in a dimension if they have the same size in the dimension, or if one of the arrays has size 1 in that dimension. 3. The arrays can be broadcast together if they are compatible in all dimensions. 4. After broadcasting, each array behaves as if it had shape equal to the elementwise maximum of shapes of the two input arrays. 5. In any dimension where one array had size 1 and the other array had size greater than 1, the first array behaves as if it were copied along that dimension. 
For example, when subtracting the columns above, we had arrays of shape (2, 3) and (3,). 1. These arrays do not have same rank, so we prepend the shape of the lower rank one to make it (1, 3). 2. (2, 3) and (1, 3) are compatible (have the same size in the dimension, or if one of the arrays has size 1 in that dimension). 3. Can be broadcast together! 4. After broadcasting, each array behaves as if it had shape equal to (2, 3). 5. The smaller array will behave as if it were copied along dimension 0. Let's try to subtract the mean of each row! ``` x = np.array([[1, 2, 3], [3, 5, 7]]) row_means = x.mean(axis=1) print(row_means) # Prints [2. 5.] mean_shifted = x - row_means ``` To figure out what's wrong, we print some shapes: ``` x = np.array([[1, 2, 3], [3, 5, 7]]) print(x.shape) # Prints (2, 3) row_means = x.mean(axis=1) print(row_means) # Prints [2. 5.] print(row_means.shape) # Prints (2,) # Results in the following error: ValueError: operands could not be broadcast together with shapes (2,3) (2,) mean_shifted = x - row_means ``` What happened? Answer: If we following broadcasting rule 1, then we'd prepend a 1 to the smaller rank array ot get (1, 2). However, the last dimensions don't match now between (2, 3) and (1, 2), and so we can't broadcast. Take 2, reshaping the row means to get the desired behavior: ``` x = np.array([[1, 2, 3], [3, 5, 7]]) print(x.shape) # Prints (2, 3) row_means = x.mean(axis=1).reshape((-1, 1)) print(row_means) # Prints [[2.], [5.]] print(row_means.shape) # Prints (2, 1) mean_shifted = x - row_means print(mean_shifted) print(mean_shifted.shape) # Prints (2, 3) ``` More broadcasting examples! 
``` # Compute outer product of vectors v = np.array([1, 2, 3]) # v has shape (3,) w = np.array([4, 5]) # w has shape (2,) # To compute an outer product, we first reshape v to be a column # vector of shape (3, 1); we can then broadcast it against w to yield # an output of shape (3, 2), which is the outer product of v and w: # [[ 4 5] # [ 8 10] # [12 15]] print(np.reshape(v, (3, 1)) * w) # Add a vector to each row of a matrix x = np.array([[1, 2, 3], [4, 5, 6]]) # x has shape (2, 3) and v has shape (3,) so they broadcast to (2, 3), # giving the following matrix: # [[2 4 6] # [5 7 9]] print(x + v) # Add a vector to each column of a matrix # x has shape (2, 3) and w has shape (2,). # If we transpose x then it has shape (3, 2) and can be broadcast # against w to yield a result of shape (3, 2); transposing this result # yields the final result of shape (2, 3) which is the matrix x with # the vector w added to each column. Gives the following matrix: # [[ 5 6 7] # [ 9 10 11]] print((x.T + w).T) # Another solution is to reshape w to be a column vector of shape (2, 1); # we can then broadcast it directly against x to produce the same # output. print(x + np.reshape(w, (2, 1))) ``` ## Views vs. Copies Unlike a copy, in a **view** of an array, the data is shared between the view and the array. Sometimes, our results are copies of arrays, but other times they can be views. Understanding when each is generated is important to avoid any unforeseen issues. Views can be created from a slice of an array, changing the dtype of the same data area (using arr.view(dtype), not the result of arr.astype(dtype)), or even both. 
``` x = np.arange(5) print('Original:\n', x) # Prints [0 1 2 3 4] # Modifying the view will modify the array view = x[1:3] view[1] = -1 print('Array After Modified View:\n', x) # Prints [0 1 -1 3 4] x = np.arange(5) view = x[1:3] view[1] = -1 # Modifying the array will modify the view print('View Before Array Modification:\n', view) # Prints [1 -1] x[2] = 10 print('Array After Modifications:\n', x) # Prints [0 1 10 3 4] print('View After Array Modification:\n', view) # Prints [1 10] ``` However, if we use fancy indexing, the result will actually be a copy and not a view: ``` x = np.arange(5) print('Original:\n', x) # Prints [0 1 2 3 4] # Modifying the result of the selection due to fancy indexing # will not modify the original array. copy = x[[1, 2]] copy[1] = -1 print('Copy:\n', copy) # Prints [1 -1] print('Array After Modified Copy:\n', x) # Prints [0 1 2 3 4] # Another example involving fancy indexing x = np.arange(5) print('Original:\n', x) # Prints [0 1 2 3 4] copy = x[x >= 2] print('Copy:\n', copy) # Prints [2 3 4] x[3] = 10 print('Modified Array:\n', x) # Prints [0 1 2 10 4] print('Copy After Modified Array:\n', copy) # Prints [2 3 4] ``` ## Summary 1. NumPy is an incredibly powerful library for computation providing both massive efficiency gains and convenience. 2. Vectorize! Orders of magnitude faster. 3. Keeping track of the shape of your arrays is often useful. 4. Many useful math functions and operations built into NumPy. 5. Select and manipulate arbitrary pieces of data with powerful indexing schemes. 6. Broadcasting allows for computation across arrays of different shapes. 7. Watch out for views vs. copies.
github_jupyter
<a href="https://colab.research.google.com/github/jads-nl/execute-nhs-proms/blob/master/notebooks/3.0-modeling-regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Background to osteoarthritis case study This is day 3 from the [5-day JADS NHS PROMs data science case study](https://github.com/jads-nl/execute-nhs-proms/blob/master/README.md). To recap from the previous lectures, we have looked at defining the outcome of knee replacement. - Lecture 1: - Good outcome for knee replacement Y is measured using difference in Oxford Knee Score (OKS) - Research has shown that an improvement in OKS score of approx. 30% is relevant ([van der Wees 2017](https://github.com/jads-nl/execute-nhs-proms/blob/master/references/vanderwees2017patient-reported.pdf)). Hence an increase of +14 points is considered a 'good' outcome. - To account for the ceiling effect, a high final `t1_oks_score` is also considered as a good outcome (even if `delta_oks_score` is smaller than 14) - Lecture 2: - We have constructed a combined outcome parameter using cut-off points for pain and physical functioning. # Modeling: regression and linear modeling ## Learning objectives ### Modeling: regression and linear modeling For this lecture we are going to build various predictors for `t1_eq_vas`, i.e. the reported quality of life after operation. `t1_eq_vas` is measured on a scale from 0 to 100. We are going to use stratefied sampling to ensure we don't introduce sampling bias, using `StratefiedShuffleSplit`. This is different from the simplest function `train_test_split` which is a random sampling method. This is generally fine if your dataset is large enough (especially relative to the number of attributes). But if it is not, you run the risk of introducing significant sampling bias. 
By the end of this lecture you should know: - Know how to perform different regressions models in Python using scikit-learn - Know how to interpret and assess regression models, including the bias-variance trade-of ### Python: Hands-on Machine Learning (2nd edition) - [End-to-end Machine Learning project (chapter 2)](https://github.com/ageron/handson-ml2/blob/master/02_end_to_end_machine_learning_project.ipynb) - [Training linear models (chapter 4)](https://github.com/ageron/handson-ml2/blob/master/04_training_linear_models.ipynb) ### scikit-learn - [Tutorial cross validation score](https://scikit-learn.org/stable/auto_examples/exercises/plot_cv_diabetes.html?highlight=cross%20validation%20score) ``` import warnings import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from sklearn.feature_selection import chi2, VarianceThreshold import sklearn.linear_model #supressing warnings for readability warnings.filterwarnings("ignore") # To plot pretty figures directly within Jupyter %matplotlib inline # choose your own style: https://matplotlib.org/3.1.0/gallery/style_sheets/style_sheets_reference.html plt.style.use('seaborn-whitegrid') # Go to town with https://matplotlib.org/tutorials/introductory/customizing.html # plt.rcParams.keys() mpl.rc('axes', labelsize=14, titlesize=14) mpl.rc('figure', titlesize=20) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # contants for figsize S = (8,8) M = (12,12) L = (14,14) # pandas options pd.set_option("display.max.columns", None) pd.set_option("display.max.rows", None) pd.set_option("display.precision", 2) # import data df = pd.read_parquet('https://github.com/jads-nl/execute-nhs-proms/blob/master/data/interim/knee-provider.parquet?raw=true') from sklearn.model_selection import StratifiedShuffleSplit # 999 is used as a sentinel value, replacing those with median df["t1_eq_vas_impute"] = df.t1_eq_vas.replace( to_replace=999, value=np.median(df.t1_eq_vas) ) # add t1_eq_vas 
categories df['t1_eq_vas_cat'] = pd.cut(df.t1_eq_vas_impute, 10) # Only using 1 split for stratefied sampling, more folds are used later on in cross-validation split = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=42) for train_index, test_index in split.split(df, df['t1_eq_vas_cat']): df_train = df.loc[train_index] df_test = df.loc[test_index] # remove columns so we continue working with original dataset for set_ in (df_train, df_test): set_.drop(["t1_eq_vas_impute", "t1_eq_vas_cat"], axis=1, inplace=True) ``` # Data preparation in a scikit-learn Pipeline Previously we have already discussed the various steps in data preparation using [pandas](https://pandas.pydata.org/). As explained in the [documentation of scikit-learn](https://scikit-learn.org/stable/modules/compose.html#column-transformer), this may be problematic for one of the following reasons: * Incorporating statistics from test data into the preprocessors makes cross-validation scores unreliable (known as data leakage), for example in the case of scalers or imputing missing values. * You may want to include the parameters of the preprocessors in a [parameter search](https://scikit-learn.org/stable/modules/grid_search.html#grid-search). To this purpose, the [`ColumnTransformer` class](https://scikit-learn.org/stable/modules/generated/sklearn.compose.ColumnTransformer.html?highlight=columntransformer#sklearn.compose.ColumnTransformer) has been recently added to scikit-learn. The documentation gives an example how to use this for [pre-processing mixed types](https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html#sphx-glr-auto-examples-compose-plot-column-transformer-mixed-types-py). Historically, `sklearn` transformers are designed to work with numpy arrays, not with pandas dataframes. You can use [`sklearn-pandas`](https://github.com/scikit-learn-contrib/sklearn-pandas) to bridge this gap or use `ColumnTransformer` directly on pandas DataFrames. 
We will use the latter. ## Using ColumnsTransformers and Pipelines Recalling from the second lecture, we want to perform the following preprocessing per (group of) columns. In case feature requires more than one preprocessing step, the use of `Pipeline` is recommended. ### Passing 1D or 2D arrays in your `Pipeline` It is important to remember that `scikit-learn` can be quite fussy about the difference between passing 1D arrays/series and 2D arrays/dataframes. For example, the following code will result in an error because `categories` needs to be a list of lists: ``` enc = OrdinalEncoder(categories=age_band_categories) enc.fit(df[age_band]) ``` The correct code is (brackets!): ``` enc = OrdinalEncoder(categories=[age_band_categories]) enc.fit(df[age_band]) ``` ### Beware: difference between `OrdinalEncoder` and `OneHotEncoding` Using `OrdinalEncoder` to generate an integer representation of a categorical variable can not be used directly with all scikit-learn estimators, as these expect continuous input, and would interpret the categories as being ordered, which is often not desired. Another possibility to convert categorical features to features that can be used with scikit-learn estimators is to use a one-of-K, also known as one-hot or dummy encoding. This type of encoding can be obtained with the OneHotEncoder, which transforms each categorical feature with n_categories possible values into n_categories binary features, with one of them 1, and all others 0. 
``` from sklearn.base import BaseEstimator, TransformerMixin from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, LabelEncoder from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer # group columns age_band = ["age_band"] gender = ["gender"] age_band_categories = sorted([x for x in df.age_band.unique() if isinstance(x, str)]) comorb = [ "heart_disease", "high_bp", "stroke", "circulation", "lung_disease", "diabetes", "kidney_disease", "nervous_system", "liver_disease", "cancer", "depression", "arthritis", ] boolean = ["t0_assisted", "t0_previous_surgery", "t0_disability"] eq5d = ["t0_mobility", "t0_self_care", "t0_activity", "t0_discomfort", "t0_anxiety"] eq_vas = ["t0_eq_vas"] categorical = ["t0_symptom_period", "t0_previous_surgery", "t0_living_arrangements"] oks_questions = [ col for col in df.columns if col.startswith("oks_t0") and not col.endswith("_score") ] oks_score = ["oks_t0_score"] # preprocessing pipelines for specific columns age_band_pipe = Pipeline( steps=[ ("impute", SimpleImputer(missing_values=None, strategy="most_frequent")), ("ordinal", OrdinalEncoder(categories=[age_band_categories])), ] ) gender_pipe = Pipeline( steps=[ ("impute", SimpleImputer(missing_values=np.nan, strategy="most_frequent")), ('onehot', OneHotEncoder()), ] ) # ColumnTransformer on all included columns. 
# Note columns that are not specified are dropped by default transformers = { "age": ("age", age_band_pipe, age_band), "gender": ("gender", gender_pipe, gender), "comorb": ( "comorb", SimpleImputer(missing_values=9, strategy="constant", fill_value=0), comorb, ), "categorical": ( "categorical", SimpleImputer(missing_values=9, strategy="most_frequent"), boolean + eq5d + categorical, ), "oks": ( "oks", SimpleImputer(missing_values=9, strategy="most_frequent"), oks_questions, ), "oks_score": ( "oks_score", SimpleImputer(missing_values=np.nan, strategy="most_frequent"), oks_score, ), "eq_vas": ("eqvas", SimpleImputer(missing_values=999, strategy="median"), eq_vas), } prep = ColumnTransformer(transformers=[v for _, v in transformers.items()]) X_train = prep.fit_transform(df_train) X_test = prep.fit_transform(df_test) # list of columns for convenience # https://stackoverflow.com/questions/54646709/sklearn-pipeline-get-feature-name-after-onehotencode-in-columntransformer X_columns = pd.Series( age_band + prep.named_transformers_["gender"]["onehot"].get_feature_names().tolist() + comorb + boolean + eq5d + categorical + oks_questions + oks_score + eq_vas ) ``` ### Writing custom transformers (advanced, see Géron chapter 2) Although Scikit-Learn provides many useful transformers, you will need to write your own for tasks such as custom cleanup operations or combining specific attributes. You will want your transformer to work seamlessly with Scikit-Learn functionalities (such as pipelines), and since Scikit-Learn relies on duck typing (not inheritance), all you need to do is create a class and implement three methods: fit() (returning self), transform(), and fit_transform(). When writing transformers for data preparation, you only need to define `transform()`. Basically, `ColumnTransformer` passes only the subset of columns from the original dataframe to the transformer. 
So when writing your own transformer you don't need to do any subsetting, but you can assume that the `transform()` method should be applied to the whole dataframe. ``` # just as an example, not used in Pipeline class ReplaceSentinels(BaseEstimator, TransformerMixin): """Replace sentinel values in dataframe. Attributes: sentinel: sentinel value, default 9 replace_with: value to replace sentinel with, default np.nan """ def __init__(self, sentinel = 9, replace_with=np.nan): self.sentinel = sentinel self.replace_with = replace_with def fit(self, X, y=None): return self def transform(self, X, ): return X.replace(9, self.replace_with) ``` ## Training and assessing linear models ### Simple regression Regression of `t1_eq_vas` ~ `t0_eq_vas`. We don't get our hopes up, since the scatterplot is all over the place: ``` df_train.plot(kind='scatter', x='t0_eq_vas', y='t1_eq_vas', xlim=(0,100), ylim=(0,100), alpha=0.1, figsize=M); from sklearn.linear_model import LinearRegression def fill_median(s): return s.fillna(value=s.median()).to_frame() eq = ['t0_eq_vas', 't1_eq_vas'] eq_prep = ColumnTransformer(transformers= [('eq', SimpleImputer(missing_values=999, strategy='median'), eq), ]) eq_prep.fit(df_train) # note y = t1_eq t0_eq, t1_eq = eq_prep.transform(df_train)[:,0].reshape(-1,1), eq_prep.transform(df_train)[:,1] # prepare t1_eq_test for use in model assessment t1_eq_test = eq_prep.transform(df_test)[:,1] # simple linear regression lin_reg = LinearRegression() lin_reg.fit(t0_eq, t1_eq) lin_reg.intercept_, lin_reg.coef_, lin_reg.score(t0_eq, t1_eq) ``` So this very first, basic model yields an $R^2$ of 0.12 which is not very exciting. 
Let's do a more robust cross validation using the Mean Square Error (MSE) as our metric ```
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score


def display_scores(scores):
    # Print a cross-validation score array together with its mean and spread.
    print(f"Scores: {scores}")
    print(f"Mean: {scores.mean():.4f}")
    print(f"Standard deviation: {scores.std():.4f}")


# 5-fold CV on the simple regression; scikit-learn's scoring convention
# returns *negative* MSE, hence the sign flip before the square root
scores = cross_val_score(lin_reg, t0_eq, t1_eq, scoring='neg_mean_squared_error', cv=5)
lin_rmse_scores = np.sqrt(-scores)
display_scores(lin_rmse_scores)
``` This confirms a simple linear model has little flexibility (and high bias): the scores for the five CV-folds are very similar. Now that we have seen the simplest setup for a univariate lineair regression, let's try to find out which features are the best predictors. ### SelectKBest For regression tasks, you often want to get a first idea which features contain the most information i.e. are the best predictors. There are various techniques to answer this question, such as stepwise selection. Scikit-learn has various [univariate feature selection](https://scikit-learn.org/stable/modules/feature_selection.html) methods for this purpose. We will use [SelectKBest](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectKBest.html#sklearn.feature_selection.SelectKBest).
``` from sklearn.feature_selection import SelectKBest, f_regression k10best = Pipeline( steps=[ ("prep", ColumnTransformer(transformers=transformers.values())), ("kbest", SelectKBest(f_regression, k=10)), # ("lin_reg", LinearRegression()) ] ) X_10best = k10best.fit(df_train, t1_eq).transform(df_train) lin_10best = LinearRegression() scores_10best = cross_val_score(lin_10best, X_10best, t1_eq, scoring='neg_mean_squared_error', cv=5) lin_10best_rmse_scores = np.sqrt(-scores_10best) display_scores(lin_10best_rmse_scores) ``` Using 10 KBest features, the model performs slightly better with RMSE of 16.1 +/- 0.08 ``` # show features in descending order of importance pd.concat( {"score": pd.Series(k10best["kbest"].scores_), "feature": X_columns}, axis=1 ).sort_values("score", ascending=False) ``` ### Regularized linear models: Lasso regression Construct a Lasso regression with cross-validation, following the example from [scikit-learn documentation](https://scikit-learn.org/stable/auto_examples/linear_model/plot_lasso_model_selection.html#sphx-glr-auto-examples-linear-model-plot-lasso-model-selection-py). Recall that for regularized linear regression a cost function is added. In case of Lasso this is $$ J(\Theta) = MSE (\Theta) + \alpha\sum\limits_{i=1}^n \mid{\Theta_{i}}\mid$$ The larger $\alpha$, the larger the penalty and hence more coefficients will be set to zero. 
By default `LassoCV` tries 100 different values for $\alpha$ so let's plot MSE against $\alpha$: ``` from sklearn.linear_model import LassoCV # This is to avoid division by zero while doing np.log10 EPSILON = 1e-4 lasso = LassoCV(cv=5, random_state=42, n_jobs=-1).fit(X_train, t1_eq) plt.figure(figsize=S) plt.semilogx(lasso.alphas_ + EPSILON, np.sqrt(lasso.mse_path_), ":") plt.plot( lasso.alphas_ + EPSILON, np.sqrt(lasso.mse_path_.mean(axis=-1)), "k", label="Average across the folds", linewidth=2, ) plt.axvline( lasso.alpha_ + EPSILON, linestyle="--", color="k", label="alpha: CV estimate" ) plt.legend() plt.xlabel(r"$\alpha$") plt.ylabel("Root mean square error") plt.title("Root mean square error on each fold: coordinate descent") plt.axis("tight"); ``` The Lasso model performs best for lowest $\alpha$ ``` print("Best Lasso model:\n" f" RMSE: {np.sqrt(mean_squared_error(lasso.predict(X_train), t1_eq)):.2f}\n" f" intercept: {lasso.intercept_:.2f}") ``` Inspect the relative feature importance by looking at the (absolute) coefficients: ``` pd.concat({'coef_abs': pd.Series(abs(lasso.coef_)), 'coef': pd.Series(lasso.coef_), 'feature': X_columns}, axis=1).sort_values('coef_abs', ascending=False) # final test print(f"MSE from training with CV: {np.sqrt(mean_squared_error(lasso.predict(X_train), t1_eq)):.2f}\n" f"MSE test: {np.sqrt(mean_squared_error(lasso.predict(X_test), t1_eq_test)):.2f}") ``` ### KNN ``` from sklearn.neighbors import KNeighborsRegressor from collections import defaultdict n_neighbors = [1, 2, 3, 5, 10, 20, 30, 50, 100, 200, 300] knn = defaultdict(dict) # use defaultdict for nested dicts for n in n_neighbors: knn[n]["model"] = KNeighborsRegressor(n_neighbors=n, n_jobs=-1).fit(X_train, t1_eq) knn[n]["cross validation scores"] = cross_val_score( knn[n]["model"], X_train, t1_eq, scoring="neg_mean_squared_error", cv=5 ) # plot 1/K vs. error rate just like in ISLR figure 3.19 (p. 
108) knn_mse = [np.sqrt(np.mean(-knn[n]['cross validation scores'])) for n in n_neighbors] plt.figure(figsize=S) plt.semilogx([1/n for n in n_neighbors], knn_mse, 'o-', markeredgecolor='w', markeredgewidth='2'); # minimum RMSE is 16.2 at 1/K = 10-2, i.e. n=100 knn_mse # final test print(f"{np.sqrt(mean_squared_error(knn[100]['model'].predict(X_test), t1_eq_test)):.4f}") ``` ### RandomForest regression _consider this as a sneak preview for the next lecture, where we will use RandomForest for classification_ ``` from sklearn.ensemble import RandomForestRegressor rf_reg = RandomForestRegressor(random_state=42) rf_reg.fit(X_train, t1_eq) rf_crosscv = cross_val_score(rf_reg, X_train, t1_eq, scoring="neg_mean_squared_error", cv=5) print(f"{np.sqrt(np.mean(-rf_crosscv)):.4f}") # final test print(f"{np.sqrt(mean_squared_error(rf_reg.predict(X_test), t1_eq_test)):.4f}") ``` That looks promising: out-of-the-box RandomForest performs better than KNN (but slightly worse than Lasso). Like Lasso, we can inspect feature importance: ``` pd.concat( {"feature": X_columns, "importance": pd.Series(rf_reg.feature_importances_)}, axis=1 ).sort_values("importance", ascending=False) ``` ## Discussion & summary ### For discussion - Which model would you choose to predict `t1_eq_vas_` and why? - Reflect on the differences in feature importance between SelectKBest, Lasso coefficients and RandomForest - See, for example, [this discussion](https://datascience.stackexchange.com/questions/12148/feature-importance-via-random-forest-and-linear-regression-are-different) ### Summary We have considered three basic machine learning algorithms: - (Simple) linear regression - LASSO - KNN A regression is often used as a baseline model, whereas LASSO and KNN are expected to have better performance. An obvious starting strategy, would be to: - Run a regression to get a baseline performance - Run LASSO and compare performance - With the selected variables in your LASSO, run KNN and compare performance
github_jupyter
# How do I create my own dataset? So Caffe2 uses a binary DB format to store the data that we would like to train models on. A Caffe2 DB is a glorified name of a key-value storage where the keys are usually randomized so that the batches are approximately i.i.d. The values are the real stuff here: they contain the serialized strings of the specific data formats that you would like your training algorithm to ingest. So, the stored DB would look (semantically) like this: key1 value1 key2 value2 key3 value3 ... To a DB, it treats the keys and values as strings, but you probably want structured contents. One way to do this is to use a TensorProtos protocol buffer: it essentially wraps Tensors, aka multi-dimensional arrays, together with the tensor data type and shape information. Then, one can use the TensorProtosDBInput operator to load the data into an SGD training fashion. Here, we will show you one example of how to create your own dataset. To this end, we will use the UCI Iris dataset - which was a very popular classical dataset for classifying Iris flowers. It contains 4 real-valued features representing the dimensions of the flower, and classifies things into 3 types of Iris flowers. The dataset can be downloaded [here](https://archive.ics.uci.edu/ml/datasets/Iris). ``` # First let's import a few things needed. %matplotlib inline import urllib2 # for downloading the dataset from the web. import numpy as np from matplotlib import pyplot from StringIO import StringIO from caffe2.python import core, utils, workspace from caffe2.proto import caffe2_pb2 f = urllib2.urlopen('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data') raw_data = f.read() print('Raw data looks like this:') print(raw_data[:100] + '...') # load the features to a feature matrix. 
features = np.loadtxt(StringIO(raw_data), dtype=np.float32, delimiter=',', usecols=(0, 1, 2, 3)) # load the labels to a feature matrix label_converter = lambda s : {'Iris-setosa':0, 'Iris-versicolor':1, 'Iris-virginica':2}[s] labels = np.loadtxt(StringIO(raw_data), dtype=np.int, delimiter=',', usecols=(4,), converters={4: label_converter}) ``` Before we do training, one thing that is often beneficial is to separate the dataset into training and testing. In this case, let's randomly shuffle the data, use the first 100 data points to do training, and the remaining 50 to do testing. For more sophisticated approaches, you can use e.g. cross validation to separate your dataset into multiple training and testing splits. Read more about cross validation [here](http://scikit-learn.org/stable/modules/cross_validation.html). ``` random_index = np.random.permutation(150) features = features[random_index] labels = labels[random_index] train_features = features[:100] train_labels = labels[:100] test_features = features[100:] test_labels = labels[100:] # Let's plot the first two features together with the label. # Remember, while we are plotting the testing feature distribution # here too, you might not be supposed to do so in real research, # because one should not peek into the testing data. legend = ['rx', 'b+', 'go'] pyplot.title("Training data distribution, feature 0 and 1") for i in range(3): pyplot.plot(train_features[train_labels==i, 0], train_features[train_labels==i, 1], legend[i]) pyplot.figure() pyplot.title("Testing data distribution, feature 0 and 1") for i in range(3): pyplot.plot(test_features[test_labels==i, 0], test_features[test_labels==i, 1], legend[i]) ``` Now, as promised, let's put things into a Caffe2 DB. In this DB, what would happen is that we will use "train_xxx" as the key, and use a TensorProtos object to store two tensors for each data point: one as the feature and one as the label. We will use Caffe2 python's DB interface to do so. 
``` # First, let's see how one can construct a TensorProtos protocol buffer from numpy arrays. feature_and_label = caffe2_pb2.TensorProtos() feature_and_label.protos.extend([ utils.NumpyArrayToCaffe2Tensor(features[0]), utils.NumpyArrayToCaffe2Tensor(labels[0])]) print('This is what the tensor proto looks like for a feature and its label:') print(str(feature_and_label)) print('This is the compact string that gets written into the db:') print(feature_and_label.SerializeToString()) # Now, actually write the db. def write_db(db_type, db_name, features, labels): db = core.C.create_db(db_type, db_name, core.C.Mode.write) transaction = db.new_transaction() for i in range(features.shape[0]): feature_and_label = caffe2_pb2.TensorProtos() feature_and_label.protos.extend([ utils.NumpyArrayToCaffe2Tensor(features[i]), utils.NumpyArrayToCaffe2Tensor(labels[i])]) transaction.put( 'train_%03d'.format(i), feature_and_label.SerializeToString()) # Close the transaction, and then close the db. del transaction del db write_db("minidb", "iris_train.minidb", train_features, train_labels) write_db("minidb", "iris_test.minidb", test_features, test_labels) ``` Now, let's create a very simple network that only consists of one single TensorProtosDBInput operator, to showcase how we load data from the DB that we created. For training, you might want to do something more complex: creating a network, train it, get the model, and run the prediction service. To this end you can look at the MNIST tutorial for details. ``` net_proto = core.Net("example_reader") dbreader = net_proto.CreateDB([], "dbreader", db="iris_train.minidb", db_type="minidb") net_proto.TensorProtosDBInput([dbreader], ["X", "Y"], batch_size=16) print("The net looks like this:") print(str(net_proto.Proto())) workspace.CreateNet(net_proto) # Let's run it to get batches of features. 
workspace.RunNet(net_proto.Proto().name) print("The first batch of feature is:") print(workspace.FetchBlob("X")) print("The first batch of label is:") print(workspace.FetchBlob("Y")) # Let's run again. workspace.RunNet(net_proto.Proto().name) print("The second batch of feature is:") print(workspace.FetchBlob("X")) print("The second batch of label is:") print(workspace.FetchBlob("Y")) ```
github_jupyter
## Decision Tree ``` from sklearn.tree import DecisionTreeClassifier from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') # DecisionTree Classifier 생성 dt_clf = DecisionTreeClassifier(random_state=156) # 불꽃 데이터를 로딩하고, 학습과 테스트 데이터 세트로 분리 iris_data = load_iris() X_train, X_test, y_train, y_test = train_test_split(iris_data.data, iris_data.target, test_size=0.2, random_state=11) # DecisionTreeClassifer 학습. dt_clf.fit(X_train, y_train) from sklearn.tree import export_graphviz # export_graphviz()의 호출 결과로 out_file로 지정된 tree.dot 파일을 생성함. export_graphviz(dt_clf, out_file="tree.dot", class_names=iris_data.target_names, \ feature_names = iris_data.feature_names, impurity=True, filled=True) import graphviz # 위에서 생성된 tree.dot 파일을 Graphviz가 읽어서 주피터 노트북상에서 시각화 with open("tree.dot") as f: dot_graph = f.read() graphviz.Source(dot_graph) import seaborn as sns import numpy as np %matplotlib inline # feature importance 추출 print("Feature importances:\n{0}".format(np.round(dt_clf.feature_importances_,3))) # feature importance 매핑 for name, value in zip(iris_data.feature_names, dt_clf.feature_importances_): print('{0} : {1:.3f}'.format(name, value)) # feature importance를 column 별로 시각화하기 sns.barplot(x=dt_clf.feature_importances_, y=iris_data.feature_names) from sklearn.datasets import make_classification import matplotlib.pyplot as plt %matplotlib inline plt.title("3 Class values with 2 Features Sample data creation") # 2차원 시각화를 위해서 피처는 2개, 클래스는 3가지 유형의 분류 샘플 데이터 생성. X_features, y_labels = make_classification(n_features=2, n_redundant=0, n_informative=2, n_classes=3, n_clusters_per_class=1, random_state=0) # 그래프 형태로 2개의 피처로 2차원 좌표 시각화, 각 클래스 값은 다른 색깔로 표시 plt.scatter(X_features[:,0], X_features[:, 1], marker='o', c=y_labels, s=25, edgecolor='k') from sklearn.tree import DecisionTreeClassifier # 특정한 트리 생성 제약 없는 결정 트리의 학습과 결정 경계 시각화. 
dt_clf = DecisionTreeClassifier().fit(X_features, y_labels) visualize_boundary(dt_clf, X_features, y_labels) ``` ### 결정 트리 실습 - 사용자 행동 인식 데이터 세트 ``` import pandas as pd import matplotlib.pyplot as plt %matplotlib inline # feature.txt 파일에는 피처 이름 index와 피처명이 공백으로 분리되어 있음. 이를 DataFrame으로 롣,. feature_name_df = pd.read_csv('./data/human_activity/features.txt', sep='\s+', header=None, names=['column_index','column_name']) # 피처명 index를 제거하고, 피처명만 리스트 객체로 생성한 뒤 샘플로 10개만 추출 feature_name = feature_name_df.iloc[:,1].values.tolist() print('전체 피처명에서 10개만 추출:', feature_name[:10]) feature_dup_df = feature_name_df.groupby('column_name').count() print(feature_dup_df[feature_dup_df['column_index'] > 1].count()) feature_dup_df[feature_dup_df['column_index'] > 1].head() def get_new_feature_name_df(old_feature_name_df): feature_dup_df = pd.DataFrame(data=old_feature_name_df.groupby('column_name').cumcount(), columns=['dup_cnt']) feature_dup_df = feature_dup_df.reset_index() new_feature_name_df = pd.merge(old_feature_name_df.reset_index(), feature_dup_df, how='outer') new_feature_name_df['column_name'] = new_feature_name_df[['column_name','dup_cnt']].apply(lambda x : x[0]+'_'+str(x[1]) if x[1] > 0 else x[0], axis=1) new_feature_name_df = new_feature_name_df.drop(['index'], axis=1) return new_feature_name_df def get_human_dataset(): # 각 데이터 파일은 공백으로 분리되어 있으므로 read_csv에서 공백 문자를 sep으로 할당. feature_name_df = pd.read_csv('./data/human_activity/features.txt', sep='\s+', header=None, names=['column_index', 'column_name']) # 중복된 피처명을 수정하는 get_new_feature_name_df()를 이용, 신규 피처명 DataFrame 생성. 
new_feature_name_df = get_new_feature_name_df(feature_name_df) # DataFrame에 피처명을 칼럼으로 부여하기 위해 리스트 객체로 다시 반환 feature_name = new_feature_name_df.iloc[:, 1].values.tolist() # 학습 피처 데이터세트와 테스트 피처 데이터를 DataFrame으로 로딩, 칼럼명은 feature_name 적용 X_train = pd.read_csv('./data/human_activity/train/X_train.txt', sep='\s+', names=feature_name) X_test = pd.read_csv('./data/human_activity/test/X_test.txt', sep='\s+', names=feature_name) # 학습 레이블과 테스트 레이블 데이터를 DataFrame으로 로딩하고 칼럼명은 action으로 부여 y_train = pd.read_csv('./data/human_activity/train/y_train.txt', sep='\s+', header=None, names=['action']) y_test = pd.read_csv('./data/human_activity/test/y_test.txt', sep='\s+', header=None, names=['action']) # 로드된 학습/테스트용 DataFrame을 모두 반환 return X_train, X_test, y_train, y_test X_train, X_test, y_train, y_test = get_human_dataset() print('## 학습 피처 데이터셋 info()') print(X_train.info()) print(y_train['action'].value_counts()) from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score # 예제 반복 시마다 동일한 예측 결과 도출을 위해 random_state 설정 dt_clf = DecisionTreeClassifier(random_state = 156) dt_clf.fit(X_train, y_train) pred = dt_clf.predict(X_test) accuracy = accuracy_score(y_test, pred) print('결정 트리 예측 정확도: {0:.4f}'.format(accuracy)) # DecisionTreeClassifier의 하이퍼 파라미터 추출 print('DecisionTreeClassifier 기본 하이퍼 파라미터:\n', dt_clf.get_params()) from sklearn.model_selection import GridSearchCV params = { 'max_depth' : [6, 8, 10, 12, 16, 20, 24] } grid_cv = GridSearchCV(dt_clf, param_grid = params, scoring='accuracy', cv=5, verbose=1) grid_cv.fit(X_train, y_train) print('GridSearchCV 최고 평균 정확도 수치 : {0:.4f}'.format(grid_cv.best_score_)) print('GridSearchCV 최적 하이퍼 파라미터:', grid_cv.best_params_) # GridSearchCV 객체의 cv_results_ 속성을 DataFrame으로 생성. 
cv_results_df = pd.DataFrame(grid_cv.cv_results_) # max_depth 파라미터 값과 그때의 테스트 세트, 학습 데이터 세트의 정확도 수치 추출 cv_results_df[['param_max_depth', 'mean_test_score']] max_depths=[6, 8, 10, 12, 16, 20, 24] # max_depth 값을 변화시키면서 그때마다 학습과 테스트 세트에서의 예측 성능 측정 for depth in max_depths: dt_clf = DecisionTreeClassifier(max_depth=depth, random_state=156) dt_clf.fit(X_train, y_train) pred = dt_clf.predict(X_test) accuracy = accuracy_score(y_test, pred) print('max_depth={0} 정확도:{1:.4f}'.format(depth,accuracy)) params = { 'max_depth' : [8, 12, 16, 20], 'min_samples_split' : [16, 24], } grid_cv = GridSearchCV(dt_clf, param_grid=params, scoring='accuracy', cv=5, verbose=1) grid_cv.fit(X_train, y_train) print('GridSearchCV 최고 평균 정확도 수치:{0:.4f}'.format(grid_cv.best_score_)) print('GridSearchCV 최적 하이퍼 파라미터:', grid_cv.best_parmas_) best_df_clf = grid_cv.best_estimator_ pred1 = best_df_clf.predict(X_test) accuracy = accuracy_score(y_test, pred1) print('결정 트리 예측 정확도 : {0:.4f}'.format(accuracy)) import seaborn as sns ftr_importances_values = best_df_clf.feature_importances_ # Top 중요도로 정렬을 쉽게, 사본(Seaborn)의 막대그래프로 쉽게 표현하기 위해 Series 변환 ftr_importances = pd.Series(ftr_importances_values, index=X_train.columns) # 중요도값 순으로 Series를 정렬 ftr_top20 = ftr_importances.sort_values(ascending=False)[:20] plt.figure(figsize=(8, 6)) plt.title('Feature importances Top 20') sns.barplot(x=ftr_top20, y=ftr_top20.index) plt.show() ``` ## Ensemble Learning ``` import pandas as pd from sklearn.ensemble import VotingClassifier from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_breast_cancer from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score cancer = load_breast_cancer() data_df = pd.DataFrame(cancer.data, columns=cancer.feature_names) data_df.head(3) # 개별 모델은 로지스틱 회귀와 KNN임. 
lr_clf = LogisticRegression() knn_clf = KNeighborsClassifier(n_neighbors=8) # 개별 모델을 소프트 보팅 기반의 앙상블 모델로 구현한 분류기 vo_clf = VotingClassifier(estimators=[('LR', lr_clf),('KNN', knn_clf)], voting='soft') X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.2, random_state= 156) # VotingClassifier 학습/예측/평가. vo_clf.fit(X_train, y_train) pred = vo_clf.predict(X_test) print('Voting 분류기 정확도:{0:.4f}'.format(accuracy_score(y_test, pred))) # 개별 모델의 학습/예측/평가 classifiers= [lr_clf, knn_clf] for classifier in classifiers: classifier.fit(X_train, y_train) pred = classifier.predict(X_test) class_name = classifier.__class__.__name__ print('{0} 정확도 : {1:.4f}'.format(class_name, accuracy_score(y_test, pred))) ``` ## Random Forest ``` from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score import pandas as pd import warnings warnings.filterwarnings('ignore') # 결정 트리에서 사용한 get_human_dataset()를 이용해 학습/테스트용 DataFrame 반환 X_train, X_test, y_train, y_test = get_human_dataset() # 랜덤 포레스트 학습 및 별도의 테스트 세트로 예측 성능 평가 rf_clf = RandomForestClassifier(random_state=0) rf_clf.fit(X_train, y_train) pred = rf_clf.predict(X_test) accuracy = accuracy_score(y_test, pred) print('랜덤 포레스트 정확도: {0:.4f}'.format(accuracy)) from sklearn.model_selection import GridSearchCV params = { 'n_estimators':[100], 'max_depth' : [6, 8, 10, 12], 'min_samples_leaf' : [8, 12, 18], 'min_samples_split' : [8, 16, 20] } # RandomForestClassifier 객체 생성 후 GridSearchCV 수행 rf_clf = RandomForestClassifier(random_state=0, n_jobs=-1) grid_cv = GridSearchCV(rf_clf, param_grid = params, cv=2, n_jobs=-1) grid_cv.fit(X_train, y_train) print('최적 하이퍼 파라미터:\n', grid_cv.best_params_) rf_clf1 = RandomForestClassifier(n_estimators=300, max_depth=10, min_samples_leaf=8, \ min_samples_split=8, random_state=0) rf_clf1.fit(X_train, y_train) pred = rf_clf1.predict(X_test) print('예측 정확도 : {0:.4f}'.format(accuracy_score(y_test,pred))) ``` ### 피처 중요도 ``` import matplotlib.pyplot as 
plt import seaborn as sns %matplotlib inline ftr_importances_values = rf_clf1.feature_importances_ ftr_importances = pd.Series(ftr_importances_values, index=X_train.columns) ftr_top20 = ftr_importances.sort_values(ascending=False)[:20] plt.figure(figsize=(8,6)) plt.title('Feature importances Top 20') sns.barplot(x=ftr_top20, y = ftr_top20.index) ``` ## Gradient Boosting Machine ``` from sklearn.ensemble import GradientBoostingClassifier import time import warnings warnings.filterwarnings('ignore') X_train, X_test, y_train, y_test = get_human_dataset() # GBM 수행 시간 측정을 위함. 시작 시간 설정. start_time = time.time() gb_clf = GradientBoostingClassifier(random_state=0) gb_clf.fit(X_train, y_train) gb_pred = gb_clf.predict(X_test) gb_accuracy = accuracy_score(y_test, gb_pred) print('GBM 정확도 : {0:.4f}'.format(gb_accuracy)) print('GBM 수행 시간 : {0:.1f} 초'.format(time.time() - start_time)) print('GBM 수행 시간 : {0:.1f} 분'.format(time.time() - start_time)/60) ``` #### GridSearchCV 사용한 하이퍼 파라미터 최적화 ``` from sklearn.model_selection import GridSearchCV parmas = { 'n_estimators' : [100, 500], 'learing+rate' : [0.05, 0.01] } grid_cv = GridSearchCV(gb_clf, param_grid=params, cv=2, verbose=1) grid_cv.fit(X_train, y_train) print('최적 하이퍼 파라미터:\n', grid_cv.best_params_) print('최고 예측 정확도:{0:.4f}'.format(grid_cv.best_score_)) ```
github_jupyter
# Significance Tests with PyTerrier ``` import pyterrier as pt import pandas as pd RUN_DIR='/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/retrievalExperiments/runs-ecir22/' RUN_DIR_MARCO_V2='/mnt/ceph/storage/data-in-progress/data-teaching/theses/wstud-thesis-probst/retrievalExperiments/runs-marco-v2-ecir22/' QREL_DIR = '/mnt/ceph/storage/data-tmp/2021/kibi9872/thesis-probst/Data/navigational-topics-and-qrels-ms-marco-' if not pt.started(): pt.init() def pt_qrels(ret): from trectools import TrecQrel ret = TrecQrel(QREL_DIR + ret).qrels_data ret = ret.copy() del ret['q0'] ret = ret.rename(columns={'query': 'qid','docid': 'docno', 'rel': 'label'}) ret['qid'] = ret['qid'].astype(str) ret['label'] = ret['label'].astype(int) return ret def pt_topics(ret): from trectools import TrecQrel qids = TrecQrel(QREL_DIR + ret).qrels_data['query'].unique() ret = [] for qid in qids: ret += [{'qid': str(qid), 'query': 'Unused, only for significance tests for qid: ' + str(qid)}] return pd.DataFrame(ret) def trec_run(run_name): from pyterrier.transformer import get_transformer return get_transformer(pt.io.read_results(run_name)) QRELS = { 'v1-popular': pt_qrels('v1/qrels.msmarco-entrypage-popular.txt'), 'v1-random': pt_qrels('v1/qrels.msmarco-entrypage-random.txt'), 'v2-popular': pt_qrels('v2/qrels.msmarco-v2-entrypage-popular.txt'), 'v2-random': pt_qrels('v2/qrels.msmarco-v2-entrypage-random.txt'), } TOPICS = { 'v1-popular': pt_topics('v1/qrels.msmarco-entrypage-popular.txt'), 'v1-random': pt_topics('v1/qrels.msmarco-entrypage-random.txt'), 'v2-popular': pt_topics('v2/qrels.msmarco-v2-entrypage-popular.txt'), 'v2-random': pt_topics('v2/qrels.msmarco-v2-entrypage-random.txt'), } APPROACH_TO_MARCO_V1_RUN_FILE={ 'BM25@2016-07': 'run.cc-16-07-anchortext.bm25-default.txt', 'BM25@2017-04': 'run.cc-17-04-anchortext.bm25-default.txt', 'BM25@2018-13': 'run.cc-18-13-anchortext.bm25-default.txt', 'BM25@2019-47': 'run.cc-19-47-anchortext.bm25-default.txt', 
'BM25@2020-05': 'run.cc-20-05-anchortext.bm25-default.txt', 'BM25@2021-04': 'run.cc-21-04-anchortext.bm25-default.txt', 'BM25@16--21': 'run.cc-combined-anchortext.bm25-default.txt', 'BM25@Content': 'run.ms-marco-content.bm25-default.txt', 'BM25@Title': 'run.msmarco-document-v1-title-only.pos+docvectors+raw.bm25-default.txt', 'BM25@Orcas': 'run.orcas.bm25-default.txt', 'DeepCT@Anchor': 'run.ms-marco-deepct-v1-anserini-docs-cc-2019-47-sampled-test-overlap-removed-389979.bm25-default.txt', 'DeepCT@Orcas': 'run.ms-marco-deepct-v1-anserini-docs-orcas-sampled-test-overlap-removed-390009.bm25-default.txt', 'DeepCT@Train':'run.ms-marco-deepct-v1-anserini-docs-ms-marco-training-set-test-overlap-removed-389973.bm25-default.txt', 'MonoT5': 'run.ms-marco-content.bm25-mono-t5-maxp.txt', 'MonoBERT': 'run.ms-marco-content.bm25-mono-bert-maxp.txt', 'LambdaMART@CTA':'run.ms-marco.lambda-mart-cta-trees-1000.txt', 'LambdaMART@CTOA':'run.ms-marco.lambda-mart-ctoa-trees-1000.txt', 'LambdaMART@CTO':'run.ms-marco.lambda-mart-cto-trees-1000.txt', 'LambdaMART@CT':'run.ms-marco.lambda-mart-ct-trees-1000.txt', } APPROACH_TO_MARCO_V2_RUN_FILE={ 'BM25@Content': 'run.msmarco-doc-v2.bm25-default.txt', 'BM25@Orcas': 'run.orcas-ms-marco-v2.bm25-default.txt', 'BM25@2016-07': 'run.cc-16-07-anchortext.bm25-default.txt', 'BM25@2017-04': 'run.cc-17-04-anchortext.bm25-default.txt', 'BM25@2018-13': 'run.cc-18-13-anchortext.bm25-default.txt', 'BM25@2019-47': 'run.cc-19-47-anchortext-v2.bm25-default.txt', 'BM25@2020-05': 'run.cc-20-05-anchortext.bm25-default.txt', 'BM25@2021-04': 'run.cc-21-04-anchortext.bm25-default.txt', 'BM25@16--21': 'run.cc-union-16-to-21-anchortext-1000.bm25-default.txt', 'DeepCT@Anchor': 'run.ms-marco-deepct-v2-anserini-docs-cc-2019-47-sampled-test-overlap-removed-389979.bm25-default.txt', 'DeepCT@Orcas': 'run.ms-marco-deepct-v2-anserini-docs-orcas-sampled-test-overlap-removed-390009.bm25-default.txt', 
'DeepCT@Train':'run.ms-marco-deepct-v2-anserini-docs-ms-marco-training-set-test-overlap-removed-389973.bm25-default.txt', 'MonoT5': 'run.ms-marco-content.bm25-mono-t5-maxp.txt', 'MonoBERT': 'run.ms-marco-content.bm25-mono-bert-maxp.txt', 'LambdaMART@CTA':'run.ms-marco.lambda-mart-cta-trees-1000.txt', 'LambdaMART@CTOA':'run.ms-marco.lambda-mart-ctoa-trees-1000.txt', 'LambdaMART@CTO':'run.ms-marco.lambda-mart-cto-trees-1000.txt', 'LambdaMART@CT':'run.ms-marco.lambda-mart-ct-trees-1000.txt', } ``` ### Comparison of MRR for Anchor Text approaches to DeepCT ``` runs = ['DeepCT@Anchor', 'BM25@2016-07', 'BM25@2017-04', 'BM25@2018-13', 'BM25@2019-47', 'BM25@2020-05', 'BM25@2021-04', 'BM25@16--21'] runs = [(i, trec_run(RUN_DIR + '/entrypage-random/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-random'], QRELS['v1-random'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` Result: From xy above we see.... ### Comparison of MRR for BM25 on Content with DeepCT trained on anchor text, DeepCT, MonoT5, MonoBERT, and LambdaMART ``` runs = ['BM25@Content', 'DeepCT@Orcas', 'DeepCT@Anchor', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'LambdaMART@CTA', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-random/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-random'], QRELS['v1-random'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` Result: DeepCT trained on anchor text, DeepCT trained on the ORCAS query log, MonoT5, MonoBERT, and three of the LambdaMART models improve statistically significant upon the MRR of 0.21 achieved by the BM25 retrieval on the content. 
### Comparison of BM25 on Orcas with other Content-Only Models ``` runs = ['BM25@Orcas', 'BM25@Content', 'DeepCT@Orcas', 'DeepCT@Anchor', 'DeepCT@Train', 'MonoT5', 'MonoBERT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-random/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-random'], QRELS['v1-random'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` Result: BM25 on ORCAS improves statistically significantly upon all content-only models. ### Comparison of all Anchor-Text Models with all other approaches ``` runs = ['BM25@2016-07', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2017-04', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2018-13', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = 
['BM25@2019-47', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2020-05', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2021-04', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@16--21', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` Result: For queries pointing to popular entry pages, all BM25 models retrieving on anchor text outperform all other retrieval models statistically significant 
### Compare BM25 on ORCAS for popular topics with all other non-anchor-approaches ``` runs = ['BM25@Orcas', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recip_rank'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` # Evaluations Recall@3 and Recall@10 ### Comparison of Recall for Anchor Text approaches to DeepCT ``` runs = ['DeepCT@Anchor', 'BM25@2016-07', 'BM25@2017-04', 'BM25@2018-13', 'BM25@2019-47', 'BM25@2020-05', 'BM25@2021-04', 'BM25@16--21'] runs = [(i, trec_run(RUN_DIR + '/entrypage-random/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-random'], QRELS['v1-random'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` ### Comparison of Recall for BM25 on Content with DeepCT trained on anchor text, DeepCT, MonoT5, MonoBERT, and LambdaMART ``` runs = ['BM25@Content', 'DeepCT@Orcas', 'DeepCT@Anchor', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'LambdaMART@CTA', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-random/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-random'], QRELS['v1-random'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` ### Comparison of BM25 on Orcas with other Content-Only Models ``` runs = ['BM25@Orcas', 'BM25@Content', 'DeepCT@Orcas', 'DeepCT@Anchor', 'DeepCT@Train', 'MonoT5', 'MonoBERT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-random/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-random'], QRELS['v1-random'], ['recall.3', 
'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` Result: BM25 on ORCAS does not statisticall improve upon DeepCT@Anchor (for Recall@3) and DeepCT@Anchor, DeepCT@Orcas, and MonoT5 (Recall@10) ### Comparison of all Anchor-Text Models with all other approaches ``` runs = ['BM25@2016-07', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2017-04', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2018-13', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2019-47', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) 
for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2020-05', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@2021-04', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) runs = ['BM25@16--21', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'BM25@Orcas', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ``` Result: We see exactly the same relevance results as for MRR. 
### Compare BM25 on ORCAS for popular topics with all other non-anchor-approaches ``` runs = ['BM25@Orcas', 'BM25@Content', 'DeepCT@Anchor', 'DeepCT@Orcas', 'DeepCT@Train', 'MonoT5', 'MonoBERT', 'LambdaMART@CTOA', 'LambdaMART@CTO', 'LambdaMART@CTA', 'LambdaMART@CT'] runs = [(i, trec_run(RUN_DIR + '/entrypage-popular/' + APPROACH_TO_MARCO_V1_RUN_FILE[i])) for i in runs] pt.Experiment( [i for _, i in runs], TOPICS['v1-popular'], QRELS['v1-popular'], ['recall.3', 'recall.10'], [i for i, _ in runs], baseline = 0, test='t', correction='b' ) ```
github_jupyter
# End-to-End NLP: News Headline Classifier (Local Version) _**Train a Keras-based model to classify news headlines between four domains**_ This notebook works well with the `Python 3 (TensorFlow 1.15 Python 3.7 CPU Optimized)` kernel on SageMaker Studio, or `conda_tensorflow_p36` on classic SageMaker Notebook Instances. --- In this version, the model is trained and evaluated here on the notebook instance itself. We'll show in the follow-on notebook how to take advantage of Amazon SageMaker to separate these infrastructure needs. Note that you can safely ignore the WARNING about the pip version. ``` # First install some libraries which might not be available across all kernels (e.g. in Studio): !pip install ipywidgets ``` ### Set Up Execution Role and Session Let's start by specifying: - The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting. If you don't specify a bucket, SageMaker SDK will create a default bucket following a pre-defined naming convention in the same region. - The IAM role ARN used to give SageMaker access to your data. It can be fetched using the **get_execution_role** method from sagemaker python SDK. ``` %%time %load_ext autoreload %autoreload 2 import sagemaker from sagemaker import get_execution_role role = get_execution_role() print(role) sess = sagemaker.Session() ``` ### Download News Aggregator Dataset We will download **FastAi AG News** dataset from the https://registry.opendata.aws/fast-ai-nlp/ public repository. This dataset contains a table of news headlines and their corresponding classes. ``` %%time import util.preprocessing util.preprocessing.download_dataset() ``` ### Let's visualize the dataset We will load the ag_news_csv/train.csv file to a Pandas dataframe for our data processing work. 
``` import os import re import numpy as np import pandas as pd column_names = ["CATEGORY", "TITLE", "CONTENT"] # we use the train.csv only df = pd.read_csv("data/ag_news_csv/train.csv", names=column_names, header=None, delimiter=",") # shuffle the DataFrame rows df = df.sample(frac = 1) # make the category classes more readable mapping = {1: 'World', 2: 'Sports', 3: 'Business', 4: 'Sci/Tech'} df = df.replace({'CATEGORY': mapping}) df.head() ``` For this exercise we'll **only use**: - The **title** (Headline) of the news story, as our input - The **category**, as our target variable ``` df["CATEGORY"].value_counts() ``` The dataset has **four article categories** with equal weighting: - Business - Sci/Tech - Sports - World ## Natural Language Pre-Processing We'll do some basic processing of the text data to convert it into numerical form that the algorithm will be able to consume to create a model. We will do typical pre processing for NLP workloads such as: dummy encoding the labels, tokenizing the documents and set fixed sequence lengths for input feature dimension, padding documents to have fixed length input vectors. ### Dummy Encode the Labels ``` encoded_y, labels = util.preprocessing.dummy_encode_labels(df, "CATEGORY") print(labels) df["CATEGORY"][1] encoded_y[0] ``` ### Tokenize and Set Fixed Sequence Lengths We want to describe our inputs at the more meaningful word level (rather than individual characters), and ensure a fixed length of the input feature dimension. ``` padded_docs, tokenizer = util.preprocessing.tokenize_pad_docs(df, "TITLE") df["TITLE"][1] padded_docs[0] ``` ### Import Word Embeddings To represent our words in numeric form, we'll use pre-trained vector representations for each word in the vocabulary: In this case we'll be using pre-built GloVe word embeddings. 
You could also explore training custom, domain-specific word embeddings using SageMaker's built-in [BlazingText algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext.html). See the official [blazingtext_word2vec_text8 sample](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/introduction_to_amazon_algorithms/blazingtext_word2vec_text8) for an example notebook showing how. ``` %%time embedding_matrix = util.preprocessing.get_word_embeddings(tokenizer, "data/embeddings") np.save( file="./data/embeddings/docs-embedding-matrix", arr=embedding_matrix, allow_pickle=False, ) vocab_size=embedding_matrix.shape[0] print(embedding_matrix.shape) ``` ### Split Train and Test Sets Finally we need to divide our data into model training and evaluation sets: ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( padded_docs, encoded_y, test_size=0.2, random_state=42 ) # Do you always remember to save your datasets for traceability when experimenting locally? 
;-) os.makedirs("./data/train", exist_ok=True) np.save("./data/train/train_X.npy", X_train) np.save("./data/train/train_Y.npy", y_train) os.makedirs("./data/test", exist_ok=True) np.save("./data/test/test_X.npy", X_test) np.save("./data/test/test_Y.npy", y_test) ``` ## Define the Model ``` import tensorflow as tf from tensorflow.keras.layers import Conv1D, Dense, Dropout, Embedding, Flatten, MaxPooling1D from tensorflow.keras.models import Sequential seed = 42 np.random.seed(seed) num_classes=len(labels) model = Sequential() model.add(Embedding( vocab_size, 100, weights=[embedding_matrix], input_length=40, trainable=False, name="embed" )) model.add(Conv1D(filters=128, kernel_size=3, activation="relu", name="conv_1")) model.add(MaxPooling1D(pool_size=5, name="maxpool_1")) model.add(Flatten(name="flat_1")) model.add(Dropout(0.3, name="dropout_1")) model.add(Dense(128, activation="relu", name="dense_1")) model.add(Dense(num_classes, activation="softmax", name="out_1")) # Compile the model optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001) model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=["acc"]) model.summary() ``` ## Fit (Train) and Evaluate the Model ``` %%time # fit the model here in the notebook: print("Training model") model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=1) print("Evaluating model") # TODO: Better differentiate train vs val loss in logs scores = model.evaluate(X_test, y_test, verbose=2) print( "Validation results: " + "; ".join(map( lambda i: f"{model.metrics_names[i]}={scores[i]:.5f}", range(len(model.metrics_names)) )) ) ``` ## Use the Model (Locally) Let's evaluate our model with some example headlines... If you struggle with the widget, you can always simply call the `classify()` function from Python. You can be creative with your headlines! 
``` from IPython import display import ipywidgets as widgets from tensorflow.keras.preprocessing.sequence import pad_sequences def classify(text): """Classify a headline and print the results""" encoded_example = tokenizer.texts_to_sequences([text]) # Pad documents to a max length of 40 words max_length = 40 padded_example = pad_sequences(encoded_example, maxlen=max_length, padding="post") result = model.predict(padded_example) print(result) ix = np.argmax(result) print(f"Predicted class: '{labels[ix]}' with confidence {result[0][ix]:.2%}") interaction = widgets.interact_manual( classify, text=widgets.Text( value="The markets were bullish after news of the merger", placeholder="Type a news headline...", description="Headline:", layout=widgets.Layout(width="99%"), ) ) interaction.widget.children[1].description = "Classify!" ``` ## Review In this notebook we pre-processed publicly downloadable data and trained a neural news headline classifier model: As a data scientist might normally do when working on a local machine. ...But can we use the cloud more effectively to allocate high-performance resources; and easily deploy our trained models for use by other applications? Head on over to the next notebook, [Headline Classifier SageMaker.ipynb](Headline%20Classifier%20SageMaker.ipynb), where we'll show how the same model can be trained and then deployed on specific target infrastructure with Amazon SageMaker.
github_jupyter
# 5 minutes intro to IPython for ROOT users In this notebook we show how to use inside IPython __ROOT__ (C++ library, de-facto standard in High Energy Physics). This notebook is aimed to help __ROOT__ users. Working using ROOT-way loops is very slow in python and in most cases useless. You're proposed to use `root_numpy` &mdash; a very convenient python library to operate with ROOT (`root_numpy` is included in REP docker image, but it is installed quite easily). ### Allowing inline plots ``` %matplotlib inline ``` ## Creating ROOT file using root_numpy There are two libraries to work with ROOT files * rootpy http://www.rootpy.org - direct wrapper to ROOT methods. * root_numpy http://rootpy.github.io/root_numpy/ - new-style, efficient and simple library to deal with ROOT files from python Let's show how to use the second library. ``` import numpy import root_numpy # generating random data data = numpy.random.normal(size=[10000, 2]) # adding names of columns data = data.view([('first', float), ('second', float)]) # saving to file root_numpy.array2root(data, filename='./toy_datasets/random.root', treename='tree', mode='recreate') !ls ./toy_datasets ``` ## Add column to the ROOT file using root_numpy ``` from rootpy.io import root_open with root_open('./toy_datasets/random.root', mode='a') as myfile: new_column = numpy.array(numpy.ones([10000, 1]) , dtype=[('new', 'f8')]) root_numpy.array2tree(new_column, tree=myfile.tree) myfile.write() root_numpy.root2array('./toy_datasets/random.root', treename='tree') ``` # Plot function using ROOT pay attention that `canvas` is on the last line. This is an output value of cell. 
When IPython cell return canvas, it is automatically drawn ``` import ROOT from rep.plotting import canvas canvas = canvas('my_canvas') function1 = ROOT.TF1( 'fun1', 'abs(sin(x)/x)', 0, 10) canvas.SetGridx() canvas.SetGridy() function1.Draw() # Drawing output (last line is considered as output of cell) canvas ``` # Plot histogram using ROOT for branch in root file ``` File = ROOT.TFile("toy_datasets/random.root") Tree = File.Get("tree") Tree.Draw("first") canvas ``` ## use histogram settings ``` # we need to keep histogram in any variable, otherwise it will be deleted automatically h1 = ROOT.TH1F("h1","hist from tree",50, -0.25, 0.25) Tree.Draw("first>>h1") canvas ``` # root_numpy + ipython way But IPython provides it's own plotting / data manipulation techniques. Brief demostration below. Pay attention that there is column-expression which is evaluated on-the-fly. ``` data = root_numpy.root2array("toy_datasets/random.root", treename='tree', branches=['first', 'second', 'sin(first) * exp(second)'], selection='first > 0') ``` __in the example above__ we selected three branches (one of which is an expression and was computed on-the-fly) and selections ``` # taking, i.e. first 10 elements using python slicing: data2 = data[:10] ``` ### Convert to pandas pandas allows easy manipulations with data. ``` import pandas dataframe = pandas.DataFrame(data) # looking at first elements dataframe.head() # taking elements, that satisfy some condition, again showing only first dataframe[dataframe['second'] > 0].head() # adding new column as result of some operation dataframe['third'] = dataframe['first'] + dataframe['second'] dataframe.head() ``` ## Histograms in python Default library for plotting in python is matplotlib. 
```
import matplotlib.pyplot as plt

plt.figure(figsize=(9, 7))
plt.hist(data['first'], bins=50)
plt.xlabel('first')

plt.figure(figsize=(9, 7))
plt.hist(data['second'], bins=50)
plt.xlabel('second')
```

## Summary
- you can work in the standard way with ROOT (by using rootpy), but it is slow
- you can benefit seriously from python tools (those are fast and very flexible):
  - matplotlib for plotting
  - numpy / pandas for manipulating arrays/dataframes
- to deal with ROOT files, you can use `root_numpy` as a very nice bridge between two worlds.
github_jupyter
``` from onstove.raster import * import rasterio import geopandas as gpd import matplotlib.pyplot as plt from matplotlib.colors import LogNorm import matplotlib import numpy as np import psycopg2 from decouple import config import plotly.express as px import pandas as pd import seaborn as sns from sklearn.cluster import DBSCAN, OPTICS from rasterstats import zonal_stats POSTGRES_USER = config('POSTGRES_USER') POSTGRES_KEY = config('POSTGRES_KEY') conn = psycopg2.connect( database="nepal", user=POSTGRES_USER, password=POSTGRES_KEY ) pop_path = 'data/population_npl_2018-10-01_3857_100x100.tif' with rasterio.open(pop_path) as src: population = src.read(1) population_meta = src.meta ``` ## Clusering and urban/rural split ``` rows, cols = np.where(population>0) x, y = rasterio.transform.xy(population_meta['transform'], rows, cols, offset='center') coords = np.column_stack((x, y)) labels = DBSCAN(eps=500, min_samples=50).fit_predict(coords) # labels = OPTICS(max_eps=300, min_samples=7, xi=.05, min_cluster_size=.05).fit_predict(coords) clusters = population.copy() clusters[rows, cols] = labels clusters[np.isnan(clusters)] = -9999 out_meta = population_meta.copy() out_meta.update(compression='DEFLATE', dtype=rasterio.int32, nodata=-9999) with rasterio.open('data/clusters.tif', 'w', **out_meta) as dst: dst.write(clusters.astype(int), 1) df = gpd.GeoDataFrame({'Population': population[rows, cols], 'Cluster': clusters[rows, cols], 'geometry': gpd.points_from_xy(x, y)}) max_cluster = df['Cluster'].max() cluster_number = df.loc[df['Cluster']==-1, 'Cluster'].count() df.loc[df['Cluster']==-1, 'Cluster'] = [max_cluster + i for i in range(1, cluster_number + 1)] df['Area'] = population_meta['transform'][0] * abs(population_meta['transform'][4]) / (1000 ** 2) df_clusters = df.groupby('Cluster')[['Population', 'Area']].sum().reset_index() calibrate_urban(df_clusters, 0.197, '') dff = df.merge(df_clusters[['Cluster', 'IsUrban']], on='Cluster', how='left') isurban = population.copy() 
isurban[rows, cols] = dff['IsUrban'] isurban[np.isnan(isurban)] = -9999 out_meta = population_meta.copy() out_meta.update(compression='DEFLATE', dtype=rasterio.int32, nodata=-9999) with rasterio.open('data/isurban.tif', 'w', **out_meta) as dst: dst.write(isurban.astype(int), 1) ``` ## Tiers analysis ``` ntl, ntl_meta = align_raster(pop_path, 'data/npp_2020_average_masked.tif', method='average') land_cover, land_cover_meta = align_raster(pop_path, 'data/MCD12Q1_type1.tif', method='nearest') # npp_pop = (population>0) * (land_cover==13) * (npp>0.35) * npp ntl_pop = (population>0) * (ntl>0) * ntl # npp_pop[population>0] /= population[population>0] sql = 'SELECT * FROM admin.npl_admbnda_adm0_nd_20201117' adm0 = gpd.read_postgis(sql, conn) shapes = ((g, 1) for g in adm0.to_crs(3857)['geom'].values) with rasterio.open(pop_path) as src: mask = features.rasterize( shapes, out_shape=src.shape, transform=src.transform, all_touched=False, fill=0) mask[population>0] = 1 ntl_pop[mask==0] = np.nan ntl_pop[ntl_pop==0] = np.nan ntl_urban = ntl_pop.copy() ntl_urban[isurban!=2] = np.nan ntl_periurban = ntl_pop.copy() ntl_periurban[isurban!=1] = np.nan ntl_rural = ntl_pop.copy() ntl_rural[isurban!=0] = np.nan # npp_pop_copy = npp_pop.copy() # npp_pop_copy[npp_pop_copy==0] = -9999 out_meta = population_meta.copy() out_meta.update(compression='DEFLATE', dtype=rasterio.float64, nodata=np.nan) with rasterio.open('data/ntl_urban.tif', 'w', **out_meta) as dst: dst.write(ntl_urban, 1) out_meta = population_meta.copy() out_meta.update(compression='DEFLATE', dtype=rasterio.float64, nodata=np.nan) with rasterio.open('data/ntl_periurban.tif', 'w', **out_meta) as dst: dst.write(ntl_periurban, 1) out_meta = population_meta.copy() out_meta.update(compression='DEFLATE', dtype=rasterio.float64, nodata=np.nan) with rasterio.open('data/ntl_rural.tif', 'w', **out_meta) as dst: dst.write(ntl_rural, 1) sql = 'SELECT * FROM admin.npl_admbnda_districts_nd_20201117' df_district = gpd.read_postgis(sql, conn) 
df_district.to_crs(3857, inplace=True) # shapes = ((g, d) for g, d in zip(df_district['geom'].values, df_district['id'].values)) def get_tiers(raster_path, ntl, gdf): stats = ['percentile_20', 'percentile_40', 'percentile_60', 'percentile_80'] result = zonal_stats(gdf, raster_path, stats=stats, geojson_out=True) percentiles = gpd.GeoDataFrame.from_features(result) df = percentiles[stats].melt() medians = percentiles[stats].median() ax = sns.displot(df, x='value', hue='variable', kind="kde") # ax.set(xlim=(0, 2)) tiers = ntl.copy() tiers[(ntl>0) & (ntl<medians['percentile_30'])] = 1 tiers[(ntl>=medians['percentile_20']) & (ntl<medians['percentile_40'])] = 2 tiers[(ntl>=medians['percentile_40']) & (ntl<medians['percentile_60'])] = 3 tiers[(ntl>=medians['percentile_60']) & (ntl<medians['percentile_80'])] = 4 tiers[(ntl>=medians['percentile_80'])] = 5 tiers[np.isnan(tiers)] = 0 return tiers tiers_urban = get_tiers('data/ntl_urban.tif', ntl_urban, df_district) tiers_periurban = get_tiers('data/ntl_periurban.tif', ntl_periurban, df_district) tiers_rural = get_tiers('data/ntl_rural.tif', ntl_rural, df_district) tiers = tiers_urban + tiers_periurban + tiers_rural cmap = matplotlib.cm.get_cmap("Spectral_r", 6) fig, ax = plt.subplots(1, 1, figsize=(16,9)) cax = ax.imshow(tiers, extent=extent, cmap=cmap) cbar = fig.colorbar(cax, shrink=0.8, ticks=[0, 1, 2, 3, 4, 5]) # fig.savefig(f"data/tiers.png", dpi=300, bbox_inches='tight') np.nansum(population[tiers==5]) / np.nansum(population) * 100 np.nansum(population[tiers_urban==5]) / np.nansum(population) * 100 tiers[np.isnan(population)] = -9999 out_meta = population_meta.copy() out_meta.update(compression='DEFLATE', dtype=rasterio.int32, nodata=-9999) with rasterio.open('data/tiers3.tif', 'w', **out_meta) as dst: dst.write(tiers.astype(int), 1) ``` ### This approach is not giving good results, so maybe we can calibrate the Tiers with a similar approach as the Urban / Rural split, but using NTL, population density and wealth index.
github_jupyter
# End-to-End NLP: News Headline Classifier (Local Version) _**Train a Keras-based model to classify news headlines between four domains**_ This notebook works well with the `Python 3 (TensorFlow 1.15 Python 3.7 CPU Optimized)` kernel on SageMaker Studio, or `conda_tensorflow_p36` on classic SageMaker Notebook Instances. --- In this version, the model is trained and evaluated here on the notebook instance itself. We'll show in the follow-on notebook how to take advantage of Amazon SageMaker to separate these infrastructure needs. Note that you can safely ignore the WARNING about the pip version. ``` # First install some libraries which might not be available across all kernels (e.g. in Studio): !pip install ipywidgets ``` ### Set Up Execution Role and Session Let's start by specifying: - The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting. If you don't specify a bucket, SageMaker SDK will create a default bucket following a pre-defined naming convention in the same region. - The IAM role ARN used to give SageMaker access to your data. It can be fetched using the **get_execution_role** method from sagemaker python SDK. ``` %%time %load_ext autoreload %autoreload 2 import sagemaker from sagemaker import get_execution_role role = get_execution_role() print(role) sess = sagemaker.Session() ``` ### Download News Aggregator Dataset We will download **FastAi AG News** dataset from the https://registry.opendata.aws/fast-ai-nlp/ public repository. This dataset contains a table of news headlines and their corresponding classes. ``` %%time import util.preprocessing util.preprocessing.download_dataset() ``` ### Let's visualize the dataset We will load the ag_news_csv/train.csv file to a Pandas dataframe for our data processing work. 
``` import os import re import numpy as np import pandas as pd column_names = ["CATEGORY", "TITLE", "CONTENT"] # we use the train.csv only df = pd.read_csv("data/ag_news_csv/train.csv", names=column_names, header=None, delimiter=",") # shuffle the DataFrame rows df = df.sample(frac=1, random_state=1337) # make the category classes more readable mapping = {1: 'World', 2: 'Sports', 3: 'Business', 4: 'Sci/Tech'} df = df.replace({'CATEGORY': mapping}) df.head() ``` For this exercise we'll **only use**: - The **title** (Headline) of the news story, as our input - The **category**, as our target variable ``` df["CATEGORY"].value_counts() ``` The dataset has **four article categories** with equal weighting: - Business - Sci/Tech - Sports - World ## Natural Language Pre-Processing We'll do some basic processing of the text data to convert it into numerical form that the algorithm will be able to consume to create a model. We will do typical pre processing for NLP workloads such as: dummy encoding the labels, tokenizing the documents and set fixed sequence lengths for input feature dimension, padding documents to have fixed length input vectors. ### Dummy Encode the Labels ``` encoded_y, labels = util.preprocessing.dummy_encode_labels(df, "CATEGORY") print(labels) df["CATEGORY"].iloc[0] encoded_y[0] ``` ### Tokenize and Set Fixed Sequence Lengths We want to describe our inputs at the more meaningful word level (rather than individual characters), and ensure a fixed length of the input feature dimension. ``` padded_docs, tokenizer = util.preprocessing.tokenize_pad_docs(df, "TITLE") df["TITLE"].iloc[0] padded_docs[0] ``` ### Import Word Embeddings To represent our words in numeric form, we'll use pre-trained vector representations for each word in the vocabulary: In this case we'll be using pre-built GloVe word embeddings. 
You could also explore training custom, domain-specific word embeddings using SageMaker's built-in [BlazingText algorithm](https://docs.aws.amazon.com/sagemaker/latest/dg/blazingtext.html). See the official [blazingtext_word2vec_text8 sample](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/introduction_to_amazon_algorithms/blazingtext_word2vec_text8) for an example notebook showing how. ``` %%time embedding_matrix = util.preprocessing.get_word_embeddings(tokenizer, "data/embeddings") np.save( file="./data/embeddings/docs-embedding-matrix", arr=embedding_matrix, allow_pickle=False, ) vocab_size=embedding_matrix.shape[0] print(embedding_matrix.shape) ``` ### Split Train and Test Sets Finally we need to divide our data into model training and evaluation sets: ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( padded_docs, encoded_y, test_size=0.2, random_state=42 ) # Do you always remember to save your datasets for traceability when experimenting locally? 
;-) os.makedirs("./data/train", exist_ok=True) np.save("./data/train/train_X.npy", X_train) np.save("./data/train/train_Y.npy", y_train) os.makedirs("./data/test", exist_ok=True) np.save("./data/test/test_X.npy", X_test) np.save("./data/test/test_Y.npy", y_test) ``` ## Define the Model ``` import tensorflow as tf from tensorflow.keras.layers import Conv1D, Dense, Dropout, Embedding, Flatten, MaxPooling1D from tensorflow.keras.models import Sequential seed = 42 np.random.seed(seed) num_classes=len(labels) model = Sequential() model.add(Embedding( vocab_size, 100, weights=[embedding_matrix], input_length=40, trainable=False, name="embed" )) model.add(Conv1D(filters=128, kernel_size=3, activation="relu", name="conv_1")) model.add(MaxPooling1D(pool_size=5, name="maxpool_1")) model.add(Flatten(name="flat_1")) model.add(Dropout(0.3, name="dropout_1")) model.add(Dense(128, activation="relu", name="dense_1")) model.add(Dense(num_classes, activation="softmax", name="out_1")) # Compile the model optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001) model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=["acc"]) model.summary() ``` ## Fit (Train) and Evaluate the Model ``` %%time # fit the model here in the notebook: print("Training model") model.fit(X_train, y_train, batch_size=16, epochs=5, verbose=1) print("Evaluating model") # TODO: Better differentiate train vs val loss in logs scores = model.evaluate(X_test, y_test, verbose=2) print( "Validation results: " + "; ".join(map( lambda i: f"{model.metrics_names[i]}={scores[i]:.5f}", range(len(model.metrics_names)) )) ) ``` ## Use the Model (Locally) Let's evaluate our model with some example headlines... If you struggle with the widget, you can always simply call the `classify()` function from Python. You can be creative with your headlines! 
``` from IPython import display import ipywidgets as widgets from tensorflow.keras.preprocessing.sequence import pad_sequences def classify(text): """Classify a headline and print the results""" encoded_example = tokenizer.texts_to_sequences([text]) # Pad documents to a max length of 40 words max_length = 40 padded_example = pad_sequences(encoded_example, maxlen=max_length, padding="post") result = model.predict(padded_example) print(result) ix = np.argmax(result) print(f"Predicted class: '{labels[ix]}' with confidence {result[0][ix]:.2%}") interaction = widgets.interact_manual( classify, text=widgets.Text( value="The markets were bullish after news of the merger", placeholder="Type a news headline...", description="Headline:", layout=widgets.Layout(width="99%"), ) ) interaction.widget.children[1].description = "Classify!" ``` ## Review In this notebook we pre-processed publicly downloadable data and trained a neural news headline classifier model: As a data scientist might normally do when working on a local machine. ...But can we use the cloud more effectively to allocate high-performance resources; and easily deploy our trained models for use by other applications? Head on over to the next notebook, [Headline Classifier SageMaker.ipynb](Headline%20Classifier%20SageMaker.ipynb), where we'll show how the same model can be trained and then deployed on specific target infrastructure with Amazon SageMaker.
github_jupyter
# High-level Chainer Example ``` import os os.environ['CHAINER_TYPE_CHECK'] = '0' import sys import numpy as np import math import chainer import chainer.functions as F import chainer.links as L from chainer import optimizers from chainer import cuda from common.params import * from common.utils import * cuda.set_max_workspace_size(512 * 1024 * 1024) chainer.global_config.autotune = True print("OS: ", sys.platform) print("Python: ", sys.version) print("Chainer: ", chainer.__version__) print("CuPy: ", chainer.cuda.cupy.__version__) print("Numpy: ", np.__version__) print("GPU: ", get_gpu_name()) class SymbolModule(chainer.Chain): def __init__(self): super(SymbolModule, self).__init__() with self.init_scope(): self.conv1 = L.Convolution2D(3, 50, ksize=3, pad=1) self.conv2 = L.Convolution2D(50, 50, ksize=3, pad=1) self.conv3 = L.Convolution2D(50, 100, ksize=3, pad=1) self.conv4 = L.Convolution2D(100, 100, ksize=3, pad=1) # feature map size is 8*8 by pooling self.fc1 = L.Linear(100*8*8, 512) self.fc2 = L.Linear(512, N_CLASSES) def __call__(self, x): h = F.relu(self.conv2(F.relu(self.conv1(x)))) h = F.max_pooling_2d(h, ksize=2, stride=2) h = F.dropout(h, 0.25) h = F.relu(self.conv4(F.relu(self.conv3(h)))) h = F.max_pooling_2d(h, ksize=2, stride=2) h = F.dropout(h, 0.25) h = F.dropout(F.relu(self.fc1(h)), 0.5) return self.fc2(h) def init_model(m): optimizer = optimizers.MomentumSGD(lr=LR, momentum=MOMENTUM) optimizer.setup(m) return optimizer %%time # Data into format for library #x_train, x_test, y_train, y_test = mnist_for_library(channel_first=True) x_train, x_test, y_train, y_test = cifar_for_library(channel_first=True) print(x_train.shape, x_test.shape, y_train.shape, y_test.shape) print(x_train.dtype, x_test.dtype, y_train.dtype, y_test.dtype) %%time # Create symbol sym = SymbolModule() if GPU: chainer.cuda.get_device(0).use() # Make a specified GPU current sym.to_gpu() # Copy the model to the GPU %%time optimizer = init_model(sym) %%time # 162s for j in 
range(EPOCHS): for data, target in yield_mb(x_train, y_train, BATCHSIZE, shuffle=True): # Get samples data = cuda.to_gpu(data) target = cuda.to_gpu(target) output = sym(data) loss = F.softmax_cross_entropy(output, target) sym.cleargrads() loss.backward() optimizer.update() # Log print(j) %%time n_samples = (y_test.shape[0]//BATCHSIZE)*BATCHSIZE y_guess = np.zeros(n_samples, dtype=np.int) y_truth = y_test[:n_samples] c = 0 with chainer.using_config('train', False), chainer.using_config('enable_backprop', False): for data, target in yield_mb(x_test, y_test, BATCHSIZE): # Forwards pred = cuda.to_cpu(sym(cuda.to_gpu(data)).data.argmax(-1)) # Collect results y_guess[c*BATCHSIZE:(c+1)*BATCHSIZE] = pred c += 1 print("Accuracy: ", sum(y_guess == y_truth)/len(y_guess)) ```
github_jupyter
# Gases: Perfect and Semiperfect Models In this Notebook we will use `PerfectIdealGas` and `SemiperfectIdealGas` classes from **pyTurb**, to access the thermodynamic properties with a Perfect Ideal Gas or a Semiperfect Ideal Gas approach. Both classes acquire the thermodynamic properties of different species from the *NASA Glenn coefficients* in `thermo_properties.py`. Note that `PerfectIdealGas` and `SemiperfectIdealGas` classes are two different approaches for an *Ideal Gas*. The `gas_models` functions and classes can be found in the following folders: - pyturb - gas_models - thermo_prop - PerfectIdealGas - SemiperfectIdealGas - GasMixture ```python from pyturb.gas_models import ThermoProperties from pyturb.gas_models import PerfectIdealGas from pyturb.gas_models import SemiperfectIdealGas from pyturb.gas_models import GasMixture ``` For an example about how to declare and use a Gas Mixture in **pyTurb**, go to the "Gas Mixtures.ipynb" Notebook. ### Ideal Gas An Ideal Gas is characterized by a compressibility factor of 1: $$Z=1=\frac{pv}{R_gT}$$ This means that the *Ideal Gas Equation of State* applies ($pv=R_gT$). It also means that the Mayer Equation is applicable: $R_g=c_p-c_v$. 
### Perfect and Semiperfect approaches A Perfect Gas or a Semiperfect Ideal Gas approach means: - If the gas is perfect: $c_v, c_p, \gamma_g \equiv constant$ - If the gas is Semiperfect: $c_v(T), c_p(T), \gamma_g(T) \equiv f(T)$ By definition, the model used in `ThermoProperties` provides a 7-coefficient polynomial for the heat capacity at constant pressure ($c_p$): $$ \frac{c_p}{R_g} = a_1T^{-2}+a_2T^{-1} + a_3 + a_4T + a_5T^2 + a_6T^3 + a_7T^4$$ With the $c_p$, the Mayer Equation (valid for $Z=1$) and the heat capacity ratio we can obtain $c_v \left(T\right)$ and $\gamma \left(T\right)$: $$ R_g =c_p\left(T\right)-c_v \left(T\right) $$ $$\gamma_g\left(T\right) = \frac{c_p\left(T\right)}{c_v\left(T\right)}$$ > In practice, the `PerfectIdealGas` object is a `SemiperfectIdealGas` where the temperature is set to $25\,^{\circ}C$. ### Perfect and Semiperfect content Both `PerfectIdealGas` and `SemiperfectIdealGas` classes have the following content: - **Gas properties:** Ru, Rg, Mg, cp, cp_molar, cv, cv_molar, gamma - **Gas enthalpies, moles and mass:** h0, h0_molar, mg, Ng - **Chemical properties:** gas_species, thermo_prop ### Other dependencies: We will import `numpy` and `pyplot` as well, to make some graphical examples. 
--- ### Check Gas Species availability: ``` from pyturb.gas_models import ThermoProperties tp = ThermoProperties() print(tp.species_list[850:875]) tp.is_available('Air') ``` --- ### Import Perfect and Semiperfect Ideal Gas classes: Examples with Air: ``` from pyturb.gas_models import PerfectIdealGas from pyturb.gas_models import SemiperfectIdealGas # Air as perfect gas: perfect_air = PerfectIdealGas('Air') # Air as semiperfect gas: semiperfect_air = SemiperfectIdealGas('Air') ``` --- ##### To retrieve the thermodynamic properties you can `print` the `thermo_prop` from the gas: Including: - Chemical formula - Heat of formation - Molecular mass - cp coefficients ``` print(perfect_air.thermo_prop) ``` --- You can get the thermodynamic properties directly from the gas object. Note that all units are International System of Units (SI): ``` print(perfect_air.Rg) print(perfect_air.Mg) print(perfect_air.cp()) print(perfect_air.cp_molar()) print(perfect_air.cv()) print(perfect_air.cv_molar()) print(perfect_air.gamma()) ``` --- ##### Use the docstrings for more info about the content of a PerfectIdealGas or a SemiperfectIdealGas: ``` perfect_air? 
``` --- ##### Compare both models: Note that *Perfect Ideal Air*, with constant $c_p$, $c_v$ and $\gamma$, yields the same properties than a semiperfect gas model at 25ºC (reference temperature): ``` T = 288.15 #K cp_perf = perfect_air.cp() cp_sp = semiperfect_air.cp(T) print('At T={0:8.2f}K, cp_perfect={1:8.2f}J/kg/K'.format(T, cp_perf)) print('At T={0:8.2f}K, cp_semipft={1:8.2f}J/kg/K'.format(T, cp_sp)) T = 1500 #K cp_perf = perfect_air.cp() cp_sp = semiperfect_air.cp(T) print('At T={0:8.2f}K, cp_perfect={1:8.2f}J/kg/K'.format(T, cp_perf)) print('At T={0:8.2f}K, cp_semipft={1:8.2f}J/kg/K'.format(T, cp_sp)) ``` --- ##### $c_p$, $c_v$ and $\gamma$ versus temperature: ``` import numpy as np from matplotlib import pyplot as plt T = np.linspace(200, 2000, 50) cp = np.zeros_like(T) cv = np.zeros_like(T) gamma = np.zeros_like(T) for ii, temperature in enumerate(T): cp[ii] = semiperfect_air.cp(temperature) cv[ii] = semiperfect_air.cv(temperature) gamma[ii] = semiperfect_air.gamma(temperature) fig, (ax1, ax2) = plt.subplots(2) fig.suptitle('Air properties') ax1.plot(T, cp) ax1.plot(T, cv) ax2.plot(T, gamma) ax1.set(xlabel="Temperature [K]", ylabel="cp, cv [J/kg/K]") ax2.set(xlabel="Temperature [K]", ylabel="gamma [-]") ax1.grid() ax2.grid() plt.show() ```
github_jupyter
## Animation options In Vizzu you can set the timing and duration of the animation. You can do this either for the whole animation, or for animation groups such as the elements moving along the x-axis or the y-axis, appearing or disappearing or when the coordinate system is changed. Let’s see first a simple example when a stacked column chart is grouped using the default animation options. ``` from ipyvizzu import Chart, Data, Config chart = Chart() data = Data.from_json("../data/music_example_data.json") chart.animate(data) chart.animate(Config({ "channels": { "y": { "set": ["Popularity", "Types"] }, "x": { "set": "Genres" } }, "label": { "attach": "Popularity" }, "color": { "set": "Types" }, "title": "Default options - step 1" })) chart.animate(Config({ "channels": { "y": { "detach": "Types" }, "x": { "attach": "Types" } } })) snapshot1 = chart.store() ``` We stack the columns, still with the default options. ``` chart.animate(snapshot1) chart.animate(Config({"title": "Default options - step 2"})) chart.animate(Config({ "channels": { "x": { "detach": "Types" }, "y": { "attach": "Types" } } })) snapshot2 = chart.store() ``` Now we change the animation settings for the elements moving along the y-axis and also the change in styles, more specifically when the labels on the markers move from the center of the chart elements to the top of them. ``` chart.animate(snapshot2) chart.animate(Config({"title": "Custom animation settings for specific groups"})) chart.animate( Config({ "channels": { "y": { "detach": "Types" }, "x": { "attach": "Types" } } }), y={ "duration": 2, "delay": 2 }, style={ "duration": 2, "delay": 4 } ) snapshot3 = chart.store() ``` This is an example of changing the settings for the whole animation at once. 
``` chart.animate(snapshot3) chart.animate(Config({"title": "Custom options for the whole animation"})) chart.animate( Config({ "channels": { "x": { "detach": "Types" }, "y": { "attach": "Types" } } }), duration=1, easing="linear" ) snapshot4 = chart.store() ``` When the two settings are combined, Vizzu will use the general animation options and spread the unique settings for specific groups proportionally. This is why you can see the same animation as two steps before but happening much quicker since the duration of the whole animation is set to 1 second. ``` chart.animate(snapshot4) chart.animate(Config({"title": "Custom settings for both"})) chart.animate( Config({ "channels": { "y": { "detach": "Types" }, "x": { "attach": "Types" } } }), duration=1, easing="linear", y={ "duration": 2, "delay": 2 }, style={ "duration": 2, "delay": 4 } ) ```
github_jupyter
# General parameters ``` import files import utils import os import models import numpy as np from tqdm.autonotebook import tqdm import pandas as pd from sklearn.preprocessing import OneHotEncoder import datetime import seaborn as sns import matplotlib as mpl from matplotlib.backends.backend_pgf import FigureCanvasPgf mpl.backend_bases.register_backend('pdf', FigureCanvasPgf) from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset,inset_axes size=19 mpl.rcParams.update({ "pgf.texsystem": "pdflatex", 'font.family': 'serif', 'font.serif': 'Times', 'text.usetex': True, 'pgf.rcfonts': False, 'font.size': size, 'axes.labelsize':size, 'axes.titlesize':size, 'figure.titlesize':size, 'xtick.labelsize':size, 'ytick.labelsize':size, 'legend.fontsize':size, }) import matplotlib.pyplot as plt import matplotlib.lines as mlines ######################################################### # Global random forests parameters ######################################################### # the number of trees in the forest n_estimators = 1000 # the minimum number of samples required to be at a leaf node # (default skgarden's parameter) min_samples_leaf = 1 # the number of features to consider when looking for the best split # (default skgarden's parameter) max_features = 6 params_basemodel = {'n_estimators':n_estimators, 'min_samples_leaf':min_samples_leaf, 'max_features':max_features, 'cores':1} ``` # Data import ``` # load the dataset data = pd.read_csv("data_prices/Prices_2016_2019_extract.csv") data.shape # the first week (24*7 rows) has been removed because of the lagged variables. 
date_plot = pd.to_datetime(data.Date) plt_1 = plt.figure(figsize=(10, 5)) plt.plot(date_plot, data.Spot, color='black', linewidth=0.6) locs, labels = plt.xticks() plt.xticks(locs[0:len(locs):2], labels=['2016','2017','2018','2019','2020']) plt.xlabel('Date') plt.ylabel('Spot price (\u20AC/MWh)') plt.show() limit = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc) id_train = data.index[pd.to_datetime(data['Date'], utc=True) < limit].tolist() data_train = data.iloc[id_train,:] sub_data_train = data_train.loc[:,['hour','dow_0','dow_1','dow_2','dow_3','dow_4','dow_5','dow_6'] + ['lag_24_%d'%i for i in range(24)] + ['lag_168_%d'%i for i in range(24)] + ['conso']] all_x_train = [np.array(sub_data_train.loc[sub_data_train.hour == h]) for h in range(24)] train_size = all_x_train[0].shape[0] sub_data = data.loc[:,['hour','dow_0','dow_1','dow_2','dow_3','dow_4','dow_5','dow_6'] + ['lag_24_%d'%i for i in range(24)] + ['lag_168_%d'%i for i in range(24)] + ['conso']] all_x = [np.array(sub_data.loc[sub_data.hour == h]) for h in range(24)] all_y = [np.array(data.loc[data.hour == h, 'Spot']) for h in range(24)] all_x_train[0].shape ``` # CP methods ``` alpha = 0.1 for h in tqdm(range(24)): X = all_x[h] Y = all_y[h] data_dict = {'X': np.transpose(X), 'Y': Y} dataset = 'Spot_France_Hour_'+str(h)+'_train_'+str(limit)[:10] methods = ['CP', 'EnbPI'] params_methods = {'B': 30} results, methods_ran = models.run_experiments_real_data(data_dict, alpha, methods, params_methods, 'RF', params_basemodel, train_size, dataset, erase=False) for method in methods_ran: name_dir, name_method = files.get_name_results(method, dataset=dataset) results_method = results[method] files.write_file('results/'+name_dir, name_method, 'pkl', results_method) # Mean EnbPI params_methods = {'B': 30, 'mean': True} results, methods_ran = models.run_experiments_real_data(data_dict, alpha, methods, params_methods, 'RF', params_basemodel, train_size, dataset, erase=False) for method in methods_ran: name_dir, 
name_method = files.get_name_results(method, dataset=dataset) results_method = results[method] files.write_file('results/'+name_dir, name_method, 'pkl', results_method) # Offline methods = ['CP'] params_methods = {'online': False} results, methods_ran = models.run_experiments_real_data(data_dict, alpha, methods, params_methods, 'RF', params_basemodel, train_size, dataset, erase=False) for method in methods_ran: name_dir, name_method = files.get_name_results(method, online=False, dataset=dataset) results_method = results[method] files.write_file('results/'+name_dir, name_method, 'pkl', results_method) tab_gamma = [0, 0.000005, 0.00005, 0.0001,0.0002,0.0003,0.0004,0.0005,0.0006,0.0007,0.0008,0.0009, 0.001,0.002,0.003,0.004,0.005,0.006,0.007,0.008,0.009, 0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09] for h in tqdm(range(24)): X = all_x[h] Y = all_y[h] data_dict = {'X': np.transpose(X), 'Y': Y} dataset = 'Spot_France_Hour_'+str(h)+'_train_'+str(limit)[:10] results, methods_ran = models.run_multiple_gamma_ACP_real_data(data_dict, alpha, tab_gamma, 'RF', params_basemodel, train_size, dataset, erase=False) online = True for method in methods_ran: name_dir, name_method = files.get_name_results(method, dataset=dataset) results_method = results[method] files.write_file('results/'+name_dir, name_method, 'pkl', results_method) ``` ## Results concatenated Be careful that the aggregation algorithm (AgACI) must be run in R separately from this notebook before running the following cells (if you use a new data set or if you erased the supplied results). 
``` id_test = data.index[pd.to_datetime(data['Date'], utc=True) >= limit].tolist() data_test = data.iloc[id_test,:] methods = ['CP','EnbPI','EnbPI_Mean']+['ACP_'+str(gamma) for gamma in tab_gamma]+['Aggregation_EWA_Gradient','Aggregation_EWA', 'Aggregation_MLpol_Gradient','Aggregation_MLpol', 'Aggregation_BOA_Gradient','Aggregation_BOA'] for method in methods: y_upper = [None]*data_test.shape[0] y_lower = [None]*data_test.shape[0] for i in range(24): dataset = 'Spot_France_Hour_'+str(i)+'_train_'+str(limit)[:10] name_dir, name_method = files.get_name_results(method, dataset=dataset) results = files.load_file('results/'+name_dir, name_method, 'pkl') y_upper[i::24] = list(results['Y_sup'].reshape(1,-1)[0]) y_lower[i::24] = list(results['Y_inf'].reshape(1,-1)[0]) y_upper = np.array(y_upper) y_lower = np.array(y_lower) results_method = {'Y_inf': y_lower, 'Y_sup':y_upper} dataset = 'Spot_France_ByHour_train_'+str(limit)[:10] name_dir, name_method = files.get_name_results(method, dataset=dataset) if not os.path.isdir('results/'+name_dir): os.mkdir('results/'+name_dir) files.write_file('results/'+name_dir, name_method, 'pkl', results_method) if method == 'CP': y_upper = [None]*data_test.shape[0] y_lower = [None]*data_test.shape[0] for i in range(24): dataset = 'Spot_France_Hour_'+str(i)+'_train_'+str(limit)[:10] name_dir, name_method = files.get_name_results(method, online=False, dataset=dataset) results = files.load_file('results/'+name_dir, name_method, 'pkl') y_upper[i::24] = list(results['Y_sup'].reshape(1,-1)[0]) y_lower[i::24] = list(results['Y_inf'].reshape(1,-1)[0]) y_upper = np.array(y_upper) y_lower = np.array(y_lower) results_method = {'Y_inf': y_lower, 'Y_sup':y_upper} dataset = 'Spot_France_ByHour_train_'+str(limit)[:10] name_dir, name_method = files.get_name_results(method, online=False, dataset=dataset) if not os.path.isdir('results/'+name_dir): os.mkdir('results/'+name_dir) files.write_file('results/'+name_dir, name_method, 'pkl', results_method) dataset = 
'Spot_France_ByHour_train_'+str(limit)[:10] Y = data_test['Spot'].values ``` ### Visualisation ``` colors_blindness = sns.color_palette("colorblind") method = 'Aggregation_BOA_Gradient' name_dir, name_method = files.get_name_results(method, dataset=dataset) results = files.load_file('results/'+name_dir, name_method, 'pkl') contains = (Y <= results['Y_sup']) & (Y >= results['Y_inf']) lengths = results['Y_sup'] - results['Y_inf'] y_pred = (results['Y_sup'] + results['Y_inf'])/2 d = 20 plt.plot(pd.to_datetime(data_test['Date'])[24*d:(24*(d+4)+1)],data_test['Spot'][24*d:(24*(d+4)+1)], color='black', label='Observed price') plt.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],data_test['Spot'][24*(d+4):24*(d+5)], color='black',alpha=.5) plt.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],y_pred[24*(d+4):24*(d+5)],'--', color=(230/255,120/255,20/255), label='Predicted price') plt.ylabel("Spot price (€/MWh)") plt.xticks(rotation=45) plt.legend() #plt.savefig('plots/prices/spot_last.png', bbox_inches='tight',dpi=300) plt.show() plt.plot(pd.to_datetime(data_test['Date'])[24*d:(24*(d+4)+1)],data_test['Spot'][24*d:(24*(d+4)+1)], color='black', label='Observed price') plt.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],data_test['Spot'][24*(d+4):24*(d+5)], color='black', alpha=.5) plt.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],results['Y_sup'][24*(d+4):24*(d+5)], color=colors_blindness[9]) plt.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],results['Y_inf'][24*(d+4):24*(d+5)], color=colors_blindness[9]) plt.fill_between(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],results['Y_sup'][24*(d+4):24*(d+5)], results['Y_inf'][24*(d+4):24*(d+5)], alpha=.3, fc=colors_blindness[9], ec='None', label='Predicted interval') plt.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],y_pred[24*(d+4):24*(d+5)],'--', color=colors_blindness[1], label='Predicted price') plt.ylabel("Spot price (€/MWh)") plt.xticks(rotation=45) 
plt.legend() #plt.savefig('plots/prices/ex_int_'+method+'.pdf', bbox_inches='tight',dpi=300) plt.show() date_plot = pd.to_datetime(data.Date) fig,ax = plt.subplots(1,1,figsize=(10, 5)) axins = inset_axes(ax,4.3,2.1,loc='upper right') ax.plot(date_plot, data.Spot, color='black', linewidth=0.6) locs = ax.get_xticks() ax.set_xticks(locs[0:len(locs):2]) ax.set_xticklabels(['2016','2017','2018','2019','2020']) ax.set_xlabel('Date') ax.set_ylabel('Spot price (\u20AC/MWh)') axins.plot(pd.to_datetime(data_test['Date'])[24*d:(24*(d+4)+1)],data_test['Spot'][24*d:(24*(d+4)+1)], color='black', label='Observed price') axins.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],data_test['Spot'][24*(d+4):24*(d+5)], color='black', alpha=.5) axins.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],results['Y_sup'][24*(d+4):24*(d+5)], color=colors_blindness[9]) axins.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],results['Y_inf'][24*(d+4):24*(d+5)], color=colors_blindness[9]) axins.fill_between(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],results['Y_sup'][24*(d+4):24*(d+5)], results['Y_inf'][24*(d+4):24*(d+5)], alpha=.3, fc=colors_blindness[9], ec='None', label='Predicted interval') axins.plot(pd.to_datetime(data_test['Date'])[24*(d+4):24*(d+5)],y_pred[24*(d+4):24*(d+5)],'--', color=colors_blindness[1], label='Predicted price') axins.legend(prop={'size': 14}) axins.set_yticks([50,75,100,125]) axins.set_yticklabels([50,75,100,125]) locs = axins.get_xticks() axins.set_xticks(locs[:len(locs)-1]) axins.set_xticklabels(['21/01','22/01','23/01','24/01','25/01']) axins.tick_params(axis='x', rotation=20) mark_inset(ax, axins, loc1=3, loc2=4, fc="none", ec="0.7") #plt.savefig('plots/prices/spot_and_ex_int_'+method+'.pdf', bbox_inches='tight',dpi=300) plt.show() ``` ### Marginal validity and efficiency comparison ``` lines = False add_offline = True methods = ['CP', 'EnbPI_Mean', 'ACP_0','ACP_0.01', 'ACP_0.05', 'Aggregation_BOA_Gradient'] marker_size = 80 fig, 
(ax1) = plt.subplots(1, 1, figsize=(10,5), sharex=True, sharey=True) markers = {'Gaussian': "o", 'CP': "s", 'ACP':'D','ACP_0.05':'D', 'ACP_0.01': "d", 'ACP_0': "^", 'Aggregation_BOA_Gradient':'*', 'QR': "v", 'CQR': "D", 'CQR_CV': "d", 'EnbPI': 'x','EnbPI_Mean': '+'} methods_display = {'Gaussian': 'Gaussian', 'CP': 'OSSCP', # (adapted from Lei et al., 2018) 'EnbPI': 'EnbPI (Xu \& Xie, 2021)','EnbPI_Mean': 'EnbPI V2', 'ACP': 'ACI '+r'$\gamma = 0.05$',#(Gibbs \& Candès, 2021) 'ACP_0.05': 'ACI '+r'$\gamma = 0.05$',#(Gibbs \& Candès, 2021) 'ACP_0.01': 'ACI '+r'$\gamma = 0.01$',# (Gibbs \& Candès, 2021), 'ACP_0': 'ACI '+r'$\gamma = 0$',# (Gibbs \& Candès, 2021), 'Aggregation_BOA_Gradient':'AgACI'} for method in methods: name_dir, name_method = files.get_name_results(method, dataset=dataset) results = files.load_file('results/'+name_dir, name_method, 'pkl') contains = (Y <= results['Y_sup']) & (Y >= results['Y_inf']) lengths = results['Y_sup'] - results['Y_inf'] if method not in ["ACP","ACP_0.01","ACP_QCP_0.05",'EnbPI_Mean', 'Aggregation_BOA_Gradient']: ax1.scatter(np.mean(contains),np.median(lengths), marker=markers[method], color='black',s=marker_size) elif method in ["ACP","ACP_0.01","ACP_QCP_0.05"]: ax1.scatter(np.mean(contains),np.median(lengths), marker=markers[method], color='black',s=marker_size) elif method in ['EnbPI_Mean','Aggregation_BOA_Gradient']: ax1.scatter(np.mean(contains),np.median(lengths), marker=markers[method], color='black',s=marker_size+30) if add_offline and method in ['Gaussian','CP','CQR','CQR_CV']: name_dir, name_method = files.get_name_results(method, online=False, dataset=dataset) results = files.load_file('results/'+name_dir, name_method, 'pkl') contains = (Y <= results['Y_sup']) & (Y >= results['Y_inf']) lengths = results['Y_sup'] - results['Y_inf'] ax1.scatter(np.mean(contains),np.median(lengths), marker=markers[method], color='black', facecolors='none',s=marker_size) ax1.axvline(x=1-alpha, color='black', ls=':') 
ax1.set_xlabel("Coverage")
ax1.set_ylabel("Median length")
# Methods legend
handles = []
names = []
names_wo_offline = list( map(methods_display.get, methods) )
if add_offline:
    names = np.append(names,names_wo_offline[0])
    names = np.append(names,names_wo_offline)
    names[1] = 'Offline SSCP'# (adapted from Lei et al., 2018)
else:
    # Fix: the variable defined above is `names_wo_offline`;
    # `names_wo_offlines` raised a NameError whenever add_offline was False.
    names = names_wo_offline
for marker in list( map(markers.get, methods) ):
    handles.append(mlines.Line2D([], [], color='black', marker=marker, linestyle='None'))
    if add_offline and marker == 's':
        handles.append(mlines.Line2D([], [], color='black', marker=marker, linestyle='None', markerfacecolor='none'))
fig.legend(handles, names, bbox_to_anchor=(0,0.95,1,0.2), loc='upper center', ncol=3)
if lines:
    name_plot = 'plots/prices/'+dataset+'_lines'
else:
    name_plot = 'plots/prices/'+dataset+'_median'
if add_offline :
    name_plot = name_plot + '_offline'
#plt.savefig(name_plot+'.pdf', bbox_inches='tight',dpi=300)
plt.show()

# Second figure: same comparison with mean interval length and imputed bounds.
lines = False
add_offline = True
methods = ['CP', 'EnbPI_Mean', 'ACP_0','ACP_0.01', 'ACP_0.05', 'Aggregation_BOA_Gradient']
marker_size = 80
fig, (ax1) = plt.subplots(1, 1, figsize=(10,5), sharex=True, sharey=True)
markers = {'Gaussian': "o", 'CP': "s", 'ACP':'D','ACP_0.05':'D', 'ACP_0.01': "d", 'ACP_0': "^",
           'Aggregation_BOA_Gradient':'*', 'QR': "v", 'CQR': "D", 'CQR_CV': "d",
           'EnbPI': 'x','EnbPI_Mean': '+'}
methods_display = {'Gaussian': 'Gaussian',
                   'CP': 'OSSCP', # (adapted from Lei et al., 2018)
                   'EnbPI': 'EnbPI (Xu \& Xie, 2021)','EnbPI_Mean': 'EnbPI V2',
                   'ACP': 'ACI '+r'$\gamma = 0.05$',#(Gibbs \& Candès, 2021)
                   'ACP_0.05': 'ACI '+r'$\gamma = 0.05$',#(Gibbs \& Candès, 2021)
                   'ACP_0.01': 'ACI '+r'$\gamma = 0.01$',# (Gibbs \& Candès, 2021),
                   'ACP_0': 'ACI '+r'$\gamma = 0$',# (Gibbs \& Candès, 2021),
                   'Aggregation_BOA_Gradient':'AgACI',
                   'ACP_QCP_0.05': 'ACI (Gibbs \& Candès, 2021) with corrected quantile',
                   'QR': 'QR (Koenker \& Bassett)',
                   'CQR': 'CQR (Romano et al., 2019)',
                   'CQR_CV': 'CQR with CV (Romano et al., 2019)'}
# Get values
for imputation name_dir, name_method = files.get_name_results('ACP_0', dataset=dataset) results = files.load_file('results/'+name_dir, name_method, 'pkl') borne_sup = results['Y_sup'] borne_inf = results['Y_inf'] y_chap = (borne_sup+borne_inf)/2 abs_res = np.abs(Y - y_chap) max_eps = np.max(abs_res) val_max = y_chap+max_eps val_min = y_chap-max_eps for method in methods: name_dir, name_method = files.get_name_results(method, dataset=dataset) results = files.load_file('results/'+name_dir, name_method, 'pkl') contains = (Y <= results['Y_sup']) & (Y >= results['Y_inf']) lengths = results['Y_sup']-results['Y_inf'] if method[:3] in ['ACP','Agg']: borne_sup = results['Y_sup'] borne_inf = results['Y_inf'] borne_sup[np.isinf(borne_sup)] = val_max[np.isinf(borne_sup)] borne_inf[np.isinf(borne_inf)] = val_min[np.isinf(borne_inf)] borne_sup[borne_sup > val_max] = val_max[borne_sup > val_max] borne_inf[borne_inf < val_min] = val_min[borne_inf < val_min] lengths = borne_sup-borne_inf if method not in ["ACP","ACP_0.01","ACP_QCP_0.05",'EnbPI_Mean', 'Aggregation_BOA_Gradient']: ax1.scatter(np.mean(contains),np.mean(lengths), marker=markers[method], color='black',s=marker_size) elif method in ["ACP","ACP_0.01","ACP_QCP_0.05"]: ax1.scatter(np.mean(contains),np.mean(lengths), marker=markers[method], color='black',s=marker_size) elif method in ['EnbPI_Mean','Aggregation_BOA_Gradient']: ax1.scatter(np.mean(contains),np.mean(lengths), marker=markers[method], color='black',s=marker_size+30) if add_offline and method in ['Gaussian','CP','CQR','CQR_CV']: name_dir, name_method = files.get_name_results(method, online=False, dataset=dataset) results = files.load_file('results/'+name_dir, name_method, 'pkl') contains = (Y <= results['Y_sup']) & (Y >= results['Y_inf']) lengths = results['Y_sup'] - results['Y_inf'] ax1.scatter(np.mean(contains),np.mean(lengths), marker=markers[method], color='black', facecolors='none',s=marker_size) ax1.axvline(x=1-alpha, color='black', ls=':') 
ax1.set_xlabel("Coverage") ax1.set_ylabel("Average length") # Methods legend handles = [] names = [] names_wo_offline = list( map(methods_display.get, methods) ) if add_offline: names = np.append(names,names_wo_offline[0]) names = np.append(names,names_wo_offline) names[1] = 'Offline SSCP'# (adapted from Lei et al., 2018) else: names = names_wo_offlines for marker in list( map(markers.get, methods) ): handles.append(mlines.Line2D([], [], color='black', marker=marker, linestyle='None')) if add_offline and marker == 's': handles.append(mlines.Line2D([], [], color='black', marker=marker, linestyle='None', markerfacecolor='none')) fig.legend(handles, names, bbox_to_anchor=(0,0.95,1,0.2), loc='upper center', ncol=3) if lines: name_plot = 'plots/prices/'+dataset+'imputed_lines' else: name_plot = 'plots/prices/'+dataset+'imputed_mean' if add_offline : name_plot = name_plot + '_offline' #plt.savefig(name_plot+'.pdf', bbox_inches='tight',dpi=300) plt.show() ```
github_jupyter
# Analyse de texte avec Unix ## Filtrage L’utilitaire `grep` (*file pattern searcher*) associé à l’option `-a` considère les fichiers en paramètres comme de l’ASCII. Il est utile pour rechercher un motif (*pattern*) en utilisant les expressions rationnelles. ```bash # find, in all the TXT files, the lines that contain the word 'upon' cat ./files/*.txt | grep -a upon ``` ```bash # words that end with suffix -ly cat ./files/*.txt | grep -a "[a-zA-Z]*ly" ``` ## Remplacement ### *SED* `sed` (*stream editor*) est un utilitaire très puissant qui permet d’éditer les lignes d’un flux en effectuant des remplacements. ```bash # substitutes first occurrence of 'id' by 'it' echo "Le petit chat boid du laid." | sed 's/id/it/' ``` ```bash # substitutes all occurrences of 'id' by 'it' echo "Le petit chat boid du laid." | sed 's/id/it/g' ``` Il est possible d’utiliser des expressions rationnelles : ```bash echo "Le petit chat boid du laid." | sed 's/\w*d/t/g' ``` Si l’utilitaire renvoie ordinairement le flux dans la sortie standard, l’option `-i` effectue le remplacement en place dans les fichiers en paramètres : ```bash sed -i '' 's/subscribe/unsubscribe/g' ./files/*.txt ``` La configuration `d` dans le paramétrage de l’outil permet de supprimer les lignes qui répondent au motif renseigné : ```bash # remove lines starting with a whitespace character sed '/^[[:space:]]/d' ./files/*.txt ``` ```bash # remove empty lines sed '/^$/d' ./files/*.txt ``` ### *TR* Un autre utilitaire pour manipuler le flux d’un texte est `tr` (*translate characters*). Il offre la capacité d’effectuer une concordance entre plusieurs caractères à remplacer. ```bash # 'a' => 'e' ; 'e' => 'a' echo "Le petit chat boit du lait." 
| tr ae ea ``` Les remplacements peuvent s’opérer depuis un fichier : ```bash tr ae ea < ./files/*.txt ``` L’option `-d` s’utilise pour supprimer des caractères : ```bash # removes all occurrences of character 'e' tr -d e < ./files/*.txt ``` Avec l’option `-s`, les caractères répétés fusionnent : ```bash # the three whitespace characters become one, while single ones still echo "Le petit chat boit du lait." | tr -s '[:blank:]' ' ' ``` Une autre option intéressante consiste à effectuer un remplacement sur tous les autres caractères sauf ceux indiqués : ```bash # keep all digits echo "La révolution française a eu lieu en 1789." | tr -cd '[:digit:]' ``` ```bash # every character but digits become an 'x' echo "La révolution française a eu lieu en 1789." | tr -c '[:digit:]' 'x' ``` ## Tri L’utilitaire `sort` permet de trier des lignes de textes. Avec l’option `-r`, le tri s’effectue de manière inversée et, avec `-f`, il ignore la casse des caractères : ```bash echo "Le petit chat boit du lait ." | sort -rf ``` Et pour effectuer un tri sur des valeurs numériques plutôt qu’alphabétiques, il faut recourir à l’option `-n`. ## Comptage Il existe un utilitaire pour compter les lignes, mots et caractères dans un texte : `wc` (*word count*). ```bash # 1 line, 6 words, 28 characters echo "Le petit chat boit du lait." | wc ``` Pour limiter le comptage à l’une ou l’autre des unités, il faut utiliser les options `-l` (lignes), `-w` (mots) et `-m` (caractères). ## Fréquence Grâce à `uniq` et son option `-c`, il est possible d’obtenir un décompte des occurrences d’un mot dans un texte : ```bash echo "Le petit chat boit du lait . Le petit chien boit de l’ eau ." | sort | uniq -c | sort -rn ``` À noter que l’option `-i` permet d’effectuer ce calcul en ignorant la casse.
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import cm from mpl_toolkits.mplot3d import axes3d import statsmodels.api as sm import statsmodels.formula.api as smf import seaborn as sns %matplotlib inline sns.set(style="white", color_codes=True) ``` # Genotypes simulation algorithm Рассмотрим следующую простую модель. Пусть есть два биаллельных ОНП $A$ и $B$ с аллелями $a_1$, $a_2$, и $b_1$, $b_2$ соответственно. Параметры модели: $P(a_1)$ -- частота аллеля $a_1$ в ОНП $A$, $P(b_1)$ -- частота аллеля $b_1$ в ОНП $B$, $N$ -- число индивидов в популяции, $r$ -- коэффициент корреляции между снипами. Запишем выражение для коэффициента неравновесия по сцеплению через частоты всех возможных пар аллелей: $$ D = P(a_1 b_1) - P(a_1) P(b_1) $$ $$ -D = P(a_1 b_2) - P(a_1) P(b_2) $$ $$ -D = P(a_2 b_1) - P(a_2) P(b_1) $$ $$ D = P(a_2 b_2) - P(a_2) P(b_2) $$ Из этой системы уравнений можно выразить вероятности гаплотипов: | | $b_1$ | $b_2$ | |:-----:|:--------------------------------:|:--------------------------------:| | $a_1$ | $P(a_1 b_1) = P(a_1) P(b_1) + D$ | $P(a_1 b_2) = P(a_1) P(b_2) - D$ | | $a_2$ | $P(a_2 b_1) = P(a_2) P(b_1) - D$ | $P(a_2 b_2) = P(a_2) P(b_2) + D$ | Теперь, когда известно распределение вероятностей гаплотипов, сгенерируем $2N$ гаплотипов и случайным образом объединив их пары, получим $N$ генотипов. # Genotypes simulation algorithm implementation ### Setting parameters ``` population_size = 100000 # number of individuals in population freq_a1 = 0.7 # allele frequency of the first allele in first site freq_a2 = 1 - freq_a1 # allele frequency of the second allele in first site freq_b1 = 0.6 # allele frequency of the first allele in second site freq_b2 = 1 - freq_b1 # allele frequency of the second allele in second site r = 0.7 # absolute value should not be more than 0.2 (??) 
d = r * np.sqrt(freq_a1 * freq_a2 * freq_b1 * freq_b2) print("d =", d) ``` ### Calculating probabilities of haplotypes ``` p_a1_b1 = d + freq_a1 * freq_b1 p_a1_b2 = -d + freq_a1 * freq_b2 p_a2_b1 = -d + freq_a2 * freq_b1 p_a2_b2 = d + freq_a2 * freq_b2 # 1 - p_a1_b1 - p_a1_b2 - p_a2_b1 if 0.0 <= p_a1_b1 <= 1.0 and \ 0.0 <= p_a1_b2 <= 1.0 and \ 0.0 <= p_a2_b1 <= 1.0 and \ 0.0 <= p_a2_b2 <= 1.0: print("p_a1_b1 =", p_a1_b1) print("p_a1_b2 =", p_a1_b2) print("p_a2_b1 =", p_a2_b1) print("p_a2_b2 =", p_a2_b2) else: print("incorrect input") # plot d from r and p_ai_bj, найти множество допустимых значений ``` ### Generating haplotypes ``` haplotypes = [] for counter in range(2 * population_size): x = float(np.random.uniform(0, 1, 1)) if x < p_a1_b1: haplotypes.append(11) elif x < p_a1_b1 + p_a1_b2: haplotypes.append(12) elif x < p_a1_b1 + p_a1_b2 + p_a2_b1: haplotypes.append(21) else: haplotypes.append(22) ``` ### Generating genotypes ``` genotypes = [] for i in range(population_size): genotype = str(haplotypes[i] // 10 + haplotypes[i + population_size] // 10 - 2) genotype += str(haplotypes[i] % 10 + haplotypes[i + population_size] % 10 - 2) genotypes.append(genotype) genotypes = np.array([list(i) for i in genotypes], dtype='int') mse_genotypes_a = np.mean((genotypes[:, 0] - np.mean(genotypes[:, 0])) ** 2) mse_genotypes_b = np.mean((genotypes[:, 1] - np.mean(genotypes[:, 1])) ** 2) print(mse_genotypes_a, 2 * freq_a1 * freq_a2) print(mse_genotypes_b, 2 * freq_b1 * freq_b2) fig = plt.figure() ax = fig.add_subplot(111) n, bins, rectangles = ax.hist(genotypes[:, 1], 100, normed=True) fig.canvas.draw() plt.show() count_a, count_b = 0, 0 for i in range(population_size): count_a += genotypes[i][0] count_b += genotypes[i][1] print(" Calc allele freq \t Exp allele freq") print("A: ", 1 - count_a / (2 * population_size), "\t\t", freq_a1) print("B: ", 1 - count_b / (2 * population_size), "\t\t", freq_b1) ``` # Phenotypes simulation algorithm Рассмотрим аддитивную модель с параметрами 
$\beta_A$ и $\beta_B$: $a_1 a_1$ 0 $a_1 a_2$ $\beta_A$ $a_2 a_2$ $2 \beta_A$ $b_1 b_1$ 0 $b_1 b_2$ $\beta_B$ $b_2 b_2$ $2 \beta_B$ $\mathbf{y} = \mathbf{G} \mathbf{w} + \mathbf{\varepsilon}$ $\mathbf{y}$ -- вектор фенотипов, $\mathbf{G}$ -- матрица генотипов, $\mathbf{w}$ -- вектор весов, $\mathbf{\varepsilon}$ -- вектор ошибок, полученный из стандартного распределения Пусть $\sigma_y = 1$, тогда, поскольку $\sigma_y = \sigma_{Gw} + \sigma_{\varepsilon}$, $\sigma_{\varepsilon} = 1 - \sigma_{Gw}$ $ \sigma_{Gw} = \beta_1^2 mse_a + \beta_2^2 mse_b - 2 cov(\beta_1^2 mse_a, \beta_2^2 mse_b)$ # Phenotypes simulation algorithm implementation ### Setting parameters ``` beta_a = 0.15 # effect of heterozygote in the first snp on phenotype beta_b = 0.13 # effect of heterozygote in the second snp on phenotype phenotypes = [] sigma_err = 1 - mse_genotypes_a * beta_a ** 2 - mse_genotypes_b * beta_b ** 2 for i in range(population_size): phenotype = genotypes[i][0] * beta_a + genotypes[i][1] * beta_b + np.random.normal(0, np.sqrt(sigma_err)) phenotypes.append(phenotype) # test phenotypes mean and d # test against normal distr. 
np.std(phenotypes) simulated_data = pd.DataFrame({"phenotype": phenotypes, "snp_a_gen": genotypes[:, 0], "snp_b_gen": genotypes[:, 1]}) simulated_data.head() fig = plt.figure(figsize=(20, 15)) ax = fig.gca(projection="3d") ax.plot(simulated_data.snp_a_gen, simulated_data.snp_b_gen, simulated_data.phenotype, 'co', zorder=0) ax.plot(simulated_data.snp_a_gen, simulated_data.phenotype, 'ko', zdir='y', alpha=0.25, zs=3.0, mec=None, zorder=0) ax.plot(simulated_data.snp_b_gen, simulated_data.phenotype, 'ko', zdir='x', alpha=0.25, zs=3.06, mec=None, zorder=0) ax.plot(simulated_data.snp_a_gen, simulated_data.snp_b_gen, 'ko', zdir='z', alpha=0.25, zs=-3.1, mec=None, zorder=0) # adding plane model = smf.ols('phenotype ~ snp_a_gen + snp_b_gen', data=simulated_data).fit() # print(model.summary()) xx, yy = np.meshgrid(np.linspace(0, 3.0, 20), np.linspace(0, 3.0, 20)) zz = model.params[1] * xx + model.params[2] * yy + model.params[0] plane = ax.plot_surface(xx, yy, zz, color='blue', alpha=0.5, cmap=cm.coolwarm, zorder=1) fit_a = np.polyfit(simulated_data.snp_a_gen, simulated_data.phenotype, deg=1) ax.plot(np.array(simulated_data.snp_a_gen), fit_a[0] * np.array(simulated_data.snp_a_gen) + fit_a[1], color='k', zdir='y', zs=3.0, zorder=1) fit_b = np.polyfit(simulated_data.snp_b_gen, simulated_data.phenotype, deg=1) ax.plot(np.array(simulated_data.snp_b_gen), fit_b[0] * np.array(simulated_data.snp_b_gen) + fit_b[1], color='k', zdir='x', zs=3.06, zorder=1) for gen_a in range(3): for gen_b in range(3): print("gen_a =", gen_a, ", gen_b =", gen_b, ", mean =", simulated_data[(simulated_data.snp_a_gen == gen_a) & (simulated_data.snp_b_gen == gen_b)].phenotype.mean()) ax.scatter(xs=[gen_a], ys=[gen_b], zs=[simulated_data[(simulated_data.snp_a_gen == gen_a) & (simulated_data.snp_b_gen == gen_b)].phenotype.mean()], color='r', zorder=10) print(simulated_data[(simulated_data.snp_a_gen == 2) & (simulated_data.snp_b_gen == 2)].phenotype.mean()) for gen in range(3): ax.plot([gen], 
[simulated_data[simulated_data.snp_a_gen == gen].phenotype.mean()], color='k', zdir='y', zs=3.0, zorder=10) ax.plot([gen], [simulated_data[simulated_data.snp_b_gen == gen].phenotype.mean()], color='k', zdir='x', zs=3.0, zorder=10) ax.set_xlabel("A SNP Genotypes") ax.set_xlim(3.0, 0.0) ax.set_ylabel('B SNP Genotypes') ax.set_ylim(0.0, 3.0) ax.set_zlabel("Phenotype") ax.set_zlim(-3.0, 3.0) ax.set_yticks(range(3)) ax.set_xticks(range(3)) fig.colorbar(plane, shrink=0.5, aspect=20, ticks=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]) # fig.savefig("./multivariate_regression_on_simulated_data.pdf", dpi=300) # fig.savefig("./multivariate_regression_on_simulated_data.png", dpi=300) plt.show() plt.close(fig) joint_z1_z2 = pd.read_csv("../out/1000_iter_z1_z2.csv", names="z1 z2".split()) print(joint_z1_z2.head()) g = sns.jointplot("z1", "z2", data=joint_z1_z2, kind="kde", space=0, color="g") g.savefig("../out/joint_distr_z1_z2_1000_iter.png", dpi=300) plt.show() ```
github_jupyter
<a href="https://colab.research.google.com/github/lionelsamrat10/machine-learning-a-to-z/blob/main/Deep%20Learning/Convolutional%20Neural%20Networks%20(CNN)/convolutional_neural_network_samrat_with_10_epochs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Convolutional Neural Network ### Importing the libraries ``` import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator import numpy as np tf.__version__ ``` ## Part 1 - Data Preprocessing ### Preprocessing the Training set ``` # Transforming the Image # Rescale applies Feature Scaling to each pixels in our images # We are doing this to avoid Overfitting train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) # Only 32 images will run in one batch training_set = train_datagen.flow_from_directory('dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary') ``` ### Preprocessing the Test set ``` test_datagen = ImageDataGenerator(rescale = 1./255) test_set = test_datagen.flow_from_directory('dataset/test_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary') ``` ## Part 2 - Building the CNN ### Initialising the CNN ``` cnn = tf.keras.models.Sequential(); ``` ### Step 1 - Convolution ``` cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # Kernel Size is same as the number of rows in the Feature Detector # The images are resized as 64px X 64px and 3 denotes 3D R, G, B ``` ### Step 2 - Pooling ``` cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2)) ``` ### Adding a second convolutional layer ``` cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu')) cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2)) ``` ### Step 3 - Flattening ``` cnn.add(tf.keras.layers.Flatten()) #Flattens the 2D array into an 1D array ``` ### Step 4 - Full Connection ``` 
cnn.add(tf.keras.layers.Dense(units=128, activation='relu')) # Units mean the number of neurons in the hidden layer ``` ### Step 5 - Output Layer ``` cnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid')) ``` ## Part 3 - Training the CNN ### Compiling the CNN ``` cnn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) ``` ### Training the CNN on the Training set and evaluating it on the Test set ``` cnn.fit(x = training_set, validation_data = test_set, epochs = 10) ``` ## Part 4 - Making a single prediction ``` from keras.preprocessing import image test_image = image.load_img('dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64)) # Creates a PIL image test_image = image.img_to_array(test_image) # Converts the PIL image to a NumPy Array test_image = np.expand_dims(test_image, axis = 0) # Cobtain the image into a Batch result = cnn.predict(test_image) training_set.class_indices if result[0][0] == 1: prediction = 'dog' else: prediction = 'cat' print(prediction) # cat_or_dog_1.jpg originally is an image of a dog ```
github_jupyter
# RPLib Problem 0001 - Baseline Provides the baseline version to rankability problem 0001. Focuses on Massey and Colley out of the box without ties or indirect game information. ``` %load_ext autoreload %autoreload 2 %matplotlib inline import copy import os import pandas as pd import numpy as np from scipy.stats import pearsonr from tqdm import tqdm #import matplotlib.pyplot as plt from joblib import Parallel, delayed import joblib import itertools from pathlib import Path from IPython.display import display, Markdown, Latex ``` **All packages are relative to the home directory of the user** ``` home = str(Path.home()) ``` **Import the main rankability package** ``` import sys sys.path.insert(0,"%s/rankability_toolbox_dev"%home) import pyrankability ``` **Load the problem information** ``` problem = joblib.load("/disk/RPLib/problem_0001.joblib.z") ``` ## Explore and setup the problem ``` problem.keys() print(problem["description"]) problem['target'] problem['data'].keys() problem['data']['2002'].keys() ``` **Create easier to reference variables** ``` years = list(problem['data'].keys()) frac_keys = list(problem['data'][years[0]].keys()) remaining_games = problem['other']['remaining_games'] madness_teams = problem['other']['madness_teams'] best_df = problem['other']['best_df'] top_k = problem['other']['top_k'] target_column = f"top{top_k}_intersection" best_pred_df = problem['other']['best_pred_df'] ``` ## Define helper functions **Function to compute a D matrix from games using hyperparameters** ``` def compute_D(game_df,team_range,direct_thres,spread_thres): map_func = lambda linked: pyrankability.construct.support_map_vectorized_direct_indirect(linked,direct_thres=direct_thres,spread_thres=spread_thres) Ds = pyrankability.construct.V_count_vectorized(game_df,map_func) for i in range(len(Ds)): Ds[i] = Ds[i].reindex(index=team_range,columns=team_range) return Ds def process(data,target,best_df_all): index_cols = 
["Year","frac_key","direct_thres","spread_thres","weight_indirect","range"] Ds = pd.DataFrame(columns=["D"]+index_cols) Ds.set_index(index_cols,inplace=True) for frac_key,year in tqdm(itertools.product(frac_keys,years)): frac = float(frac_key.split("=")[1]) best_df = best_df_all.set_index('frac').loc[frac] for index,row in best_df.iterrows(): dom,ran,dt,st,iw = row.loc['domain'],row.loc['range'],row.loc['direct_thres'],row.loc['spread_thres'],row.loc['weight_indirect'] # set the team_range team_range = None if ran == 'madness': team_range = madness_teams[year] elif ran == 'all': team_range = all_teams[year] else: raise Exception(f"range={ran} not supported") name = (year,frac_key,dt,st,iw,ran) if iw == 0: st = np.Inf D = compute_D(data[year][frac_key],team_range,dt,st) Ds = Ds.append(pd.Series([D],index=["D"],name=name)) return Ds ``` ## Create D matrices We will ignore indirect games and ties (direct threshold modification). ``` param_df = best_df.copy() param_df.spread_thres = 0 param_df.weight_indirect = 0 param_df.direct_thres = 0 param_df Ds = process(problem['data'],problem['target'],param_df) Ds Ds.iloc[[0,-1]] Ds.loc['2002',"D"][0][0] Ds.loc['2002',"D"][0][1] Ds.index.names ``` ### Compute the features ``` feature_columns = ["delta_lop","delta_hillside","nfrac_xstar_lop","nfrac_xstar_hillside","diameter_lop","diameter_hillside"] def compute_features(D,rankings,top_k): top_teams = list(rankings.sort_values().index[:top_k]) D = D.loc[top_teams,top_teams] delta_lop,details_lop = pyrankability.rank.solve(D.fillna(0),method='lop',cont=True) x = pd.DataFrame(details_lop['x'],index=D.index,columns=D.columns) r = x.sum(axis=0) order = np.argsort(r) xstar = x.iloc[order,:].iloc[:,order] xstar.loc[:,:] = pyrankability.common.threshold_x(xstar.values) inxs = np.triu_indices(len(xstar),k=1) xstar_upper = xstar.values[inxs[0],inxs[1]] nfrac_upper_lop = sum((xstar_upper > 0) & (xstar_upper < 1)) top_teams = xstar.columns[:top_k] k_two_distant,details_two_distant = 
pyrankability.search.solve_pair_max_tau(D.fillna(0),method='lop',cont=False,verbose=False) d_lop = details_two_distant['tau'] delta_hillside,details_hillside = pyrankability.rank.solve(D,method='hillside',cont=True) x = pd.DataFrame(details_hillside['x'],index=D.index,columns=D.columns) r = x.sum(axis=0) order = np.argsort(r) xstar = x.iloc[order,:].iloc[:,order] xstar.loc[:,:] = pyrankability.common.threshold_x(xstar.values) inxs = np.triu_indices(len(xstar),k=1) xstar_upper = xstar.values[inxs[0],inxs[1]] nfrac_upper_hillside = sum((xstar_upper > 0) & (xstar_upper < 1)) top_teams = xstar.columns[:top_k] k_two_distant,details_two_distant = pyrankability.search.solve_pair_max_tau(D,method='hillside',verbose=False,cont=False) d_hillside = details_two_distant['tau'] features = pd.Series([delta_lop,delta_hillside,2*nfrac_upper_lop,2*nfrac_upper_hillside,d_lop,d_hillside],index=feature_columns) return features best_pred_df = best_pred_df.reset_index() best_pred_df['frac_key'] = "frac="+best_pred_df['frac'].astype(str) best_pred_df def create_features(Ds,best_pred_df,top_k): index_cols = list(Ds.index.names)+["Method","Construction"] X = pd.DataFrame(columns=index_cols + feature_columns) X.set_index(index_cols,inplace=True) for index,row in tqdm(Ds.iterrows()): year,frac_key,dt,st,iw,ran = index frac = float(frac_key.split("=")[1]) spec_best_pred_df = best_pred_df.set_index(list(Ds.index.names)).loc[[(year,frac_key,dt,st,iw,ran)]] sum_D = None for i,D in enumerate(Ds.loc[(year,frac_key,dt,st,iw,ran),"D"]): if sum_D is None: sum_D = D else: sum_D = sum_D.add(iw*D,fill_value=0) if i == 0: construction = "Direct" elif i == 1: construction = "Indirect" else: raise Exception("Error") methods = spec_best_pred_df["Method"].unique() for method in methods: rankings = spec_best_pred_df.set_index('Method').loc[method,'rankings'] features = compute_features(D,rankings,top_k) features.name = tuple(list(index)+[method,construction]) X = X.append(features) construction = "Both" 
methods = spec_best_pred_df["Method"].unique() for method in methods: rankings = spec_best_pred_df.set_index('Method').loc[method,'rankings'] features = compute_features(sum_D,rankings,top_k) features.name = tuple(list(index)+[method,construction]) X = X.append(features) return X X = create_features(Ds,best_pred_df.reset_index(),top_k) X ``` ## Refine the target dataset ``` target = problem['target'].groupby(['frac1','frac2','Method','Year','direct_thres','spread_thres','weight_indirect'])[target_column].mean().to_frame() target X_for_join = X.copy().reset_index() X_for_join['frac1']= X_for_join['frac_key'].str.replace("frac=","").astype(float) X_for_join target Xy = target.reset_index().set_index(['Method','frac1','Year','direct_thres','spread_thres','weight_indirect']).join(X_for_join.set_index(['Method','frac1','Year','direct_thres','spread_thres','weight_indirect'])).dropna() Xy = Xy.reset_index() Xy ``` ## Process results ``` frac_pairs = [(0.5,0.6),(0.6,0.7),(0.7,0.8),(0.8,0.9),(0.9,1.)] summary = None for pair in frac_pairs: data = Xy.set_index(['frac1','frac2']).loc[pair].reset_index() for_corr = data.set_index(['Method','Construction',"frac1","frac2"]) if summary is None: summary = pd.DataFrame(columns=["frac1","frac2","Method","Construction"]+feature_columns).set_index(list(for_corr.index.names)) for ix in for_corr.index.unique(): corr_results = for_corr.loc[ix][[target_column]+feature_columns].corr() target_corr_results = corr_results.loc[target_column].drop(target_column) target_corr_results.name = ix summary = summary.append(target_corr_results) display(summary) summary ``` ## 0.6 to 0.7 ``` data = Xy.set_index(['frac1','frac2']).loc[(0.6,0.7)].reset_index() for_corr = data.set_index(['Method','Construction']) for ix in for_corr.index.unique(): display(pd.Series(ix,index=for_corr.index.names)) display(for_corr.loc[ix][[target_column]+feature_columns].corr()) ``` ### 0.7 to 0.8 ``` data = Xy.set_index(['frac1','frac2']).loc[(0.7,0.8)].reset_index() 
for_corr = data.set_index(['Method']) for ix in for_corr.index.unique(): display(pd.Series(ix,index=for_corr.index.names)) display(for_corr.loc[ix][[target_column]+feature_columns].corr()) ``` ### 0.8 to 0.9 ``` data = Xy.set_index(['frac1','frac2']).loc[(0.8,0.9)].reset_index() for_corr = data.set_index(['Method']) for ix in for_corr.index.unique(): display(pd.Series(ix,index=for_corr.index.names)) display(for_corr.loc[ix][[target_column]+feature_columns].corr()) ``` ### 0.9 to 1. ``` data = Xy.set_index(['frac1','frac2']).loc[(0.9,1.)].reset_index() for_corr = data.set_index(['Method']) for ix in for_corr.index.unique(): display(pd.Series(ix,index=for_corr.index.names)) display(for_corr.loc[ix][[target_column]+feature_columns].corr()) for_corr = data.set_index(['Method','direct_thres','spread_thres','weight_indirect']) for_display = pd.DataFrame(columns=feature_columns+list(for_corr.index.names)) for_display.set_index(list(for_corr.index.names),inplace=True) for ix in for_corr.index.unique(): dt = for_corr.loc[ix][[target_column]+feature_columns].corr().loc[target_column,feature_columns] dt.name = ix for_display = for_display.append(dt) for_display.T print(for_display.T.to_latex()) ```
github_jupyter
<div align="Right"><font size="1">https://github.com/mrola/jupyter_themes_preview<br>Ola Söderström - 2018</font></div> ----- <p align="center"><font size="6">Jupyter notebook for testing out different themes</font></p> ----- # import libs ``` %matplotlib inline import os import sys import numpy as np import pandas as pd import seaborn as sns import matplotlib as mpl import matplotlib.pyplot as plt from IPython.core.display import display, HTML from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" ``` ## Display version info ``` try: %load_ext version_information %version_information wget, pandas, numpy except ModuleNotFoundError: print("Module \"version_information\" not found, install using \"pip install version_information\"") pass ``` ## Check requirements ``` if not (sys.version_info.major > 2 and sys.version_info.minor > 2): print("Notebook requires Python 3.2 or higher") ``` # Try new style css ## Fetch css and store as new profile ``` def mynewstyle(new_style_url, profilename="newcoolprofile"): '''Creates directory and custom.css for new notebook style. Run HTML command displayed at the end of execution to apply new style. <style> tags will be inserted if missing. To revert to default style, comment out HTML command using "#". 
Parameters: new_style_url : URL to css file to download profilename : Name of new profile (arbitrary) ''' use_new_style = True print("Will use {}".format(os.path.basename(new_style_url))) m = !ipython locate profile print("{:35} {}".format("Default profile location:", m[0])) !ipython profile create $profilename m1 = !ipython locate profile $profilename print("{:35} {}".format("New profile directory created", m1[0])) p=!ipython locate profile $profilename p = p[0] + '/static/custom/' if os.path.exists(p) is True: print("{:35} {}".format("Directory already exists:", p)) else: print("Creating {}".format(p)) os.makedirs(p, exist_ok=True) ccss = p + 'custom.css' print() !wget $new_style_url -nv -O $ccss styletag = False with open(ccss, 'r+') as f: for line in f.readlines(): if 'DOCTYPE' in line: print("This appears to be a html document, need standalone css.") return elif '<style>' in line: styletag = True break if styletag is False: # print("\nHTML <style> tags appears to be missing in custom.css, will add...") !sed -i '1s/^/\<style\>/' $ccss !echo "<\style>" >> $ccss html_line = 'HTML(open(\'{}\', \'r\').read())'.format(ccss) print("\nNow you need to execute the follwing line in single cell: \n {}".format(html_line)) ``` ### Set URL Just some random themes I picked up for testing. 
``` #new_style_url='https://raw.githubusercontent.com/dunovank/jupyter-themes/master/jupyterthemes/styles/compiled/monokai.css' new_style_url='https://raw.githubusercontent.com/neilpanchal/spinzero-jupyter-theme/master/custom.css' print("Will be using css from {}".format(new_style_url)) ``` ### Run script ``` mynewstyle(new_style_url, profilename="newprofile_34") ``` ## Activate new style ``` HTML(open('/home/ola/.ipython/profile_newprofile_34/static/custom/custom.css', 'r').read()) ``` # Check style on some random stuff ``` df = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),columns=['a', 'b', 'c', 'd', 'e']) df.loc[0, 'a'] = "This is some text" df ``` ## This is heading 2 ### This is heading 3 This is markdown text. # Viz ``` def sinplot(flip=1): x = np.linspace(0, 14, 100) for i in range(1, 7): plt.plot(x, np.sin(x + i * .5) * (7 - i) * flip) sinplot() sns.set_style("ticks") sns.despine(offset=10, trim=True) ``` # Setting individual styles using display(HTML) ``` display(HTML("<style>.cell { font-size: 12px; width:900px }</style>")) display(HTML("<style>.input { margin-top:2em, margin-bottom:2em }</style>")) #display(HTML("<style>.div.output_wrapper { margin-top:2em, margin-bottom:2em }</style>")) #display(HTML("<style>.rendered_html { background-color: white; }</style>")) #display(HTML("<style>.text_cell_render { font-size: 15px; }</style>")) #display(HTML("<style>.text_cell { font-size: 15px; }</style>")) #display(HTML("<style>.cell { font-size: 12px; max-width:000px }</style>")) #display(HTML("<style>.CodeMirror { background-color: #2b303b; }</style>")) #display(HTML("<style>.cell { background-color: #2b303b; }</style>")) ```
github_jupyter
# Human numbers ``` from fastai2.basics import * from fastai2.text.all import * from fastai2.callback.all import * bs=64 ``` ## Data ``` path = untar_data(URLs.HUMAN_NUMBERS) path.ls() def readnums(d): return ', '.join(o.strip() for o in open(path/d).readlines()) train_txt = readnums('train.txt'); train_txt[:80] valid_txt = readnums('valid.txt'); valid_txt[-80:] train_tok = tokenize1(train_txt) valid_tok = tokenize1(valid_txt) dsrc = DataSource([train_tok, valid_tok], tfms=Numericalize, dl_type=LMDataLoader, splits=[[0], [1]]) dbunch = dsrc.databunch(bs=bs, val_bs=bs, after_batch=Cuda()) dsrc.show((dsrc.train[0][0][:80],)) len(dsrc.valid[0][0]) len(dbunch.valid_dl) dbunch.seq_len, len(dbunch.valid_dl) 13017/72/bs it = iter(dbunch.valid_dl) x1,y1 = next(it) x2,y2 = next(it) x3,y3 = next(it) it.close() x1.numel()+x2.numel()+x3.numel() ``` This is the closes multiple of 64 below 13017 ``` x1.shape,y1.shape x2.shape,y2.shape x1[0] y1[0] v = dbunch.vocab ' '.join([v[x] for x in x1[0]]) ' '.join([v[x] for x in y1[0]]) ' '.join([v[x] for x in x2[0]]) ' '.join([v[x] for x in x3[0]]) ' '.join([v[x] for x in x1[1]]) ' '.join([v[x] for x in x2[1]]) ' '.join([v[x] for x in x3[1]]) ' '.join([v[x] for x in x3[-1]]) dbunch.valid_dl.show_batch() ``` ## Single fully connected model ``` dbunch = dsrc.databunch(bs=bs, seq_len=3, after_batch=Cuda) x,y = dbunch.one_batch() x.shape,y.shape nv = len(v); nv nh=64 def loss4(input,target): return F.cross_entropy(input, target[:,-1]) def acc4 (input,target): return accuracy(input, target[:,-1]) class Model0(Module): def __init__(self): self.i_h = nn.Embedding(nv,nh) # green arrow self.h_h = nn.Linear(nh,nh) # brown arrow self.h_o = nn.Linear(nh,nv) # blue arrow self.bn = nn.BatchNorm1d(nh) def forward(self, x): h = self.bn(F.relu(self.h_h(self.i_h(x[:,0])))) if x.shape[1]>1: h = h + self.i_h(x[:,1]) h = self.bn(F.relu(self.h_h(h))) if x.shape[1]>2: h = h + self.i_h(x[:,2]) h = self.bn(F.relu(self.h_h(h))) return self.h_o(h) learn = 
Learner(dbunch, Model0(), loss_func=loss4, metrics=acc4) learn.fit_one_cycle(6, 1e-4) ``` ## Same thing with a loop ``` class Model1(Module): def __init__(self): self.i_h = nn.Embedding(nv,nh) # green arrow self.h_h = nn.Linear(nh,nh) # brown arrow self.h_o = nn.Linear(nh,nv) # blue arrow self.bn = nn.BatchNorm1d(nh) def forward(self, x): h = torch.zeros(x.shape[0], nh).to(device=x.device) for i in range(x.shape[1]): h = h + self.i_h(x[:,i]) h = self.bn(F.relu(self.h_h(h))) return self.h_o(h) learn = Learner(dbunch, Model1(), loss_func=loss4, metrics=acc4) learn.fit_one_cycle(6, 1e-4) ``` ## Multi fully connected model ``` dbunch = dsrc.databunch(bs=bs, seq_len=20, after_batch=Cuda) x,y = dbunch.one_batch() x.shape,y.shape class Model2(Module): def __init__(self): self.i_h = nn.Embedding(nv,nh) self.h_h = nn.Linear(nh,nh) self.h_o = nn.Linear(nh,nv) self.bn = nn.BatchNorm1d(nh) def forward(self, x): h = torch.zeros(x.shape[0], nh).to(device=x.device) res = [] for i in range(x.shape[1]): h = h + self.i_h(x[:,i]) h = F.relu(self.h_h(h)) res.append(self.h_o(self.bn(h))) return torch.stack(res, dim=1) learn = Learner(dbunch, Model2(), loss_func=CrossEntropyLossFlat(), metrics=accuracy) learn.fit_one_cycle(10, 1e-4, pct_start=0.1) ``` ## Maintain state ``` class Model3(Module): def __init__(self): self.i_h = nn.Embedding(nv,nh) self.h_h = nn.Linear(nh,nh) self.h_o = nn.Linear(nh,nv) self.bn = nn.BatchNorm1d(nh) self.h = torch.zeros(bs, nh).cuda() def forward(self, x): res = [] if x.shape[0]!=self.h.shape[0]: self.h = torch.zeros(x.shape[0], nh).cuda() h = self.h for i in range(x.shape[1]): h = h + self.i_h(x[:,i]) h = F.relu(self.h_h(h)) res.append(self.bn(h)) self.h = h.detach() res = torch.stack(res, dim=1) res = self.h_o(res) return res def reset(self): self.f.h = torch.zeros(bs, nh).cuda() learn = Learner(dbunch, Model3(), metrics=accuracy, loss_func=CrossEntropyLossFlat()) learn.fit_one_cycle(20, 3e-3) ``` ## nn.RNN ``` class Model4(Module): def __init__(self): 
self.i_h = nn.Embedding(nv,nh) self.rnn = nn.RNN(nh,nh, batch_first=True) self.h_o = nn.Linear(nh,nv) self.bn = BatchNorm1dFlat(nh) self.h = torch.zeros(1, bs, nh).cuda() def forward(self, x): if x.shape[0]!=self.h.shape[1]: self.h = torch.zeros(1, x.shape[0], nh).cuda() res,h = self.rnn(self.i_h(x), self.h) self.h = h.detach() return self.h_o(self.bn(res)) learn = Learner(dbunch, Model4(), loss_func=CrossEntropyLossFlat(), metrics=accuracy) learn.fit_one_cycle(20, 3e-3) ``` ## 2-layer GRU ``` class Model5(Module): def __init__(self): self.i_h = nn.Embedding(nv,nh) self.rnn = nn.GRU(nh, nh, 2, batch_first=True) self.h_o = nn.Linear(nh,nv) self.bn = BatchNorm1dFlat(nh) self.h = torch.zeros(2, bs, nh).cuda() def forward(self, x): if x.shape[0]!=self.h.shape[1]: self.h = torch.zeros(2, x.shape[0], nh).cuda() res,h = self.rnn(self.i_h(x), self.h) self.h = h.detach() return self.h_o(self.bn(res)) learn = Learner(dbunch, Model5(), loss_func=CrossEntropyLossFlat(), metrics=accuracy) learn.fit_one_cycle(10, 1e-2) ``` ## fin
github_jupyter
# Linear regression from scratch Powerful ML libraries can eliminate repetitive work, but if you rely too much on abstractions, you might never learn how neural networks really work under the hood. So for this first example, let's get our hands dirty and build everything from scratch, relying only on autograd and NDArray. First, we'll import the same dependencies as in the [autograd chapter](../chapter01_crashcourse/autograd.ipynb). We'll also import the powerful `gluon` package but in this chapter, we'll only be using it for data loading. ``` from __future__ import print_function import mxnet as mx from mxnet import nd, autograd, gluon mx.random.seed(1) ``` ## Set the context We'll also want to specify the contexts where computation should happen. This tutorial is so simple that you could probably run it on a calculator watch. But, to develop good habits we're going to specify two contexts: one for data and one for our models. ``` data_ctx = mx.cpu() model_ctx = mx.cpu() ``` ## Linear regression To get our feet wet, we'll start off by looking at the problem of regression. This is the task of predicting a *real valued target* $y$ given a data point $x$. In linear regression, the simplest and still perhaps the most useful approach, we assume that prediction can be expressed as a *linear* combination of the input features (thus giving the name *linear* regression): $$\hat{y} = w_1 \cdot x_1 + ... + w_d \cdot x_d + b$$ Given a collection of data points $X$, and corresponding target values $\boldsymbol{y}$, we'll try to find the *weight* vector $\boldsymbol{w}$ and bias term $b$ (also called an *offset* or *intercept*) that approximately associate data points $\boldsymbol{x}_i$ with their corresponding labels ``y_i``. 
Using slightly more advanced math notation, we can express the predictions $\boldsymbol{\hat{y}}$ corresponding to a collection of datapoints $X$ via the matrix-vector product: $$\boldsymbol{\hat{y}} = X \boldsymbol{w} + b$$ Before we can get going, we will need two more things * Some way to measure the quality of the current model * Some way to manipulate the model to improve its quality ### Square loss In order to say whether we've done a good job, we need some way to measure the quality of a model. Generally, we will define a *loss function* that says *how far* are our predictions from the correct answers. For the classical case of linear regression, we usually focus on the squared error. Specifically, our loss will be the sum, over all examples, of the squared error $(y_i-\hat{y})^2)$ on each: $$\ell(y, \hat{y}) = \sum_{i=1}^n (\hat{y}_i-y_i)^2.$$ For one-dimensional data, we can easily visualize the relationship between our single feature and the target variable. It's also easy to visualize a linear predictor and it's error on each example. Note that squared loss *heavily penalizes outliers*. For the visualized predictor below, the lone outlier would contribute most of the loss. ![](../img/linear-regression.png) ### Manipulating the model For us to minimize the error, we need some mechanism to alter the model. We do this by choosing values of the *parameters* $\boldsymbol{w}$ and $b$. This is the only job of the learning algorithm. Take training data ($X$, $y$) and the functional form of the model $\hat{y} = X\boldsymbol{w} + b$. Learning then consists of choosing the best possible $\boldsymbol{w}$ and $b$ based on the available evidence. ### Historical note You might reasonably point out that linear regression is a classical statistical model. [According to Wikipedia](https://en.wikipedia.org/wiki/Regression_analysis#History), Legendre first developed the method of least squares regression in 1805, which was shortly thereafter rediscovered by Gauss in 1809. 
Presumably, Legendre, who had Tweeted about the paper several times, was peeved that Gauss failed to cite his arXiv preprint. ![Legendre](../img/legendre.jpeg) Matters of provenance aside, you might wonder - if Legendre and Gauss worked on linear regression, does that mean there were the original deep learning researchers? And if linear regression doesn't wholly belong to deep learning, then why are we presenting a linear model as the first example in a tutorial series on neural networks? Well it turns out that we can express linear regression as the simplest possible (useful) neural network. A neural network is just a collection of nodes (aka neurons) connected by directed edges. In most networks, we arrange the nodes into layers with each feeding its output into the layer above. To calculate the value of any node, we first perform a weighted sum of the inputs (according to weights ``w``) and then apply an *activation function*. For linear regression, we only have two layers, one corresponding to the input (depicted in orange) and a one-node layer (depicted in green) correspnding to the ouput. For the output node the activation function is just the identity function. ![](../img/onelayer.png) While you certainly don't have to view linear regression through the lens of deep learning, you can (and we will!). To ground the concepts that we just discussed in code, let's actually code up a neural network for linear regression from scratch. To get going, we will generate a simple synthetic dataset by sampling random data points ``X[i]`` and corresponding labels ``y[i]`` in the following manner. Out inputs will each be sampled from a random normal distribution with mean $0$ and variance $1$. Our features will be independent. Another way of saying this is that they will have diagonal covariance. 
The labels will be generated according to the *true* labeling function
We'll also want to be able to grab batches of ``k`` data points at a time, and to shuffle our data.
We expect this because our dataset has 10,000 examples and we configured the `DataLoader` with a batch size of 4. ``` counter = 0 for i, (data, label) in enumerate(train_data): pass print(i+1) ``` ## Model parameters Now let's allocate some memory for our parameters and set their initial values. We'll want to initialize these parameters on the `model_ctx`. ``` w = nd.random_normal(shape=(num_inputs, num_outputs), ctx=model_ctx) b = nd.random_normal(shape=num_outputs, ctx=model_ctx) params = [w, b] ``` In the succeeding cells, we're going to update these parameters to better fit our data. This will involve taking the gradient (a multi-dimensional derivative) of some *loss function* with respect to the parameters. We'll update each parameter in the direction that reduces the loss. But first, let's just allocate some memory for each gradient. ``` for param in params: param.attach_grad() ``` ## Neural networks Next we'll want to define our model. In this case, we'll be working with linear models, the simplest possible *useful* neural network. To calculate the output of the linear model, we simply multiply a given input with the model's weights (``w``), and add the offset ``b``. ``` def net(X): return mx.nd.dot(X, w) + b ``` Ok, that was easy. ## Loss function Train a model means making it better and better over the course of a period of training. But in order for this goal to make any sense at all, we first need to define what *better* means in the first place. In this case, we'll use the squared distance between our prediction and the true value. ``` def square_loss(yhat, y): return nd.mean((yhat - y) ** 2) ``` ## Optimizer It turns out that linear regression actually has a closed-form solution. However, most interesting models that we'll care about cannot be solved analytically. So we'll solve this problem by stochastic gradient descent. At each step, we'll estimate the gradient of the loss with respect to our weights, using one batch randomly drawn from our dataset. 
Then, we'll update our parameters a small amount in the direction that reduces the loss. The size of the step is determined by the *learning rate* ``lr``. ``` def SGD(params, lr): for param in params: param[:] = param - lr * param.grad ``` ## Execute training loop Now that we have all the pieces, we just need to wire them together by writing a training loop. First we'll define ``epochs``, the number of passes to make over the dataset. Then for each pass, we'll iterate through ``train_data``, grabbing batches of examples and their corresponding labels. For each batch, we'll go through the following ritual: * Generate predictions (``yhat``) and the loss (``loss``) by executing a forward pass through the network. * Calculate gradients by making a backwards pass through the network (``loss.backward()``). * Update the model parameters by invoking our SGD optimizer. ``` epochs = 10 learning_rate = .0001 num_batches = num_examples/batch_size for e in range(epochs): cumulative_loss = 0 # inner loop for i, (data, label) in enumerate(train_data): data = data.as_in_context(model_ctx) label = label.as_in_context(model_ctx).reshape((-1, 1)) with autograd.record(): output = net(data) loss = square_loss(output, label) loss.backward() SGD(params, learning_rate) cumulative_loss += loss.asscalar() print(cumulative_loss / num_batches) ``` ## Visualizing our training progess In the succeeding chapters, we'll introduce more realistic data, fancier models, more complicated loss functions, and more. But the core ideas are the same and the training loop will look remarkably familiar. Because these tutorials are self-contained, you'll get to know this ritual quite well. In addition to updating out model, we'll often want to do some bookkeeping. Among other things, we might want to keep track of training progress and visualize it graphically. We demonstrate one slighly more sophisticated training loop below. 
``` ############################################ # Re-initialize parameters because they # were already trained in the first loop ############################################ w[:] = nd.random_normal(shape=(num_inputs, num_outputs), ctx=model_ctx) b[:] = nd.random_normal(shape=num_outputs, ctx=model_ctx) ############################################ # Script to plot the losses over time ############################################ def plot(losses, X, sample_size=100): xs = list(range(len(losses))) f, (fg1, fg2) = plt.subplots(1, 2) fg1.set_title('Loss during training') fg1.plot(xs, losses, '-r') fg2.set_title('Estimated vs real function') fg2.plot(X[:sample_size, 1].asnumpy(), net(X[:sample_size, :]).asnumpy(), 'or', label='Estimated') fg2.plot(X[:sample_size, 1].asnumpy(), real_fn(X[:sample_size, :]).asnumpy(), '*g', label='Real') fg2.legend() plt.show() learning_rate = .0001 losses = [] plot(losses, X) for e in range(epochs): cumulative_loss = 0 for i, (data, label) in enumerate(train_data): data = data.as_in_context(model_ctx) label = label.as_in_context(model_ctx).reshape((-1, 1)) with autograd.record(): output = net(data) loss = square_loss(output, label) loss.backward() SGD(params, learning_rate) cumulative_loss += loss.asscalar() print("Epoch %s, batch %s. Mean loss: %s" % (e, i, cumulative_loss/num_batches)) losses.append(cumulative_loss/num_batches) plot(losses, X) ``` ## Conclusion You've seen that using just mxnet.ndarray and mxnet.autograd, we can build statistical models from scratch. In the following tutorials, we'll build on this foundation, introducing the basic ideas behind modern neural networks and demonstrating the powerful abstractions in MXNet's `gluon` package for building complex models with little code. ## Next [Linear regression with gluon](../chapter02_supervised-learning/linear-regression-gluon.ipynb) For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)
github_jupyter
# Introduction Crypocurrency is a topic that is important to discuss. With the increase in companies accepting cryptocurrency as payment, it is becoming more integral to people's lives. Due to its decentralized and anonymous nature, it eliminates the need for a governing body to dictate its value and relies purely on supply and demand (Farrel, 2015). When Bitcoin was introduced in 2009, one of its greatest benefits was that it employed a peer-to-peer transaction system that relied on cryptographic proof instead of "trust" (Farrel, 2015). By doing this, Bitcoin replaced human actions with mathematical algorithms. However, this system gave to rise to new problems associated with double spending and coinage protection, topics that are addressed in the background information section. Since the cryptocurrency industry is relatively new and interest has recently increased, there is little research into how groups are able to take advantage of it compared to establishing how the coin works. Furthermore, there exists many different coin varieties that employ different algorithms which significantly increases the scope of what is possible to research. In this project, I will focus on systems (BitCoin and DogeCoin) that have existed for many years and have a large cultural impact. ### Personal Motivation The initial motivation for proposing this project comes from my lack of previous knowledge on this issue. I decided on cryptocurrency because I wished to learn more about digital transactions, hashing algorithms and cryptography systems. In quarantine, I was heavily invested in mathematical theory but did not focus on applying a human centered perspective to what I was learning. This class has allowed me to see that data ethics is heavily important and that replacing human actions with algorithms isn't enough to remove prejudices and biases that were present before. 
Hence, in addition to investigating how cryptosystems work, I intend to address the question of whether having a privalged position in society allows you to take advantage of the system Due to its anonimity, there is little known about how these systems are implemented and how it impacts certain groups of individuals. In this project, I hope to uncover more about that impact and hope to uncover possible market manipulation and advantages that are given to certain Miners over others. Futhermore, the cryptocurrency industry is somewhat unregulated and crypto transactions have been used for malicious actions as a result of its anonimity. *** # Background Information ### What is Bitcoin? In 2009, Satoshi Nakamoto developed the first ever decentralized cryptocurrency, a virtual method of currency exchange that functions similarly to standard currency but eliminates the need for a trusted central authority (Farrel, 2015). By getting rid of this Hierarchy, individuals and business perform transactions on a peer-to-peer network that works through digital signatures (Farrel, 2015). ### How is it secure? Bitcoin was developed with the intention of replacing physical objects with a "computer file" (Velde, 2013). It functions on the same principles as currency- exchange something of value for goods and services. However, for a physical transaction, there is a guarantee that you are giving the money to another paty and that you are receiving something in return (ignoring the issue of counterfeiting). How can we guarentee that if we exchange Bitcoin, one party is guarenteed to receive it? Futhermore, digital signatures can easily be duplicated on a computer so how are we able to guarentee that double-spending does not occur? To prevent this, Bitcoin can only be spent and received after they have been publicly recorded in a ledge, known as the "block chain". 
To transfer a coin, the transcation is first recorded in a public ledger, where a new signature is made by combining the time stamp with the digital signatures of each party (Farrel, 2015). This new signature represents the path that a coin has taken in the network and is then broadcasted to all nodes in the blockchain. To be more precise, let Alice be the owner of a Bitcoin which is essentially a string of 1s and 0s that is stored in a physical hard drive (called the "wallet") (Velde, 2013). She wishes to send this coin to Bob who also has a wallet on his device (Note that these "wallets" are managed by an application the computer) (Velde, 2013). Then Alice's application broadcastes to a network of nodes indicating that there is a proposed transaction between Alice and Bob. Then the "Miners" gather the proposed transaction and attempt to add it to the block chain, which makes the transfer valid. The key for preventing fraud is to ensure that adding to the block chain is "difficult". As an analogy to the physical medium, think of this as saying when a merchant transfers a precious metal, it is clear that the merchant didn't "fake" it because of the amount of costly resources it takes to acquire it. However, it is very easy to check that it is indeed a precious metal. For Bitcoin, the process is similar but utilizes a **hash function**, a function that takes in text and numbers and maps it to a fixed integer. A **Miner** solves the problem (Velde, 2013): **Given the current blockchain $x$, a new (proposed) block $y$, a hashing function $f$ a fixed value $\alpha$, find an $n$ s.t $f(x,y,n) <= \alpha$** The security of this process hinges on the fact that it is very difficult to find $n$ given only $x,y,f, \alpha$ but it is very easy to check that for a given $n$, $f(x,y,n) <= \alpha$ holds. To address the issue of double spending, finding $n$ for a given new addition $y$ verifies whether no previous bitcoin transaction involving $y$ existed. 
When $y$ is added to the block chain $x$, the transaction becomes official and the bitcoin has been transferred to Bob. ### Rewards When the first miner succesfully makes an addition to the block chain, the message is broadcasted to the Blockchain and the miner receives $N$ new bitcoin attributed to it, a reward for the resources used. The Bitcoin protocol keeps the hashing function constant but changes $\alpha$ and $N$ over time, depending on how many new nodes (or miners) are added to the blockchain. In the case of Bitcoin, the Blockchain is public so any individual with the proper hardware can send transactions or become a miner (Wikipedia). The reason for providing these rewards is because mining requires high energy consumption and has been critized for being very "inefficient". ### Arms race As stated before, the process of finding $n$ is challenging and can only be done through exhaustive checking. Specifically, solving the mathematical equations comes from constantly running operations on a device until a solution is found. In the beginning, users utilized the CPU but as time went on, the same functions could be run more efficiently on GPUs (Farrel, 2015). Think of mining as a single device that runs constantly and keeps running a hash every second, similar to filling out a lottery ticket until you get a winning ticket. Due to this incentive, GPUs have been very hard to acquire (Uzman, 2021). ### What is DogeCoin? With the advent of BitCoin, numerous different cryptocurrencies have been introduced, utilizing the same model of peer-to-peer transactions without the need of a central authority. An unusual example of this is DogeCoin, a cryptocurrency that has recently been in the news due its large monetary growth. As a start, Doge is an internet meme that was created around 2010 and the coin was designed to initially be a joke (Young, 2018). Though it still has the peer-to-peer model, it has fundamental differences from BitCoin. 
For once, the circulation of coins is significantly larger than Bitcoin- around 300 billion. Its monetary value per unit is also significantly less and implements a different algorithm for deriving its keys (Young, 2018). DogeCoin is an interesting case of demand heavily increasing its value, with numerous individuals and companies profiting heavily off it. *** # Research Questions and Hypothesis Now that the proper background knowledge has been presented, we wish to formulate a hypothesis that relates to how certain groups of people are impacted by cryptocurrency over others. Because BitCoin uses a public Blockchain, there is no restriction on who is able to perform a transaction or become a miner. This accessibility is an area that is important to look at given the large monetary value that these currencies have now attained. Though the claim is that anyone can be apart of the group, acquiring the necessary hardware can be a challenge. Furthermore, digital coins are not immune to fraud and there is the potential for large amounts of money to be lost (Mt. Gox as an example). It is also important to note that there are many different cryptocurrencies that exist so it is best to focus on the most well-known blockchain (Bitcoin) and see if the findings can be extrapolated to other systems. The preliminary research questions from the plan were: - What impact does the cryptocurrency system have on minority groups in the United States? - Does the algorithm behind specific cryptocurrencies target more privaleged groups? Are people in a position of power able to take advantage of such a design? - What mathematical models are companies using to create cryptosystems? - How does encryption and the anonimity of cryptocurrency impact the tech industry? In the background research, the 2nd and 3rd questions have been addressed and there are numerous studies regarding the mathematical models and encryption systems that companies use. 
Thus, we can focus less on the theory and more on the availability of cryptocurrency to the user. In regard to the second question, this would be difficult to address because it involves identifying what we mean by "privaleged groups" and also what it means to design biased cryptocurrency. Thus, we can reformulate the first question in a way that allows us to create a reasonable hypothesis. #### Research Question **How does the socio-economic status of an individual in the United States impact their ability to receive economic benefits from BitCoin?** #### Hypothesis **A person with higher socio-economic status will be able to receive more benefits from transactions of Bitcoins as well as ready access to become a miner.** *** # Methadology At the start of the project, much of the research came from reading academic articles about the cryptocurrency industry and understanding how BitCoin worked. I was heavily invested in learning about mathematical models to the point where I realized that I was not properly incorporating the techniques I learned in HCDE 410 to conduct human-centered research. Nevetheless, I was able to restructure the project and come up with a plan: ### Part 1: Find articles that address the growth of cryptocurrency industry ### Part 2: Find data about price of GPUs, cryptocurrency over time ### Part 3: Find articles about the acessibility of BitCoin ### Part 4: Data analysis techniques *** # Findings ### Easy access to transactions and mining This finding is primarily in relation to BitCoin. In my research, I wanted to understand whether someone who has little background knowledge in cryptocurrency is able to easily become apart of the mining process. My initial perception was that Bitcoin mining required immense mathematical background and that only those with a upper-level education background were able to find success with it. However, this turned out to be a false assumption. 
For one, a miner is not directly interacting with the code and mathematics behind mining. Rather, they are installing an application on their device to run constantly and mine the bitcoin (Bitcoinmining, 2017). Furthermore, there exists numerous cloud computing services that run the algorithm in the background and are easy to install on your device (Bitcoinmining, 2017). Once you have installed the software, you are then able to join a "pool" which is a group of miners that work together to solve a particular block (Wikipedia). Once you are part of that group, you are able to set up a wallet on your computer and you are good to go. Purchasing cryptocurrency is also readily available on most platforms and typically involves less privacy concerns because your personal information does not get sent to a third party merchant. Thus, there is a higher likelyhood that your personal financial information is safe ### GPU demand has increased in recent years Cryptomining has had a direct impact on the surge of prices of graphics cards. It has been reported that many miners have been buying mid-range graphics cards in bulk to build machines that mine bitcoin and other cryptocurrencies (Warren, 2018). Hence, we conclude that for individuals with lower socio-economic standing, it is more difficult to participate in the mining process. - https://www.techspot.com/article/2257-cpu-gpu-pricing-2021-update/ - https://www.theverge.com/2018/1/30/16949550/bitcoin-graphics-cards-pc-prices-surge ### Profitability of cryptomining For most cryptosystems, the profitability of mining comes from succesfully adding a new block to the block chain. Due to the immense resources required to do this, the algorithm rewards the miner with "coins" that are worth a monitary value. While many different algorithms exist depending on the coin, they always involve some kind of hash functions that is easy to verify but difficult to check. 
Since the prices of GPUs have gone up in recent years, it can be challenging to turn a profit from rewards. It is also unclear whether a user should peform mining individually or whether they should join a pool. On one hand, a pool could potentially allow you to guarentee that you are succesfully validating new blocks but individual mining allows you to maximize profits. Furthermore, profitability is also governed by the monitary value of the currency which is heavily volatile. *** # Implications One of the problems with this project was that I did not allocate enough time into the data analysis and so I relied on previously existing studies that helped support my claims. Hence, I was not able to make any "new" findings and the project became more of a survey of a multitude of academic articles. In my plan, I wanted to display visuals of my findings but I was unable to because of time constraints. The research showed that the cryptoindustry is more accessible than I had initially assumed. The mathematical theory behind how cryptosystems work is interesting for me but not necessary to perform transactions or become a miner. My hypothesis was partially supported because it is harder for individuals to acquire the hardware to become successful miners. However, for people who only wish to buy and trade crypto, services exist which allow you to buy a fraction of coins for any value. Because transactions are fairly quick and your privacy isn't compromised, it is beneficial to use cryptocurrency is a way to purchase goods and services. However, due to the volatile nature of the prices, for individuals with a lower socio-economic background, it is important to not place all liquid assets in this system since it is governed by demand alone. While it is profitable now, it is unclear whether it will stay that way. I was more ambitious in my project plan when I formulated my previous research question. 
However, I found it hard to find datat to support my claims and I instead had to narrow the scope to a topic that has more numerical data (socio-economic standing). *** # Conclusion In this project, I explored the theory behind decentralized cryptocurrencies and how they utilize complex mathematical functions to secure their coinage. With the advent of mining becoming more popular, we found that being from a higher socio-economic standing is more beneficial to become part of the mining process but performing transactions remains unchanged. *** # Possible futher analysis Due to personal reasons and time constraints, I was unable to perform a lot of what was listed in my initial plan. Regadless, I do wish to continue with this project for later as I have been experimenting with statistical analysis. In the future, I wish to utilize theoretical dynamic system analysis, which comes from being able to communicate information about the equations and mathematical systems that companies use in their cryptosystem construction. In regards to the analytic methods, I have background in probability theory that I wish to use to simulate possible price trajectories for the future. The Martingale and Monte-carlo simulations are bit of a stretch-goal which I hope to elaborate on when I finish the other parts of the project. **Analytic Methods** - Statistical analysis through least-squares-regression (via R-Studio on .csv data sets) - Implementing NumPy modules (in Python) to find trends in data sets. - Martingale and Monte-carlo simulations run via PyCharm. - Theoretical dynamic system analysis. 
- Reading over academic articles relating to research questions **Presenting findings** - Academic style report (using overleaf.com LaTeX renderer) - Line graphs showing price trends, with contexualization - Time series graphs *** # References - Bitcoin arms race https://ieeexplore.ieee.org/abstract/document/6521016 - Introduction to bitcoin and crytocurrency systems https://repository.upenn.edu/cgi/viewcontent.cgi?article=1133&context=wharton_research_scholars - Survey on GPU stock and prices in 2021 https://wccftech.com/nvidia-and-amd-gpu-supply-will-remain-grim-in-q1-2021/#:~:text=It%20seems%20that%20now%20is%20a%20good%20time,very%20large%20number%20of%20variables%20at%20play%20here. https://poseidon01.ssrn.com/delivery.php - Introduction to DogeCoin https://poseidon01.ssrn.com/delivery.php?ID=773124118097097113031023086096078065057081049043000029071006125069009064110096091085056017039010001111005112082093089097000006043087058092007069069087089015094089022071021048009066120095090110120113076065113122114110107111096001024091099107110111095125&EXT=pdf&INDEX=TRUE - Survey on GPU prices https://www.theverge.com/2018/1/30/16949550/bitcoin-graphics-cards-pc-prices-surge - Survey on GPU pricing https://www.techspot.com/article/2257-cpu-gpu-pricing-2021-update/ - Introduction to Cryptocurrency https://www.coinbase.com/learn/crypto-basics/what-is-cryptocurrency - Introduction to Bitcoin mining https://www.bitcoinmining.com/getting-started/#:~:text=Bitcoin%20Mining%20Guide%20-%20Getting%20started%20with%20Bitcoin,is%20important%20for%20your%20bitcoin%20mining%20profits.%20 - Bitcoin Wikipedia https://en.wikipedia.org/wiki/Bitcoin #### Data sets - Bitcoin historic data https://www.nasdaq.com/market-activity/cryptocurrency/btc/historical - CPU parts data https://www.kaggle.com/raczeq/ethereum-effect-pc-parts
github_jupyter
... ***CURRENTLY UNDER DEVELOPMENT*** ... ## Simulate Monthly Mean Sea Level using a multivariate-linear regression model based on the annual SST PCs inputs required: * WaterLevel historical data from a tide gauge at the study site * Historical and simulated Annual PCs (*from Notebook 01*) in this notebook: * Obtain monthly mean sea level anomalies (MMSLA) from the tidal gauge record * Perform linear regression between MMSLA and annual PCs * Obtain predicted timeseries of MMSLA based on simulated timeseries of annual PCs ### Workflow: <div> <img src="resources/nb01_02.png" width="300px"> </div> Monthly sea level variability is typically due to processes occurring at longer timescales than the daily weather. Slowly varying seasonality and anomalies due to ENSO are retained in the climate emulator via the principle components (APC) used to develop the AWT. A multivariate regression model containing a mean plus annual and seasonal cycles at 12-month and 6-month periods for each APC covariate was fit to the MMSLA. This simple model explains ~75% of the variance without any specific information regarding local conditions (i.e., local anomalies due to coastal shelf dynamics, or local SSTAs) and slightly underpredicts extreme monthly sea level anomalies by ~10 cm. While this component of the approach is a subject of ongoing research, the regression model produces an additional ~0.35 m of regional SWL variability about mean sea level, which was deemed sufficient for the purposes of demonstrating the development of the stochastic climate emulator. 
``` #!/usr/bin/env python # -*- coding: utf-8 -*- # basic import import os import os.path as op from collections import OrderedDict # python libs import numpy as np from numpy.random import multivariate_normal import xarray as xr from scipy.stats import linregress from scipy.optimize import least_squares, curve_fit from datetime import datetime, timedelta # DEV: override installed teslakit import sys sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..')) # teslakit from teslakit.database import Database from teslakit.tides import Calculate_MMSL from teslakit.statistical import runmean from teslakit.util.time_operations import date2yearfrac as d2yf from teslakit.plotting.tides import Plot_Tide_SLR, Plot_Tide_RUNM, Plot_Tide_MMSL, \ Plot_Validate_MMSL_tseries, Plot_Validate_MMSL_scatter, Plot_MMSL_Prediction, \ Plot_MMSL_Histogram ``` ## Database and Site parameters ``` # -------------------------------------- # Teslakit database p_data = r'/media/administrador/HD/Dropbox/Guam/teslakit/data' db = Database(p_data) # set site db.SetSite('GUAM') # -------------------------------------- # load data and set parameters WL_split = db.Load_TIDE_hist_astro() # water level historical data (tide gauge) WL = WL_split.WaterLevels SST_KMA = db.Load_SST_KMA() # SST Anual Weather Types PCs SST_PCs_sim_m = db.Load_SST_PCs_sim_m() # simulated SST PCs (monthly) # parameters for mmsl calculation mmsl_year_ini = 1947 mmsl_year_end = 2018 ``` ## Monthly Mean Sea Level ``` # -------------------------------------- # Calculate SLR using linear regression time = WL.time.values[:] wl = WL.values[:] * 1000 # (m to mm) lr_time = np.array(range(len(time))) # for linregress mask = ~np.isnan(wl) # remove nans with mask slope, intercept, r_value, p_value, std_err = linregress(lr_time[mask], wl[mask]) slr = intercept + slope * lr_time # Plot tide with SLR Plot_Tide_SLR(time, wl, slr); # -------------------------------------- # remove SLR and runmean from tide tide_noslr = wl - slr # 
calculate tide running mean time_window = 365*24*3 runm = runmean(tide_noslr, time_window, 'mean') # remove running mean tide_noslr_norunm = tide_noslr - runm # store data TNSR = xr.DataArray(tide_noslr_norunm, dims=('time'), coords={'time':time}) # Plot tide without SLR and runm Plot_Tide_RUNM(time, tide_noslr, runm); # -------------------------------------- # calculate Monthly Mean Sea Level (mmsl) MMSL = Calculate_MMSL(TNSR, mmsl_year_ini, mmsl_year_end) # fill nans with interpolated values p_nan = np.isnan(MMSL.mmsl) MMSL.mmsl[p_nan]= np.interp(MMSL.time[p_nan], MMSL.time[~p_nan], MMSL.mmsl[~p_nan]) mmsl_time = MMSL.time.values[:] mmsl_vals = MMSL.mmsl.values[:] # Plot tide and mmsl Plot_Tide_MMSL(TNSR.time, TNSR.values, mmsl_time, mmsl_vals); # store historical mmsl db.Save_TIDE_hist_mmsl(MMSL) ``` ## Monthly Mean Sea Level - Principal Components The annual PCs are passed to a monthly resolution ``` # -------------------------------------- # SST Anual Weather Types PCs PCs = np.array(SST_KMA.PCs.values) PC1, PC2, PC3 = PCs[:,0], PCs[:,1], PCs[:,2] PCs_years = [int(str(t).split('-')[0]) for t in SST_KMA.time.values[:]] # MMSL PCs calculations: cut and pad it to monthly resolution ntrs_m_mean = np.array([]) ntrs_time = [] MMSL_PC1 = np.array([]) MMSL_PC2 = np.array([]) MMSL_PC3 = np.array([]) for c, y in enumerate(PCs_years): pos = np.where( (mmsl_time >= np.datetime64('{0}-06-01'.format(y))) & (mmsl_time <= np.datetime64('{0}-05-29'.format(y+1))) ) if pos[0].size: ntrs_m_mean = np.concatenate((ntrs_m_mean, mmsl_vals[pos]),axis=0) # TODO check for 0s and nans in ntrs_m_mean? 
ntrs_time.append(mmsl_time[pos]) MMSL_PC1 = np.concatenate((MMSL_PC1, np.ones(pos[0].size)*PC1[c]),axis=0) MMSL_PC2 = np.concatenate((MMSL_PC2, np.ones(pos[0].size)*PC2[c]),axis=0) MMSL_PC3 = np.concatenate((MMSL_PC3, np.ones(pos[0].size)*PC3[c]),axis=0) ntrs_time = np.concatenate(ntrs_time) # Parse time to year fraction for linear-regression seasonality  frac_year = np.array([d2yf(x) for x in ntrs_time]) ``` ## Monthly Mean Sea Level - Multivariate-linear Regression Model ``` # -------------------------------------- # Fit linear regression model def modelfun(data, *x): pc1, pc2, pc3, t = data return x[0] + x[1]*pc1 + x[2]*pc2 + x[3]*pc3 + \ np.array([x[4] + x[5]*pc1 + x[6]*pc2 + x[7]*pc3]).flatten() * np.cos(2*np.pi*t) + \ np.array([x[8] + x[9]*pc1 + x[10]*pc2 + x[11]*pc3]).flatten() * np.sin(2*np.pi*t) + \ np.array([x[12] + x[13]*pc1 + x[14]*pc2 + x[15]*pc3]).flatten() * np.cos(4*np.pi*t) + \ np.array([x[16] + x[17]*pc1 + x[18]*pc2 + x[19]*pc3]).flatten() * np.sin(4*np.pi*t) # use non-linear least squares to fit our model split = 160 # train / validation split index x0 = np.ones(20) sigma = np.ones(split) # select data for scipy.optimize.curve_fit x_train = ([MMSL_PC1[:split], MMSL_PC2[:split], MMSL_PC3[:split], frac_year[:split]]) y_train = ntrs_m_mean[:split] res_lsq, res_cov = curve_fit(modelfun, x_train, y_train, x0, sigma) # print optimal parameters and covariance #print('optimal parameters (minimized sum of squares residual)\n{0}\n'.format(res_lsq)) #print('optimal parameters covariance\n{0}\n'.format(res_cov)) ``` ## Train and test model ``` # Check model at fitting period yp_train = modelfun(x_train, *res_lsq) Plot_Validate_MMSL_tseries(ntrs_time[:split], ntrs_m_mean[:split], yp_train); Plot_Validate_MMSL_scatter(ntrs_m_mean[:split], yp_train); # Check model at validation period x_val = ([MMSL_PC1[split:], MMSL_PC2[split:], MMSL_PC3[split:], frac_year[split:]]) yp_val = modelfun(x_val, *res_lsq) Plot_Validate_MMSL_tseries(ntrs_time[split:], 
ntrs_m_mean[split:], yp_val); Plot_Validate_MMSL_scatter(ntrs_m_mean[split:], yp_val); # Parameter sampling (generate sample of params based on covariance matrix) n_sims = 10 theta_gen = res_lsq theta_sim = multivariate_normal(theta_gen, res_cov, n_sims) # Check model at validation period yp_valp = np.ndarray((n_sims, len(ntrs_time[split:]))) * np.nan for i in range(n_sims): yp_valp[i, :] = modelfun(x_val, *theta_sim[i,:]) # 95% percentile yp_val_quant = np.percentile(yp_valp, [2.275, 97.275], axis=0) Plot_Validate_MMSL_tseries(ntrs_time[split:], ntrs_m_mean[split:], yp_val, mmsl_pred_quantiles=yp_val_quant); # Fit model using entire dataset sigma = np.ones(len(frac_year)) x_fit = ([MMSL_PC1, MMSL_PC2, MMSL_PC3, frac_year]) y_fit = ntrs_m_mean res_lsq, res_cov = curve_fit(modelfun, x_fit, y_fit, x0, sigma) # obtain model output yp = modelfun(x_fit, *res_lsq) # Generate 1000 simulations of the parameters n_sims = 1000 theta_gen = res_lsq param_sim = multivariate_normal(theta_gen, res_cov, n_sims) # Check model yp_p = np.ndarray((n_sims, len(ntrs_time))) * np.nan for i in range(n_sims): yp_p[i, :] = modelfun(x_fit, *param_sim[i,:]) # 95% percentile yp_quant = np.percentile(yp_p, [2.275, 97.275], axis=0) Plot_Validate_MMSL_tseries(ntrs_time, ntrs_m_mean, yp, mmsl_pred_quantiles=yp_quant); # Save model parameters to use in climate change model_coefs = xr.Dataset({'sim_params' : (('n_sims','n_params'), param_sim)}) db.Save_TIDE_mmsl_params(model_coefs) ``` ## Monthly Mean Sea Level - Prediction ``` # -------------------------------------- # Predict 1000 years using simulated PCs (monthly time resolution) # get simulation time as year fractions PCs_sim_time = SST_PCs_sim_m.time.values[:] frac_year_sim = np.array([d2yf(x) for x in PCs_sim_time]) # solve each PCs simulation y_sim_n = np.ndarray((len(SST_PCs_sim_m.n_sim), len(frac_year_sim))) * np.nan for s in SST_PCs_sim_m.n_sim: PCs_s_m = SST_PCs_sim_m.sel(n_sim=s) MMSL_PC1_sim = PCs_s_m.PC1.values[:] MMSL_PC2_sim = 
PCs_s_m.PC2.values[:] MMSL_PC3_sim = PCs_s_m.PC3.values[:] # use linear-regression model x_sim = ([MMSL_PC1_sim, MMSL_PC2_sim, MMSL_PC3_sim, frac_year_sim]) y_sim_n[s, :] = modelfun(x_sim, *param_sim[s,:]) # join output and store it MMSL_sim = xr.Dataset( { 'mmsl' : (('n_sim','time'), y_sim_n / 1000), # mm to m }, {'time' : PCs_sim_time} ) print(MMSL_sim) db.Save_TIDE_sim_mmsl(MMSL_sim) # Plot mmsl simulation plot_sim = 0 y_sim = MMSL_sim.sel(n_sim=plot_sim).mmsl.values[:] * 1000 # m to mm t_sim = MMSL_sim.sel(n_sim=plot_sim).time.values[:] # Plot mmsl prediction Plot_MMSL_Prediction(t_sim, y_sim); # compare model histograms Plot_MMSL_Histogram(ntrs_m_mean, y_sim); # compare model histograms for all simulations y_sim = MMSL_sim.mmsl.values[:].flatten() * 1000 # m to mm Plot_MMSL_Histogram(ntrs_m_mean, y_sim); ```
github_jupyter
# Exact GP Regression with Multiple GPUs and Kernel Partitioning In this notebook, we'll demonstrate training exact GPs on large datasets using two key features from the paper https://arxiv.org/abs/1903.08114: 1. The ability to distribute the kernel matrix across multiple GPUs, for additional parallelism. 2. Partitioning the kernel into chunks computed on-the-fly when performing each MVM to reduce memory usage. We'll be using the `protein` dataset, which has about 37000 training examples. The techniques in this notebook can be applied to much larger datasets, but the training time required will depend on the computational resources you have available: both the number of GPUs available and the amount of memory they have (which determines the partition size) have a significant effect on training time. ``` import math import torch import gpytorch import sys from matplotlib import pyplot as plt sys.path.append('../') from LBFGS import FullBatchLBFGS %matplotlib inline %load_ext autoreload %autoreload 2 ``` ## Downloading Data We will be using the Protein UCI dataset which contains a total of 40000+ data points. The next cell will download this dataset from a Google drive and load it. ``` import os import urllib.request from scipy.io import loadmat dataset = 'protein' if not os.path.isfile(f'{dataset}.mat'): print(f'Downloading \'{dataset}\' UCI dataset...') urllib.request.urlretrieve('https://drive.google.com/uc?export=download&id=1nRb8e7qooozXkNghC5eQS0JeywSXGX2S', f'{dataset}.mat') data = torch.Tensor(loadmat(f'{dataset}.mat')['data']) ``` ## Normalization and train/test Splits In the next cell, we split the data 80/20 as train and test, and do some basic z-score feature normalization. 
``` import numpy as np N = data.shape[0] # make train/val/test n_train = int(0.8 * N) train_x, train_y = data[:n_train, :-1], data[:n_train, -1] test_x, test_y = data[n_train:, :-1], data[n_train:, -1] # normalize features mean = train_x.mean(dim=-2, keepdim=True) std = train_x.std(dim=-2, keepdim=True) + 1e-6 # prevent dividing by 0 train_x = (train_x - mean) / std test_x = (test_x - mean) / std # normalize labels mean, std = train_y.mean(),train_y.std() train_y = (train_y - mean) / std test_y = (test_y - mean) / std # make continguous train_x, train_y = train_x.contiguous(), train_y.contiguous() test_x, test_y = test_x.contiguous(), test_y.contiguous() output_device = torch.device('cuda:0') train_x, train_y = train_x.to(output_device), train_y.to(output_device) test_x, test_y = test_x.to(output_device), test_y.to(output_device) ``` ## How many GPUs do you want to use? In the next cell, specify the `n_devices` variable to be the number of GPUs you'd like to use. By default, we will use all devices available to us. ``` n_devices = torch.cuda.device_count() print('Planning to run on {} GPUs.'.format(n_devices)) ``` ## GP Model + Training Code In the next cell we define our GP model and training code. For this notebook, the only thing different from the Simple GP tutorials is the use of the `MultiDeviceKernel` to wrap the base covariance module. This allows for the use of multiple GPUs behind the scenes. 
``` class ExactGPModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood, n_devices): super(ExactGPModel, self).__init__(train_x, train_y, likelihood) self.mean_module = gpytorch.means.ConstantMean() base_covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()) self.covar_module = gpytorch.kernels.MultiDeviceKernel( base_covar_module, device_ids=range(n_devices), output_device=output_device ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return gpytorch.distributions.MultivariateNormal(mean_x, covar_x) def train(train_x, train_y, n_devices, output_device, checkpoint_size, preconditioner_size, n_training_iter, ): likelihood = gpytorch.likelihoods.GaussianLikelihood().to(output_device) model = ExactGPModel(train_x, train_y, likelihood, n_devices).to(output_device) model.train() likelihood.train() optimizer = FullBatchLBFGS(model.parameters(), lr=0.1) # "Loss" for GPs - the marginal log likelihood mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model) with gpytorch.beta_features.checkpoint_kernel(checkpoint_size), \ gpytorch.settings.max_preconditioner_size(preconditioner_size): def closure(): optimizer.zero_grad() output = model(train_x) loss = -mll(output, train_y) return loss loss = closure() loss.backward() for i in range(n_training_iter): options = {'closure': closure, 'current_loss': loss, 'max_ls': 10} loss, _, _, _, _, _, _, fail = optimizer.step(options) print('Iter %d/%d - Loss: %.3f lengthscale: %.3f noise: %.3f' % ( i + 1, n_training_iter, loss.item(), model.covar_module.module.base_kernel.lengthscale.item(), model.likelihood.noise.item() )) if fail: print('Convergence reached!') break print(f"Finished training on {train_x.size(0)} data points using {n_devices} GPUs.") return model, likelihood ``` ## Automatically determining GPU Settings In the next cell, we automatically determine a roughly reasonable partition or *checkpoint* size that will allow us to train without 
using more memory than the GPUs available have. Note that this is a coarse estimate of the largest possible checkpoint size, and may be off by as much as a factor of 2. A smarter search here could make up to a 2x performance improvement. ``` import gc def find_best_gpu_setting(train_x, train_y, n_devices, output_device, preconditioner_size ): N = train_x.size(0) # Find the optimum partition/checkpoint size by decreasing in powers of 2 # Start with no partitioning (size = 0) settings = [0] + [int(n) for n in np.ceil(N / 2**np.arange(1, np.floor(np.log2(N))))] for checkpoint_size in settings: print('Number of devices: {} -- Kernel partition size: {}'.format(n_devices, checkpoint_size)) try: # Try a full forward and backward pass with this setting to check memory usage _, _ = train(train_x, train_y, n_devices=n_devices, output_device=output_device, checkpoint_size=checkpoint_size, preconditioner_size=preconditioner_size, n_training_iter=1) # when successful, break out of for-loop and jump to finally block break except RuntimeError as e: print('RuntimeError: {}'.format(e)) except AttributeError as e: print('AttributeError: {}'.format(e)) finally: # handle CUDA OOM error gc.collect() torch.cuda.empty_cache() return checkpoint_size # Set a large enough preconditioner size to reduce the number of CG iterations run preconditioner_size = 100 checkpoint_size = find_best_gpu_setting(train_x, train_y, n_devices=n_devices, output_device=output_device, preconditioner_size=preconditioner_size) ``` # Training ``` model, likelihood = train(train_x, train_y, n_devices=n_devices, output_device=output_device, checkpoint_size=checkpoint_size, preconditioner_size=preconditioner_size, n_training_iter=20) ``` # Testing: Computing test time caches ``` # Get into evaluation (predictive posterior) mode model.eval() likelihood.eval() with torch.no_grad(), gpytorch.settings.fast_pred_var(): latent_pred = model(test_x) ``` # Testing: Computing predictions ``` with torch.no_grad(), 
gpytorch.settings.fast_pred_var(): %time latent_pred = model(test_x) test_rmse = torch.sqrt(torch.mean(torch.pow(latent_pred.mean - test_y, 2))) print(f"Test RMSE: {test_rmse.item()}") ```
github_jupyter
``` #default_exp data.block #export from fastai2.torch_basics import * from fastai2.data.core import * from fastai2.data.load import * from fastai2.data.external import * from fastai2.data.transforms import * from nbdev.showdoc import * ``` # Data block > High level API to quickly get your data in a `DataBunch` ## TransformBlock - ``` #export class TransformBlock(): "A basic wrapper that links defaults transforms for the data block API" def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=None, dl_type=None, dbunch_kwargs=None): self.type_tfms = L(type_tfms) self.item_tfms = ToTensor + L(item_tfms) self.batch_tfms = L(batch_tfms) self.dl_type,self.dbunch_kwargs = dl_type,({} if dbunch_kwargs is None else dbunch_kwargs) #export def CategoryBlock(vocab=None, add_na=False): "`TransformBlock` for single-label categorical targets" return TransformBlock(type_tfms=Categorize(vocab=vocab, add_na=add_na)) #export def MultiCategoryBlock(encoded=False, vocab=None, add_na=False): "`TransformBlock` for multi-label categorical targets" tfm = EncodedMultiCategorize(vocab=vocab) if encoded else [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode] return TransformBlock(type_tfms=tfm) ``` ## General API ``` #export from inspect import isfunction,ismethod #export def _merge_tfms(*tfms): "Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating" g = groupby(concat(*tfms), lambda o: o if isinstance(o, type) else o.__qualname__ if (isfunction(o) or ismethod(o)) else o.__class__) return L(v[-1] for k,v in g.items()).map(instantiate) #For example, so not exported from fastai2.vision.core import * from fastai2.vision.data import * #hide tfms = _merge_tfms([Categorize, MultiCategorize, Categorize(['dog', 'cat'])], Categorize(['a', 'b'])) #If there are several instantiated versions, the last one is kept. 
test_eq(len(tfms), 2) test_eq(tfms[1].__class__, MultiCategorize) test_eq(tfms[0].__class__, Categorize) test_eq(tfms[0].vocab, ['a', 'b']) tfms = _merge_tfms([PILImage.create, PILImage.show]) #Check methods are properly separated test_eq(len(tfms), 2) tfms = _merge_tfms([show_image, set_trace]) #Check functions are properly separated test_eq(len(tfms), 2) #export @docs @funcs_kwargs class DataBlock(): "Generic container to quickly build `DataSource` and `DataBunch`" get_x=get_items=splitter=get_y = None dl_type = TfmdDL _methods = 'get_items splitter get_y get_x'.split() def __init__(self, blocks=None, dl_type=None, getters=None, n_inp=None, **kwargs): blocks = L(getattr(self,'blocks',(TransformBlock,TransformBlock)) if blocks is None else blocks) blocks = L(b() if callable(b) else b for b in blocks) self.default_type_tfms = blocks.attrgot('type_tfms', L()) self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L())) self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L())) for t in blocks: if getattr(t, 'dl_type', None) is not None: self.dl_type = t.dl_type if dl_type is not None: self.dl_type = dl_type self.databunch = delegates(self.dl_type.__init__)(self.databunch) self.dbunch_kwargs = merge(*blocks.attrgot('dbunch_kwargs', {})) self.n_inp,self.getters = n_inp,L(getters) if getters is not None: assert self.get_x is None and self.get_y is None assert not kwargs def datasource(self, source, type_tfms=None): self.source = source items = (self.get_items or noop)(source) if isinstance(items,tuple): items = L(items).zip() labellers = [itemgetter(i) for i in range_of(self.default_type_tfms)] else: labellers = [noop] * len(self.default_type_tfms) splits = (self.splitter or noop)(items) if self.get_x: labellers[0] = self.get_x if self.get_y: labellers[1] = self.get_y if self.getters: labellers = self.getters if type_tfms is None: type_tfms = [L() for t in self.default_type_tfms] type_tfms = L([self.default_type_tfms, type_tfms, 
labellers]).map_zip( lambda tt,tfm,l: L(l) + _merge_tfms(tt, tfm)) return DataSource(items, tfms=type_tfms, splits=splits, dl_type=self.dl_type, n_inp=self.n_inp) def databunch(self, source, path='.', type_tfms=None, item_tfms=None, batch_tfms=None, **kwargs): dsrc = self.datasource(source, type_tfms=type_tfms) item_tfms = _merge_tfms(self.default_item_tfms, item_tfms) batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms) kwargs = {**self.dbunch_kwargs, **kwargs} return dsrc.databunch(path=path, after_item=item_tfms, after_batch=batch_tfms, **kwargs) _docs = dict(datasource="Create a `Datasource` from `source` with `type_tfms`", databunch="Create a `DataBunch` from `source` with `item_tfms` and `batch_tfms`") ``` To build a `DataBlock` you need to give the library four things: the types of your input/labels then at least two functions: `get_items` and `splitter`. You may also need to include `get_x` and `get_y` or a more generic list of `getters` that are applied to the results of `get_items`. Once those are provided, you automatically get a `DataSource` or a `DataBunch`: ``` show_doc(DataBlock.datasource) #hide_input dblock = DataBlock() show_doc(dblock.databunch, name="DataBlock.databunch") ``` You can create a `DataBlock` by passing functions or subclassing. 
The two following data blocks are the same for instance: ``` class MNIST(DataBlock): blocks = ImageBlock(cls=PILImageBW),CategoryBlock def get_items(self, source): return get_image_files(Path(source)) def splitter (self, items ): return GrandparentSplitter()(items) def get_y (self, item ): return parent_label(item) mnist = MNIST() mnist = DataBlock(blocks = (ImageBlock(cls=PILImageBW),CategoryBlock), get_items = get_image_files, splitter = GrandparentSplitter(), get_y = parent_label) ``` Each type comes with default transforms that will be applied - at the base level to create items in a tuple (usually input,target) from the base elements (like filenames) - at the item level of the datasource - at the batch level They are called respectively type transforms, item transforms, batch transforms. In the case of MNIST, the type transforms are the method to create a `PILImageBW` (for the input) and the `Categorize` transform (for the target), the item transform is `ToTensor` and the batch transforms are `Cuda` and `IntToFloatTensor`. You can add any other transforms by passing them in `DataBlock.datasource` or `DataBlock.databunch`. ``` test_eq(mnist.default_type_tfms[0], [PILImageBW.create]) test_eq(mnist.default_type_tfms[1].map(type), [Categorize]) test_eq(mnist.default_item_tfms.map(type), [ToTensor]) test_eq(mnist.default_batch_tfms.map(type), [IntToFloatTensor]) dsrc = MNIST().datasource(untar_data(URLs.MNIST_TINY)) test_eq(dsrc.vocab, ['3', '7']) x,y = dsrc.train[0] test_eq(x.size,(28,28)) show_at(dsrc.train, 0, cmap='Greys', figsize=(2,2)); ``` ## Export - ``` #hide from nbdev.export import notebook2script notebook2script() ```
github_jupyter
### Import custom modules from current folder ``` import os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) import nltk from text_easability_metrics import TextEasabilityMetrics, StanfordNLP from simple_text_representation.classes import Text from simple_text_representation.models import Database from nltk.tree import Tree import pandas as pd # from nltk.draw.tree import draw_trees database = Database('educationalTexts', 'postgres', '', '0.0.0.0', 5432) path = r'/Users/herbert/Projects/Tesis/stanford-corenlp-full-2017-06-09' path = r'http://corenlp.run' path = r'http://localhost/' ``` ### Word Concreteness ``` def removeSpecialCharacters(strWord): return ''.join(character for character in strWord if character.isalnum()) def getWordNetTag(tag): if tag.startswith('J'): return wordnet.ADJ elif tag.startswith('S'): return wordnet.ADJ_SAT elif tag.startswith('R'): return wordnet.ADV elif tag.startswith('N'): return wordnet.NOUN elif tag.startswith('V'): return wordnet.VERB else: return '' import nltk from nltk.corpus import wordnet def getWordConcreteness(currentText): textTotal = 0 for paragraph in currentText: sentenceCount = 0 for sentence in paragraph: sentenceToken = nltk.word_tokenize(sentence) posTags = nltk.pos_tag(sentenceToken) for taggedWord in posTags: word = taggedWord[0] tag = taggedWord[1]pos_tag if (getWordNetTag(tag)): for ss in wordnet.synsets(word, getWordNetTag(tag), lang='spa'): hyperyms = ss.hypernym_paths()[0] if (len(hyperyms)) > 1: # print(ss.hypernym_paths()[0]) category = ss.hypernym_paths()[0][1] sentenceCount += 1 if "physical" in category.name() else 0 # print(ss, "physical" in category.name()) textTotal += sentenceCount return textTotal testTest = [['Si bien los trasplantes se han convertido en una práctica habitual, aún persisten fuertes temores en la población para donar órganos, lograr su superación es la clave para aumentar el número de los donadores solidarios 
que hacen falta para salvar miles de vidas.'], ['Es preciso, entonces, que se aclaren algunas dudas para que las personas pierdan el miedo a donar.', ' Primero, que lo complicado de los procedimientos de extirpación y trasplantación, en el que intervienen varios equipos médicos altamente especializados, vuelve muy difícil la existencia de mafias.', ' Segundo, que la necesaria compatibilidad (afinidad de grupo sanguíneo) entre donante y receptor dificulta la posibilidad de muertes “a pedido”.'], ['La última cuestión es la más compleja; en la actualidad, aunque alguien haya manifestado expresamente su voluntad de donar, es a la familia a la que se consulta en el momento en que la donación puede efectuarse.', ' Como se entiende, tal consulta llega en un momento difícil y poco propicio para las reflexiones profundas, más aún si se tiene que tomar una decisión rápida.'], ['Por lo tanto, las campañas públicas deben esclarecer la naturaleza de los procedimientos técnicos y legales, para disipar miedos; pero, esencialmente, deben apuntar a que se tome conciencia de lo que significa salvar otra vida, porque para decidirlo en un momento crucial es necesario que la idea se haya considerado y discutido previamente, con reflexión y calma.']] getWordConcreteness(testTest) textOfSeventhGrade = Text.getTexts(database, grade=7) textOfEightGrade = Text.getTexts(database, grade=8) textOfNineGrade = Text.getTexts(database, grade=9) textOfTenthGrade = Text.getTexts(database, grade=10) textOfEleventhGrade = Text.getTexts(database, grade=11) def getResultsOfTexts(currentTexts): results = list() for text in currentTexts: results.append(getWordConcreteness(text)) return results resultsSeventh = getResultsOfTexts(textOfSeventhGrade) resultsMeanSeventh = sum(resultsSeventh)/len(resultsSeventh) resultsEighth = getResultsOfTexts(textOfEightGrade) resultsMeanEighth = sum(resultsEighth)/len(resultsEighth) resultsNinth = getResultsOfTexts(textOfNineGrade) resultsMeanNinth = 
sum(resultsNinth)/len(resultsNinth) resultsTenh = getResultsOfTexts(textOfTenthGrade) resultsMeanTenth = sum(resultsTenh)/len(resultsTenh) resultsEleventh = getResultsOfTexts(textOfEleventhGrade) resultsMeanEleventh = sum(resultsEleventh)/len(resultsEleventh) import seaborn as sns import matplotlib.pyplot as plt import numpy as np sns.set_style("whitegrid") resultsMean = [resultsMeanSeventh, resultsMeanEighth, resultsMeanNinth, resultsMeanTenth, resultsMeanEleventh] data = np.array(resultsMean).reshape((1, len(resultsMean))) labels = ['Primero', 'Segundo', 'Tercero', 'Cuarto', 'Quinto'] df = pd.DataFrame(data, columns=labels) df ax = sns.barplot(data=df) ```
github_jupyter
## Purpose: Try different models-- Part5. ### Penalized_SVM. ``` # import dependencies. import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.svm import SVC ``` #### STEP1: Read in dataset. Remove data from 2016-2019. - data from 2016-2018 will be used to bs test the model. - data from 2019 will be used to predict the winners of the 2019 WS. ``` # read in the data. team_data = pd.read_csv("../../Resources/clean_data_1905.csv") del team_data["Unnamed: 0"] team_data.head() # remove data from 2016 through 2019. team_data_new = team_data.loc[team_data["year"] < 2016] team_data_new.head() target = team_data_new["winners"] features = team_data_new.drop({"team", "year", "winners"}, axis=1) feature_columns = list(features.columns) print (target.shape) print (features.shape) print (feature_columns) ``` #### STEP2: Split and scale the data. ``` # split data. X_train, X_test, y_train, y_test = train_test_split(features, target, random_state=42) # scale data. scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_test_scaled = scaler.fit_transform(X_test) ``` #### STEP3: Try the SVC model. ``` # generate the model. model = SVC(kernel="rbf", class_weight="balanced", probability=True) # fit the model. model.fit(X_train_scaled, y_train) # predict. prediction = model.predict(X_test_scaled) print ((classification_report(y_test, prediction, target_names=["0", "1"]))) ``` #### STEP4: Predict the winner 2016-2018. ``` def predict_the_winner(model, year, team_data, X_train): ''' INPUT: -X_train = scaled X train data. -model = the saved model. -team_data = complete dataframe with all data. -year = the year want to look at. OUTPUT: -printed prediction. DESCRIPTION: -data from year of interest is isolated. -the data are scaled. -the prediction is made. 
-print out the resulting probability and the name of the team. ''' # grab the data. team_data = team_data.loc[team_data["year"] == year].reset_index() # set features (no team, year, winners). # set target (winners). features = team_data[feature_columns] # scale. scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) features = scaler.fit_transform(features) # fit the model. probabilities = model.predict_proba(features) # convert predictions to datafram.e WS_predictions = pd.DataFrame(probabilities[:,1]) # Sort the DataFrame (descending) WS_predictions = WS_predictions.sort_values(0, ascending=False) WS_predictions['Probability'] = WS_predictions[0] # Print 50 highest probability HoF inductees from still eligible players for i, row in WS_predictions.head(50).iterrows(): prob = ' '.join(('WS Probability =', str(row['Probability']))) print('') print(prob) print(team_data.iloc[i,1:27]["team"]) # predict for 2018. predict_the_winner(model, 2018, team_data, X_train_scaled) # predict for 2017. predict_the_winner(model, 2017, team_data, X_train_scaled) ``` Ok. This didn't work. Let's try this penalized model with a grid search. ``` def grid_search_svc(X_train, X_test, y_train, y_test): ''' INPUT: -X_train = scaled X train data. -X_test = scaled X test data. -y_train = y train data. -y_test = y test data. OUTPUT: -classification report (has F1 score, precision and recall). -grid = saved model for prediction. DESCRIPTION: -the scaled and split data is put through a grid search with svc. -the model is trained. -a prediction is made. -print out the classification report and give the model. ''' # set up svc model. model = SVC(kernel="rbf", class_weight="balanced", probability=True) # create gridsearch estimator. param_grid = {"C": [0.0001, 0.001, 0.01, 0.1, 1, 10, 100], "gamma": [0.0001, 0.001, 0.01, 0.1]} grid = GridSearchCV(model, param_grid, verbose=3) # fit the model. grid.fit(X_train, y_train) # predict. 
prediction = grid.predict(X_test) # print out the basic information about the grid search. print (grid.best_params_) print (grid.best_score_) print (grid.best_estimator_) grid = grid.best_estimator_ predictions = grid.predict(X_test) print (classification_report(y_test, prediction, target_names=["0", "1"])) return grid model_grid = grid_search_svc(X_train, X_test, y_train, y_test) ``` Nope. This is terrible. Lots of no.
github_jupyter
<a href="https://colab.research.google.com/github/TobyChen320/DS-Unit-4-Sprint-3-Deep-Learning/blob/main/module2-convolutional-neural-networks/Toby's_LS_DS_432_Convolution_Neural_Networks_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200> <br></br> <br></br> ## *Data Science Unit 4 Sprint 3 Assignment 2* # Convolutional Neural Networks (CNNs) # Assignment - <a href="#p1">Part 1:</a> Pre-Trained Model - <a href="#p2">Part 2:</a> Custom CNN Model - <a href="#p3">Part 3:</a> CNN with Data Augmentation You will apply three different CNN models to a binary image classification model using Keras. Classify images of Mountains (`./data/train/mountain/*`) and images of forests (`./data/train/forest/*`). Treat mountains as the positive class (1) and the forest images as the negative (zero). |Mountain (+)|Forest (-)| |---|---| |![](https://github.com/LambdaSchool/DS-Unit-4-Sprint-3-Deep-Learning/blob/main/module2-convolutional-neural-networks/data/train/mountain/art1131.jpg?raw=1)|![](https://github.com/LambdaSchool/DS-Unit-4-Sprint-3-Deep-Learning/blob/main/module2-convolutional-neural-networks/data/validation/forest/cdmc317.jpg?raw=1)| The problem is relatively difficult given that the sample is tiny: there are about 350 observations per class. This sample size might be something that you can expect with prototyping an image classification problem/solution at work. Get accustomed to evaluating several different possible models. # Pre - Trained Model <a id="p1"></a> Load a pretrained network from Keras, [ResNet50](https://tfhub.dev/google/imagenet/resnet_v1_50/classification/1) - a 50 layer deep network trained to recognize [1000 objects](https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt). 
Starting usage: ```python import numpy as np from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions from tensorflow.keras.layers import Dense, GlobalAveragePooling2D from tensorflow.keras.models import Model # This is the functional API resnet = ResNet50(weights='imagenet', include_top=False) ``` The `include_top` parameter in `ResNet50` will remove the fully connected layers from the ResNet model. The next step is to turn off the training of the ResNet layers. We want to use the learned parameters without updating them in future training passes. ```python for layer in resnet.layers: layer.trainable = False ``` Using the Keras functional API, we will need to add additional fully connected layers to our model. When we removed the top layers, we removed all previous fully connected layers. In other words, we kept only the feature processing portions of our network. You can experiment with additional layers beyond what's listed here. The `GlobalAveragePooling2D` layer functions as a really fancy flatten function by taking the average of each of the last convolutional layer outputs (which is two dimensional still). ```python x = resnet.output x = GlobalAveragePooling2D()(x) # This layer is a really fancy flatten x = Dense(1024, activation='relu')(x) predictions = Dense(1, activation='sigmoid')(x) model = Model(resnet.input, predictions) ``` Your assignment is to apply the transfer learning above to classify images of Mountains (`./data/train/mountain/*`) and images of forests (`./data/train/forest/*`). Treat mountains as the positive class (1) and the forest images as the negative (zero). Steps to complete assignment: 1. Load in Image Data into numpy arrays (`X`) 2. Create a `y` for the labels 3. Train your model with pre-trained layers from resnet 4. 
Report your model's accuracy ## Load in Data This is surprisingly more difficult than it seems, because you are working with directories of images instead of a single file. This boiler plate will help you download a zipped version of the directory of images. The directory is organized into "train" and "validation" which you can use inside an `ImageGenerator` class to stream batches of images thru your model. ### Download & Summarize the Data This step is completed for you. Just run the cells and review the results. ``` import tensorflow as tf import os _URL = 'https://github.com/LambdaSchool/DS-Unit-4-Sprint-3-Deep-Learning/blob/main/module2-convolutional-neural-networks/data.zip?raw=true' path_to_zip = tf.keras.utils.get_file('./data.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'data') train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') train_mountain_dir = os.path.join(train_dir, 'mountain') # directory with our training mountain pictures train_forest_dir = os.path.join(train_dir, 'forest') # directory with our training forest pictures validation_mountain_dir = os.path.join(validation_dir, 'mountain') # directory with our validation mountain pictures validation_forest_dir = os.path.join(validation_dir, 'forest') # directory with our validation forest pictures num_mountain_tr = len(os.listdir(train_mountain_dir)) num_forest_tr = len(os.listdir(train_forest_dir)) num_mountain_val = len(os.listdir(validation_mountain_dir)) num_forest_val = len(os.listdir(validation_forest_dir)) total_train = num_mountain_tr + num_forest_tr total_val = num_mountain_val + num_forest_val print('total training mountain images:', num_mountain_tr) print('total training forest images:', num_forest_tr) print('total validation mountain images:', num_mountain_val) print('total validation forest images:', num_forest_val) print("--") print("Total training images:", total_train) print("Total validation images:", total_val) ``` ### Keras 
`ImageGenerator` to Process the Data This step is completed for you, but please review the code. The `ImageGenerator` class reads in batches of data from a directory and pass them to the model one batch at a time. Just like large text files, this method is advantageous, because it stifles the need to load a bunch of images into memory. Check out the documentation for this class method: [Keras `ImageGenerator` Class](https://keras.io/preprocessing/image/#imagedatagenerator-class). You'll expand it's use in the third assignment objective. ``` batch_size = 16 epochs = 50 IMG_HEIGHT = 224 IMG_WIDTH = 224 from tensorflow.keras.preprocessing.image import ImageDataGenerator train_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our training data validation_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our validation data train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary') val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size, directory=validation_dir, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary') ``` ## Instatiate Model ``` import numpy as np from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions from tensorflow.keras.layers import Dense, GlobalAveragePooling2D from tensorflow.keras.models import Model # This is the functional API resnet = ResNet50(weights='imagenet', include_top=False) for layer in resnet.layers: layer.trainable=False x = resnet.output x = GlobalAveragePooling2D()(x) # This layer is a really fancy flatten x = Dense(1024, activation='relu')(x) predictions = Dense(1, activation='sigmoid')(x) model = Model(resnet.input, predictions) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) 
model.summary() ``` ## Fit Model ``` history = model.fit( train_data_gen, steps_per_epoch=total_train // batch_size, epochs=epochs, validation_data=val_data_gen, validation_steps=total_val // batch_size ) ``` # Custom CNN Model In this step, write and train your own convolutional neural network using Keras. You can use any architecture that suits you as long as it has at least one convolutional and one pooling layer at the beginning of the network - you can add more if you want. ``` # Define the Model from tensorflow.keras import datasets from tensorflow.keras.models import Sequential, Model # <- May Use from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dropout my_model = Sequential([Conv2D(32,3, activation="relu", input_shape=(224,224,3)), MaxPooling2D(), Conv2D(32,3, activation="relu", input_shape=(224,224,3)), MaxPooling2D(), Conv2D(64,3, activation="relu", input_shape=(224,224,3)), MaxPooling2D(), Flatten(), Dense(64, activation = "relu"), Dropout(0.1), Dense(1, activation="sigmoid") ]) # Compile Model my_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # Fit Model history = my_model.fit( train_data_gen, steps_per_epoch=total_train // batch_size, epochs=epochs, validation_data=val_data_gen, validation_steps=total_val // batch_size ) ``` # Custom CNN Model with Image Manipulations To simulate an increase in a sample of image, you can apply image manipulation techniques: cropping, rotation, stretching, etc. Luckily Keras has some handy functions for us to apply these techniques to our mountain and forest example. Simply, you should be able to modify our image generator for the problem. Check out these resources to help you get started: 1. [Keras `ImageGenerator` Class](https://keras.io/preprocessing/image/#imagedatagenerator-class) 2. 
[Building a powerful image classifier with very little data](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html) ``` train_image_generator = ImageDataGenerator( rescale=1./255, rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.1, horizontal_flip=True) # Generator for our training data validation_image_generator = ImageDataGenerator( rescale=1./255, rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.1, horizontal_flip=True) # Generator for our validation data train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary') val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size, directory=validation_dir, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary') history = my_model.fit( train_data_gen, steps_per_epoch=total_train // batch_size, epochs=epochs, validation_data=val_data_gen, validation_steps=total_val // batch_size ) ``` # Resources and Stretch Goals Stretch goals - Enhance your code to use classes/functions and accept terms to search and classes to look for in recognizing the downloaded images (e.g. download images of parties, recognize all that contain balloons) - Check out [other available pretrained networks](https://tfhub.dev), try some and compare - Image recognition/classification is somewhat solved, but *relationships* between entities and describing an image is not - check out some of the extended resources (e.g. 
[Visual Genome](https://visualgenome.org/)) on the topic - Transfer learning - using images you source yourself, [retrain a classifier](https://www.tensorflow.org/hub/tutorials/image_retraining) with a new category - (Not CNN related) Use [piexif](https://pypi.org/project/piexif/) to check out the metadata of images passed in to your system - see if they're from a national park! (Note - many images lack GPS metadata, so this won't work in most cases, but still cool) Resources - [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) - influential paper (introduced ResNet) - [YOLO: Real-Time Object Detection](https://pjreddie.com/darknet/yolo/) - an influential convolution based object detection system, focused on inference speed (for applications to e.g. self driving vehicles) - [R-CNN, Fast R-CNN, Faster R-CNN, YOLO](https://towardsdatascience.com/r-cnn-fast-r-cnn-faster-r-cnn-yolo-object-detection-algorithms-36d53571365e) - comparison of object detection systems - [Common Objects in Context](http://cocodataset.org/) - a large-scale object detection, segmentation, and captioning dataset - [Visual Genome](https://visualgenome.org/) - a dataset, a knowledge base, an ongoing effort to connect structured image concepts to language
github_jupyter
# Maximum-sum circular subarray (LeetCode 918).
#
# Two cases to consider for a circular array:
#   1. The best subarray does NOT wrap around the end: this is the classic
#      Kadane maximum-subarray sum.
#   2. The best subarray DOES wrap around: its complement is a contiguous,
#      non-wrapping subarray, so its sum equals
#      total_sum - (minimum contiguous subarray sum).
# Corner case: if every element is negative, case 2 degenerates to removing
# the whole array (total - min == 0, i.e. the empty subarray), which is not
# allowed — so the answer falls back to the (negative) Kadane maximum.
# Figure: https://assets.leetcode.com/users/brianchiang_tw/image_1589539736.png


class Solution:
    """Single-pass O(n) solution tracking both min and max subarray sums."""

    def maxSubarraySumCircular(self, A) -> int:
        """Return the maximum sum of a non-empty subarray of the circular
        array *A*.

        Runs one pass of Kadane's algorithm simultaneously for the maximum
        and the minimum contiguous subarray sum, plus the total sum.
        """
        array_sum = 0
        local_min_sum, global_min_sum = 0, float('inf')
        local_max_sum, global_max_sum = 0, float('-inf')

        for num in A:
            # Kadane step for the minimum subarray ending at this element.
            local_min_sum = min(local_min_sum + num, num)
            global_min_sum = min(global_min_sum, local_min_sum)
            # Kadane step for the maximum subarray ending at this element.
            local_max_sum = max(local_max_sum + num, num)
            global_max_sum = max(global_max_sum, local_max_sum)
            array_sum += num

        # global_max_sum: best subarray sum without crossing the boundary.
        # array_sum - global_min_sum: best subarray sum crossing the boundary.
        if global_max_sum > 0:
            return max(array_sum - global_min_sum, global_max_sum)
        # All numbers are negative: the crossing case would select the empty
        # subarray, so return the best single stretch instead.
        return global_max_sum


# High time complexity — kept only as a reference implementation. Renamed so
# it no longer shadows the O(n) `Solution` above (previously five classes all
# named `Solution` shadowed each other, and the final demo call dispatched to
# an unfinished Counter-based stub that implicitly returned None).
class SolutionBruteForce:
    """Reference O(n^2) brute force: try every start index, walk around the
    circle once, and keep the best running sum."""

    def maxSubarraySumCircular(self, A) -> int:
        res = -float('inf')
        for i in range(len(A)):
            temp_sum = A[i]
            temp_max = A[i]
            for j in range(i + 1, len(A) * 2):
                j %= len(A)
                if j == i:
                    # Wrapped all the way back to the start: stop this walk.
                    break
                temp_sum += A[j]
                temp_max = max(temp_max, temp_sum)
            res = max(temp_max, res, A[i])
        return res


solution = Solution()
solution.maxSubarraySumCircular([3, 1, 3, 2, 6])
github_jupyter
# Básico de Python Esta sección está pensada para ser una breve introducción al lenguaje de programación *Python* con la intención de conocer los comandos básicos para hacer uso de sus estructuras de datos y las herramientas necesarias que se utilizarán durante el curso. No consideramos que sea un curso formal de programación, pues se pensó para que todos los comandos de *Python* puedan ser ejecutados dentro de un notebook utilizando *Jupyter-lab* para tener una familiaridad con el lenguaje y así tener el conocimiento necesario para generar código que sea de utilidad en el análisis de datos. Para iniciar una sesión dentro de *jupyter-lab* es necesario tenerlo instalado (se describe en [Instalación](faltalink)) y ejecutar `jupyter-lab` dentro del ambiente donde se realizó la instalación. ## Cadenas de símbolos (*String*) En todo curso que involucra algún lenguaje de programación se empieza con *Hello world*; **este no es un curso de programación**, pero vamos a hacer muchas cosas similares, por lo que vamos a intentarlo. Es decir, lo primero que deseamos es poder imprimir cosas; para tal propósito *Python* tiene la función `print()`, esta función nos permite imprimir dentro de la terminal lo que deseamos. ``` print('Hello world!!') ``` Hemos impreso nuestra primera línea usando *Python*; se puede ver que la función print no imprime las comillas, ¿qué pasa si las quitamos? ``` print(! Hola mundo !) ``` De aquí podemos hacer un par de observaciones: la función `print` acepta *strings*, que son cadenas de símbolos. Para declarar que se empieza una cadena de símbolos, se hace utilizando `'` o bien `"`, para declarar que hemos terminado la cadena la próxima `'` o `"` (respectivamente) determina el fin.
``` print('!Hola mundo !') ``` Podemos hacer varias cosas con un *string* como es "multiplicarlo" ``` print('Hola mundo '*2) print('Hola mundo '*4) ``` "sumarlo" (juntarlo ) ``` print('Hola mundo' + 'junto mal') print('Hola mundo' + ' ' + 'junto mal') ``` Y otras operaciones, durante el curso veremos más operaciones que se puden hacer con las cadenas de símbolos. ## Variables Dentro de los lenguajes de programación es necesario dar nombres a las cosas, en *pyhton* esta asignación se hace usando el símbolo de =. ``` a= 2 a ``` La línea anterior asignó el valor 2 a la variable `a`. Como estamos dentro de un notebook dentro de *jupyter-lab* al llamar a la variable `a` imprime lo que se encuentra dentro de la variable. ``` b= 5 a+b ``` Se pueden realizar operaciones con las variables, si se tienen definidas estas operaciones entre tipos los tipos definidos. Como es el caso de las *strings* y los enteros. ``` a = 'Hola mundo' b = 'Jupiter' a+b a[0:5]+b ``` A diferencia de otros lenguajes de programación como son *C++* o *Java*, en *Python* no es necesario declarar la variable ni determinar el tipo de variable. Usando la función `type` es posible ver que tipo de variable es. ``` a= 5 b= 'Hola' type(a) type(b) ``` Es importante observar que las operaciones son de tipo específico. Por ejemplo el operando `+` aplicado a las variables de tipo *int* o *float* se aplica la función la suma, mientras que el caso de variables *string* funciona como la unión de las cadenas. ``` a+b ``` Como se observa, el operando `+` no esta definido para usarse con *int* y *str*(*string*). Existen distintos tipos de variables como son: | Tipo | Descripción | |:-------|:-----------:| | *int*| Para guardar enteros ($\mathbb{Z}$).| | *float*| Números de doble presición (ejemplo $2.34$).| | *bool*| Valores verdadero(`True`) o falso (`False`). 
| | *str* | Cadenas de símbolos con codificación (UTF-8).| | *bytes*| Código ASCII en bytes.| | *None* | Valor de *Python* para nulo.| Los elementos en *Python* se pueden comparar, para saber si un elemento es igual a otro o si cumple con alguna condición específica. Las comparaciones entre los elementos de *Python* tienen que ser del mismo tipo, en algunos casos se permite hacer comparaciones entre tipos como *float* e *int*, sin embargo, lo que sucede en el fondo *Pyhton* cambia el tipo y se hace la comparacion. Las comparaciones se hacen usando `==` (iguales), `>` (mayor que), `<`(menor que), `>= ` (mayor o igual que), `<=` (menor o igual que). El resultado de las comparaciones es siempre un *booleano* (tipo `bool`). ``` 4==5 4>=5 'b' < 'b' 'acs'=='acs' 4.5>4 4 > 'a' ``` Utilizando estas comparaciones es posible hacer algebra booleana utilizando los operadores lógicos implementados para ello, |Operador| Descripción |Ejemplo| |---------|-----------|-------| |`and` | Si los dos operandos son `True` entonces la condición se vuelve `True`| `a and b`| | `or` | Si alguno de los operandos es `True` entonces la condición se vuelve `True`| `a or b`| | `not` | Se regresa el estado inverso del operando| `not a`| ``` 4== 4 and 5<4 4== 4 or 5<4 not 4==4 ``` # Listas Las listas en python son especialmente útiles, pues nos permiten tener una estructura para el manejo de los datos y estructuras, para declarar una lista se utilizan `[ ]` y separando por `,` sus elementos. ``` mi_lista= [1, 2, 3,4] mi_lista ``` Se le pueden añadir elementos a las listas utilizando el método `append` implementado detro de las lista, de la siguiente forma ``` mi_lista.append(5) mi_lista ``` También se pueden remover elementos de la lista usando el método `pop` ``` a = mi_lista.pop() print(a) mi_lista ``` Como se observa el método `pop` remueve el último elemento en la lista y lo asigna a la variable `a`, no es necesario asignar el valor a una variable para remover el último elemento. 
``` mi_lista.pop() mi_lista ``` Para acceder a los elementos de una lista en python se hace a través de `[]`, python enumera los elementos de las listas a partir de 0 para el primer elemento y de forma asendente, también es posible acceder a los elementos usando enteros negativos. ``` mi_lista[0] mi_lista[2] mi_lista[-1] ``` Es posible cambiar el valor de los elementos ``` mi_lista[0]= 5 mi_lista ``` Pero para aumentar el tamaño de las listas es necesario usar el método `append` u otro. ``` mi_lista[3]= 5 mi_lista ``` Para remover un elemento especifico utilizamos el método `remove`, el cual elimina la primera aparición de elemento que se desea remover. ``` mi_lista = [1,2,3, 4,2] mi_lista.remove(2) mi_lista ``` Es posible unir dos o más listas ``` mi_lista = [1,2,3, 4] mi_lista_2= [3,4,5,6] mi_lista+mi_lista_2 mi_lista ``` Los elementos de las listas pueden ser de distintos tipos ``` mi_lista = [1,2,4,'Hola', 2.34, str(4.56)] mi_lista ``` En la lista generada en la linea anterior se hace notar la función *str()*, esta convierte lo que se encuentre dentro de los paréntesis a tipo *string*, siempre y cuando la conversión sea posible. Como ejemplo se transforma el número flotante a un *string*. Este tipo de funciones también existen para *int* , *bool* o *float*. ``` int(2.75) float(3) float('3') bool(1) ``` A partir de las listas podemos tomar "rebanadas" de estas. Para tal propósito, en las listas utilizamos `:` para indicar los rangos que se desean, como se ve en los ejemplos a continuación Tomemos los elementos de la posición 2 a la posición 5 ``` mi_lista= [1,2,3,4,5,6] mi_lista[2:5] ``` De la posición 0 a la posición antes que 4 ``` mi_lista[:4] ``` De la posición 3 hasta el último elemento ``` mi_lista[3:] ``` ## Diccionarios Los diccionarios en *Python* nos ofrecen la posibilidad de hacer funciones entre conjuntos de forma directa, para la construcción de los diccionarios usamos `{ }` y `:`para determinar la regla de la relación entre los conjuntos. 
Veamos el siguiente ejemplo. ``` mi_dict= {'a': 'Hola', 'b': 'jupyter', 'c':'mundo'} mi_dict['a'] mi_dict['b'] print(mi_dict['a'], mi_dict['c']) mi_dict['d'] ``` La función de los diccionarios es regresar los valores que se le asignan a cada elemento, a los elementos antes de `:` se les denomina *keys* (llave) y a los que se encuentran después se les denomina *values* (valor) ``` mi_dict.keys() mi_dict.values() ``` Si se desea añadir una pareja llave-valor al diccionario, se puede hacer directamente ``` mi_dict[3]= 'luna' mi_dict[3] mi_dict ``` Para eliminar una pareja llave-valor utilizamos el método `pop` ``` mi_dict.pop('b') mi_dict ``` Si se desea añadir más de un elemento al diccionario o añadir todo un diccionario a uno ya exixtente esto se puede hacer usando el método `update` y otro diciionario. ``` mi_dict.update({'e': 'marte', 3:'casa', 2:'Hola'}) mi_dict ``` Es importante recalcar que en los diccionarios puede haber llaves que tengan el mismo valor pero no llaves iguales ``` mi_dic_2= {'a': 'Hola', 'b': 'mundo', 'a': 'jupyter'} mi_dic_2 ``` En este caso el diccionario asigna a la llave el último valor asignado. Aunque Python nos permite tener como llaves (*keys*) cualquier tipo de objeto es preferible utilizar elementos que sean *hashables* como lo son *int*, *float* o *strings*, hay ciertas estructuras o valor en python que nos son de esta clase de elementos. La idea es que este tipos de estructuras se les puede dar un cierto "nombre" para identificar a los elementos, una explicación más detallada de lo que son los elemenntos *hashable* esta fuera del alcance de este curso. ### Tuplas Otra estructura importante en *Python* son las "tuplas", estos objetos nos permiten almacenar valores como *int*, *float* o *strings* de forma similar a una notación vectorial (como coordenadas). Para declarar a una tupla se hace con `()` y se separa a los elementos usando `,`. 
``` tu= (1,2,3,'a') tu ``` Una de las propiedades importante que distingue a las *tuplas* de las listas, es que las tuplas son objetos inmutables. Es decir, éstas no pueden ser modificadas una vez que fueron declaradas, manteniendo la integridad de los datos que contienen. ``` tu[0] tu[0]= 5 ``` Existen ciertas operaciones que se pueden realizar con las tuplas, si sumamos (`+`) tuplas el resultado es concatenar las tuplas, si multiplicamos por un entero $n$ el resultado es la tupla concatenada $n$. ``` tu_2= (4,5,6,'b') tu+tu_2 tu*2 ``` ### Conjuntos Los conjunto en *python* nos permiten almacenar conjuntos de distintos elementos, como su palabra lo dice, los conjuntos son conjunto en el sentido matemático, es decir, podemos hacer las operaciones usuales de conjuntos como son unión, intersección, diferencia (resta), diferencia simétrica, añadir elementos etc. Para declarar un conjunto se utiliza `{}` separando sus elementos por `,` ``` conjunto_1= {1,2,3,4,5,6,7,8,7,6,5, '5', (2,4)} conjunto_1 ``` Para añadir elementos al conjunto se puede hacer usando el método `add` ``` conjunto_1.add('li') conjunto_1 ``` Si deseamos obtener el conjunto de una estructura que contiene datos como pueden ser listas, tuplas o diccionarios se puede hacer a través de la función `set` ``` con_lista= set(mi_lista) con_lista ``` Las operaciones entre conjuntos se hacen a través de los métodos implementados en el objeto, utilizando como argumento otro conjunto. ``` conjunto_2= {'a', 'b', 'c', '2', 6,8, 15} conjunto_1.union(conjunto_2) conjunto_1.difference(conjunto_2) conjunto_1.intersection( conjunto_2) ``` ### Condicionales y ciclos Ya hemos visto que se pueden comparar elementos en *Python*, esto nos permite diferenciar entre los distintos elementos, en cierta estructura como pueden ser diccionarios, listas o tuplas. Las estructuras de control son una de las principales herramientas en lenguajes de programación, estas nos permiten estableces condiciones para poder tomar desiciones. 
Las estructuras de control que usaremos con mayor frecuencia son `if` `else`, `for` , `while`. Estas estructuras se declaran usando la palabra seguida de una condición logica que se debe de cumplir y `:`, en la siguiente linea se indenta (con un número fijo de espacios o con *tab* ) y se escribe la linea de código a ejecutar. `if x == 4: print('Es cuatro')` #### if La estructura más utilizada es la estructura `if else` en esta se da una condición lógica, si la condición se cumple entonces se ejecuta el código que se encuentra indentado, si no se cumple la condición puede continuar la ejecución del código donde no esta indentado. La estructura `if` permite dos condicionales más, `elif` y `else`. `elif` permite añadir un condicional más a la estructura y `else` se ejecuta si ninguna de las condiciones se cumple. ``` x= 4 if x == 4: print('Es cuatro') x= 5 if x == 4: print('Es cuatro') print('Se termino la estructura') if x == 4: print('Es cuatro') else: print('No es cuatro') print('Se termino la estructura de control') x= 12 if x == 4: print('Es cuatro') elif x>4 and x < 10: print('Es mayor que 4') else: print('No es cuatro y es mayor que 10') print('Se termino la estructura de control') ``` #### For La estructura de control `for` se utiliza para hacer cíclos sobre los elementos dentro de una estructura de datos. En el caso de python lo usual es aplicar cierto código a los elementos. Para declarar los ciclos `for ` se utiliza la la palabra `for` el nombre de una variable, que nos servirá para referirnos al elemento dentro del ciclo, seguido por la palabra `in` y la estructura sobre la cual deseamos que se haga el ciclo terminado la línea con `:`. Al igual que con `if`, el código que se desea ejecutar dentro del ciclo debe estar en una nueva línea indentada con un cierto número de espacios o bien con un número fijo de tabulados ("tabs"). El ciclo se termina cuando la línea del código a ejecutar esta en la misma línea que la palabra `for`. 
``` lista= [1,2,3,4,5] for i in lista: print('Número ', i*2) print('Se acabo el ciclo') ``` Existen otro tipos de estructuras implementadas dentro de *python* que podemos utilizar para crear ciclos o para utilizarlas en otros casos como `range`, se puede utilizar esta función para generar los cíclos. ``` for j in range(3, 15): print(j) ``` #### while La estructura `while` es similar a la estructura de control `for`, esta nos permite ejecutar el código deseado dentro de un ciclo mientras se cumpla una condición determinada. Cuando dicha condición ya no se cumple entonces se sale del ciclo. Para declarar la estructura de control se hace a través de la palabra `while`, una condición que se debe de cumplir (True) para entrar en el ciclo y `:` al igual que en las estructuras de control anteriores, en una nueva línea indentada por un número fijo de espacios o tabulaciones se escribe el código a ejecutar. El ciclo termina en donde la nueva línea de código se encuentra indentada a la par de la palabra `while`. ` while x <= 3: print(x , 'es menor o igual que 3') x += 1 ` ``` x = 0 while x <= 3: print(x , 'es menor o igual que 3') x += 1 print('termino el ciclo') ``` La diferencia entre el `for`y `while` radica en que en `for` se sabe que el número de veces que se ejecutara el ciclo, mientras que en `while` el ciclo se seguirá ejecutando mientras se cumpla la condición. Por esta razón no se recomienda usar esta estructura de control cuando se emnpieza a programar, debido a que se pueden cometer errores con facilidad y generar ciclos infinitos. Cuando se use esta estructura, se recomienda estar seguro de la condición del ciclo no sea infinita al menos que así se desee. ## Funciones Las funciones son bloques de código para reutilizar, adicionalmente esto nos ayuda a dividir el código en distintos bloques lo cual ayuda a la lectura, comprensión, y limpieza del código. 
Para declarar una función se hace usando la palabra `def` seguida del nombre de la función con `()` encerrando los parámetros de la función terminando con `:`. En una nueva línea, y al igual que con las estructuras de control, para definir el código que se ejecuta dentro de la función este se encuentra indentado usando un número fijo de espacios o un número fijo de tabulares la función termina usando la palabra `return` o cuando el indentado se encuentra a la par de la palabra `def`. ` def imprime_saludo(): print('Que bueno que estoy aprendiendo Python') return ` El código que contiene la función sólo se ejecuta cuando se hace un llamado a la funcíon, esto se hace simplemente escribiendo el nombre de la función y poniendo entre parentesis los párametros que se le pasan a ésta. ``` def imprime_saludo(): print('Que bueno que estoy aprendiendo Python') return imprime_saludo() ``` Para que la función utilice argumentos, estos se declaran entre los paréntesis cuando se define la función. Para hacer referencia en el código a ejecutar dentro de la función, se hace con el nombre declarado entre los paréntesis. ``` def hola_tu( nombre): print('Hola ', nombre, ' que bueno estas aprendiendo Python') return hola_tu( 'Miguel') ``` Las funciones pueden (o no) regresar objetos, los cuales muchas veces son *strings*, números o cosas mucho más complejas. Para que la función regrese un objeto es necesario escribir la variable que contiene dicho objeto despues de la palabra `return`. El objeto que se regresa de la función tiene que ser asignado a una variable esta asignación se hace utilizando la asignación normal en ptython `=`. 
``` def hola_tu_2(nombre): print('Hola ', nombre, ' que bueno estas aprendiendo Python') ret = 'Te va ayudar mucho' return ret mi_nom= 'Miguel' que= hola_tu_2(mi_nom) ### Se llama a la función print(que) ## Ver que hay en la variable ``` Es recomendable y una buena práctica escribir un pequeño texto que nos ayude a describir para que sirve la función, esto se hace usando `"""` o `'''` y se cierra el texto con `"""` o `'''` respectivamente. Lo que se escribe ayuda a la reutilización del código tanto para la persona que desarrollando la función y como documentación del código desarrollado. ``` def hola_tu_2(nombre): """Imprime el nombre con una frase y regresa un agradecimiento que es un string""" print('Hola ', nombre, ' que bueno estas aprendiendo Python') ret = 'Te va ayudar mucho' return ret ``` ### Funciones implementadas en Python Existen muchas funciones implentadas en *Python* que nos ayudan a resolver tareas comunes necesarias a la hora de programar, el conocer estas funciones nos ayuda a que desarrollar de forma más rápida y que la ejecución sea más eficiente y veloz pues la funciones en la mayoria de los casos se encuentran optimizadas. El enlistar todas las funciones que se encuentran implementadas dentro de *pyhton* se encuentran fuera del alcance de este curso, pero se considera enlistar las que se consideraron de mayor útilidad para el curso, a continuacion se dan ejemplos de algunas de estas funciones. #### Range La función `range` nos permite definir un intervalo de números enteros con un mínimo, un máximo y el tamaño de incremento entre uno y otro. Se puede omitir el mínimo y el incremento, el comportamiento por defecto es tomar el mínimo como 0 y el incremento como 1 ``` for i in range(5, 15, 3): print(i) ``` #### enumerate La función `enumerate` enumera sobre un objeto iterable y regresa una tupla, donde la primera entrada es el entero la posición en el objeto iterable y la segunda es el elemento iterable. 
``` lista_1= ['diez', 'nueve', 'ocho', 'siete', 'seis', 'cinco', 'cuatro', 'tres', 'dos', 'uno'] for i in enumerate(lista_1): print (i) ``` #### Zip La función `zip` nos permite fusionar dos objetos iterables en un conjunto de tuplas donde cada entrada de la tupla corresponde a un elemento de las listas ``` lista_1= ['diez', 'nueve', 'ocho', 'siete', 'seis', 'cinco', 'cuatro', 'tres', 'dos'] lista_2 = list('abcdefghij') res =zip(lista_2, lista_1) for j in res: print(j) ``` #### list `list` nos permite crear listas sobre objetos iterables, si no se pasa ningún argumento entonces `list` regresa una lista vacía. ``` list('1234567') ``` ### list Regresa la lontgitud del objeto ``` len(lista_1) len(conjunto_1) ``` ### Funciones cast Las funciones `int` , `float`, `str` y `bool` convierten a lo que se pasa como argumento en un entero, punto flotante, cadena de símbolos o un boleano respectivamente. ### max, min De un objeto iterable regresa el máximo o el mínimo respectivamente. ``` max([1,4,6,7,3]) min({1,3,4,3,5,6,5,7}) max(list('abcdefghijk')) ``` ### set De un objeto iterable regresa el conjunto, si no se pasa ningun argumento regresa un conjunto vacío. ``` set(list('abcdesftredfredfre')) ``` ### abs Regresa el valor absoluto del argumento. ``` abs(-5.8) ``` ### any Regresa `True`de un objeto iterable si al aplicar `bool` alguno de elementos regresa `True` ``` any([0, None, False ]) any([0, None, False, 'a' ]) ``` ### all Regresa `True` si todos los elementos de un objeto iterable regresan `True` al aplicar `bool`. ``` all([0, None, False, 'a' ]) all([1,2,3,4,5,6]) ```
github_jupyter
``` import tensorflow as tf import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import rc from IPython import display import os rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) rc('text', usetex=True) path = "rt-polaritydata/rt-polaritydata/" pos_path = os.path.join(path, 'rt-polarity.pos') neg_path = os.path.join(path, 'rt-polarity.neg') def load_review(path, is_pos=True): with open(path, encoding='latin-1') as f: review = pd.DataFrame({'review':f.read().splitlines()}) review['sentiment'] = 1 if is_pos else 0 return review pos_review = load_review(pos_path, is_pos=True) neg_review = load_review(neg_path, is_pos=False) # display.display(pos_review.head(), neg_review.head()) all_reviews = pd.concat([pos_review, neg_review]) all_reviews.head() plt.hist(all_reviews.sentiment) plt.show() all_reviews["review_splitted"] = all_reviews.review.apply(lambda review: tf.keras.preprocessing.text.text_to_word_sequence(review)) import functools import operator def get_all_characters(df): chars = [] for review in df.review_splitted: for word in review: chars.append(word) chars = functools.reduce(operator.iconcat, chars, []) return list(set(chars)) chars = get_all_characters(all_reviews) NUM_CHARS = len(chars) print('Total number of characters: {}\n{}'.format(NUM_CHARS, chars)) char_to_num = {chars[i]: i for i in range(NUM_CHARS)} num_to_char = {i: chars[i] for i in range(NUM_CHARS)} ``` Find the maximum length of review -- padding ``` def get_max_len(df): all_lenghts = [] for review in df.review: all_lenghts.append(len(list(review))) return max(all_lenghts) MAX_LEN_POS = get_max_len(pos_review) MAX_LEN_NEG = get_max_len(neg_review) MAX_LEN_POS, MAX_LEN_NEG MAX_LEN = get_max_len(all_reviews) print('Maximum length of review: {} (in characters)'.format(MAX_LEN)) from stop_words import get_stop_words def review_to_one_hot(char): one_hot = [0] * NUM_CHARS pos = char_to_num[char] one_hot[pos] = 1 return one_hot def process_review(review, 
pad=True, max_len=MAX_LEN): review = tf.keras.preprocessing.text.text_to_word_sequence(review) review = [word for word in review if word not in get_stop_words('english')] review = [list(s) for s in review] # to characters review = functools.reduce(operator.iconcat, review, []) review_one_hot = [review_to_one_hot(char) for char in review] if pad: # append 0 value padding while len(review_one_hot) < max_len: review_one_hot.append([0] * NUM_CHARS) review_one_hot = review_one_hot[:max_len] # trucate to max length return review_one_hot def get_len_review(review): review = tf.keras.preprocessing.text.text_to_word_sequence(review) review = [word for word in review if word not in get_stop_words('english')] review = [list(s) for s in review] # to characters review = functools.reduce(operator.iconcat, review, []) return len(review) reviews_len = all_reviews.review.apply(get_len_review) np.median(reviews_len) plt.hist(reviews_len, bins=20, color=(2/255, 0, 247/255, 0.5)) plt.vlines(np.median(reviews_len), 0, 1500) # plt.vlines(np.quantile(reviews_len, q=0.75), 0, 1500, color='red') plt.ylim([0, 1300]) plt.xlabel('# characters') plt.ylabel('Count') plt.savefig('figures/cnn_character_matrix.pdf', bbox_inches='tight') # plt.show() plt.figure(figsize=(6, 5)) plt.subplot(1, 2, 1) position = 180 title = plt.title(neg_review.review.iloc[position]) plt.setp(title, color='blue') plt.imshow([p for p in process_review(neg_review.review.iloc[position], max_len=100)], cmap='gray') plt.axis('off') plt.subplot(1, 2, 2) t1 = pos_review.review.iloc[position] t2 = 'a droll , well-acted , character-driven \ncomedy with unexpected deposits of feeling . 
' title = plt.title(t2, y=-0.15) plt.setp(title, color='red') plt.imshow([p for p in process_review(pos_review.review.iloc[position], max_len=100)], cmap='gray') plt.axis('off') # plt.savefig('cnn_character_example.pdf', bbox_inches='tight') # plt.show() MAX_LEN_SEQ = 66 # 66 - median -- in characters processed_review = all_reviews.review.apply(lambda review: process_review(review, max_len=MAX_LEN_SEQ)) X = processed_review.to_numpy().tolist() y = all_reviews.sentiment.values from tensorflow.keras import backend as K def f1(y_true, y_pred): """ Create F1 metric for Keras From: https://stackoverflow.com/a/45305384/9511702 """ def recall(y_true, y_pred): tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = tp / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = tp / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2 * ((precision * recall) / (precision + recall + K.epsilon())) def build_model(): model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(MAX_LEN_SEQ, NUM_CHARS, 1)), tf.keras.layers.MaxPool2D((2, 2)), tf.keras.layers.Dropout(0.25), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPool2D((2, 2)), tf.keras.layers.Dropout(0.25), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.25), tf.keras.layers.Dense(10, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) metrics = ['accuracy', tf.keras.metrics.AUC(), f1] optimizer = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=metrics) return model early_stopping = 
tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=8) learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', patience=4, verbose=1, factor=0.5, min_lr=0.00001) def train(X_train, y_train, X_test, y_test, epochs=30, batch_size=64): model = build_model() history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=[early_stopping, learning_rate_reduction], verbose=0) test_results = model.evaluate(X_test, y_test, batch_size) return history.history, model, test_results from sklearn.model_selection import StratifiedKFold def X_transform(X): X = tf.convert_to_tensor(X) X = tf.reshape(X, [X.shape[0], X.shape[1], X.shape[2], 1]) # one channel (black or white) return X def y_transform(y): return tf.convert_to_tensor(y) def cross_validate(X, y, split_size=3): results = [] models = [] test_results = [] kf = StratifiedKFold(n_splits=split_size) for train_idx, val_idx in kf.split(X, y): X_train = X_transform(X[train_idx]) y_train = y_transform(y[train_idx]) X_test = X_transform(X[val_idx]) y_test = y_transform(y[val_idx]) result, model, test_result = train(X_train, y_train, X_test, y_test) results.append(result) models.append(model) test_results.append(test_result) return results, models, test_results X_new = np.array(X) y_new = np.array(y) results, models, test_results = cross_validate(X_new, y_new) test_results def predict(model, review, max_len=MAX_LEN_SEQ, shape=(MAX_LEN_SEQ, NUM_CHARS, 1)): input_ = [p for p in process_review(review, max_len=max_len)] input_ = tf.cast(input_, tf.float32) input_ = tf.reshape(input_, shape) input_ = input_[np.newaxis, ...] 
prediction = model.predict(input_)[0][0] print(prediction) if prediction > 0.5: print('Positive review with probability: {:.2f}%'.format(prediction * 100)) else: print('Negative review with probability: {:.2f}%'.format(100 - prediction * 100)) shape = (MAX_LEN_SEQ, NUM_CHARS, 1) predict(models[2], "I really like this film, one of the best I've ever seen", shape=shape) predict(models[2], 'I like this film and recommend to everyone.', shape=shape) predict(models[2], "The movie was terrible, not worth watching once again", shape=shape) for i, model in enumerate(models): print(f"\nModel {i}: \n") predict(model, "I really like this film, one of the best I've ever seen", shape=shape) predict(model, 'I like this film and recommend to everyone.', shape=shape) predict(model, 'Sometimes boring with a simple plot twist.', shape=shape) predict(model, "The movie was terrible, not worth watching once again", shape=shape) def plot_result(i, result): plt.figure(figsize=(20, 4)) plt.subplot(1, 4, 1) plt.plot(result['loss'], label='train') plt.plot(result['val_loss'], label='test') plt.xlabel('epoch', fontsize=14) plt.ylabel('loss', fontsize=14) plt.suptitle(f'Model {i+1}', fontsize=15) plt.legend(fontsize=13) #plt.tick_params(labelsize=14) auc_metrics = [] for key, value in result.items(): if 'auc' in key: auc_metrics.append(key) plt.subplot(1, 4, 2) plt.plot(result[auc_metrics[0]], label='train') plt.plot(result[auc_metrics[1]], label='test') plt.xlabel('epoch', fontsize=14) plt.ylabel('AUC', fontsize=14) plt.legend(fontsize=13) plt.subplot(1, 4, 3) plt.plot(result['f1'], label='train') plt.plot(result['val_f1'], label='test') plt.xlabel('epoch', fontsize=14) plt.ylabel(r'$F_1$', fontsize=14) plt.legend(fontsize=13) plt.subplot(1, 4, 4) plt.plot(result['accuracy'], label='train') plt.plot(result['val_accuracy'], label='test') plt.xlabel('epoch', fontsize=14) plt.ylabel('accuracy', fontsize=14) plt.legend(fontsize=13) plt.savefig(f'figures/cnn_character_training_{i+1}.pdf', 
bbox_inches='tight') #plt.show() for i, r in enumerate(results): plot_result(i, r) from tensorflow.keras.utils import model_to_dot def save_model_architecture(filename): dot_model = model_to_dot(build_model(), show_shapes=True, show_layer_names=False) dot_model.write_pdf(filename) save_model_architecture('figures/cnn_characters_model.pdf') ```
github_jupyter
``` import pandas as pd from sklearn.metrics import classification_report !ls train = pd.read_csv('../Post Processing/data/postproc_train.csv') val = pd.read_csv('../Post Processing/data/postproc_val.csv') test = pd.read_csv('../Post Processing/data/postproc_test.csv') test_gt = pd.read_csv('../../data/english_test_with_labels.csv') val_gt = pd.read_csv('../../data/Constraint_Val.csv') def post_proc(row): if (row['domain_real']>row['domain_fake']) & (row['domain_real']>0.88): return 0 elif (row['domain_real']<row['domain_fake']) & (row['domain_fake']>0.88): return 1 else: # if (row['username_real']>row['username_fake']) & (row['username_real']>0.88): # return 0 # elif (row['username_real']<row['username_fake']) & (row['username_fake']>0.88): # return 1 # else: if row['class1_pred']>row['class0_pred']: return 1 elif row['class1_pred']<row['class0_pred']: return 0 def post_proc1(row): if row['class1_pred']>row['class0_pred']: return 1 elif row['class1_pred']<row['class0_pred']: return 0 train['final_pred'] = train.apply(lambda x: post_proc(x), 1) print(classification_report(train['label'], train['final_pred'])) val['final_pred'] = val.apply(lambda x: post_proc(x), 1) print(classification_report(val['label'], val['final_pred'])) from sklearn.metrics import f1_score,accuracy_score,precision_score,recall_score print('f1_score : ',f1_score(val['label'], val['final_pred'],average='micro')) print('precision_score : ',precision_score(val['label'], val['final_pred'],average='micro')) print('recall_score : ',recall_score(val['label'], val['final_pred'],average='micro')) test['final_pred'] = test.apply(lambda x: post_proc(x), 1) print(classification_report(test['label'], test['final_pred'])) from sklearn.metrics import f1_score,accuracy_score,precision_score,recall_score print('f1_score : ',f1_score(test['label'], test['final_pred'],average='micro')) print('precision_score : ',precision_score(test['label'], test['final_pred'],average='micro')) print('recall_score : 
',recall_score(test['label'], test['final_pred'],average='micro')) ``` ## Get False Pred samples ``` val_false_pred = val[val.final_pred!=val.label] pd.merge(val_false_pred, val_gt, left_index=True, right_index=True) pd.merge(val_false_pred, val_gt, left_index=True, right_index=True).to_csv('../Post Processing/results/val_false_pred_var_1.csv') test_false_pred = test[test.final_pred!=test.label] pd.merge(test_false_pred, test_gt, left_index=True, right_index=True) pd.merge(test_false_pred, test_gt, left_index=True, right_index=True).to_csv('../Post Processing/results/test_false_pred_var_1.csv') ```
github_jupyter
# Visualizing CNN Layers --- In this notebook, we load a trained CNN (from a solution to FashionMNIST) and implement several feature visualization techniques to see what features this network has learned to extract. ### Load the [data](http://pytorch.org/docs/stable/torchvision/datasets.html) In this cell, we load in just the **test** dataset from the FashionMNIST class. ``` # our basic libraries import torch import torchvision # data loading and transforming from torchvision.datasets import FashionMNIST from torch.utils.data import DataLoader from torchvision import transforms # The output of torchvision datasets are PILImage images of range [0, 1]. # We transform them to Tensors for input into a CNN ## Define a transform to read the data in as a tensor data_transform = transforms.ToTensor() test_data = FashionMNIST(root='./data', train=False, download=True, transform=data_transform) # Print out some stats about the test data print('Test data, number of images: ', len(test_data)) # prepare data loaders, set the batch_size ## TODO: you can try changing the batch_size to be larger or smaller ## when you get to training your network, see how batch_size affects the loss batch_size = 32 test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True) # specify the image classes classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ``` ### Visualize some test data This cell iterates over the training dataset, loading a random batch of image/label data, using `dataiter.next()`. It then plots the batch of images and labels in a `2 x batch_size/2` grid. 
``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline # obtain one batch of training images dataiter = iter(test_loader) images, labels = dataiter.next() images = images.numpy() print(images.shape) # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(batch_size): ax = fig.add_subplot(2, batch_size/2, idx+1, xticks=[], yticks=[]) ax.imshow(np.squeeze(images[idx]), cmap='gray') ax.set_title(classes[labels[idx]]) ``` ### Define the network architecture The various layers that make up any neural network are documented, [here](http://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll use a simple series of layers: * Convolutional layers * Maxpooling layers * Fully-connected (linear) layers ``` import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() # 1 input image channel (grayscale), 10 output channels/feature maps # 3x3 square convolution kernel ## output size = (W-F)/S +1 = (28-3)/1 +1 = 26 # the output Tensor for one image, will have the dimensions: (10, 26, 26) # after one pool layer, this becomes (10, 13, 13) self.conv1 = nn.Conv2d(1, 10, 3) # maxpool layer # pool with kernel_size=2, stride=2 self.pool = nn.MaxPool2d(2, 2) # second conv layer: 10 inputs, 20 outputs, 3x3 conv ## output size = (W-F)/S +1 = (13-3)/1 +1 = 11 # the output tensor will have dimensions: (20, 11, 11) # after another pool layer this becomes (20, 5, 5); 5.5 is rounded down self.conv2 = nn.Conv2d(10, 20, 3) # 20 outputs * the 5*5 filtered/pooled map size self.fc1 = nn.Linear(20*5*5, 50) # dropout with p=0.4 self.fc1_drop = nn.Dropout(p=0.4) # finally, create 10 output channels (for the 10 classes) self.fc2 = nn.Linear(50, 10) # define the feedforward behavior def forward(self, x): # two conv/relu + pool layers x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) # prep for linear layer # this 
line of code is the equivalent of Flatten in Keras x = x.view(x.size(0), -1) # two linear layers with dropout in between x = F.relu(self.fc1(x)) x = self.fc1_drop(x) x = self.fc2(x) # final output return x ``` ### Load in our trained net This notebook needs to know the network architecture, as defined above, and once it knows what the "Net" class looks like, we can instantiate a model and load in an already trained network. The architecture above is taken from the example solution code, which was trained and saved in the directory `saved_models/`. ``` # instantiate your Net net = Net() # load the net parameters by name net.load_state_dict(torch.load('saved_models/fashion_net_ex.pt')) print(net) ``` ## Feature Visualization Sometimes, neural networks are thought of as a black box, given some input, they learn to produce some output. CNN's are actually learning to recognize a variety of spatial patterns and you can visualize what each convolutional layer has been trained to recognize by looking at the weights that make up each convolutional kernel and applying those one at a time to a sample image. These techniques are called feature visualization and they are useful for understanding the inner workings of a CNN. In the cell below, you'll see how to extract and visualize the filter weights for all of the filters in the first convolutional layer. Note the patterns of light and dark pixels and see if you can tell what a particular filter is detecting. For example, the filter pictured in the example below has dark pixels on either side and light pixels in the middle column, and so it may be detecting vertical edges. 
<img src='edge_filter_ex.png' width= 30% height=30%/> ``` # Get the weights in the first conv layer weights = net.conv1.weight.data w = weights.numpy() # for 10 filters fig=plt.figure(figsize=(20, 8)) columns = 5 rows = 2 for i in range(0, columns*rows): fig.add_subplot(rows, columns, i+1) plt.imshow(w[i][0], cmap='gray') print('First convolutional layer') plt.show() weights = net.conv2.weight.data w = weights.numpy() ``` ### Activation Maps Next, you'll see how to use OpenCV's `filter2D` function to apply these filters to a sample test image and produce a series of **activation maps** as a result. We'll do this for the first and second convolutional layers and these activation maps whould really give you a sense for what features each filter learns to extract. ``` # obtain one batch of testing images dataiter = iter(test_loader) images, labels = dataiter.next() images = images.numpy() # select an image by index idx = 3 img = np.squeeze(images[idx]) # Use OpenCV's filter2D function # apply a specific set of filter weights (like the one's displayed above) to the test image import cv2 plt.imshow(img, cmap='gray') weights = net.conv1.weight.data w = weights.numpy() # 1. first conv layer # for 10 filters fig=plt.figure(figsize=(30, 10)) columns = 5*2 rows = 2 for i in range(0, columns*rows): fig.add_subplot(rows, columns, i+1) if ((i%2)==0): plt.imshow(w[int(i/2)][0], cmap='gray') else: c = cv2.filter2D(img, -1, w[int((i-1)/2)][0]) plt.imshow(c, cmap='gray') plt.show() # Same process but for the second conv layer (20, 3x3 filters): plt.imshow(img, cmap='gray') # second conv layer, conv2 weights = net.conv2.weight.data w = weights.numpy() # 1. 
first conv layer # for 20 filters fig=plt.figure(figsize=(30, 10)) columns = 5*2 rows = 2*2 for i in range(0, columns*rows): fig.add_subplot(rows, columns, i+1) if ((i%2)==0): plt.imshow(w[int(i/2)][0], cmap='gray') else: c = cv2.filter2D(img, -1, w[int((i-1)/2)][0]) plt.imshow(c, cmap='gray') plt.show() ``` ### Question: Choose a filter from one of your trained convolutional layers; looking at these activations, what purpose do you think it plays? What kind of feature do you think it detects? **Answer**: In the first convolutional layer (conv1), the very first filter, pictured in the top-left grid corner, appears to detect horizontal edges. It has a negatively-weighted top row and positively weighted middel/bottom rows and seems to detect the horizontal edges of sleeves in a pullover. In the second convolutional layer (conv2) the first filter looks like it may be dtecting the background color (since that is the brightest area in the filtered image) and the more vertical edges of a pullover.
github_jupyter
# Customer Churning In this notebook I go through the process of evaluating different Classification Models. I end up using `CatBoost`, as it yielded the highest `recall` of all. ## Disclaimer This notebook doesn't include an EDA nor any other type of analysis, given that I already submitted another [notebook](https://www.kaggle.com/augusto1982/credit-card-customers-analysis) for that. ## Loading the data ``` import pandas as pd import matplotlib.pyplot as plt import numpy as np from sklearn.experimental import enable_iterative_imputer from sklearn.impute import IterativeImputer from sklearn.inspection import permutation_importance from sklearn.preprocessing import RobustScaler from sklearn.model_selection import train_test_split, KFold, cross_validate, cross_val_score from sklearn.feature_selection import SelectFromModel from sklearn.metrics import confusion_matrix, recall_score, accuracy_score from sklearn.preprocessing import LabelEncoder, OneHotEncoder import seaborn as sns from sklearn.model_selection import GridSearchCV from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from catboost import CatBoostClassifier from xgboost import XGBClassifier import xgboost as xgb df = pd.read_csv('../input/credit-card-customers/BankChurners.csv') df = df.iloc[:, :-2] # Setting the index df.set_index('CLIENTNUM', inplace=True) # Replacing 'Unknown' values. 
categorical = ['Education_Level', 'Marital_Status', 'Income_Category'] encoders = {} for cat in categorical: encoder = LabelEncoder() encoders[cat] = encoder values = df[cat] known_values = values[values != 'Unknown'] df[cat] = pd.Series( encoder.fit_transform(known_values), index=known_values.index) imp_cat = IterativeImputer(estimator=RandomForestClassifier(), initial_strategy='most_frequent', max_iter=10, random_state=0) df[categorical] = imp_cat.fit_transform(df[categorical]) for cat in categorical: df[cat] = encoders[cat].inverse_transform(df[cat].astype(int)) def make_categorical(data: pd.DataFrame, column: str, categories: list, ordered: bool = False): data[column] = pd.Categorical(df[column], categories=categories, ordered=ordered) df['Attrition_Flag'] = df['Attrition_Flag'].map({'Attrited Customer':1, 'Existing Customer':0}) make_categorical(df, 'Gender', ['F', 'M']) make_categorical(df, 'Education_Level', ['Uneducated', 'High School', 'Graduate', 'College', 'Post-Graduate', 'Doctorate'], True) make_categorical(df, 'Marital_Status', ['Married', 'Single', 'Divorced']) make_categorical(df, 'Income_Category', ['Less than $40K', '$40K - $60K', '$60K - $80K', '$80K - $120K', '$120K +'], True) make_categorical(df, 'Card_Category', ['Blue', 'Silver', 'Gold', 'Platinum'], True) ``` ## Adding additional columns ``` # These columns I added while doing the EDA. age_bins = [20, 40, 60, 80] age_labels = ['20 - 40', '40 - 60', '60 - 80'] df['Age_Range'] = pd.cut(df['Customer_Age'], age_bins, labels=age_labels, ordered=True) df['No_Revolving_Bal'] = df['Total_Revolving_Bal'] == 0 df['New_Customer'] = df['Months_on_book'] <= 24 df['Optimal_Utilization'] = df['Avg_Utilization_Ratio'] <= 0.3 # The next two columns I added after doing some Feature Selection analysis (more on that below). 
df['Avg_Transaction'] = df['Total_Trans_Amt'] / df['Total_Trans_Ct'] def get_avg_q4_q1(row): if row['Total_Ct_Chng_Q4_Q1'] == 0: return 0 return row['Total_Amt_Chng_Q4_Q1'] / row['Total_Ct_Chng_Q4_Q1'] df['Avg_Q4_Q1'] = df.apply(get_avg_q4_q1, axis=1) ``` ## Encoding the categorical variables ``` label_encoding_columns = ['Education_Level', 'Marital_Status'] dummy_encoding_columns = ['Gender', 'Income_Category', 'Card_Category', 'Age_Range'] df[label_encoding_columns]= df[label_encoding_columns].apply(LabelEncoder().fit_transform) df = pd.get_dummies(df, columns=dummy_encoding_columns, prefix=dummy_encoding_columns, drop_first=True) ``` ## Splitting the target and independent variables ``` X = df.iloc[:, 1:] y = df.iloc[:, 0] ``` ## Feature Selection Here I don't use Feature Selection for selecting a subset of relevant features, as that didn't improve the score of the model. Instead, I use it to determine which of the whole group turn out to be more relevant and see if there's any other column I create to reinforce the model. The process determined these are the most relevant: ``` [ 'Total_Relationship_Count', 'Months_Inactive_12_mon', 'Contacts_Count_12_mon', 'Total_Revolving_Bal', 'Total_Amt_Chng_Q4_Q1', 'Total_Trans_Amt', 'Total_Trans_Ct', 'Total_Ct_Chng_Q4_Q1', 'No_Revolving_Bal' ] ``` As we can see we have the columns regarding Q4/Q1, and the two for the total of transactions. Therefore, I decided to create two additional columns, as I previously mentioned (`Avg_Transaction` and `Avg_Q4_Q1`). 
``` # forest = ExtraTreesClassifier(n_estimators=250) # forest.fit(X, y) # # feat_importances = pd.Series(forest.feature_importances_, index=X.columns).sort_values(ascending=False) # # sel = SelectFromModel(forest) # sel.fit(X, y) # selected_feat= X.columns[sel.get_support()] # # df_sel = df[selected_feat] ``` ## Scaling the data ``` X = RobustScaler().fit_transform(X) ``` ## Split into train and test sets ``` X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y) ``` ## Evaluate different models with K-Fold ``` base_models = [ ("LR_model", LogisticRegression(random_state=42,n_jobs=-1)), ("KNN_model", KNeighborsClassifier(n_jobs=-1)), ("SVM_model", SVC(random_state=42, kernel = 'rbf')), ("DT_model", DecisionTreeClassifier(random_state=42)), ("RF_model", RandomForestClassifier(random_state=42,n_jobs=-1)), ("XGB_model", XGBClassifier(random_state=42, n_jobs=-1, scale_pos_weight=5)), ("CXGB_model", CatBoostClassifier(random_state=42, auto_class_weights='Balanced')) ] split = KFold(n_splits=4, shuffle=True, random_state=42) # Preprocessing, fitting, making predictions and scoring for every model: for name, model in base_models: # get cross validation score for each model: cv_results = cross_val_score(model, X, y, cv=split, scoring="recall", n_jobs=-1) # output: min_score = round(min(cv_results), 4) max_score = round(max(cv_results), 4) mean_score = round(np.mean(cv_results), 4) std_dev = round(np.std(cv_results), 4) print(f"{name} cross validation recall score: {mean_score} +/- {std_dev} (std) min: {min_score}, max: {max_score}") ``` As we can see, `CatBoost` seems to be the best option. ## Search for optimal hyperparameters I commented the code below, given that it takes hours to run. 
Its execution produced the following combination of parameters: ``` { 'border_count': 100, 'depth': 6, 'iterations': 250, 'l2_leaf_reg': 100, 'learning_rate': 0.1 } ``` However, I ran this before adding the last two columns, so I tweak them manually some more afterwards. ``` # grid_params = { # 'depth':[4, 5, 6, 7, 8 ,9, 10], # 'iterations':[250, 500, 1000], # 'learning_rate':[0.001, 0.1, 0.2, 0.3], # 'l2_leaf_reg':[3, 5, 10, 100], # 'border_count':[10, 20, 50, 100], # } # # gd_sr = GridSearchCV(estimator=CatBoostClassifier(random_state=42, auto_class_weights='Balanced'), # param_grid=grid_params, # scoring='recall', # cv=5, # n_jobs=-1) # # gd_sr.fit(X_train, y_train) # # best_parameters = gd_sr.best_params_ # print(best_parameters) ``` ## Construction and execution of the optimal? model ``` best_classifier = CatBoostClassifier( random_state=42, border_count=100, depth=6, iterations=140, l2_leaf_reg=100, learning_rate=0.1, auto_class_weights='Balanced', verbose=False ) best_classifier.fit(X_train, y_train) y_pred = best_classifier.predict(X_test) ``` ## Confusion Matrix ``` cm = confusion_matrix(y_test, y_pred) recall = recall_score(y_test, y_pred) # labels = ['Survived', 'No Survived'] ax = sns.heatmap(cm, annot=True) print("recall: {}".format(recall)) ``` ## K-Fold and CatBoost ``` np.mean( cross_val_score( best_classifier, X, y, cv=split, scoring="recall", n_jobs=-1) ) ```
github_jupyter
# "Text Classification with Roberta - Does a Twitter post actually announce a diasater?" - toc:true - branch: master - badges: true - comments: true - author: Peiyi Hung - categories: [category, project] - image: "images/tweet-class.png" ``` import numpy as np import pandas as pd from fastai.text.all import * import re ``` # Import the data and clean it ``` dir_path = "/kaggle/input/nlp-getting-started/" train_df = pd.read_csv(dir_path + "train.csv") test_df = pd.read_csv(dir_path + "test.csv") train_df train_df = train_df.drop(columns=["id", "keyword", "location"]) train_df["target"].value_counts() def remove_URL(text): url = re.compile(r'https?://\S+|www\.\S+') return url.sub(r'',text) train_df["text"] = train_df["text"].apply(remove_URL) test_df["text"] = test_df["text"].apply(remove_URL) def remove_html(text): html=re.compile(r'<.*?>') return html.sub(r'',text) train_df["text"] = train_df["text"].apply(remove_html) test_df["text"] = test_df["text"].apply(remove_html) def remove_emoji(text): emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) u"\U00002702-\U000027B0" u"\U000024C2-\U0001F251" "]+", flags=re.UNICODE) return emoji_pattern.sub(r'', text) train_df["text"] = train_df["text"].apply(remove_emoji) test_df["text"] = test_df["text"].apply(remove_emoji) train_df train_df["text"].apply(lambda x:len(x.split())).plot(kind="hist"); ``` # Get tokens for the transformer ``` from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("roberta-large") ``` From the graph above, we can know that the longest tweet has 30 words, so I set the `max_length` to 30. 
``` train_tensor = tokenizer(list(train_df["text"]), padding="max_length", truncation=True, max_length=30, return_tensors="pt")["input_ids"] ``` # Preparing datasets and dataloaders ``` class TweetDataset: def __init__(self, tensors, targ, ids): self.text = tensors[ids, :] self.targ = targ[ids].reset_index(drop=True) def __len__(self): return len(self.text) def __getitem__(self, idx): t = self.text[idx] y = self.targ[idx] return t, tensor(y) train_ids, valid_ids = RandomSplitter()(train_df) target = train_df["target"] train_ds = TweetDataset(train_tensor, target, train_ids) valid_ds = TweetDataset(train_tensor, target, valid_ids) train_dl = DataLoader(train_ds, bs=64) valid_dl = DataLoader(valid_ds, bs=512) dls = DataLoaders(train_dl, valid_dl).to("cuda") ``` # Get the model ``` bert = AutoModelForSequenceClassification.from_pretrained("roberta-large", num_labels=2).train().to("cuda") class BertClassifier(Module): def __init__(self, bert): self.bert = bert def forward(self, x): return self.bert(x).logits model = BertClassifier(bert) ``` # Start training ``` learn = Learner(dls, model, metrics=[accuracy, F1Score()]).to_fp16() learn.lr_find() learn.fit_one_cycle(3, lr_max=1e-5) ``` # Find the best threshold for f1 score ``` from sklearn.metrics import f1_score preds, targs = learn.get_preds() min_threshold = None max_f1 = -float("inf") thresholds = np.linspace(0.3, 0.7, 50) for threshold in thresholds: f1 = f1_score(targs, F.softmax(preds, dim=1)[:, 1]>threshold) if f1 > max_f1: min_threshold = threshold min_f1 = f1 print(f"threshold:{threshold:.4f} - f1:{f1:.4f}") ``` # Make prediction on the test set and submit the prediction ``` test_tensor = tokenizer(list(test_df["text"]), padding="max_length", truncation=True, max_length=30, return_tensors="pt")["input_ids"] class TestDS: def __init__(self, tensors): self.tensors = tensors def __len__(self): return len(self.tensors) def __getitem__(self, idx): t = self.tensors[idx] return t, tensor(0) test_dl = 
DataLoader(TestDS(test_tensor), bs=128) test_preds = learn.get_preds(dl=test_dl) sub = pd.read_csv(dir_path + "sample_submission.csv") prediction = (F.softmax(test_preds[0], dim=1)[:, 1]>min_threshold).int() sub = pd.read_csv(dir_path + "sample_submission.csv") sub["target"] = prediction sub.to_csv("submission.csv", index=False) ```
github_jupyter
## COCO dataset validation using Faster-RCNN ``` import json from pathlib import Path import numpy as np import matplotlib.pyplot as plt import cv2 as cv import torch import tqdm import torchvision.datasets as dset from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, fasterrcnn_resnet50_fpn from torchvision.transforms import ToTensor, Compose from torchvision.datasets import CocoDetection from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') coco_val = dset.CocoDetection(root="../data/coco/val2017/", annFile="../data/coco/annotations/instances_val2017.json", transform=ToTensor()) model = fasterrcnn_resnet50_fpn(pretrained=True) model.to(device) params = [p for p in model.parameters() if p.requires_grad] optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005) # Since images are different sizes, must keep batch size to 1... coco_val_dl = torch.utils.data.DataLoader(coco_val, batch_size=1, num_workers=1) def validation_loop(coco_dataloader, model): # Prepare a dictionary of counts for each category counts = {} for cid in coco_dataloader.dataset.coco.cats.keys(): counts[cid] = 0 results = [] model.eval() dl = tqdm.tqdm(coco_dataloader) with torch.no_grad(): for X, y in dl: pred = model(X.to(device)) # For some reason, some images return empty labels (?) 
if not y: continue image_id = y[0]['image_id'].item() # Record instances of each category for gt in y: cid = gt['category_id'].item() counts[cid] += 1 for p in pred: for label, box, score in zip(p['labels'].tolist(), p['boxes'].tolist(), p['scores'].tolist()): res = {'image_id': image_id} res['category_id'] = label # Convert to x, y, width, height res['bbox'] = [box[0], box[1], box[2] - box[0], box[3] - box[1]] res['score'] = score results.append(res) return results, counts #results, counts = validation_loop(coco_val_dl, model) #with open("results.json", "w") as f: # json.dump(results, f) #with open("counts.json", "w") as f: # json.dump(counts, f) with open("results.json", "r") as f: results = json.load(f) with open("counts.json", "r") as f: counts = json.load(f) img_ids = set() cat_ids = set() for res in results: img_ids.add(res['image_id']) cat_ids.add(res['category_id']) coco_res = coco_val.coco.loadRes("results.json") coco_eval = COCOeval(cocoGt=coco_val.coco, cocoDt=coco_res, iouType='bbox') def coco_summarize(coco_eval, ap=1, iouThr=None, areaRng='all', maxDets=100 ): p = coco_eval.params iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}' titleStr = 'Average Precision' if ap == 1 else 'Average Recall' typeStr = '(AP)' if ap==1 else '(AR)' iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \ if iouThr is None else '{:0.2f}'.format(iouThr) aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] if ap == 1: # dimension of precision: [TxRxKxAxM] s = coco_eval.eval['precision'] # IoU if iouThr is not None: t = np.where(iouThr == p.iouThrs)[0] s = s[t] s = s[:,:,:,aind,mind] else: # dimension of recall: [TxKxAxM] s = coco_eval.eval['recall'] if iouThr is not None: t = np.where(iouThr == p.iouThrs)[0] s = s[t] s = s[:,:,aind,mind] if len(s[s>-1])==0: mean_s = -1 else: mean_s = np.mean(s[s>-1]) return mean_s coco_eval.params.areaRng = [[0, 1e8]] 
coco_eval.params.areaRngLbl = ['all'] coco_eval.params.maxDets = [100] coco_eval.params.iouThrs = [0.5] coco_eval.params.imgIds = list(img_ids) coco_eval.params.catIds = list(cat_ids) coco_eval.evaluate() coco_eval.accumulate() %%capture precisions = [] recalls = [] # IoU threshold for cid in cat_ids: coco_eval.params.catIds = [cid] coco_eval.evaluate() coco_eval.accumulate() precisions.append(coco_summarize(coco_eval)) recalls.append(coco_summarize(coco_eval, ap=0)) precisions = np.array(precisions) recalls = np.array(recalls) f1_scores = 2 * precisions * recalls / (precisions + recalls) k = 20 k_lowest = np.argsort(f1_scores)[:k] bad_cats = np.array(list(cat_ids))[k_lowest] bad_cat_dict = {coco_val.coco.cats[cid]['name']: cid for cid in bad_cats} print(f"{'Category': >16}\t{'F1': >5} \tInstances\n") for cid, low in zip(bad_cats, k_lowest): print(f"{coco_val.coco.cats[cid]['name']: >16}", f"\t{f1_scores[low]:0.04f} \t{counts[str(cid)]}") %%capture coco_eval.params.catIds = list(cat_ids) bad_cat_imgs = [] for cid in bad_cats: cat_img_ids = set() coco_eval.params.catIds = [cid] coco_eval.evaluate() coco_eval.accumulate() cat_imgs = np.where(coco_eval.evalImgs)[0] for cimg in cat_imgs: cat_img_ids.add(coco_eval.evalImgs[cimg]['image_id']) bad_cat_imgs.append(sorted(list(cat_img_ids))) coco_eval = COCOeval(cocoGt=coco_val.coco, cocoDt=coco_res, iouType='bbox') # IoU threshold coco_eval.params.catIds = list(cat_ids) coco_eval.params.imgIds = list(img_ids) coco_eval.params.maxDets = [100] coco_eval.params.iouThrs = [0.5] coco_eval.evaluate() coco_eval.accumulate() def get_img_from_id(iid): ind = np.where(np.array(coco_val.ids) == iid)[0][0] img, ann = coco_val[ind] return img.squeeze().permute(1, 2, 0).numpy().copy(), ann bad_cat_imgs = [list(set(coco_val.coco.catToImgs[cat])) for cat in bad_cats] results_by_img = {} for res in results: if results_by_img.get(res['image_id']): results_by_img[res['image_id']].append(res) else: results_by_img[res['image_id']] = [res] def 
write_bad_cat_images(cat_name, output_dir=Path("../data/coco_val_results/"), draw_bboxes=True, show_images=False): outdir = Path(output_dir) / f"{cat_name}" outdir.mkdir(exist_ok=True) cid = bad_cat_dict[cat_name] cat_imgs = bad_cat_imgs[np.where(bad_cats == cid)[0][0]] for iid in cat_imgs: metrics = coco_eval.evaluateImg(iid, cid, [0, 1e6], 1000) if metrics['gtMatches'].any(): continue img_res = results_by_img[iid] img, anns = get_img_from_id(iid) if draw_bboxes: for res in anns: if res['category_id'] == cid: bx, by, w, h = np.array(res['bbox'], dtype=int) img = cv.rectangle(img, (bx, by), (bx+w, by+h), color=[1, 0, 0], thickness=3) for res in img_res: if res['category_id'] == cid: bx, by, w, h = np.array(res['bbox'], dtype=int) img = cv.rectangle(img, (bx, by), (bx+w, by+h), color=[0, 1, 1], thickness=2) if show_images: plt.figure(figsize=(5, 5)) plt.imshow(img) plt.axis("off") plt.show() plt.close() img = (np.round(img * 255)).astype(np.uint8) cv.imwrite(str(outdir / f"{iid}.jpg"), cv.cvtColor(img, cv.COLOR_BGR2RGB)) img_dir = Path("../data/coco_val_results/unlabeled") img_dir.mkdir(exist_ok=True) for cat_name in ["dining table", "handbag", "backpack", "bench", "chair", "hair drier"]: write_bad_cat_images(cat_name, output_dir=img_dir, draw_bboxes=False) coco_eval.evaluate_image(cid ```
github_jupyter
<a href="https://colab.research.google.com/github/google/jax-md/blob/main/notebooks/talk_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #@title Import & Util !pip install -q git+https://www.github.com/google/jax !pip install -q git+https://www.github.com/google/jax-md !pip install dm-haiku !pip install optax import jax.numpy as np from jax import device_put from jax.config import config # TODO: Uncomment this and enable warnings when XLA bug is fixed. import warnings; warnings.simplefilter('ignore') # config.update('jax_enable_x64', True) from IPython.display import set_matplotlib_formats set_matplotlib_formats('pdf', 'svg') import matplotlib.pyplot as plt import seaborn as sns import pickle import warnings warnings.simplefilter("ignore") sns.set_style(style='white') background_color = [56 / 256] * 3 def plot(x, y, *args): plt.plot(x, y, *args, linewidth=3) plt.gca().set_facecolor([1, 1, 1]) def draw(R, **kwargs): if 'c' not in kwargs: kwargs['color'] = [1, 1, 0.9] ax = plt.axes(xlim=(0, float(np.max(R[:, 0]))), ylim=(0, float(np.max(R[:, 1])))) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.set_facecolor(background_color) plt.scatter(R[:, 0], R[:, 1], marker='o', s=1024, **kwargs) plt.gcf().patch.set_facecolor(background_color) plt.gcf().set_size_inches(6, 6) plt.tight_layout() def draw_big(R, **kwargs): if 'c' not in kwargs: kwargs['color'] = [1, 1, 0.9] fig = plt.figure(dpi=128) ax = plt.axes(xlim=(0, float(np.max(R[:, 0]))), ylim=(0, float(np.max(R[:, 1])))) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.set_facecolor(background_color) s = plt.scatter(R[:, 0], R[:, 1], marker='o', s=0.5, **kwargs) s.set_rasterized(True) plt.gcf().patch.set_facecolor(background_color) plt.gcf().set_size_inches(10, 10) plt.tight_layout() def draw_displacement(R, dR): plt.quiver(R[:, 0], R[:, 1], dR[:, 0], dR[:, 1], color=[1, 0.5, 0.5]) # Progress Bars 
from IPython.display import HTML, display import time def ProgressIter(iter_fun, iter_len=0): if not iter_len: iter_len = len(iter_fun) out = display(progress(0, iter_len), display_id=True) for i, it in enumerate(iter_fun): yield it out.update(progress(i + 1, iter_len)) def progress(value, max): return HTML(""" <progress value='{value}' max='{max}', style='width: 45%' > {value} </progress> """.format(value=value, max=max)) # Data Loading !wget -O silica_train.npz https://www.dropbox.com/s/3dojk4u4di774ve/silica_train.npz?dl=0 !wget https://raw.githubusercontent.com/google/jax-md/main/examples/models/si_gnn.pickle import numpy as onp with open('silica_train.npz', 'rb') as f: files = onp.load(f) Rs, Es, Fs = [device_put(x) for x in (files['arr_0'], files['arr_1'], files['arr_2'])] Rs = Rs[:10] Es = Es[:10] Fs = Fs[:10] test_Rs, test_Es, test_Fs = [device_put(x) for x in (files['arr_3'], files['arr_4'], files['arr_5'])] test_Rs = test_Rs[:200] test_Es = test_Es[:200] test_Fs = test_Fs[:200] def tile(box_size, positions, tiles): pos = positions for dx in range(tiles): for dy in range(tiles): for dz in range(tiles): if dx == 0 and dy == 0 and dz == 0: continue pos = np.concatenate((pos, positions + box_size * np.array([[dx, dy, dz]]))) return box_size * tiles, pos ``` ## Demo www.github.com/google/jax-md -> notebooks -> talk_demo.ipynb ### Energy and Automatic Differentiation $u(r) = \begin{cases}\frac13(1 - r)^3 & \text{if $r < 1$} \\ 0 & \text{otherwise} \end{cases}$ ``` import jax.numpy as np def soft_sphere(r): return np.where(r < 1, 1/3 * (1 - r) ** 3, 0.) 
print(soft_sphere(0.5)) r = np.linspace(0, 2., 200) plot(r, soft_sphere(r)) ``` We can compute its derivative automatically ``` from jax import grad du_dr = grad(soft_sphere) print(du_dr(0.5)) ``` We can vectorize the derivative computation over many radii ``` from jax import vmap du_dr_v = vmap(du_dr) plot(r, soft_sphere(r)) plot(r, -du_dr_v(r)) ``` ### Randomly Initialize a System ``` from jax import random key = random.PRNGKey(0) particle_count = 128 dim = 2 from jax_md.quantity import box_size_at_number_density # number_density = N / V box_size = box_size_at_number_density(particle_count = particle_count, number_density = 1.0, spatial_dimension = dim) R = random.uniform(key, (particle_count, dim), maxval=box_size) draw(R) ``` ### Displacements and Distances ``` from jax_md import space displacement, shift = space.periodic(box_size) print(displacement(R[0], R[1])) metric = space.metric(displacement) print(metric(R[0], R[1])) ``` Compute distances between pairs of points ``` displacement = space.map_product(displacement) metric = space.map_product(metric) print(metric(R[:3], R[:3])) ``` ### Total energy of a system ``` def energy(R): dr = metric(R, R) return 0.5 * np.sum(soft_sphere(dr)) print(energy(R)) print(grad(energy)(R).shape) ``` ### Minimization ``` from jax_md.minimize import fire_descent init_fn, apply_fn = fire_descent(energy, shift) state = init_fn(R) trajectory = [] while np.max(np.abs(state.force)) > 1e-3: state = apply_fn(state) trajectory += [state.position] from jax_md.colab_tools import renderer trajectory = np.stack(trajectory) renderer.render(box_size, {'particles': renderer.Disk(trajectory)}, resolution=(512, 512)) cond_fn = lambda state: np.max(np.abs(state.force)) > 1e-3 ``` ### Making it Fast ``` def minimize(R): init, apply = fire_descent(energy, shift) state = init(R) for _ in range(20): state = apply(state) return energy(state.position) %%timeit minimize(R).block_until_ready() from jax import jit # Just-In-Time compile to GPU minimize = 
jit(minimize) # The first call incurs a compilation cost minimize(R) %%timeit minimize(R).block_until_ready() from jax.lax import while_loop def minimize(R): init_fn, apply_fn = fire_descent(energy, shift) state = init_fn(R) # Using a JAX loop reduces compilation cost state = while_loop(cond_fun=cond_fn, body_fun=apply_fn, init_val=state) return state.position from jax import jit minimize = jit(minimize) R_is = minimize(R) %%timeit minimize(R).block_until_ready() ``` ### Elastic Moduli ``` displacement, shift = space.periodic_general(box_size, fractional_coordinates=False) from jax_md import energy soft_sphere = energy.soft_sphere_pair(displacement, alpha=3) print(soft_sphere(R_is)) strain_energy = lambda strain, R: soft_sphere(R, new_box=box_size * strain) from jax import hessian elastic_constants = hessian(strain_energy)(np.eye(2), R_is) elastic_constants.shape from jax_md.quantity import bulk_modulus B = bulk_modulus(elastic_constants) print(B) from functools import partial @jit def elastic_moduli(number_density, key): # Randomly initialize particles. box_size = box_size_at_number_density(particle_count = particle_count, number_density = number_density, spatial_dimension = dim) R = random.uniform(key, (particle_count, dim), maxval=box_size) # Create the space and energy function. displacement, shift = space.periodic_general(box_size, fractional_coordinates=False) soft_sphere = energy.soft_sphere_pair(displacement, alpha=3) # Minimize at no strain. init_fn, apply_fn = fire_descent(soft_sphere, shift) state = init_fn(R) state = while_loop(cond_fn, apply_fn, state) # Compute the bulk modulus. 
strain_energy = lambda strain, R: soft_sphere(R, new_box=box_size * strain) elastic_constants = hessian(strain_energy)(np.eye(2), state.position) return bulk_modulus(elastic_constants) number_densities = np.linspace(1.0, 1.6, 40) elastic_moduli = vmap(elastic_moduli, in_axes=(0, None)) B = elastic_moduli(number_densities, key) plot(number_densities, B) keys = random.split(key, 10) elastic_moduli = vmap(elastic_moduli, in_axes=(None, 0)) B_ensemble = elastic_moduli(number_densities, keys) for B in B_ensemble: plt.plot(number_densities, B) plot(number_densities, np.mean(B_ensemble, axis=0), 'k') ``` ### Going Big ``` key = random.PRNGKey(0) particle_count = 128000 box_size = box_size_at_number_density(particle_count = particle_count, number_density = 1.0, spatial_dimension = dim) R = random.uniform(key, (particle_count, dim)) * box_size displacement, shift = space.periodic(box_size) renderer.render(box_size, {'particles': renderer.Disk(R)}, resolution=(512, 512)) from jax_md.energy import soft_sphere_neighbor_list neighbor_fn, energy_fn = soft_sphere_neighbor_list(displacement, box_size) init_fn, apply_fn = fire_descent(energy_fn, shift) nbrs = neighbor_fn(R) print(nbrs.idx.shape) state = init_fn(R, neighbor=nbrs) def cond_fn(state_and_nbrs): state, _ = state_and_nbrs return np.any(np.abs(state.force) > 1e-3) def step_fn(state_and_nbrs): state, nbrs = state_and_nbrs nbrs = neighbor_fn(state.position, nbrs) state = apply_fn(state, neighbor=nbrs) return state, nbrs state, nbrs = while_loop(cond_fn, step_fn, (state, nbrs)) renderer.render(box_size, {'particles': renderer.Disk(state.position)}, resolution=(700, 700)) nbrs = neighbor_fn(state.position) nbrs.idx.shape ``` ## Neural Network Potentials Here is some data we loaded of a 64-atom Silicon system computed using DFT. 
``` print(Rs.shape) # Positions print(Es.shape) # Energies print(Fs.shape) # Forces E_mean = np.mean(Es) E_std = np.std(Es) print(f'E_mean = {E_mean}, E_std = {E_std}') plt.hist(Es) ``` Setup the system and a Graph Neural Network energy function ``` box_size = 10.862 displacement, shift = space.periodic(box_size) from jax_md.energy import graph_network init_fn, energy_fn = graph_network(displacement, r_cutoff=3.0) params = init_fn(key, test_Rs[0]) energy_fn(params, test_Rs[0]) vectorized_energy_fn = vmap(energy_fn, (None, 0)) predicted_Es = vectorized_energy_fn(params, test_Rs) plt.plot(test_Es, predicted_Es, 'o') ``` Define a loss function. ``` def energy_loss_fn(params): return np.mean((vectorized_energy_fn(params, Rs) - Es) ** 2) def force_loss_fn(params): # We want the gradient with respect to the position, not the parameters. grad_fn = vmap(grad(energy_fn, argnums=1), (None, 0)) return np.mean((grad_fn(params, Rs) + Fs) ** 2) @jit def loss_fn(params): return energy_loss_fn(params) + force_loss_fn(params) ``` Take a few steps of gradient descent. ``` import optax opt = optax.chain(optax.clip_by_global_norm(0.01), optax.adam(1e-4)) opt_state = opt.init(params) @jit def update(params, opt_state): updates, opt_state = opt.update(grad(loss_fn)(params), opt_state) return optax.apply_updates(params, updates), opt_state for i in ProgressIter(range(100)): params, opt_state = update(params, opt_state) if i % 10 == 0: print(f'Loss at step {i} is {loss_fn(params)}') predicted_Es = vectorized_energy_fn(params, test_Rs) plt.plot(test_Es, predicted_Es, 'o') ``` Now load a pretrained model. 
``` with open('si_gnn.pickle', 'rb') as f: params = pickle.load(f) from functools import partial energy_fn = partial(energy_fn, params) predicted_Es = vmap(energy_fn)(test_Rs) plt.plot(test_Es, predicted_Es, 'o') from jax_md.quantity import force force_fn = force(energy_fn) predicted_Fs = force_fn(test_Rs[1]) plt.plot(test_Fs[1].reshape((-1,)), predicted_Fs.reshape((-1,)), 'o') ``` This energy can be used in a simulation ``` from jax_md.simulate import nvt_nose_hoover from jax_md.quantity import temperature K_B = 8.617e-5 dt = 1e-3 kT = K_B * 300 Si_mass = 2.91086E-3 init_fn, apply_fn = nvt_nose_hoover(energy_fn, shift, dt, kT) apply_fn = jit(apply_fn) from jax.lax import fori_loop state = init_fn(key, Rs[0], Si_mass, T_initial=300 * K_B) @jit def take_steps(state): return fori_loop(0, 100, lambda i, state: apply_fn(state), state) times = np.arange(100) * dt temperatures = [] trajectory = [] for _ in ProgressIter(times): state = take_steps(state) temperatures += [temperature(state.velocity, Si_mass) / K_B] trajectory += [state.position] plot(times, temperatures) trajectory = np.stack(trajectory) renderer.render(box_size, {'atoms': renderer.Sphere(trajectory)}, resolution=(512,512)) box_size, R = tile(box_size, Rs[0], 3) displacement, shift = space.periodic(box_size) neighbor_fn, _, energy_fn = energy.graph_network_neighbor_list(displacement, box_size, r_cutoff=3.0, dr_threshold=0.5) energy_fn = partial(energy_fn, params) init_fn, apply_fn = nvt_nose_hoover(energy_fn, shift, dt, kT) apply_fn = jit(apply_fn) nbrs = neighbor_fn(R) state = init_fn(key, R, Si_mass, T_initial=300 * K_B, neighbor=nbrs) def step_fn(i, state_and_nbrs): state, nbrs = state_and_nbrs nbrs = neighbor_fn(state.position, nbrs) state = apply_fn(state, neighbor=nbrs) return state, nbrs times = np.arange(100) * dt temperatures = [] trajectory = [] for _ in ProgressIter(times): state, nbrs = fori_loop(0, 100, step_fn, (state, nbrs)) temperatures += [temperature(state.velocity, Si_mass) / K_B] 
trajectory += [state.position] trajectory = np.stack(trajectory) renderer.render(box_size, { 'atoms': renderer.Sphere(trajectory, color=np.array([0, 0, 1])), 'bonds': renderer.Bond('atoms', nbrs.idx, color=np.array([1, 0, 0])) }, resolution=(512,512)) ```
github_jupyter
# Managing dependencies using containerization ## Topic learning objectives By the end of this topic, students should be able to: 1. Explain what containers are, and why they can be useful for reproducible data analyses 2. Discuss the advantages and limitations of containerization (e.g., Docker) in the context of reproducible data analyses 3. Compare and contrast the difference between running software/scripts in a virtual environment, a virtual machine and a container 4. Evaluate, choose and justify an appropriate environment management solution based on the data analysis project’s complexity, expected usage and longevity. 5. Use a containerization software (e.g., Docker) to run the software needed for your analysis 6. Write a container file (e.g., Dockerfile) that can be used to reproducibly build a container image that would contain the needed software and environment dependencies of your Data Science project 7. Use manual and automated tools (e.g., Docker, GitHub Actions) to build and share container images 8. List good container base images for Data Science projects ## Introduction to containerization ### Documenting and loading dependencies You've made a beautiful data analysis pipeline/project using make, R, and/or Python. It runs on your machine, but how easily can you, or someone else, get it working on theirs? The answer usually is, it depends... What does it depend on? 1. Does your `README` and your scripts make it blatantly obvious what programming languages and packages are needed to run your data analysis pipeline/project? 2. Do you also document the version numbers of the programming languages and packages you used? This can have big consequences when it comes to reproducibility... (*e.g.*, the [change to random number generation](https://blog.revolutionanalytics.com/2019/05/whats-new-in-r-360.html) in R in 2019?) 3. 
Did you document what other software (beyond the programming languages and packages used) and operating system dependencies are needed to run your data analysis pipeline/project? *Virtual environments can be tremendously helpful with #1 & #2, however, they may or may not be helpful to manage #3...* __*Enter containerization as a possible solution!*__ ### What is a container? Containers are another way to generate (and share!) isolated computational environments. They differ from virtual environments (which we discussed previously) in that they are even more isolated from the computer's operating system, as well as they can be used to share many other types of software, applications and operating system dependencies. Before we can fully define containers, however, we need to define **virtualization**. Virtualization is a process that allows us to divide the elements of a single computer into several virtual elements. These elements can include computer hardware platforms, storage devices, and computer network resources, and even operating system user spaces (e.g., graphical tools, utilities, and programming languages). Containers virtualize operating system user spaces so that they can isolate the processes they contain, as well as control the processes’ access to computer resources (e.g., CPUs, memory and disk space). What this means in practice is that an operating system user space can be carved up into multiple containers running the same, or different processes, in isolation. 
Below we show the schematic of a container whose virtual user space contains the: - R programming language, the Bioconductor package manager, and two Bioconductor packages - Galaxy workflow software and two toolboxes that can be used with it - Python programming language, iPython interpreter and Jupyter notebook package <img src="img/13742_2016_article_135_f7.jpeg" width=250> **Schematic of a container for genomics research.** Source: <https://doi.org/10.1186/s13742-016-0135-4> #### Exercise - running a simple container To further illustrate what a container looks like, and feels like, we can use Docker (containerization software) to run one and explore. First we will run a Linux (Debian-flavoured) container that has R installed. To run this, type: ``` docker run --rm -it rocker/r-base:3.6.3 ``` When you successfully launch the container, R should have started. Check the version of R - is it the same as your computer's version of R? Use `getwd()` and `list.files()` to explore the container's filesystem from R. Does this look like your computer's filesystem or something else? #### Exercise - running a container with a web app Next, try to use Docker to run a container that contains the RStudio server web-application installed: ``` docker run --rm -p 8787:8787 -e PASSWORD="apassword" rocker/rstudio:4.1.2 ``` Then visit a web browser on your computer and type: <http://localhost:8787> If it worked, then you should be at an RStudio Sign In page. To sign in, use the following credentials: - **username:** rstudio - **password:** apassword The RStudio server web app being run by the container should look something like this: <img src="img/rstudio-container-web-app.png" width=600> ## Contrasting containers with virtual machines Virtual machines are another technology that can be used to generate (and share) isolated computational environments. Virtual machines emulate the functionality of an entire computer on another physical computer. 
With virtual machines, the virtualization occurs at the layer of software that sits between the computer's hardware and the operating system(s). This software is called a hypervisor. For example, on a Mac laptop, you could install a program called [Oracle Virtual Box](https://www.virtualbox.org/) to run a virtual machine whose operating system was Windows 10, as the screenshot below shows: <img src="https://www.virtualbox.org/raw-attachment/wiki/Screenshots/Windows_8.1_on_OSX.png"> **A screenshot of a Mac OS computer running a Windows virtual machine.** Source: <https://www.virtualbox.org/wiki/Screenshots> Below, we share an illustration that compares where virtualization happens in containers compared to virtual machines. This difference leads to containers being more light-weight and portable compared to virtual machines, and also less isolated. <img src="img/container_v_vm.png" width=600> *Source: https://www.docker.com/resources/what-container* **Key take home:** - Containerization software shares the host's operating system, whereas virtual machines have a completely separate, additional operating system. 
This can make containers lighter (smaller in terms of size) and more resource and time-efficient than using a virtual machine.* ## Contrasting common computational environment virtualization strategies | Feature | Virtual environment | Container | Virtual machine | |---------|---------------------|-----------|-----------------| | Virtualization level | Application | Operating system user-space | Hardware | | Isolation | Programming languages, packages | Programming languages, packages, **other software, operating system dependencies, filesystems, networks** | Programming languages, packages, other software, operating system dependencies, filesystems, networks, **operating systems** | | Size | Extremely light-weight | light-weight | heavy-weight | ## Virtualization strategy advantages and disadvantages for reproducibility Let's collaboratively generate a list of advantages and disadvantages of each virtualization strategy in the context of reproducibility: ### Virtual environment #### Advantages - Extremely small size - Porous (less isolated) - makes it easy to pair the virtualized computational environment with files on your computer - Specify these with a single text file #### Disadvantages - Not always possible to capture and share operating system dependencies, and other software your analysis depends upon - Computational environment is not fully isolated, and so silent missed dependencies ### Containers #### Advantages - Somewhat light-weight in size (manageable for easy sharing - there are tools and software to facilitate this) - Possible to capture and share operating system dependencies, and other software your analysis depends upon - Computational environment is fully isolated, and errors will occur if dependencies are missing - Specify these with a single text file - Can share volumes and ports (advantage compared to virtual machines) #### Disadvantages - Possible security issues - running software on your computer that you may allow to be less isolated 
(i.e., mount volumes, expose ports) - Takes some effort to share volumes and ports (disadvantage compared to virtual environments) ### Virtual machine #### Advantages - High security, because these are much more isolated (filesystem, ports, etc.) - Can share an entirely different operating system (might not be so useful in the context of reproducibility however...) #### Disadvantages - Very big in size, which can make it prohibitive to share them - Takes great effort to share volumes and ports - which makes it hard to give access to data on your computer ## Container usage workflow A schematic of the container usage workflow from a [blog post](https://blog.octo.com/en/docker-registry-first-steps/) by Arnaud Mazin: <img src="img/docker-stages.png" width=600> *Source: [OctoTalks](https://blog.octo.com/en/docker-registry-first-steps/)* ## Image vs container? Analogy: The program Chrome is like a Docker image, whereas a Chrome window is like a Docker container. <img src="img/instance_analogy.png" width="600"> You can list the container **images** on your computer that you pulled using Docker via: `docker images`. 
You should see a list like this when you do this: ``` REPOSITORY TAG IMAGE ID CREATED SIZE rocker/rstudio 4.1.2 ff47c56c9c0b 8 days ago 1.89GB continuumio/miniconda3 latest 4d529c886124 4 weeks ago 399MB jupyter/base-notebook latest 8610b7acbd67 5 weeks ago 683MB jupyter/minimal-notebook latest 4801dcfde35b 2 months ago 1.38GB rocker/r-base latest 91af7f4c94cd 3 months ago 814MB ubuntu focal ba6acccedd29 3 months ago 72.8MB rocker/r-base 3.6.3 ddcf1852524d 23 months ago 679MB ``` You can list the states of containers that have been started by Docker on your computer (and not yet removed) via: `docker ps -a`: ``` CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 9160100c7d4b rocker/r-base:3.6.3 "R" 5 seconds ago Up 4 seconds friendly_merkle 0d0871c90313 rocker/rstudio:4.1.2 "/init" 33 minutes ago Up 33 minutes 0.0.0.0:8787->8787/tcp, :::8787->8787/tcp exciting_kepler ``` ## What is a container registry A container registry Is a remote repository used to share container images. This is similar to remote version control repositories for sharing code. Instead of code however, it is container images that are pushed and pulled to/from there. There are many container registries that can be used, but for this course we will focus on the widely-used DockerHub container registry: <https://hub.docker.com/> #### Demonstration Let's visit the repositories for the two container images that we used in the exercise earlier in class: - [rocker/r-base](https://hub.docker.com/r/rocker/r-base) - [rocker/rstudio](https://hub.docker.com/r/rocker/rstudio) Question: how did we get the images for the exercise earlier in class? We were just prompted to type `docker run...` Answer: `docker run ...` will first look for images you have locally, and run those if they exist. If they do not exist, it then attempts to pull the image from DockerHub. ## How do we specify a container image? Container images are specified from plain text files! 
In the case of the Docker containerization software, we call these `Dockerfiles`. We will explain these in more detail later, however for now it is useful to look at one to get a general idea of their structure: Example `Dockerfile`: ``` FROM continuumio/miniconda3 # Install Jupyter, JupterLab, R & the IRkernel RUN conda install -y --quiet \ jupyter \ jupyterlab=3.* \ r-base=4.1.* \ r-irkernel # Install JupyterLab Git Extension RUN pip install jupyterlab-git # Create working directory for mounting volumes RUN mkdir -p /opt/notebooks # Make port 8888 available for JupyterLab EXPOSE 8888 # Install Git, the nano-tiny text editor and less (needed for R help) RUN apt-get update && \ apt-get install --yes \ git \ nano-tiny \ less # Copy JupyterLab start-up script into container COPY start-notebook.sh /usr/local/bin/ # Change permission of startup script and execute it RUN chmod +x /usr/local/bin/start-notebook.sh ENTRYPOINT ["/usr/local/bin/start-notebook.sh"] # Switch to staring in directory where volumes will be mounted WORKDIR "/opt/notebooks" ``` The commands in all capitals are Docker commands. `Dockerfile`s typically start with a `FROM` command that specifies which base image the new image should be built off. Docker images are built in layers - this helps make them more light-weight. The `FROM` command is usually followed by `RUN` commands that usually install new software, or execute configuration commands. Other commands in this example copy in needed configuration files, expose ports, specify the working directory, and specify programs to execute at start-up. 
#### Demonstration of container images being built from layers Let's take a look at the `Dockerfile` for the `jupyter/docker-stacks` `r-notebook` container image: - [Dockerfile](https://github.com/jupyter/docker-stacks/blob/master/r-notebook/Dockerfile) *Question: What images does it build off?* ## Running containers Below we demonstrate how to run containers using the [`continuumio/miniconda3` image](https://hub.docker.com/r/continuumio/miniconda3) as an example: #### Step 1 - launch the Docker app (for OSX & Windows only) - Use launchpad/Finder/Start menu/etc to find and launch Docker > Note: Docker might already be running, if so great, but if its not, the commands below will not work. So it is always good to check! #### Step 2 - get container image from Dockerhub - open the terminal - type: `docker pull continuumio/miniconda3` - verify that it successfully pulled by typing: `docker images`, you should see something like: ``` REPOSITORY TAG IMAGE ID CREATED SIZE continuumio/miniconda3 latest 4d529c886124 4 weeks ago 399MB ``` > Note 1: You can skip this step and just got onto `docker run ...` as that command will pull the image if you do not have it locally. > > Note 2: If you ever need to delete a container image from your computer, you can run `docker rmi <IMAGE_ID>` to do so. #### Step 3 - launch a container from the image and poke around! - type: `docker run -it continuumio/miniconda3` - If it worked, then your command line prompt should now look something like this: ``` root@ad0560c5b81a:/# ``` - use `ls`, `cd`, `pwd` and explore the container - type `exit` to leave when you are done (your prompt will look normal again)! #### Step 4 - clean up your container! - After you close a container it still "hangs" around... 
- View any existing containers using `docker ps -a` - Remove the container by typing `docker rm <container_id>` - Prove to yourself that the container is no longer "hanging around" via `docker ps -a`, but that you still have the image installed (via `docker images`) > Note: to remove running containers, you will need to first stop them via `docker stop <container_id>` #### That's a lot of work... - We can tell Docker to delete the container upon exit using the `--rm` flag in the run command. - Type the command below to run the container again, exit it and prove to yourself that the container was deleted (but not the image!): ``` docker run -it --rm continuumio/miniconda3 ``` ## Mounting volumes to containers Often times we want to use the software made available to us in containers on files on our computers. To do this, we need to explicitly tell Docker to mount a volume to the container. We can do this via: `-v <path_to_computer_directory>:<absolute_path_to_container_directory>` Often, we want to mount the volume from our current directory (where we are working) and we can do that with a short-form of `/$(pwd)` in place of the path to our computer's directory. To mount our current directory to a container from the `continuumio/miniconda3` image we type the following on your laptop: ``` docker run -it --rm -v /$(pwd):/home/my_mounted_volume continuumio/miniconda3 ``` Navigate to the directory where you mounted your files via: `cd /home/my_mounted_volume` and type `ls` to ensure you can see them. > Note: if you are mounting volumes to a container from a Docker image that runs a web app, be sure to read the documentation to see where you should mount that volume. Usually the web apps are only exposed to certain directories and you will only be able to access the files in the mounted volume if you mount them to the correct place. 
For example, in the `rocker/rstudio` image that we loaded earlier, volumes need to be mounted within `/home/rstudio/` to be able to access them via the RStudio server web app. ### Windows notes for mounting volumes: - Windows machines need to explicitly share drives with Docker - this should be part of your computer setup! - On Windows, the laptop path depends what shell you are using, here are some details: - If you are going to run it in Windows terminal, then the command should be: `docker run --rm -it -v /$(pwd):<PATH_ON_CONTAINER> <IMAGE_NAME>` to share the current directory. - If you are going to run it in Power Shell, then the command should be: `docker run --rm -it -v <ABSOLUTE_PATH_TO_CONTAINER>:<PATH_ON_CONTAINER> <IMAGE_NAME>` (`pwd` and variants do not seem to work). And the path must be formatted like: `C:\Users\tiffany.timbers\Documents\project\:/home/project` ## Mapping ports to containers with web apps [Docker documentation on Container networking](https://docs.docker.com/config/containers/container-networking/) If we want to use a graphical user interface (GUI) with our containers, for example to be able to use the computational environment in the container in an integrated development environment (IDE) such as RStudio or JupyterLab, then we need to map the correct port from the container to a port on our computer. To do this, we use the `-p` flag with `docker run`, specifying the port in the container on the left-hand side, and the port on your computer (the container/Docker host) on the right-hand side of `:`. For example, to run the `rocker/rstudio` container image we would type `-p 8787:8787` to map the ports as shown in the `docker run` command below: ``` docker run --rm -p 8787:8787 -e PASSWORD="apassword" rocker/rstudio:4.1.2 ``` Then to access the web app, we need to navigate a browser url to `http://localhost:<COMPUTER_PORT>`. In this case we would navigate to <http://localhost:8787> to use the RStudio server web app from the container. 
Note that we can only map one port on our computer (the container/Docker host) to a container at any given time. However, our computer (the container/Docker host) has many ports we can choose from to map. So if we wanted to run a second `rocker/rstudio` container, then we could map it to a different port as shown below: ``` docker run --rm -p 8788:8787 -e PASSWORD="apassword" rocker/rstudio:4.1.2 ``` When we do this, to run the app in a browser on our computer, we need to go to <http://localhost:8788> (instead of <http://localhost:8787>) to access this container as we mapped it to the `8788` port on our computer (and not `8787`). Another important note is that the container port is specific to the container, and the web app installed therein. So we cannot change that without changing the container image, and/or application installed therein. Where do you learn what port is exposed in a container image? The image documentation should specify this. For example, in the [`rocker/rstudio` container image documentation](https://hub.docker.com/r/rocker/rstudio) it states: <img src="img/rocker-rstudio-port-docs.png" width=600> *Source: <https://hub.docker.com/r/rocker/rstudio>* ## Running a Docker container non-interactively So far we have been running our containers interactively, but sometimes we want to automate further and run things non-interactively. We do this by dropping the `-it` flag from our `docker run` command as well as calling a command or a script after the docker image is specified. The general form for running things non-interactively is this: ``` docker run --rm -v PATH_ON_YOUR_COMPUTER:VOLUME_ON_CONTAINER DOCKER_IMAGE PROGRAM_TO_RUN PROGRAM_ARGUMENTS ``` For example, let's use the container to run a `cowsay::say` function call to print some ASCII art with a cute message! 
``` $ docker run --rm ttimbers/dockerfile-practice:v0.1.0 Rscript -e "library(cowsay); say('Snow again this week?', 'snowman')" ``` And if succesfful, we should get: ``` ----- Snow again this week? ------ \ \ _[_]_ (") >--( : )--< (__:__) [nosig] ``` Now that was a silly example, but this can be made powerful so that we can run an analysis pipeline, such as a Makefile non-interactively using Docker! Here's a demo we can try: https://github.com/ttimbers/data_analysis_pipeline_eg/tree/v4.0 #### Exercise 1: Download https://github.com/ttimbers/data_analysis_pipeline_eg/archive/v4.0.zip, unzip it and navigate to the root of the project directory, try to run the analysis via `make all`. #### Exercise 2: Now try to run the analysis using Docker via: ``` docker run --rm -v /$(pwd):/home/rstudio/data_analysis_eg ttimbers/data_analysis_pipeline_eg make -C /home/rstudio/data_analysis_eg all ``` *note - windows users must use Git Bash, set Docker to use Linux containers, and have shared their drives with Docker (see docs [here](https://token2shell.com/howto/docker/sharing-windows-folders-with-containers/)) for this to work* ## Docker commands The table below summarizes the Docker commands we have learned so far and can serve as a useful reference when we are using Docker: | command/flag | What it does | |--------------|-----------------------| | `pull` | Downloads a Docker image from Docker Hub | | `images` | Tells you what container images are installed on your machine | | `rmi` | Deletes a specified container image from your machine | | `ps -a` | Tells you what containers are running on your machine | | `stop` | Stops a specified running container | | `rm` | Removes a specified stopped container | | `run` | Launches a container from an image | | `-it` | Tells Docker to run the container interactively | | `--rm` | Makes a container ephemeral (deletes it upon exit) | | `-v` | Mounts a volume of your computer to the Docker container | | `-p` | Specifies the ports to map a web 
app to | | `-e` | Sets environment variables in the container (*e.g.*, PASSWORD="apassword") | | `exit` | Exits a Docker container| ## Building container images from `Dockerfile`'s - A `Dockerfile` is a plain text file that contains commands primarily about what software to install in the Docker image. This is the more trusted and transparent way to build Docker images. - Once we have created a `Dockerfile` we can build it into a Docker image. - Docker images are built in layers, and as such, `Dockerfiles` always start by specifiying a base Docker image that the new image is to be built on top off. - Docker containers are all Linux containers and thus use Linux commands to install software, however there are different flavours of Linux (e.g., Ubuntu, Debian, CentOs, RedHat, etc) and thus you need to use the right Linux install commands to match your flavour of container. For this course we will focus on Ubuntu- or Debian-based images and thus use `apt-get` as our installation program. ### Workflow for building a Dockerfile 1. Choose a base image to build off (from https://hub.docker.com/). 2. Create a `Dockerfile` named `Dockerfile` and save it in an appropriate project repository. Open that file and type `FROM <BASE_IMAGE> on the first line`. 3. In a terminal, type `docker run --rm -it <IMAGE_NAME>` and interactively try the install commands you think will work. Edit and try again until the install command works. 4. Write working install commands in the `Dockerfile`, preceeding them with `RUN` and save the `Dockerfile`. 5. After adding every 2-3 commands to your `Dockerfile`, try building the Docker image via `docker build --tag <TEMP_IMAGE_NAME> <PATH_TO_DOCKERFILE_DIRECTORY>`. 6. Once the entire Dockerfile works from beginning to end on your laptop, then you can finally move to building remotely (e.g., creating a trusted build on GitHub Actions). 
### Demo workflow for creating a `Dockfile` locally We will demo this workflow together to build a Docker image locally on our machines that has R and the `cowsay` R package installed. Let's start with the `debian:stable` image, so the first line of our `Dockerfile` should be as such: ``` FROM debian:stable ``` Now let's run the `debian:stable` image so we can work on our install commands to find some that work! ``` $ docker run --rm -it debian:stable ``` Now that we are in a container instance of the `debian:stable` Docker image, we can start playing around with installing things. To install things in the Debian flavour of Linux we use the command `apt-get`. We will do some demo's in class today, but a more comprehensive tutorial can be found [here](https://www.digitalocean.com/community/tutorials/how-to-manage-packages-in-ubuntu-and-debian-with-apt-get-apt-cache). To install R on Debian, we can figure out how to do this by following the CRAN documentation available [here](https://cran.r-project.org/bin/linux/debian/). First they recommend updating the list of available software package we can install with `apt-get` to us via the `apt-get update` command: ``` root@5d0f4d21a1f9:/# apt-get update ``` Next, they suggest the following commands to install R: ``` root@5d0f4d21a1f9:/# apt-get install r-base r-base-dev ``` OK, great! That seemed to have worked! Let's test it by trying out R! ``` root@5d0f4d21a1f9:/# R R version 3.5.2 (2018-12-20) -- "Eggshell Igloo" Copyright (C) 2018 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. 
Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ``` Awesome! This seemed to have worked! Let's exit R (via `q()`) and the Docker container (via `exit`). Then we can add these commands to the Dockerfile, proceeding them with `RUN` and try to build our image to ensure this works. Our `Dockerfile` so far: ``` FROM debian:stable RUN apt-get update RUN apt-get install r-base r-base-dev -y ``` ``` $ docker build --tag testr1 src ``` Wait! That didn't seem to work! Let's focus on the last two lines of the error message: ``` Do you want to continue? [Y/n] Abort. The command '/bin/sh -c apt-get install r-base r-base-dev' returned a non-zero code: 1 ``` Ohhhh, right! As we were interactively installing this, we were prompted to press "Y" on our keyboard to continue the installation. We need to include this in our Dockerfile so that we don't get this error. To do this we append the `-y` flag to the end of the line contianing `RUN apt-get install r-base r-base-dev`. Let's try building again! Great! Success! Now we can play with installing R packages! Let's start now with the test image we have built from our `Dockerfile`: ``` $ docker run -it --rm testr1 ``` Now while we are in the container interactively, we can try to install the R package via: ``` root@51f56d653892:/# Rscript -e "install.packages('cowsay')" ``` And it looks like it worked! Let's confirm by trying to call a function from the `cowsay` package in R: ``` root@51f56d653892:/# R > cowsay::say("Smart for using Docker are you", "yoda") ``` Great, let's exit the container, and add this command to our `Dockerfile` and try to build it again! 
``` root@51f56d653892:/# exit ``` Our `Dockerfile` now: ``` FROM debian:stable RUN apt-get update RUN apt-get install r-base r-base-dev -y RUN Rscript -e "install.packages('cowsay')" ``` Build the `Dockerfile` into an image: ``` $ docker build --tag testr1 src $ docker run -it --rm testr1 ``` Looks like a success, let's be sure we can use the `cowsay` package: ``` root@861487da5d00:/# R > cowsay::say("why did the chicken cross the road", "chicken") ``` Hurray! We did it! Now we can automate this build on GitHub, push it to Docker Hub and share this Docker image with the world! <img src="https://media.giphy.com/media/ZcKASxMYMKA9SQnhIl/giphy-downsized.gif"> Source: https://giphy.com/gifs/memecandy-ZcKASxMYMKA9SQnhIl ## Tips for installing things programmatically on Debian-flavoured Linux ### Installing things with `apt-get` Before you install things with `apt-get` you will want to update the list of packages that `apt-get` can see. We do this via `apt-get update`. Next, to install something with `apt-get` you will use the `apt-get install` command along with the name of the software. For example, to install the Git version control software we would type `apt-get install git`. Note however that we will be building our containers non-interactively, and so we want to preempt any questions/prompts the installation software we will get by including the answers in our commands. So for example, to `apt-get install` we append `--yes` to tell `apt-get` that yes we are happy to install the software we asked it to install, using the amount of disk space required to install it. If we didn't append this, the installation would stall out at this point waiting for our answer to this question. Thus, the full command to Git via `apt-get` looks like: ``` apt-get install --yes git ``` ### Breaking shell commands across lines If we want to break a single command across lines in the shell, we use the `\` character. 
For example, to reduce the long line below which uses `apt-get` to install the programs Git, Tiny Nano, Less, and wget: ``` apt-get install --yes git nano-tiny less wget ``` We can use `\` after each program, to break the long command across lines and make the command more readable (especially if there were even more programs to install). Similarly, we indent the lines after `\` to increase readability: ``` apt-get install --yes \ git \ nano-tidy \ less \ wget ``` ### Running commands only if the previous one worked Sometimes we don't want to run a command if the command that was run immediately before it failed. We can specify this in the shell using `&&`. For example, if we want to not run `apt-get` installation commands if `apt-get update` failed, we can write: ``` apt-get update && \ apt-get install --yes git ``` ## `Dockerfile` command summary Most common `Dockerfile` commands I use: | Command | Description | |---------|-------------| | FROM | States which base image the new Docker image should be built on top of | | RUN | Specifies that a command should be run in a shell | | ENV | Sets environment variables | | EXPOSE | Specifies the port the container should listen to at runtime | | COPY or ADD | adds files (or URL's in the case of ADD) to a container's filesystem | | ENTRYPOINT | Configure a container that will run as an executable | | WORKDIR | sets the working directory for any `RUN`, `CMD`, `ENTRYPOINT`, COPY and ADD instructions that follow it in the `Dockerfile` | And more here in the [Dockerfile reference](https://docs.docker.com/engine/reference/builder/). ## Choosing a base image for your Dockerfile <img src="https://themuslimtimesdotinfodotcom.files.wordpress.com/2018/10/newton-quotes-2.jpg?w=1334" width=700> Source: https://themuslimtimes.info/2018/10/25/if-i-have-seen-further-it-is-by-standing-on-the-shoulders-of-giants/ ### Good base images to work from for R or Python projects! 
| Image | Software installed | |-------|--------------------| | [rocker/tidyverse](https://hub.docker.com/r/rocker/tidyverse/) | R, R packages (including the tidyverse), RStudio, make | | [continuumio/anaconda3](https://hub.docker.com/r/continuumio/anaconda3/) | Python 3.7.4, Ananconda base package distribution, Jupyter notebook | | [jupyter/scipy-notebook](https://hub.docker.com/r/jupyter/scipy-notebook) | Includes popular packages from the scientific Python ecosystem. | For mixed language projects, I would recommend using the `rocker/tidyverse` image as the base and then installing Anaconda or miniconda as I have done here: https://github.com/UBC-DSCI/introduction-to-datascience/blob/b0f86fc4d6172cd043a0eb831b5d5a8743f29c81/Dockerfile#L19 This is also a nice tour de Docker images from the Jupyter core team: https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html#selecting-an-image ## Dockerfile FAQ: #### 1. Where does the `Dockerfile` live? The Dockerfile should live in the root directory of your project. #### 2. How do I make an image from a `Dockerfile`? There are 2 ways to do this! I use the first when developing my `Dockerfile` (to test quickly that it works), and then the second I use when I think I am "done" and want to have it archived on [Docker Hub](https://hub.docker.com/). 1. Build a Docker image locally on your laptop 2. Build a Docker image and push it to DockerHub using GitHub Actions, #### 3. How do I build an image locally on my laptop From the directory that contains your `Dockerfile` (usually your project root): ``` docker build --tag IMAGE_NAME:VERSION . ``` *note: `--tag` let's you name and version the Docker image. You can call this anything you want. The version number/name comes after the colon* After I build, I think try to `docker run ...` to test the image locally. If I don't like it, or it doesn't work, I delete the image with `docker rmi {IMAGE_NAME}`, edit my Dockerfile and try to build and run it again. 
## Build a Docker image from a Dockerfile on GitHub Actions Building a Docker image from a Dockerfile using an automated tool (e.g., DockerHub or GitHub Actions) lets others trust your image as they can clearly see which Dockerfile was used to build which image. We will do this in this course by using GitHub Actions (a continuous integration tool) because is provides a great deal of nuanced control over when to trigger the automated builds of the Docker image, and how to tag them. An example GitHub repository that uses GitHub Actions to build a Docker image from a Dockerfile and publish it on DockerHub is available here: [https://github.com/ttimbers/gha_docker_build](https://github.com/ttimbers/gha_docker_build) We will work through a demonstration of this now starting here: [https://github.com/ttimbers/dockerfile-practice](https://github.com/ttimbers/dockerfile-practice) ## Version Docker images and report software and package versions It is easier to create a Docker image from a Dockerfile and tag it (or use it's digest) than to control the version of each thing that goes into your Docker image. - tags are human readable, however they can be associated with different builds of the image (potentially using different Dockerfiles...) - digests are not human readable, but specify a specific build of an image Example of how to pull using a tag: ``` docker pull ttimbers/dockerfile-practice:v1.0 ``` Example of how to pull using a digest: ``` docker pull ttimbers/dockerfile-practice@sha256:cc512c9599054f24f4020e2c7e3337b9e71fd6251dfde5bcd716dc9b1f8c3a73 ``` Tags are specified when you build on Docker Hub on the Builds tab under the Configure automated builds options. Digests are assigned to a build. You can see the digests on the Tags tab, by clicking on the "Digest" link for a specific tag of the image. 
### How to get the versions of your software in your container Easiest is to enter the container interactively and poke around using the following commands: - `python --version` and `R --version` to find out the versions of Python and R, respectively - `pip freeze` or `conda list` in the bash shell to find out Python package versions - Enter R and load the libraries used in your scripts, then use `sessionInfo()` to print the package versions ### But I want to control the versions! ### How to in R: #### The Rocker team's strategy This is not an easy thing, but the Rocker team has made a concerted effort to do this. Below is their strategy: > Using the R version tag will naturally lock the R version, and also lock the install date of any R packages on the image. For example, rocker/tidyverse:3.3.1 Docker image will always rebuild with R 3.3.1 and R packages installed from the 2016-10-31 MRAN snapshot, corresponding to the last day that version of R was the most recent release. Meanwhile rocker/tidyverse:latest will always have both the latest R version and latest versions of the R packages, built nightly. See [VERSIONS.md](https://github.com/rocker-org/rocker-versioned/blob/master/VERSIONS.md) for details, but in short they use the line below to lock the R version (or view in r-ver Dockerfile [here](https://github.com/rocker-org/rocker-versioned/blob/c4a9f540d4c66a6277f281be6dcfe55d3cb40ec0/r-ver/3.6.1.Dockerfile#L76) for more context): ``` && curl -O https://cran.r-project.org/src/base/R-3/R-${R_VERSION}.tar.gz \ ``` And this line to specify the CRAN snapshot from which to grab the R packages (or view in r-ver Dockerfile [here](mhttps://github.com/rocker-org/rocker-versioned/blob/c4a9f540d4c66a6277f281be6dcfe55d3cb40ec0/r-ver/3.6.1.Dockerfile#L121) for more context): ``` && Rscript -e "install.packages(c('littler', 'docopt'), repo = '$MRAN')" \ ``` ### A newer thing that might be useful! 
You can pair [renv](https://rstudio.github.io/renv/articles/docker.html?q=docker#running-docker-containers-with-renv) with Docker - this is new and will be covered in tutorial this week! 🎉 ### How to in Python: Python version: - `conda` to specify an install of specific Python version, either when downloading (see example [here](https://github.com/ContinuumIO/docker-images/blob/8e10242c6d7804a0e991a9d9d758e25b340f4fce/miniconda3/debian/Dockerfile#L10), or after downloading with `conda install python=3.6`). - Or you can install a specific version of Python yourself, as they do in the Python official images (see [here](https://github.com/docker-library/python/blob/master/3.7/stretch/slim/Dockerfile) for example), but this is more complicated. For Python packages, there are a few tools: - conda (via `conda install scipy=0.15.0` for example) - pip (via `pip install scipy=0.15.0` for example) ### Take home messages: - At a minimum, tag your Docker images or reference image digests - If you want to version installs inside the container, use base images that version R & Python, and add what you need on top in a versioned manner! ## Docker compose Docker compose is a tool that uses a `YAML` file to configure/specify how you want to run one or more Docker containers. 
To use Docker compose, we create a `docker-compose.yml` file that specifies things such as: - the Docker images (and version) - the ports - volume mapping - any environment variables Then to run the Docker container using the specifications in the `docker-compose.yml` file, we run: ``` docker-compose run --rm service command ``` - `service` is a name you give to your application configurations in the `docker-compose.yml` - `command` is some command or script you would like to run (e.g., `make all`) Here is an example `docker-compose.yml`: ``` services: analysis-env: image: ttimbers/bc_predictor:v4.0 ports: - "8787:8787" volumes: - .:/home/rstudio/introduction-to-datascience environment: PASSWORD: password ``` And to run the container and the analysis we would type: ``` docker-compose run --rm analysis-env make -C /home/rstudio/breast_cancer_predictor all ``` This means we do not have to type out the: - ports - volume mapping - environment variables - and potentially more! ## Where to next? - Testing code written for data science
github_jupyter
<a href="https://colab.research.google.com/github/flower-go/DiplomaThesis/blob/master/sentiment_example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Helper code ``` #clone repo !git clone https://github.com/flower-go/DiplomaThesis.git !pip install ufal.morphodita !pip install -r "DiplomaThesis/requirements.txt" !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/forms.vectors-w5-d300-ns5.16b.npz !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/sentiment_analysis.py-2021-07-02_181019-a=32,bs=1,b=...index !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/sentiment_analysis.py-2021-07-02_181019-a=32,bs=1,b=...data-00000-of-00001 !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/sentiment_analysis.py-2021-07-05_115521-a=16,bs=2,b=...index !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/sentiment_analysis.py-2021-07-05_115521-a=16,bs=2,b=...data-00000-of-00001 !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/sentiment_analysis.py-2021-06-08_234151-a=12,bs=4,b=...index !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/sentiment_analysis.py-2021-06-08_234151-a=12,bs=4,b=...data-00000-of-00001 !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/sentiment_analysis.py-2021-06-08_172844-a=12,bs=4,b=...index !wget --no-check-certificate aic.ufal.mff.cuni.cz/~doubrap1/sentiment_analysis.py-2021-06-08_172844-a=12,bs=4,b=...data-00000-of-00001 ``` # Creating model and input data ``` s_16 = "sentiment_analysis.py-2021-07-02_181019-a=32,bs=1,b=.." csfd_69 = "sentiment_analysis.py-2021-07-05_115521-a=16,bs=2,b=.." mall_63 = "sentiment_analysis.py-2021-06-08_234151-a=12,bs=4,b=.." fb_45 = "sentiment_analysis.py-2021-06-08_172844-a=12,bs=4,b=.." 
import os os.chdir("/content/DiplomaThesis/code/sentiment") !pip install --upgrade tensorflow !pip install --upgrade tensorflow-gpu import os os.chdir("DiplomaThesis/code/sentiment") import sentiment_analysis as sen pokusny_vstup = "Velmi příjemný zážitek.\nJe to na hovno.\nRelativně dobrý." ``` You can also upload own file with each text for classification separated by a newline. In that case, do not run followinf cell or/and change the value of the argument "predict". You can also change the model by changing the argument "model". ``` with open("pokusny_vstup", "w") as f: f.write(pokusny_vstup) sen.main(["--bert", "../morphodita-research/robeczech/noeol-210323/", "--model", "./" + s_16, "--predict", "pokusny_vstup"]) !cat ./pokusny_vstup_vystup | sed 's/^2/positive/' | sed 's/^1/negative/' | sed 's/^0/neutral/' ```
github_jupyter
# IBM Cloud Pak for Data - Multi-Cloud Virtualization Hands-on Lab ## Introduction Welcome to the IBM Cloud Pak for Data Multi-Cloud Virtualization Hands on Lab. In this lab you analyze data from multiple data sources, from across multiple Clouds, without copying data into a warehouse. This hands-on lab uses live databases, were data is “virtually” available through the IBM Cloud Pak for Data Virtualization Service. This makes it easy to analyze data from across your multi-cloud enterprise using tools like, Jupyter Notebooks, Watson Studio or your favorite reporting tool like Cognos. ### Where to find this sample online You can find a copy of this notebook on GITHUB at https://github.com/Db2-DTE-POC/CPDDVLAB. ### The business problem and the landscape The Acme Company needs timely analysis of stock trading data from multiple source systems. Their data science and development teams needs access to: * Customer data * Account data * Trading data * Stock history and Symbol data <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/CPDDVLandscape.png"> The data sources are running on premises and on the cloud. In this example many of the databases are also running on OpenShift but they could be managed, virtual or bare-metal cloud installations. IBM Cloud Pak for Data doesn't care. Enterprise DB (Postgres) is also running in the Cloud. Mongo and Informix are running on premises. Finally, we also have a VSAM file on zOS leveraging the Data Virtualization Manager for zOS. To simplify access for Data Scientists and Developers the Acme team wants to make all their data look like it is coming from a single database. They also want to combine data to create simple to use tables. In the past, Acme built a dedicated data warehouse, and then created ETL (Export, Transform and Load) job to move data from each data source into the warehouse were it could be combined. Now they can just virtualize your data without moving it. 
### In this lab you learn how to: * Sign into IBM Cloud Pak for Data using your own Data Engineer and Data Scientist (User) userids * Connect to different data sources, on premises and across a multi-vendor Cloud * Make remote data from across your multi-vendor enterprise look and act like local tables in a single database * Make combining complex data and queries simple even for basic users * Capture complex SQL in easy to consume VIEWs that act just like simple tables * Ensure that users can securely access even complex data across multiple sources * Use roles and priviledges to ensure that only the right user may see the right data * Make development easy by connecting to your virtualized data using Analytic tools and Application from outside of IBM Cloud Pak for Data. ## Getting Started ### Using Jupyter notebooks You are now officially using a Jupyter notebook! If this is your first time using a Jupyter notebook you might want to go through the [An Introduction to Jupyter Notebooks](http://localhost:8888/notebooks/An_Introduction_to_Jupyter_Notebooks.ipynb). The introduction shows you some of the basics of using a notebook, including how to create the cells, run code, and save files for future use. Jupyter notebooks are based on IPython which started in development in the 2006/7 timeframe. The existing Python interpreter was limited in functionality and work was started to create a richer development environment. By 2011 the development efforts resulted in IPython being released (http://blog.fperez.org/2012/01/ipython-notebook-historical.html). Jupyter notebooks were a spinoff (2014) from the original IPython project. IPython continues to be the kernel that Jupyter runs on, but the notebooks are now a project on their own. Jupyter notebooks run in a browser and communicate to the backend IPython server which renders this content. 
These notebooks are used extensively by data scientists and anyone wanting to document, plot, and execute their code in an interactive environment. The beauty of Jupyter notebooks is that you document what you do as you go along. ### Connecting to IBM Cloud Pak for Data For this lab you will be assigned two IBM Cloud Pak for Data User IDs: A Data Engineer userid and and end-user userid. Check with the lab coordinator which userid and passwords you should use. * **Engineer:** * ID: LABDATAENGINEERx * PASSWORD: xxx * **User:** * ID: LABUSERx * PASSWORD: xxx To get started, sign in using you Engineer id: 1. Right-click the following link and select **open link in new window** to open the IBM Cloud Pak for Data Console: https://services-uscentral.skytap.com:9152/ 1. Organize your screen so that you can see both this notebook as well as the IBM Cloud Pak for Data Console at the same time. This will make it much easier for you to complete the lab without switch back and forth between screens. 2. Sign in using your Engineer userid and password 3. Click the icon at the very top right of the webpage. It will look something like this: <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.06.10 EngineerUserIcon.png"> 4. Click **Profile and settings** 5. Click **Permissions** and review the user permissions for this user 6. Click the **three bar menu** at the very top left of the console webpage <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/2.42.03 Three Bar.png"> 7. Click **Collect** if the Collect menu isn't already open 7. Click **Data Virtualization**. The Data Virtualization user interface is displayed <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.06.12 CollectDataVirtualization.png"> 8. 
Click the caret symbol beside
<img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.15.31 Data Sources Spider.png"> This displays the Data Source Graph with 8 active data sources: * 4 Db2 Family Databases hosted on premises, IBM Cloud, Azure and AWS * 1 EDB Postgres Database on Azure * 1 zOS VSAM file * 1 Informix Database running on premises **We are not going to add a new data source** but just go through the steps so you can see how to add additional data sources. 1. Click **+ Add** at the right of the console screen 2. Select **Add data source** from the menu You can see a history of other data source connection information that was used before. This history is maintain to make reconnecting to data sources easier and faster. 3. Click **Add connection** 4. Click the field below **Connection type** 5. Scroll through all the **available data sources** to see the available connection types 6. Select **different data connection types** from the list to see the information required to connect to a new data source. At a minumum you typically need the host URL and port address, database name, userid and password. You can also connect using an SSL certificate that can be dragged and dropped directly into the console interface. 7. Click **Cancel** to return to the previous list of connections to add 8. Click **Cancel** again to return to the list of currently connected data sources ### Exploring the available data Now that you understand how to connect to data sources you can start virtualizing data. Much of the work has already been done for you. IBM Cloud Pak for Data searches through the available data sources and compiles a single large inventory of all the tables and data available to virtualize in IBM Cloud Pak for Data. 1. Click the Data Virtualization menu and select **Virtualize** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.13.07 Menu Virtualize.png"> 2. Check the total number of available tables at the top of the list. 
There should be well over 500 available. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.15.50 Available Tables.png"> 3. Enter "STOCK" into the search field and hit **Enter**. Any tables with the string **STOCK** in the table name, the table schema or with a colunn name that includes **STOCK** appears in the search results. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.39.43 Find STOCK.png"> 4. Hover your mouse pointer to the far right side to the search results table. An **eye** icon will appear on each row as you move your mouse. 5. Click the **eye** icon beside one table. This displays a preview of the data in the selected table. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/3.26.54 Eye.png"> 6. Click **X** at the top right of the dialog box to return to the search results. ### Creating New Tables So that each user in this lab can have their own data to virtualize you will create your own table in a remote database. In this part of the lab you will use this Jupyter notebook and Python code to connect to a source database, create a simple table and populate it with data. IBM Cloud Pak for Data will automatically detect the change in the source database and make the new table available for virtualization. In this example, you connect to the Db2 Warehouse database running in IBM Cloud Pak for Data but the database can be anywhere. All you need is the connection information and authorized credentials. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/Db2CPDDatabase.png"> The first step is to connect to one of our remote data sources directly as if we were part of the team builing a new business application. Since each lab user will create their own table in their own schema the first thing you need to do is update and run the cell below with your engineer name. 1. In this Juypyter notebook, click on the cell below 2. 
Update the lab number in the cell below to your assigned user and lab number 3. Click **Run** from the Jupyter notebook menu above ``` # Setting your userID labnumber = 0 engineer = 'DATAENGINEER' + str(labnumber) print('variable engineer set to = ' + str(engineer)) ``` The next part of the lab relies on a Jupyter notebook extension, commonly refer to as a "magic" command, to connect to a Db2 database. To use the commands you load load the extension by running another notebook call db2 that contains all the required code <pre> &#37;run db2.ipynb </pre> The cell below loads the Db2 extension directly from GITHUB. Note that it will take a few seconds for the extension to load, so you should generally wait until the "Db2 Extensions Loaded" message is displayed in your notebook. 1. Click the cell below 2. Click **Run**. When the cell is finished running, In[*] will change to In[2] ``` # !wget https://raw.githubusercontent.com/IBM/db2-jupyter/master/db2.ipynb !wget -O db2.ipynb https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/db2.ipynb %run db2.ipynb print('db2.ipynb loaded') ``` #### Connecting to Db2 Before any SQL commands can be issued, a connection needs to be made to the Db2 database that you will be using. The Db2 magic command tracks whether or not a connection has occured in the past and saves this information between notebooks and sessions. When you start up a notebook and issue a command, the program will reconnect to the database using your credentials from the last session. In the event that you have not connected before, the system will prompt you for all the information it needs to connect. This information includes: - Database name - Hostname - PORT - Userid - Password Run the next cell. 
#### Connecting to Db2 ``` # Connect to the Db2 Warehouse on IBM Cloud Pak for Data Database from inside of IBM Cloud Pak for Data database = 'bludb' user = 'user999' password = 't1cz?K9-X1_Y-2Wi' host = 'openshift-skytap-nfs-woker-5.ibm.com' port = '31928' %sql CONNECT TO {database} USER {user} USING {password} HOST {host} PORT {port} ``` To check that the connection is working, run the following cell. It lists the tables in the database in the **DVDEMO** schema. Only the first 5 tables are listed. ``` %sql select TABNAME, OWNER from syscat.tables where TABSCHEMA = 'DVDEMO' ``` Now that you can successfully connect to the database, you are going to create two tables with the same name and column across two different schemas. In the following steps of the lab you are going to virtualize these tables in IBM Cloud Pak for Data and fold them together into a single table. The next cell sets the default schema to your engineer name followed by 'A'. Notice how you can set a Python variable and substitute it into the SQL Statement in the cell. The **-e** option echoes the command. Run the next cell. ``` # Build the schema and table names from your engineer id and lab number schema_name = engineer+'A' table_name = 'DISCOVER_'+str(labnumber) print("") print("Lab #: "+str(labnumber)) print("Schema name: " + str(schema_name)) print("Table name: " + str(table_name)) # Fixed: the original referenced an undefined {schema} variable; the value set above is schema_name %sql -e SET CURRENT SCHEMA {schema_name} ``` Run the next cell to create a table with a single INTEGER column containing values from 1 to 10. The **-q** flag in the %sql command suppresses any warning message if the table already exists. ``` sqlin = f''' DROP TABLE {table_name}; CREATE TABLE {table_name} (A INT); INSERT INTO {table_name} VALUES 1,2,3,4,5,6,7,8,9,10; SELECT * FROM {table_name}; ''' %sql -q {sqlin} ``` Run the next two cells to create the same table in a schema ending in **B**. It is populated with values from 11 to 20. 
``` schema_name = engineer+'B' print("") print("Lab #: "+str(labnumber)) print("Schema name: " + str(schema_name)) print("Table name: " + str(table_name)) %sql -e SET CURRENT SCHEMA {schema_name} sqlin = f''' DROP TABLE {table_name}; CREATE TABLE {table_name} (A INT); INSERT INTO {table_name} VALUES 11,12,13,14,15,16,17,18,19,20; SELECT * FROM {table_name}; ''' %sql -q {sqlin} ``` Run the next cell to see all the tables in the database you just created. ``` %sql SELECT TABSCHEMA, TABNAME FROM SYSCAT.TABLES WHERE TABNAME = '{table_name}' ``` Run the next cell to see all the tables in the database that are like **DISCOVER**. You may see tables created by other people running the lab. ``` %sql SELECT TABSCHEMA, TABNAME FROM SYSCAT.TABLES WHERE TABNAME LIKE 'DISCOVER%' ``` ### Virtualizing your new Tables Now that you have created two new tables you can virtualize that data and make it look like a single table in your database. 1. Return to the IBM Cloud Pak for Data Console 2. Click **Virtualize** in the Data Virtualization menu if you are not still in the Virtualize page <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.13.07 Menu Virtualize.png"> 3. Enter your current userid, i.e. DATAENGINEER1 in the search bar and hit **Enter**. Now you can see that your new tables have automatically been discovered by IBM Cloud Pak for Data. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.31.01 Available Discover Tables.png"> 4. Select the two tables you just created by clicking the **check box** beside each table. Make sure you only select those for your LABDATAENGINEER schema. 5. Click **Add to Cart**. Notice that the number of items in your cart is now **2**. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.33.11 Available ENGINEER Tables.png"> 6. Click **View Cart** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.33.31 View Cart(2).png"> 7. 
Change the name of your two tables from DISCOVER to **DISCOVERA** and **DISCOVERB**. These are the new names that you will be able to use to find your tables in the Data Virtualization database. Don't change the Schema name. It is unique to your current userid. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.34.21 Assign to Project.png"> 9. Click the **back arrow** beside **Review cart and virtualize tables**. We are going to add one more thing to your cart. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.34.30 Back Arrow Icon.png"> 10. Click the checkbox beside **Automatically group tables**. Notice how all the tables called **DISCOVER** have been grouped together into a single entry. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.35.18 Automatically Group Available Tables.png"> 11. Select the row were all the DISCOVER tables have been grouped together 12. Click **Add to cart**. 13. Click **View cart** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.35.28 View cart(3).png"> You should now see three items in your cart. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.35.57 Cart with Fold.png"> 14. Hover over the elipsis icon at the right side of the list for the **DISCOVER** table <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.34.44 Elipsis.png"> 15. Select **Edit grouped tables** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.36.11 Cart Elipsis Menu.png"> 16. Deselect all the tables except for those in one of the schemas you created. You should now have two tables selected. 17. Click **Apply** 17. Change the name of the new combined table to **DISCOVERFOLD** 18. Select the **Data Virtualization Hands in Lab** project from the drop down list. 20. Click **Virtualize**. You see that three new virtual tables have been created. 
<img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.36.49 Virtualize.png"> The Virtual tables created dialog box opens. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.37.24 Virtual tables created.png"> 21. Click **View my virtualized data**. You return to the My virtualized data page. ### Working with your new tables 1. Enter DISCOVER_# where # is your lab number <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.37.55 Find DISCOVER.png"> You should see the three virtual tables you just created. Notice that you do not see tables that other users have created. By default, Data Engineers only see virtualized tables they have virtualized or virtual tables where they have been given access by other users. 2. Click the elipsis (...) beside your **DISCOVERFOLD_#** table and select **Preview** to confirm that it contains 20 rows. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/4.32.01 Elipsis Fold.png"> 3. Click **SQL Editor** from the Data Virtualization menu <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.13.33 Menu SQL editor.png"> 4. Click **Blank** to create a new blank SQL Script <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.38.24 + Blank.png"> 4. Enter **SELECT * FROM DISCOVERFOLD_#;** into the SQL Editor <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.38.44 SELECT*.png"> 5. Click **Run All** at the bottom left of the SQL Editor window. You should see 20 rows returned in the result. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.38.52 Run all.png"> Notice that you didn't have to specify the schema for your new virtual tables. The SQL Editor automatically uses the schema associated with your userid that was used when you created your new tables. 
Now you can: * Create a connection to a remote data source * Make a new or existing table in that remote data source look and act like a local table * Fold data from different tables in the same data source or access data sources by folding it together into a single virtual table ## Gaining Insight from Virtualized Data Now that you understand the basics of Data Virtualization you can explore how easy it is to gain insight across multiple data sources without moving data. In the next set of steps you connect to virtualized data from this notebook using your LABDATAENGINEER userid. You can use the same techniques to connect to virtualized data from applications and analytic tools from outside of IBM Cloud Pak for Data. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/ConnectingTotheAnalyticsDatabase.png"> Connecting to all your virtualized data is just like connecting to a single database. All the complexity of dozens of tables across multiple databases on different on premises and cloud providers is now as simple as connecting to a single database and querying a table. We are going to connect to the IBM Cloud Pak for Data Virtualization database in exactly the same way we connected to a Db2 database earlier in this lab. However we need to change the detailed connection information. 1. Click **Connection Details** in the Data Virtualization menu <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.13.44 Menu connection details.png"> 2. Click **Without SSL** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.14.29 Connection details.png"> 3. Copy the **User ID** by highlighting it with your mouse, right click and select **Copy** 4. Paste the **User ID** into the next cell in this notebook where **user=** (see below) between the quotation marks <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.54.27 Notebook Login.png"> 5. 
Click **Service Settings** in the Data Virtualization menu <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.14.05 Menu Service settings.png"> 6. Look for the Access Information section of the page <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.14.15 Access information.png"> 6. Click **Show** to see the password. Highlight the password and copy using the right-click menu 7. Paste the **password** into the cell below between the quotation marks using the righ click paste. 8. Run the cell below to connect to the Data Virtualization database. #### Connecting to Data Virtualization SQL Engine ``` # Connect to the IBM Cloud Pak for Data Virtualization Database from inside CPD database = 'bigsql' user = 'userxxxx' password = 'xxxxxxxxxxxxxx' host = 'openshift-skytap-nfs-lb.ibm.com' port = '32080' %sql CONNECT TO {database} USER {user} USING {password} HOST {host} PORT {port} ``` ### Stock Symbol Table #### Get information about the stocks that are in the database **System Z - VSAM** This table comes from a VSAM file on zOS. IBM Cloud Pak for Data Virtualization works together with Data Virtualization Manager for zOS to make this looks like a local database table. For the following examples you can substitute any of the symbols below. ``` %sql -a select * from DVDEMO.STOCK_SYMBOLS ``` ### Stock History Table #### Get Price of a Stock over the Year Set the Stock Symbol in the line below and run the cell. This information is folded together with data coming from two identical tables, one on Db2 database and on on and Informix database. Run the next two cells. Then pick a new stock symbol from the list above, enter it into the cell below and run both cells again. 
**CP4D - Db2, Skytap - Informix** ``` stock = 'AXP' print('variable stock set to = ' + str(stock)) %%sql -pl SELECT WEEK(TX_DATE) AS WEEK, OPEN FROM FOLDING.STOCK_HISTORY WHERE SYMBOL = :stock AND TX_DATE != '2017-12-01' ORDER BY WEEK(TX_DATE) ASC ``` #### Trend of Three Stocks This chart shows three stock prices over the course of a year. It uses the same folded stock history information. **CP4D - Db2, Skytap - Informix** ``` stocks = ['INTC','MSFT','AAPL'] %%sql -pl SELECT SYMBOL, WEEK(TX_DATE), OPEN FROM FOLDING.STOCK_HISTORY WHERE SYMBOL IN (:stocks) AND TX_DATE != '2017-12-01' ORDER BY WEEK(TX_DATE) ASC ``` #### 30 Day Moving Average of a Stock Enter the Stock Symbol below to see the 30 day moving average of a single stock. **CP4D - Db2, Skytap - Informix** ``` stock = 'AAPL' sqlin = \ """ SELECT WEEK(TX_DATE) AS WEEK, OPEN, AVG(OPEN) OVER ( ORDER BY TX_DATE ROWS BETWEEN 15 PRECEDING AND 15 FOLLOWING) AS MOVING_AVG FROM FOLDING.STOCK_HISTORY WHERE SYMBOL = :stock ORDER BY WEEK(TX_DATE) """ df = %sql {sqlin} txdate= df['WEEK'] sales = df['OPEN'] avg = df['MOVING_AVG'] plt.xlabel("Day", fontsize=12); plt.ylabel("Opening Price", fontsize=12); plt.suptitle("Opening Price and Moving Average of " + stock, fontsize=20); plt.plot(txdate, sales, 'r'); plt.plot(txdate, avg, 'b'); plt.show(); ``` #### Trading volume of INTC versus MSFT and AAPL in first week of November **CP4D - Db2, Skytap - Informix** ``` stocks = ['INTC','MSFT','AAPL'] %%sql -pb SELECT SYMBOL, DAY(TX_DATE), VOLUME/1000000 FROM FOLDING.STOCK_HISTORY WHERE SYMBOL IN (:stocks) AND WEEK(TX_DATE) = 45 ORDER BY DAY(TX_DATE) ASC ``` #### Show Stocks that Represent at least 3% of the Total Purchases during Week 45 **CP4D - Db2, Skytap - Informix** ``` %%sql -pie WITH WEEK45(SYMBOL, PURCHASES) AS ( SELECT SYMBOL, SUM(VOLUME * CLOSE) FROM FOLDING.STOCK_HISTORY WHERE WEEK(TX_DATE) = 45 AND SYMBOL <> 'DJIA' GROUP BY SYMBOL ), ALL45(TOTAL) AS ( SELECT SUM(PURCHASES) * .03 FROM WEEK45 ) SELECT SYMBOL, PURCHASES 
FROM WEEK45, ALL45 WHERE PURCHASES > TOTAL ORDER BY SYMBOL, PURCHASES ``` ### Stock Transaction Table #### Show Transactions by Customer This next two examples uses data folded together from three different data sources representing three different trading organizations to create a combined of a single customer's stock trades. **AWS - Db2, Azure - EDB (Postgres), Azure - Db2** ``` %%sql -a SELECT * FROM FOLDING.STOCK_TRANSACTIONS_DV WHERE CUSTID = '107196' FETCH FIRST 10 ROWS ONLY ``` #### Bought/Sold Amounts of Top 5 stocks **AWS - Db2, Azure - EDB (Postgres), Azure - Db2** ``` %%sql -a WITH BOUGHT(SYMBOL, AMOUNT) AS ( SELECT SYMBOL, SUM(QUANTITY) FROM FOLDING.STOCK_TRANSACTIONS_DV WHERE QUANTITY > 0 GROUP BY SYMBOL ), SOLD(SYMBOL, AMOUNT) AS ( SELECT SYMBOL, -SUM(QUANTITY) FROM FOLDING.STOCK_TRANSACTIONS_Dv WHERE QUANTITY < 0 GROUP BY SYMBOL ) SELECT B.SYMBOL, B.AMOUNT AS BOUGHT, S.AMOUNT AS SOLD FROM BOUGHT B, SOLD S WHERE B.SYMBOL = S.SYMBOL ORDER BY B.AMOUNT DESC FETCH FIRST 5 ROWS ONLY ``` ### Customer Accounts #### Show Top 5 Customer Balance These next two examples use data folded from systems running on AWS and Azure. **AWS - Db2, Azure - EDB (Postgres), Azure - Db2** ``` %%sql -a SELECT CUSTID, BALANCE FROM FOLDING.ACCOUNTS_DV ORDER BY BALANCE DESC FETCH FIRST 5 ROWS ONLY ``` #### Show Bottom 5 Customer Balance **AWS - Db2, Azure - EDB (Postgres), Azure - Db2** ``` %%sql -a SELECT CUSTID, BALANCE FROM FOLDING.ACCOUNTS_DV ORDER BY BALANCE ASC FETCH FIRST 5 ROWS ONLY ``` ### Selecting Customer Information from MongoDB The MongoDB database (running on premises) has customer information in a document format. In order to materialize the document data as relational tables, a total of four virtual tables are generated. The following query shows the tables that are generated for the Customer document collection. 
``` %sql LIST TABLES FOR SCHEMA MONGO_ONPREM ``` The tables are all connected through the CUSTOMERID field, which is based on the generated _id of the main CUSTOMER colllection. In order to reassemble these tables into a document, we must join them using this unique identifier. An example of the contents of the CUSTOMER_CONTACT table is shown below. ``` %sql -a SELECT * FROM MONGO_ONPREM.CUSTOMER_CONTACT FETCH FIRST 5 ROWS ONLY ``` A full document record is shown in the following SQL statement which joins all of the tables together. ``` %%sql -a SELECT C.CUSTOMERID AS CUSTID, CI.FIRSTNAME, CI.LASTNAME, CI.BIRTHDATE, CC.CITY, CC.ZIPCODE, CC.EMAIL, CC.PHONE, CC.STREET, CC.STATE, CP.CARD_TYPE, CP.CARD_NO FROM MONGO_ONPREM.CUSTOMER C, MONGO_ONPREM.CUSTOMER_CONTACT CC, MONGO_ONPREM.CUSTOMER_IDENTITY CI, MONGO_ONPREM.CUSTOMER_PAYMENT CP WHERE CC.CUSTOMER_ID = C."_ID" AND CI.CUSTOMER_ID = C."_ID" AND CP.CUSTOMER_ID = C."_ID" FETCH FIRST 3 ROWS ONLY ``` ### Querying All Virtualized Data In this final example we use data from each data source to answer a complex business question. "What are the names of the customers in Ohio, who bought the most during the highest trading day of the year (based on the Dow Jones Industrial Index)?" 
**AWS Db2, Azure EDB, Azure Db2, Skytap MongoDB, CP4D Db2Wh, Skytap Informix** ``` %%sql WITH MAX_VOLUME(AMOUNT) AS ( SELECT MAX(VOLUME) FROM FOLDING.STOCK_HISTORY WHERE SYMBOL = 'DJIA' ), HIGHDATE(TX_DATE) AS ( SELECT TX_DATE FROM FOLDING.STOCK_HISTORY, MAX_VOLUME M WHERE SYMBOL = 'DJIA' AND VOLUME = M.AMOUNT ), CUSTOMERS_IN_OHIO(CUSTID) AS ( SELECT C.CUSTID FROM TRADING.CUSTOMERS C WHERE C.STATE = 'OH' ), TOTAL_BUY(CUSTID,TOTAL) AS ( SELECT C.CUSTID, SUM(SH.QUANTITY * SH.PRICE) FROM CUSTOMERS_IN_OHIO C, FOLDING.STOCK_TRANSACTIONS_DV SH, HIGHDATE HD WHERE SH.CUSTID = C.CUSTID AND SH.TX_DATE = HD.TX_DATE AND QUANTITY > 0 GROUP BY C.CUSTID ) SELECT LASTNAME, T.TOTAL FROM MONGO_ONPREM.CUSTOMER_IDENTITY CI, MONGO_ONPREM.CUSTOMER C, TOTAL_BUY T WHERE CI.CUSTOMER_ID = C."_ID" AND C.CUSTOMERID = CUSTID ORDER BY TOTAL DESC ``` ### Seeing where your Virtualized Data is coming from You may eventually work with a complex Data Virtualization system. As an administrator or a Data Scientist you may need to understand where data is coming from. Fortunately, the Data Virtualization engine is based on Db2. It includes the same catalog of information as does Db2 with some additional features. If you want to work backwards and understand where each of your virtualized tables comes from, the information is included in the **SYSCAT.TABOPTIONS** catalog table. ``` %%sql SELECT TABSCHEMA, TABNAME, SETTING FROM SYSCAT.TABOPTIONS WHERE OPTION = 'SOURCELIST' AND TABSCHEMA <> 'QPLEXSYS'; %%sql SELECT * from SYSCAT.TABOPTIONS; ``` The table includes more information than you need to answer the question of where is my data coming from. The query below only shows the rows that contain the information of the source of the data ('SOURCELIST'). Notice that tables that have been folded together from several tables includes each of the data source information seperated by a semi-colon. 
``` %%sql SELECT TABSCHEMA, TABNAME, SETTING FROM SYSCAT.TABOPTIONS WHERE OPTION = 'SOURCELIST' AND TABSCHEMA <> 'QPLEXSYS'; %%sql SELECT TABSCHEMA, TABNAME, SETTING FROM SYSCAT.TABOPTIONS WHERE TABSCHEMA = 'DVDEMO'; ``` In this last example, you can search for any virtualized data coming from a Postgres database by searching for **SETTING LIKE '%POST%'**. ``` %%sql SELECT TABSCHEMA, TABNAME, SETTING FROM SYSCAT.TABOPTIONS WHERE OPTION = 'SOURCELIST' AND SETTING LIKE '%POST%' AND TABSCHEMA <> 'QPLEXSYS'; ``` What is missing is additional detail for each connection. For example all we can see in the table above is a connection. You can find that detail in another table: **QPLEXSYS.LISTRDBC**. In the last cell, you can see that CID DB210113 is included in the STOCK_TRANSACTIONS virtual table. You can find the details on that copy of Db2 by running the next cell. ``` %%sql SELECT CID, USR, SRCTYPE, SRCHOSTNAME, SRCPORT, DBNAME, IS_DOCKER FROM QPLEXSYS.LISTRDBC; ``` ## Advanced Data Virtualization Now that you have seen how powerful and easy it is to gain insight from your existing virtualized data, you can learn more about how to do advanced data virtualization. You will learn how to join different remote tables together to create a new virtual table and how to capture complex SQL into VIEWs. ### Joining Tables Together The virtualized tables below come from different data sources on different systems. We can combine them into a single virtual table. 
* Select **My virtualized data** from the Data Virtualization menu <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.13.20 Menu My virtual data.png"> * Enter **Stock** in the find field and hit enter <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.39.43 Find STOCK.png"> * Select table **STOCK_TRANSACTIONS** in the **FOLDING** schema * Select table **STOCK_SYMBOLS** in the **DVDEMO** schema <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.40.18 Two STOCK seleted.png"> * Click **Join View** * In table STOCK_SYMBOLS: deselect **SYMBOL** * In table STOCK_TRANSACTIONS: deselect **TX_NO** * Click **STOCK_TRANSACTION.SYMBOL** and drag to **STOCK_SYMBOLS.SYMBOL** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.41.07 Joining Tables.png"> * Click **Preview** to check that your join is working. Each row shoud now contain the stock symbol and the long stock name. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.41.55 New Join Preview.png"> * Click **X** to close the preview window * Click **JOIN** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.42.20 Join.png"> * Type view name **TRANSACTIONS_FULLNAME** * Don't change the default schema. This corresponds to your LABENGINEER user id. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.43.10 View Name.png"> * Click **NEXT** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.43.30 Next.png"> * Select the **Data Virtualization Hands on Lab** project. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.43.58 Assign to Project.png"> * Click **CREATE VIEW**. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.44.06 Create view.png"> You see the successful Join View window. 
<img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.44.23 Join view created.png"> * Click **View my virtualized data** * Click the elipsis menu beside **TRANSACTIONS_FULLNAME** * Click **Preview** You can now join virtualize tables together to combine them into new virtualized tables. Now that you know how to perform simple table joins you can learn how to combine multiple data sources and virtual tables using the powerful SQL query engine that is part of the IBM Cloud Pak for Data - Virtualization. ### Using Queries to Answer Complex Business Questions The IBM Cloud Pak for Data Virtualization Administrator has set up more complex data from multiple source for the next steps. The administrator has also given you access to this virtualized data. You may have noticed this in previous steps. 1. Select **My virtualized data** from the Data Virtualiztion menu. All of these virtualized tables look and act like normal Db2 tables. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.13.20 Menu My virtual data.png"> 2. Click **Preview** for any of the tables to see what they contain. The virtualized tables in the **FOLDING** schema have all been created by combining the same tables from different data sources. Folding isn't something that is restricted to the same data source in the simple example you just completed. The virtualized tables in the **TRADING** schema are a view of complex queries that were use to combine data from multiple data sources to answer specific business questions. 3. Select **SQL Editor** from the Data Virtualization menu. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.13.33 Menu SQL editor.png"> 4. Select **Script Library** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.45.02 Script Library.png"> 5. Search for **OHIO** 6. 
Select and expand the **OHIO Customer** query <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.45.47 Ohio Script.png"> 7. Click the **Open a script to edit** icon to open the script in the SQL Editor. **Note** that if you cannot open the script then you may have to refresh your browser or contract and expand the script details section before the icon is active. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.45.54 Open Script.png"> 8. Click **Run All** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.46.21 Run Ohio Script.png"> This script is a complex SQL join query that uses data from all the virtualize data sources you explored in the first steps of this lab. While the SQL looks complex the author of the query did not have be aware that the data was coming from multiple sources. Everything used in this query looks like it comes from a single database, not eight different data sources across eight different systems on premises or in the Cloud. ### Making Complex SQL Simple to Consume You can easily make this complex query easy for a user to consume. Instead of sharing this query with other users, you can wrap the query into a view that looks and acts like a simple table. 1. Enter **CREATE VIEW MYOHIOQUERY AS** in the SQL Editor at the first line below the comment and before the **WITH** clause <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.46.54 Add CREATE VIEW.png"> 2. Click **Run all** 3. Click **+** to **Add a new script** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.48.28 Add to script.png"> 4. Click **Blank** 4. Enter **SELECT * FROM MYOHIOQUERY;** 5. 
Click **Run all** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.48.57 Run Ohio View.png"> Now you have a very simple virtualized table that is pulling data from eight different data sources, combining the data together to resolve a complex business problem. In the next step you will share your new virtualized data with a user. ### Sharing Virtualized Tables 1. Select **My virtualized data** from the Data Virtualization Menu. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.13.20 Menu My virtual data.png"> 2. Click the elipsis (...) menu to the right of the **MYOHIOQUERY** virtualized table <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.49.30 Select MYOHIOQUERY.png"> 3. Select **Manage Access** from the elipsis menu <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.49.46 Virtualized Data Menu.png"> 3. Click **Grant access** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.50.07 Grant access.png"> 4. Select the **LABUSERx** id associated with your lab. For example, if you are LABDATAENGINEER5, then select LABUSER5. <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.52.42 Grant access to specific user.png"> 5. Click **Add** <img src="https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/media/11.50.28 Add.png"> You should now see that your **LABUSER** id has view-only access to the new virtualized table. Next switch to your LABUSERx id to check that you can see the data you have just granted access for. 6. Click the user icon at the very top right of the console 7. Click **Log out** 8. Sign in using the LABUSER id specified by your lab instructor 9. Click the three bar menu at the top left of the IBM Cloud Pak for Data console 10. Select **Data Virtualization** You should see the **MYOHIOQUERY** with the schema from your engineer userid in the list of virtualized data. 
11. Make a note of the schema of the MYOHIOQUERY in your list of virtualized tables. It starts with **USER**. 12. Select the **SQL Editor** from the Data virtualization menu 13. Click **Blank** to open a new SQL Editor window 14. Enter **SELECT * FROM USERxxxx.MYOHIOQUERY** where xxxx is the user number of your engineer user. The view created by your engineer user was created in their default schema. 15. Click **Run all** 16. Add the following to your query: **WHERE TOTAL > 3000 ORDER BY TOTAL** 17. Click **</>** to format the query so it is easier to read 18. Click **Run all** You can see how you have just made a very complex data set extremely easy to consume by a data user. They don't have to know how to connect to multiple data sources or how to combine the data using complex SQL. You can hide that complexity while ensuring only the right user has access to the right data. In the next steps you will learn how to access virtualized data from outside of IBM Cloud Pak for Data. ### Allowing Users to Access Virtualized Data with Analytic Tools In the next set of steps you connect to virtualized data from this notebook using your **LABUSER** userid. Just like you connected to IBM Cloud Pak for Data Virtualized Data using your LABDATAENGINEER you can connect using your LABUSER. We are going to connect to the IBM Cloud Pak for Data Virtualization database in exactly the same way we connected using your LABENGINEER. However you need to change the detailed connection information. Each user has their own unique userid and password to connect to the service. This ensures that no matter what tool you use to connect to virtualized data you are always in control over who can access specific virtualized data. 2. Click the user icon at the very top right of the IBM Cloud Pak for data console to confirm that you are using your **LABUSER** id 1. Click **Connection Details** in the Data Virtualization menu 2. Click **Without SSL** 3. 
Copy the **User ID** by highlighting it with your mouse, right click and select **Copy** 4. Paste the **User ID** in to the cell below were **user =** between the quotation marks 5. Click **Service Settings** in the Data Virtualization menu 6. Show the password. Highlight the password and copy using the right click menu 7. Paste the **password** into the cell below between the quotation marks using the righ click paste. 8. Run the cell below to connect to the Data Virtualization database. #### Connecting a USER to Data Virtualization SQL Engine ``` # Connect to the IBM Cloud Pak for Data Virtualization Database from inside CPD database = 'bigsql' user = 'userxxxx' password = 'xxxxxxxxxxxxxxxxxx' host = 'openshift-skytap-nfs-lb.ibm.com' port = '32080' %sql CONNECT TO {database} USER {user} USING {password} HOST {host} PORT {port} ``` Now you can try out the view that was created by the LABDATAENGINEER userid. Substitute the **xxxx** for the schema used by your ***LABDATAENGINEERx*** user in the next two cells before you run them. ``` %sql SELECT * FROM USERxxxx.MYOHIOQUERY WHERE TOTAL > 3000 ORDER BY TOTAL; ``` Only LABENGINEER virtualized tables that have been authorized for the LABUSER to see are available. Try running the next cell. You should receive an error that the current user does not have the required authorization or privlege to perform the operation. ``` %sql SELECT * FROM USERxxxx.DISCOVERFOLD; ``` ### Next Steps: Now you can use IBM Cloud Pak for Data to make even complex data and queries from different data sources, on premises and across a multi-vendor Cloud look like simple tables in a single database. You are ready for some more advanced labs. 1. Use Db2 SQL and Jupyter Notebooks to Analyze Virtualized Data * Build simple to complex queries to answer important business questions using the virtualized data available to you in IBM Cloud Pak for Data * See how you can transform the queries into simple tables available to all your users 2. 
Use Open RESTful Services to connect to the IBM Cloud Pak for Data Virtualization * Everything you can do in the IBM Cloud Pak for Data User Interface is accessible through Open RESTful APIs * Learn how to automate and script your management of Data Virtualization using RESTful API * Learn how to accelerate application development by accessing virtualized data through RESTful APIs ## Automating Data Virtualization Setup and Management through REST The IBM Cloud Pak for Data Console is only one way you can interact with the Virtualization service. IBM Cloud Pak for Data is built on a set of microservices that communicate with each other and the Console user interface using RESTful APIs. You can use these services to automate anything you can do through the user interface. This Jupyter Notebook contains examples of how to use the Open APIs to retrieve information from the virtualization service, how to run SQL statements directly against the service through REST and how to provide authorization to objects. This provides a way to write your own script to automate the setup and configuration of the virtualization service. The next part of the lab relies on a set of base classes to help you interact with the RESTful Services API for IBM Cloud Pak for Data Virtualization. You can access this library on GITHUB. The commands below download the library and run them as part of this notebook. <pre> &#37;run CPDDVRestClass.ipynb </pre> The cell below loads the RESTful Service Classes and methods directly from GITHUB. Note that it will take a few seconds for the extension to load, so you should generally wait until the "Db2 Extensions Loaded" message is displayed in your notebook. 1. Click the cell below 2. 
Click **Run** ``` !wget -O CPDDVRestClass.ipynb https://raw.githubusercontent.com/Db2-DTE-POC/CPDDVLAB/master/CPDDVRestClass.ipynb %run CPDDVRestClass.ipynb ``` ### The Db2 Class The CPDDVRestClass.ipynb notebook includes a Python class called Db2 that encapsulates the Rest API calls used to connect to the IBM Cloud Pak for Data Virtualization service. To access the service you need to first authenticate with the service and create a reusable token that we can use for each call to the service. This ensures that we don't have to provide a userID and password each time we run a command. The token makes sure this is secure. Each request is constructed of several parts. First, the URL and the API identify how to connect to the service. Second the REST service request that identifies the request and the options. For example '/metrics/applications/connections/current/list'. And finally some complex requests also include a JSON payload. For example running SQL includes a JSON object that identifies the script, statement delimiters, the maximum number of rows in the results set as well as what do if a statement fails. You can find this class and use it for your own notebooks in GITHUB. Have a look at how the class encapsulated the API calls by clicking on the following link: https://github.com/Db2-DTE-POC/CPDDVLAB/blob/master/CPDDVRestClass.ipynb ### Example Connections To connect to the Data Virtualization service you need to provide the URL, the service name (v1) and profile the console user name and password. For this lab we are assuming that the following values are used for the connection: * Userid: LABDATAENGINEERx * Password: password Substitute your assigned LABDATAENGINEER userid below along with your password and run the cell. It will generate a breaer token that is used in the following steps to authenticate your use of the API. 
#### Connecting to Data Virtualization API Service ``` # Set the service URL to connect from inside the ICPD Cluster Console = 'https://openshift-skytap-nfs-lb.ibm.com' # Connect to the Db2 Data Management Console service user = 'labdataengineerx' password = 'password' # Set up the required connection databaseAPI = Db2(Console) api = '/v1' databaseAPI.authenticate(api, user, password) database = Console ``` #### Data Sources The following call (getDataSources) uses an SQL call in the DB2 class to run the same SQL statement you saw earlier in the lab. ``` # Display the Available Data Sources already configured json = databaseAPI.getDataSources() databaseAPI.displayResults(json) ``` #### Virtualized Data This call retrieves all of the virtualized data available to the role of Data Engineer. It uses a direct RESTful service call and does not use SQL. The service returns a JSON result set that is converted into a Python Pandas dataframe. Dataframes are very useful in being able to manipulate tables of data in Python. If there is a problem with the call, the error code is displayed. ``` # Display the Virtualized Assets Avalable to Engineers roles = ['DV_ENGINEER'] for role in roles: r = databaseAPI.getRole(role) if (databaseAPI.getStatusCode(r)==200): json = databaseAPI.getJSON(r) df = pd.DataFrame(json_normalize(json['objects'])) display(df) else: print(databaseAPI.getStatusCode(r)) ``` #### Virtualized Tables and Views This call retrieves all the virtualized tables and view available to the userid that you use to connect to the service. In this example the whole call is included in the DB2 class library and returned as a complete Dataframe ready for display or to be used for analysis or administration. 
``` ### Display Virtualized Tables and Views display(databaseAPI.getVirtualizedTablesDF()) display(databaseAPI.getVirtualizedViewsDF()) ``` #### Get a list of the IBM Cloud Pak for Data Users This example returns a list of all the users of the IBM Cloud Pak for Data system. It only displays three colunns in the Dataframe, but the list of all the available columns is als printed out. Try changing the code to display other columns. ``` # Get the list of CPD Users r = databaseAPI.getUsers() if (databaseAPI.getStatusCode(r)==200): json = databaseAPI.getJSON(r) df = pd.DataFrame(json_normalize(json)) print(', '.join(list(df))) # List available column names display(df[['uid','username','displayName']]) else: print(databaseAPI.getStatusCode(r)) ``` #### Get the list of available schemas in the DV Database Do not forget that the Data Virtualization engine supports the same function as a regular Db2 database. So you can also look at standard Db2 objects like schemas. ``` # Get the list of available schemas in the DV Database r = databaseAPI.getSchemas() if (databaseAPI.getStatusCode(r)==200): json = databaseAPI.getJSON(r) df = pd.DataFrame(json_normalize(json['resources'])) print(', '.join(list(df))) display(df[['name']].head(10)) else: print(databaseAPI.getStatusCode(r)) ``` #### Object Search Fuzzy object search is also available. The call is a bit more complex. If you look at the routine in the DB2 class it posts a RESTful service call that includes a JSON payload. The payload includes the details of the search request. 
``` # Search for tables across all schemas that match simple search critera # Display the first 100 # Switch between searching tables or views object = 'view' # object = 'table' r = databaseAPI.postSearchObjects(object,"TRADING",10,'false','false') if (databaseAPI.getStatusCode(r)==200): json = databaseAPI.getJSON(r) df = pd.DataFrame(json_normalize(json)) print('Columns:') print(', '.join(list(df))) display(df[[object+'_name']].head(100)) else: print("RC: "+str(databaseAPI.getStatusCode(r))) ``` #### Run SQL through the SQL Editor Service You can also use the SQL Editor service to run your own SQL. Statements are submitted to the editor. Your code then needs to poll the editor service until the script is complete. Fortunately you can use the DB2 class included in this lab so that it becomes a very simple Python call. The **runScript** routine runs the SQL and the **displayResults** routine formats the returned JSON. ``` sqlText = \ ''' WITH MAX_VOLUME(AMOUNT) AS ( SELECT MAX(VOLUME) FROM FOLDING.STOCK_HISTORY WHERE SYMBOL = 'DJIA' ), HIGHDATE(TX_DATE) AS ( SELECT TX_DATE FROM FOLDING.STOCK_HISTORY, MAX_VOLUME M WHERE SYMBOL = 'DJIA' AND VOLUME = M.AMOUNT ), CUSTOMERS_IN_OHIO(CUSTID) AS ( SELECT C.CUSTID FROM TRADING.CUSTOMERS C WHERE C.STATE = 'OH' ), TOTAL_BUY(CUSTID,TOTAL) AS ( SELECT C.CUSTID, SUM(SH.QUANTITY * SH.PRICE) FROM CUSTOMERS_IN_OHIO C, FOLDING.STOCK_TRANSACTIONS SH, HIGHDATE HD WHERE SH.CUSTID = C.CUSTID AND SH.TX_DATE = HD.TX_DATE AND QUANTITY > 0 GROUP BY C.CUSTID ) SELECT LASTNAME, T.TOTAL FROM MONGO_ONPREM.CUSTOMER_IDENTITY CI, MONGO_ONPREM.CUSTOMER C, TOTAL_BUY T WHERE CI.CUSTOMER_ID = C."_ID" AND C.CUSTOMERID = CUSTID ORDER BY TOTAL DESC FETCH FIRST 5 ROWS ONLY; ''' databaseAPI.displayResults(databaseAPI.runScript(sqlText)) ``` #### Run scripts of SQL Statements repeatedly through the SQL Editor Service The runScript routine can contain more than one statement. The next example runs a scipt with eight SQL statements multple times. 
``` repeat = 3 sqlText = \ ''' SELECT * FROM TRADING.MOVING_AVERAGE; SELECT * FROM TRADING.VOLUME; SELECT * FROM TRADING.THREEPERCENT; SELECT * FROM TRADING.TRANSBYCUSTOMER; SELECT * FROM TRADING.TOPBOUGHTSOLD; SELECT * FROM TRADING.TOPFIVE; SELECT * FROM TRADING.BOTTOMFIVE; SELECT * FROM TRADING.OHIO; ''' for x in range(0, repeat): print('Repetition number: '+str(x)) databaseAPI.displayResults(databaseAPI.runScript(sqlText)) print('done') ``` ### What's next if you are inteested in finding out more about using RESTful services to work with Db2, check out this DZone article: https://dzone.com/articles/db2-dte-pocdb2dmc. The article also includes a link to a complete hands-on lab for Db2 and the Db2 Data Management Console. In it you can find out more about using REST and Db2 together. #### Credits: IBM 2019, Peter Kohlmann [kohlmann@ca.ibm.com]
github_jupyter
``` import pandas as pd d = pd.read_csv("YouTube-Spam-Collection-v1/Youtube01-Psy.csv") d.tail() len(d.query('CLASS == 1')) len(d.query('CLASS == 0')) len(d) from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer() dvec = vectorizer.fit_transform(d['CONTENT']) dvec analyze = vectorizer.build_analyzer() print(d['CONTENT'][349]) analyze(d['CONTENT'][349]) vectorizer.get_feature_names() dshuf = d.sample(frac=1) d_train = dshuf[:300] d_test = dshuf[300:] d_train_att = vectorizer.fit_transform(d_train['CONTENT']) # fit bag-of-words on training set d_test_att = vectorizer.transform(d_test['CONTENT']) # reuse on testing set d_train_label = d_train['CLASS'] d_test_label = d_test['CLASS'] d_train_att d_test_att from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(n_estimators=80) clf.fit(d_train_att, d_train_label) clf.score(d_test_att, d_test_label) from sklearn.metrics import confusion_matrix pred_labels = clf.predict(d_test_att) confusion_matrix(d_test_label, pred_labels) from sklearn.model_selection import cross_val_score scores = cross_val_score(clf, d_train_att, d_train_label, cv=5) # show average score and +/- two standard deviations away (covering 95% of scores) print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) # load all datasets and combine them d = pd.concat([pd.read_csv("YouTube-Spam-Collection-v1/Youtube01-Psy.csv"), pd.read_csv("YouTube-Spam-Collection-v1/Youtube02-KatyPerry.csv"), pd.read_csv("YouTube-Spam-Collection-v1/Youtube03-LMFAO.csv"), pd.read_csv("YouTube-Spam-Collection-v1/Youtube04-Eminem.csv"), pd.read_csv("YouTube-Spam-Collection-v1/Youtube05-Shakira.csv")]) len(d) len(d.query('CLASS == 1')) len(d.query('CLASS == 0')) dshuf = d.sample(frac=1) d_content = dshuf['CONTENT'] d_label = dshuf['CLASS'] # set up a pipeline from sklearn.pipeline import Pipeline, make_pipeline pipeline = Pipeline([ ('bag-of-words', CountVectorizer()), ('random forest', 
RandomForestClassifier()), ]) pipeline # or: pipeline = make_pipeline(CountVectorizer(), RandomForestClassifier()) make_pipeline(CountVectorizer(), RandomForestClassifier()) pipeline.fit(d_content[:1500],d_label[:1500]) pipeline.score(d_content[1500:], d_label[1500:]) pipeline.predict(["what a neat video!"]) pipeline.predict(["plz subscribe to my channel"]) scores = cross_val_score(pipeline, d_content, d_label, cv=5) print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) # add tfidf from sklearn.feature_extraction.text import TfidfTransformer pipeline2 = make_pipeline(CountVectorizer(), TfidfTransformer(norm=None), RandomForestClassifier()) scores = cross_val_score(pipeline2, d_content, d_label, cv=5) print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) pipeline2.steps # parameter search parameters = { 'countvectorizer__max_features': (None, 1000, 2000), 'countvectorizer__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams 'countvectorizer__stop_words': ('english', None), 'tfidftransformer__use_idf': (True, False), # effectively turn on/off tfidf 'randomforestclassifier__n_estimators': (20, 50, 100) } from sklearn.model_selection import GridSearchCV grid_search = GridSearchCV(pipeline2, parameters, n_jobs=-1, verbose=1) grid_search.fit(d_content, d_label) print("Best score: %0.3f" % grid_search.best_score_) print("Best parameters set:") best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name])) ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import xlrd import matplotlib.pyplot as plt import os from sklearn.utils import check_random_state # Generating artificial data. n = 50 XX = np.arange(n) rs = check_random_state(0) YY = rs.randint(-10, 10, size=(n,)) + 2.0 * XX data = np.stack([XX,YY], axis=1) ####################### ## Defining flags ##### ####################### num_epochs = 5 # creating the weight and bias. # The defined variables will be initialized to zero. W = tf.Variable(0.0, name="weights") b = tf.Variable(0.0, name="bias") ############################### ##### Necessary functions ##### ############################### # Creating placeholders for input X and label Y. def inputs(): """ Defining the place_holders. :return: Returning the data and label place holders. """ X = tf.placeholder(tf.float32, name="X") Y = tf.placeholder(tf.float32, name="Y") return X,Y # Create the prediction. def inference(X): """ Forward passing the X. :param X: Input. :return: X*W + b. """ return X * W + b def loss(X, Y): ''' compute the loss by comparing the predicted value to the actual label. :param X: The input. :param Y: The label. :return: The loss over the samples. ''' # Making the prediction. Y_predicted = inference(X) return tf.squared_difference(Y, Y_predicted) # The training function. def train(loss): learning_rate = 0.0001 return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) with tf.Session() as sess: # Initialize the variables[w and b]. sess.run(tf.global_variables_initializer()) # Get the input tensors X, Y = inputs() # Return the train loss and create the train_op. train_loss = loss(X, Y) train_op = train(train_loss) # Step 8: train the model for epoch_num in range(num_epochs): # run 100 epochs for x, y in data: train_op = train(train_loss) # Session runs train_op to minimize loss loss_value,_ = sess.run([train_loss,train_op], feed_dict={X: x, Y: y}) # Displaying the loss per epoch. 
print('epoch %d, loss=%f' %(epoch_num+1, loss_value)) # save the values of weight and bias wcoeff, bias = sess.run([W, b]) ############################### #### Evaluate and plot ######## ############################### Input_values = data[:,0] Labels = data[:,1] Prediction_values = data[:,0] * wcoeff + bias # uncomment if plotting is desired! plt.plot(Input_values, Labels, 'ro', label='main') plt.plot(Input_values, Prediction_values, label='Predicted') # Saving the result. plt.legend() plt.show() plt.close() ```
github_jupyter
# Jupyter-Specific Functionality While GAP does provide a lot of useful functionality by itself on the command line, it is enhanced greatly by the numerous features that Jupyter notebooks have to offer. This notebook attempts to provide some insight into how Jupyter notebooks can improve the workflow of a user who is already well-versed in GAP. ## The Basics In Jupyter, code is split into a number of cells. While these cells may look independent from one another, and can be run independently, there is some interconnectedness between them. One major example of this is that variables defined in one cell are accessible from cells that are run **after** the cell containing the variable. The value of the variable will be taken from the **most recent** assignment to that variable: ``` a := 3; b := 5; a + b; a := 7; a + b; ``` To run a cell, users can either use the toolbar at the top and clicking the play button, or use the handy keyboard shortcut `Shift + Enter`. Using this shortcut will also create a new cell so users can continue their work while the cell runs. Using `Enter` by itself will allow users to add lines to a cell, should they so desire. The `Cell` option in the top menu also provides some other commands to run all cells. Additionally, cells can also support a multitude of different inputs. One useful example of this is markdown. In order to use markdown syntax within a cell, it must be converted to a markdown cell. This conversion can be done by either using the dropdown menu at the top which allows users to change the type of the cell (it will be `Code` by default). Alternatively, users can press the `Esc` key while in the cell, which allows them to access "Command Mode" for the cell. While in this mode, the `M` key can be pressed to convert the cell to a Markdown cell. While in Markdown cells, all the typical markdown syntax is supported. Furthermore, while in "Command Mode", users can use the key sequence `D` `D` to delete cells as they wish. 
The key `H` can be pressed to look at other useful key shortcuts while in this mode. ## Cell Magic While the main purpose of most users will be GAP-orientated, Jupyter can also render and run some other code fragments. For example, the code magic `%%html` allows Jupyter to render the contents of a code cell as html: ## Visualisation Another neat feature about Jupyter is the ability to visualise items right after running cells. ## Notebook Conversion Since Jupyter Notebooks are simply JSON, they can be easily converted to other formats. For example, to convert to HTML one would run: jupyter nbconvert --to html notebook.ipynb from their terminal.
github_jupyter
# Kurulum ve Gerekli Modullerin Yuklenmesi ``` from google.colab import drive drive.mount('/content/gdrive') import sys import os import pandas as pd import matplotlib.pyplot as plt import matplotlib import matplotlib.pyplot as plt import numpy as np import nltk import os from nltk import sent_tokenize, word_tokenize from nltk.stem.snowball import SnowballStemmer from nltk.stem.wordnet import WordNetLemmatizer import nltk nltk.download('stopwords') import matplotlib.pyplot as plt import pandas as pd nltk.download('punkt') import string from nltk.corpus import stopwords import pandas as pd import numpy as np import re ``` # Incelenecek konu basligindaki tweetlerin yuklenmesi Burada ornek olarak ulkeler konu basligi gosteriliyor gosteriliyor ``` os.chdir("/content/gdrive/My Drive/css/dezenformasyon_before") df3 = pd.read_csv("/content/gdrive/My Drive/css/dezenformasyon_before/dezenformasyon_before_nodublication.csv", engine = 'python') df3['tweet'] = df3['tweet'].astype(str) ``` Data pre-processing (on temizlemesi): 1. kucuk harfe cevirme 2. turkce karakter uyumlarini duzeltme 3. 
ozel karakterleri, noktalamalari temizleme ``` df3.tweet = df3.tweet.apply(lambda x: re.sub(r"İ", "i",x)) #harika calisiyor df3.tweet = df3.tweet.apply(lambda x: x.lower()) df3.loc[:,"tweet"] = df3.tweet.apply(lambda x : " ".join(re.findall('[\w]+',x))) ``` # Tokenize islemi, stop wordlerin atilmasi ve kelime frequencylerini (kullanim sayilarini) ileride gelecek gorsellestirme icin kaydetme ``` top_N = 10 txt = df3.tweet.str.lower().str.replace(r'\|', ' ').str.cat(sep=' ') words = nltk.tokenize.word_tokenize(txt) word_dist = nltk.FreqDist(words) user_defined_stop_words = ['ekonomi', '1', 'ye', 'nin' ,'nın', 'koronavirüs', 'olsun', 'karşı' , 'covid_19', 'artık', '3', 'sayısı' , 'olarak', 'oldu', 'olan', '2' , 'nedeniyle','bile' , 'sonra' ,'sen','virüs', 'ben', 'vaka' , 'son', 'yeni', 'sayi', 'sayisi','virüsü','bir','com','twitter', 'kadar', 'dan' , 'değil' ,'pic' , 'http', 'https' , 'www' , 'status' , 'var', 'bi', 'mi','yok', 'bu' , 've', 'korona' ,'corona' ,'19' ,'kovid', 'covid'] i = nltk.corpus.stopwords.words('turkish') j = list(string.punctuation) + user_defined_stop_words stopwords = set(i).union(j) words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stopwords) print('All frequencies, including STOPWORDS:') print('=' * 60) rslt3 = pd.DataFrame(word_dist.most_common(top_N), columns=['Word', 'Frequency']) print(rslt3) print('=' * 60) rslt3 = pd.DataFrame(words_except_stop_dist.most_common(top_N), columns=['Word', 'Frequency']).set_index('Word') ``` # TR deki ilk vakan onceki tweetlerin incelenmek icin yuklenmesi ``` df2 = pd.read_csv("/content/gdrive/My Drive/css/dezenformasyon_after/dezenformasyon_after_nodublication.csv", engine = 'python') df2['tweet'] = df2['tweet'].astype(str) df2['tweet'] = df2['tweet'].astype(str) df2.tweet = df2.tweet.apply(lambda x: re.sub(r"İ", "i",x)) #harika calisiyor df2.tweet = df2.tweet.apply(lambda x: x.lower()) df2.loc[:,"tweet"] = df2.tweet.apply(lambda x : " ".join(re.findall('[\w]+',x))) top_N = 10 txt = 
df2.tweet.str.lower().str.replace(r'\|', ' ').str.cat(sep=' ') words = nltk.tokenize.word_tokenize(txt) word_dist = nltk.FreqDist(words) user_defined_stop_words = ['ekonomi', '1', 'ye', 'nin' ,'nın', 'koronavirüs', 'olsun', 'karşı' , 'covid_19', 'artık', '3', 'sayısı' , 'olarak', 'oldu', 'olan', '2' , 'nedeniyle','bile' , 'sonra' ,'sen','virüs', 'ben', 'vaka' , 'son', 'yeni', 'sayi', 'sayisi','virüsü','bir','com','twitter', 'kadar', 'dan' , 'değil' ,'pic' , 'http', 'https' , 'www' , 'status' , 'var', 'bi', 'mi','yok', 'bu' , 've', 'korona' ,'corona' ,'19' ,'kovid', 'covid'] i = nltk.corpus.stopwords.words('turkish') j = list(string.punctuation) + user_defined_stop_words stopwords = set(i).union(j) words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stopwords) print('All frequencies, including STOPWORDS:') print('=' * 60) rslt = pd.DataFrame(word_dist.most_common(top_N), columns=['Word', 'Frequency']) print(rslt) print('=' * 60) rslt = pd.DataFrame(words_except_stop_dist.most_common(top_N), columns=['Word', 'Frequency']).set_index('Word') ``` # Karsilastirmali gorsellestirme (Ayni konu basliklarinin 11 marttan oncesi ve sonrasi ) ``` fig, (ax1, ax2) = plt.subplots(1,2, sharex=False, sharey= True, figsize=(24,5)) rslt3.plot.bar(rot=0, ax =ax1 , title = "Dezenformasyon_Once" ) rslt.plot.bar(rot=0, ax =ax2 , title = "Dezenformasyon_Sonra" ) plt.savefig('Disinfo_comparison.png',dpi=300) ```
github_jupyter
## <small> Copyright (c) 2017-21 Andrew Glassner Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. </small> # Deep Learning: A Visual Approach ## by Andrew Glassner, https://glassner.com ### Order: https://nostarch.com/deep-learning-visual-approach ### GitHub: https://github.com/blueberrymusic ------ ### What's in this notebook This notebook is provided as a “behind-the-scenes” look at code used to make some of the figures in this chapter. It is cleaned up a bit from the original code that I hacked together, and is only lightly commented. I wrote the code to be easy to interpret and understand, even for those who are new to Python. I tried never to be clever or even more efficient at the cost of being harder to understand. The code is in Python3, using the versions of libraries as of April 2021. This notebook may contain additional code to create models and images not in the book. That material is included here to demonstrate additional techniques. 
Note that I've included the output cells in this saved notebook, but Jupyter doesn't save the variables or data that were used to generate them. To recreate any cell's output, evaluate all the cells from the start up to that cell. A convenient way to experiment is to first choose "Restart & Run All" from the Kernel menu, so that everything's been defined and is up to date. Then you can experiment using the variables, data, functions, and other stuff defined in this notebook. ## Chapter 11: Classifers, Notebook 1: kNN Figures demonstrating k nearest neighbors (kNN) ``` import numpy as np import matplotlib.pyplot as plt from sklearn.datasets.samples_generator import make_blobs from sklearn.neighbors import KNeighborsClassifier import math import seaborn as sns; sns.set() # Make a File_Helper for saving and loading files. save_files = False import os, sys, inspect current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) sys.path.insert(0, os.path.dirname(current_dir)) # path to parent dir from DLBasics_Utilities import File_Helper file_helper = File_Helper(save_files) # create a custom color map with nice colors from matplotlib.colors import LinearSegmentedColormap dot_clr_0 = np.array((79, 135, 219))/255. # blue dot_clr_1 = np.array((255, 141, 54))/255. # orange dot_cmap = LinearSegmentedColormap.from_list('dot_map', [dot_clr_0, dot_clr_1], N=100) # Show a scatter plot with blue/orange colors and no ticks def show_Xy(X, y, filename): plt.scatter(X[:,0], X[:,1], c=y, s=50, cmap=dot_cmap) plt.xticks([],[]) plt.yticks([],[]) file_helper.save_figure(filename) plt.show() # Create the "smile" dataset. A curve for the smile with a circle at each end. # All the magic values were picked by hand. 
# Shared point generator for the face datasets.  Samples points uniformly in
# the rectangle [-1.5, 1.5] x [-1, y_max], labels each point 1 if it falls
# inside the "smile" (a circle at each end plus an arc-shaped band) or inside
# any of the optional extra circles (the eyes), then jitters both coordinates.
# The sequence of random draws matches the original make_smile/make_happy_face
# code exactly (px, py, then the two noise offsets, per sample), so the
# generated datasets are reproducible bit-for-bit.
def _make_face_points(num_samples, thickness, noise, y_max, extra_circles):
    np.random.seed(42)
    X = []
    y = []
    for i in range(num_samples):
        px = np.random.uniform(-1.5, 1.5)
        py = np.random.uniform(-1, y_max)
        c = 0
        # Optional extra circles (the eyes of the happy face).
        for (cx, cy) in extra_circles:
            if (px - cx)**2 + (py - cy)**2 < thickness**2:
                c = 1
        # Circles at the two ends of the smile.
        if (px - -0.8)**2 + (py - .4)**2 < thickness**2:
            c = 1
        if (px - 0.8)**2 + (py - .4)**2 < thickness**2:
            c = 1
        # The smile itself: the lower half (theta < 0) of an annulus of
        # radius 0.8 +/- thickness centered at (0, 0.4).
        theta = np.arctan2(py - .4, px)
        r = math.sqrt((px**2) + ((py - .4)**2))
        if (theta < 0) and (r > .8 - thickness) and (r < .8 + thickness):
            c = 1
        # Jitter the point.
        px += np.random.uniform(-noise, noise)
        py += np.random.uniform(-noise, noise)
        X.append([px, py])
        y.append(c)
    return (np.array(X), y)

# Create the "smile" dataset. A curve for the smile with a circle at each end.
# All the magic values were picked by hand.
def make_smile(num_samples=20, thickness=0.3, noise=0.0):
    return _make_face_points(num_samples, thickness, noise, 1, [])

# Create the "happy face" dataset by adding some eyes to the smile.
# All the magic values were picked by hand.  (The sampling rectangle is taller
# so the eyes fit: py runs up to 2.0 instead of 1.)
def make_happy_face(num_samples=20, thickness=0.3, noise=0.0):
    eye_x = .5
    eye_y = 1.5
    return _make_face_points(num_samples, thickness, noise, 2.0,
                             [(eye_x, eye_y), (-eye_x, eye_y)])

# Show the clean smile
X_clean, y_clean = make_smile(1000, .3, 0)
show_Xy(X_clean, y_clean, 'KNN-smile-data-clean')

# Show the noisy smile
X_noisy, y_noisy = make_smile(1000, .3, .25)
show_Xy(X_noisy, y_noisy, 'KNN-smile-data-noisy')

# Show a grid of k-nearest-neighbors (kNN) results for different values of k.
# For large values of k, this can take a little while.
def show_fit_grid(X, y, data_version):
    """Fit kNN classifiers for nine values of k and draw the decision
    regions of each one in a 3x3 grid of subplots, saved as
    'KNN-smile-grid-<data_version>'."""
    k_list = [1, 2, 3, 4, 5, 6, 10, 20, 50]
    plt.figure(figsize=(8,6))
    resolution = 500
    # Bounding box of the data, padded slightly so no point sits on an edge.
    xmin = np.min(X[:,0]) - .1
    xmax = np.max(X[:,0]) + .1
    ymin = np.min(X[:,1]) - .1
    ymax = np.max(X[:,1]) + .1
    # Dense grid of query points covering the bounding box; zin is the
    # (resolution*resolution, 2) list of points to classify.
    xx, yy = np.meshgrid(np.linspace(xmin, xmax, resolution), np.linspace(ymin, ymax, resolution))
    zin = np.array([xx.ravel(), yy.ravel()]).T
    for i in range(9):
        plt.subplot(3, 3, i+1)
        num_neighbors = k_list[i]
        # Fit a classifier for this k and predict a class for every grid point.
        knn = KNeighborsClassifier(n_neighbors=num_neighbors)
        knn.fit(X,y)
        Z = knn.predict(zin)
        Z = Z.reshape(xx.shape)
        # Filled contours show the two decision regions in the blue/orange map.
        plt.contourf(xx, yy, Z, cmap=dot_cmap)
        #plt.scatter(X[:,0], X[:,1], c=y, s=5, alpha=0.3, cmap='cool')
        plt.xticks([],[])
        plt.yticks([],[])
        plt.title('k='+str(num_neighbors))
    plt.tight_layout()
    file_helper.save_figure('KNN-smile-grid-'+data_version)
    plt.show()

# Show the grid for the clean smile dataset
show_fit_grid(X_clean, y_clean, 'clean')

# Show the grid for the noisy smile dataset
show_fit_grid(X_noisy, y_noisy, 'noisy')

# Show the clean face dataset
X_clean_face, y_clean_face = make_happy_face(1000, .3, 0)
show_Xy(X_clean_face, y_clean_face, 'KNN-face-data-clean')

# Show the grid for the clean face dataset
show_fit_grid(X_clean_face, y_clean_face, 'clean-face')

# Show the noisy face dataset
X_noisy_face, y_noisy_face = make_happy_face(1000, .3, .25)
show_Xy(X_noisy_face, y_noisy_face, 'KNN-face-data-noisy')

# Show the grid for the noisy face dataset
show_fit_grid(X_noisy_face, y_noisy_face, 'noisy-face')
```
github_jupyter
# PyTorch Basics ``` import torch import numpy as np torch.manual_seed(1234) ``` ## Tensors * Scalar is a single number. * Vector is an array of numbers. * Matrix is a 2-D array of numbers. * Tensors are N-D arrays of numbers. #### Creating Tensors You can create tensors by specifying the shape as arguments. Here is a tensor with 5 rows and 3 columns ``` def describe(x): print("Type: {}".format(x.type())) print("Shape/size: {}".format(x.shape)) print("Values: \n{}".format(x)) describe(torch.Tensor(2, 3)) describe(torch.randn(2, 3)) ``` It's common in prototyping to create a tensor with random numbers of a specific shape. ``` x = torch.rand(2, 3) describe(x) ``` You can also initialize tensors of ones or zeros. ``` describe(torch.zeros(2, 3)) x = torch.ones(2, 3) describe(x) x.fill_(5) describe(x) ``` Tensors can be initialized and then filled in place. Note: operations that end in an underscore (`_`) are in place operations. ``` x = torch.Tensor(3,4).fill_(5) print(x.type()) print(x.shape) print(x) ``` Tensors can be initialized from a list of lists ``` x = torch.Tensor([[1, 2,], [2, 4,]]) describe(x) ``` Tensors can be initialized from numpy matrices ``` npy = np.random.rand(2, 3) describe(torch.from_numpy(npy)) print(npy.dtype) ``` #### Tensor Types The FloatTensor has been the default tensor that we have been creating all along ``` import torch x = torch.arange(6).view(2, 3) describe(x) x = torch.FloatTensor([[1, 2, 3], [4, 5, 6]]) describe(x) x = x.long() describe(x) x = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.int64) describe(x) x = x.float() describe(x) x = torch.randn(2, 3) describe(x) describe(torch.add(x, x)) describe(x + x) x = torch.arange(6) describe(x) x = x.view(2, 3) describe(x) describe(torch.sum(x, dim=0)) describe(torch.sum(x, dim=1)) describe(torch.transpose(x, 0, 1)) import torch x = torch.arange(6).view(2, 3) describe(x) describe(x[:1, :2]) describe(x[0, 1]) indices = torch.LongTensor([0, 2]) describe(torch.index_select(x, dim=1, 
index=indices)) indices = torch.LongTensor([0, 0]) describe(torch.index_select(x, dim=0, index=indices)) row_indices = torch.arange(2).long() col_indices = torch.LongTensor([0, 1]) describe(x[row_indices, col_indices]) ``` Long Tensors are used for indexing operations and mirror the `int64` numpy type ``` x = torch.LongTensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) describe(x) print(x.dtype) print(x.numpy().dtype) ``` You can convert a FloatTensor to a LongTensor ``` x = torch.FloatTensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) x = x.long() describe(x) ``` ### Special Tensor initializations We can create a vector of incremental numbers ``` x = torch.arange(0, 10) print(x) ``` Sometimes it's useful to have an integer-based arange for indexing ``` x = torch.arange(0, 10).long() print(x) ``` ## Operations Using the tensors to do linear algebra is a foundation of modern Deep Learning practices Reshaping allows you to move the numbers in a tensor around. One can be sure that the order is preserved. In PyTorch, reshaping is called `view` ``` x = torch.arange(0, 20) print(x.view(1, 20)) print(x.view(2, 10)) print(x.view(4, 5)) print(x.view(5, 4)) print(x.view(10, 2)) print(x.view(20, 1)) ``` We can use view to add size-1 dimensions, which can be useful for combining with other tensors. This is called broadcasting. ``` x = torch.arange(12).view(3, 4) y = torch.arange(4).view(1, 4) z = torch.arange(3).view(3, 1) print(x) print(y) print(z) print(x + y) print(x + z) ``` Unsqueeze and squeeze will add and remove 1-dimensions. 
``` x = torch.arange(12).view(3, 4) print(x.shape) x = x.unsqueeze(dim=1) print(x.shape) x = x.squeeze() print(x.shape) ``` all of the standard mathematics operations apply (such as `add` below) ``` x = torch.rand(3,4) print("x: \n", x) print("--") print("torch.add(x, x): \n", torch.add(x, x)) print("--") print("x+x: \n", x + x) ``` The convention of `_` indicating in-place operations continues: ``` x = torch.arange(12).reshape(3, 4) print(x) print(x.add_(x)) ``` There are many operations for which reduce a dimension. Such as sum: ``` x = torch.arange(12).reshape(3, 4) print("x: \n", x) print("---") print("Summing across rows (dim=0): \n", x.sum(dim=0)) print("---") print("Summing across columns (dim=1): \n", x.sum(dim=1)) ``` #### Indexing, Slicing, Joining and Mutating ``` x = torch.arange(6).view(2, 3) print("x: \n", x) print("---") print("x[:2, :2]: \n", x[:2, :2]) print("---") print("x[0][1]: \n", x[0][1]) print("---") print("Setting [0][1] to be 8") x[0][1] = 8 print(x) ``` We can select a subset of a tensor using the `index_select` ``` x = torch.arange(9).view(3,3) print(x) print("---") indices = torch.LongTensor([0, 2]) print(torch.index_select(x, dim=0, index=indices)) print("---") indices = torch.LongTensor([0, 2]) print(torch.index_select(x, dim=1, index=indices)) ``` We can also use numpy-style advanced indexing: ``` x = torch.arange(9).view(3,3) indices = torch.LongTensor([0, 2]) print(x[indices]) print("---") print(x[indices, :]) print("---") print(x[:, indices]) ``` We can combine tensors by concatenating them. First, concatenating on the rows ``` x = torch.arange(6).view(2,3) describe(x) describe(torch.cat([x, x], dim=0)) describe(torch.cat([x, x], dim=1)) describe(torch.stack([x, x])) ``` We can concentate along the first dimension.. the columns. 
``` x = torch.arange(9).view(3,3) print(x) print("---") new_x = torch.cat([x, x, x], dim=1) print(new_x.shape) print(new_x) ``` We can also concatenate on a new 0th dimension to "stack" the tensors: ``` x = torch.arange(9).view(3,3) print(x) print("---") new_x = torch.stack([x, x, x]) print(new_x.shape) print(new_x) ``` #### Linear Algebra Tensor Functions Transposing allows you to switch the dimensions to be on different axis. So we can make it so all the rows are columsn and vice versa. ``` x = torch.arange(0, 12).view(3,4) print("x: \n", x) print("---") print("x.tranpose(1, 0): \n", x.transpose(1, 0)) ``` A three dimensional tensor would represent a batch of sequences, where each sequence item has a feature vector. It is common to switch the batch and sequence dimensions so that we can more easily index the sequence in a sequence model. Note: Transpose will only let you swap 2 axes. Permute (in the next cell) allows for multiple ``` batch_size = 3 seq_size = 4 feature_size = 5 x = torch.arange(batch_size * seq_size * feature_size).view(batch_size, seq_size, feature_size) print("x.shape: \n", x.shape) print("x: \n", x) print("-----") print("x.transpose(1, 0).shape: \n", x.transpose(1, 0).shape) print("x.transpose(1, 0): \n", x.transpose(1, 0)) ``` Permute is a more general version of tranpose: ``` batch_size = 3 seq_size = 4 feature_size = 5 x = torch.arange(batch_size * seq_size * feature_size).view(batch_size, seq_size, feature_size) print("x.shape: \n", x.shape) print("x: \n", x) print("-----") print("x.permute(1, 0, 2).shape: \n", x.permute(1, 0, 2).shape) print("x.permute(1, 0, 2): \n", x.permute(1, 0, 2)) ``` Matrix multiplication is `mm`: ``` torch.randn(2, 3, requires_grad=True) x1 = torch.arange(6).view(2, 3).float() describe(x1) x2 = torch.ones(3, 2) x2[:, 1] += 1 describe(x2) describe(torch.mm(x1, x2)) x = torch.arange(0, 12).view(3,4).float() print(x) x2 = torch.ones(4, 2) x2[:, 1] += 1 print(x2) print(x.mm(x2)) ``` See the [PyTorch Math Operations 
Documentation](https://pytorch.org/docs/stable/torch.html#math-operations) for more! ## Computing Gradients ``` x = torch.tensor([[2.0, 3.0]], requires_grad=True) z = 3 * x print(z) ``` In this small snippet, you can see the gradient computations at work. We create a tensor and multiply it by 3. Then, we create a scalar output using `sum()`. A Scalar output is needed as the the loss variable. Then, called backward on the loss means it computes its rate of change with respect to the inputs. Since the scalar was created with sum, each position in z and x are independent with respect to the loss scalar. The rate of change of x with respect to the output is just the constant 3 that we multiplied x by. ``` x = torch.tensor([[2.0, 3.0]], requires_grad=True) print("x: \n", x) print("---") z = 3 * x print("z = 3*x: \n", z) print("---") loss = z.sum() print("loss = z.sum(): \n", loss) print("---") loss.backward() print("after loss.backward(), x.grad: \n", x.grad) ``` ### Example: Computing a conditional gradient $$ \text{ Find the gradient of f(x) at x=1 } $$ $$ {} $$ $$ f(x)=\left\{ \begin{array}{ll} sin(x) \text{ if } x>0 \\ cos(x) \text{ otherwise } \\ \end{array} \right.$$ ``` def f(x): if (x.data > 0).all(): return torch.sin(x) else: return torch.cos(x) x = torch.tensor([1.0], requires_grad=True) y = f(x) y.backward() print(x.grad) ``` We could apply this to a larger vector too, but we need to make sure the output is a scalar: ``` x = torch.tensor([1.0, 0.5], requires_grad=True) y = f(x) # this is meant to break! y.backward() print(x.grad) ``` Making the output a scalar: ``` x = torch.tensor([1.0, 0.5], requires_grad=True) y = f(x) y.sum().backward() print(x.grad) ``` but there was an issue.. 
def f2(x):
    """Elementwise piecewise function: sin(x) where x > 0, cos(x) elsewhere.

    The float 0/1 mask makes the choice per element, so autograd propagates
    the correct sin' or cos' gradient to each entry — unlike a single
    tensor-wide if/else branch.
    """
    positive = torch.gt(x, 0).float()
    return positive * torch.sin(x) + (1 - positive) * torch.cos(x)


def describe_grad(x):
    """Print x's gradient and grad_fn, or a note when no gradient exists."""
    if x.grad is None:
        print("No gradient information")
        return
    print(f"Gradient: \n{x.grad}")
    print(f"Gradient Function: {x.grad_fn}")
(Answers are at the bottom) #### Exercise 1 Create a 2D tensor and then add a dimension of size 1 inserted at the 0th axis. #### Exercise 2 Remove the extra dimension you just added to the previous tensor. #### Exercise 3 Create a random tensor of shape 5x3 in the interval [3, 7) #### Exercise 4 Create a tensor with values from a normal distribution (mean=0, std=1). #### Exercise 5 Retrieve the indexes of all the non zero elements in the tensor torch.Tensor([1, 1, 1, 0, 1]). #### Exercise 6 Create a random tensor of size (3,1) and then horizonally stack 4 copies together. #### Exercise 7 Return the batch matrix-matrix product of two 3 dimensional matrices (a=torch.rand(3,4,5), b=torch.rand(3,5,4)). #### Exercise 8 Return the batch matrix-matrix product of a 3D matrix and a 2D matrix (a=torch.rand(3,4,5), b=torch.rand(5,4)). Answers below Answers still below.. Keep Going #### Exercise 1 Create a 2D tensor and then add a dimension of size 1 inserted at the 0th axis. ``` a = torch.rand(3,3) a = a.unsqueeze(0) print(a) print(a.shape) ``` #### Exercise 2 Remove the extra dimension you just added to the previous tensor. ``` a = a.squeeze(0) print(a.shape) ``` #### Exercise 3 Create a random tensor of shape 5x3 in the interval [3, 7) ``` 3 + torch.rand(5, 3) * 4 ``` #### Exercise 4 Create a tensor with values from a normal distribution (mean=0, std=1). ``` a = torch.rand(3,3) a.normal_(mean=0, std=1) ``` #### Exercise 5 Retrieve the indexes of all the non zero elements in the tensor torch.Tensor([1, 1, 1, 0, 1]). ``` a = torch.Tensor([1, 1, 1, 0, 1]) torch.nonzero(a) ``` #### Exercise 6 Create a random tensor of size (3,1) and then horizonally stack 4 copies together. ``` a = torch.rand(3,1) a.expand(3,4) ``` #### Exercise 7 Return the batch matrix-matrix product of two 3 dimensional matrices (a=torch.rand(3,4,5), b=torch.rand(3,5,4)). 
``` a = torch.rand(3,4,5) b = torch.rand(3,5,4) torch.bmm(a, b) ``` #### Exercise 8 Return the batch matrix-matrix product of a 3D matrix and a 2D matrix (a=torch.rand(3,4,5), b=torch.rand(5,4)). ``` a = torch.rand(3,4,5) b = torch.rand(5,4) torch.bmm(a, b.unsqueeze(0).expand(a.size(0), *b.size())) ``` ### END
github_jupyter
def createClusteredData(N, k):
    """Generate N fake (income, age) samples grouped into k clusters.

    Each cluster draws an income centroid from U(20000, 200000) and an age
    centroid from U(20, 70), then scatters int(N/k) points around them with
    normal noise (sd 10000 for income, sd 2 for age).  The global NumPy RNG
    is reseeded with 1234, so every call returns identical data.

    Args:
        N: requested total number of points (actual total is int(N/k) * k).
        k: number of clusters.

    Returns:
        (X, y): X is an (n, 2) float array of [income, age] rows and y an
        (n,) integer array of cluster labels in range(k).
    """
    np.random.seed(1234)
    pointsPerCluster = float(N) / k
    X = []
    y = []
    for i in range(k):
        incomeCentroid = np.random.uniform(20000.0, 200000.0)
        ageCentroid = np.random.uniform(20.0, 70.0)
        for j in range(int(pointsPerCluster)):
            X.append([np.random.normal(incomeCentroid, 10000.0),
                      np.random.normal(ageCentroid, 2.0)])
            y.append(i)
    X = np.array(X)
    y = np.array(y)
    return X, y


def plotPredictions(clf):
    """Shade clf's decision regions over [-1, 1]^2 and overlay the data.

    Relies on the module-level (scaled) X and y created above; shows the
    plot as a side effect and returns None.
    """
    # Create a dense grid of points to sample
    xx, yy = np.meshgrid(np.arange(-1, 1, .001), np.arange(-1, 1, .001))
    # Convert to a list of 2D (income, age) points
    samplePoints = np.c_[xx.ravel(), yy.ravel()]
    # Generate predicted labels (cluster numbers) for each point
    Z = clf.predict(samplePoints)

    plt.figure(figsize=(8, 6))
    Z = Z.reshape(xx.shape)  # Reshape results to match xx dimension
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)  # Draw the contour
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; it was
    # only an alias for the builtin float, so the output is unchanged.
    plt.scatter(X[:, 0], X[:, 1], c=y.astype(float))  # Draw the points
    plt.show()
print(svc.predict(scaling.transform([[200000, 40]]))) print(svc.predict(scaling.transform([[50000, 65]]))) ``` ## Activity "Linear" is one of many kernels scikit-learn supports on SVC. Look up the documentation for scikit-learn online to find out what the other possible kernel options are. Do any of them work well for this data set?
github_jupyter
``` from google.colab import drive drive.mount('/content/drive') import tensorflow as tf # run on training variation of powerlaw: # path for fine tuning: !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/Window method Supervised autoencoder with fine tuning/script.py" # path for stage 1: !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/Window_method_Supervised_autoencoder/script.py" # path for stage 2: !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/stage 2 - window method/script.py" 2+3 # # run trivial test results:---------------------------------- # trivial 0 : !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/trivial tests/trivial 0/script.py" # trivial 1: !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/trivial tests/trivial 1/script.py" # train on just observed: !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/trivial tests/train on only observed entries/script.py" # run codes for train bombing network: # stage 0: !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/codes for Rasika/stage 1/script.py" # stage 1: # stage 2: !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/trivial tests/trivial 1/script.py" -------------- Calculating error only for unobserved entries-------------------- 72.93460370607394 2.691420013784058 1.194878183344078 1.194878183344078 Fraction-------------------------------- 40 -------------- Calculating error only for unobserved entries-------------------- 72.95451892710567 2.6908033993328098 1.1939517644297712 1.1939517644297712 Fraction-------------------------------- 20 inside main_code -------------- Calculating error only for unobserved entries-------------------- 72.92523846562295 2.6921294640853106 1.1942552286744916 1.1942552286744916 Fraction-------------------------------- 99 inside main_code -------------- Calculating error only for unobserved 
entries-------------------- 72.92569927908839 2.692057610346308 1.1943441352077104 1.1943441352077104 Fraction-------------------------------- 90 inside main_code -------------- Calculating error only for unobserved entries-------------------- 72.92596729008994 2.69190896794981 1.194432209824622 1.194432209824622 Fraction-------------------------------- 80 inside main_code -------------- Calculating error only for unobserved entries-------------------- 72.93001407568013 2.6919051981735547 1.194144671184917 1.194144671184917 Fraction-------------------------------- 60 inside main_code # run protein network # try collaboration network cost3 = [158.96724, 98.78402, 74.9231, 64.00145, 58.63235, 55.861347, 54.366524, 53.519352, 53.006138, 52.66433, 52.408646, 52.192135, 51.986546, 51.774937, 51.549442, 51.309853, 51.061428, 50.811607, 50.565613, 50.329323, 58.723026, 58.361225, 58.161457, 58.010284, 57.86779, 57.714756, 57.539577, 57.335247, 57.09924, 56.833363, 56.542717, 56.234505, 55.917282, 55.598743, 55.28897, 54.9952, 54.72235, 54.47319, 54.24661, 54.041183, 61.436134, 60.774326, 60.399055, 60.09885, 59.79915, 59.45637, 59.046936, 58.57618, 58.07581, 57.58582, 57.134533, 56.730938, 56.37296, 56.049274, 55.762352, 55.497215, 55.25945, 55.037453, 54.838146, 54.65893] cost2 = [158.96724, 98.78402, 74.9231, 64.00145, 58.63235, 55.861347, 54.366524, 53.519352, 53.006138, 52.66433, 52.408646, 52.192135, 51.986546, 51.774937, 51.549442, 51.309853, 51.061428, 50.811607, 50.565613, 50.329323, 58.723026, 58.361225, 58.161457, 58.010284, 57.86779, 57.714756, 57.539577, 57.335247, 57.09924, 56.833363, 56.542717, 56.234505, 55.917282, 55.598743, 55.28897, 54.9952, 54.72235, 54.47319, 54.24661, 54.041183] cost1 = [158.96724, 98.78402, 74.9231, 64.00145, 58.63235, 55.861347, 54.366524, 53.519352, 53.006138, 52.66433, 52.408646, 52.192135, 51.986546, 51.774937, 51.549442, 51.309853, 51.061428, 50.811607, 50.565613, 50.329323] import matplotlib.pyplot as plt plt.plot(cost1, label 
= 'nw1') # plt.plot(cost2, label = 'nw2') # plt.plot(cost3, label = 'nw3') plt.xlabel('number of iterations') plt.ylabel('cost') plt.title('Cost value vs Iterations for various training sessions') plt.legend() plt.show() # run for facebook: !python3 "/content/drive/MyDrive/PhD work/Projects/parameter estimation/codes for Rasika/stage 1/script.py" ```
github_jupyter
``` import pandas as pd from pandasql import sqldf mysql = lambda q: sqldf(q, globals()) ``` # Group an ID by consecutive dates Calculate the number of consecutive days for a given ID. If there is a gap of days for an ID, we should capture both streaks as different rows ``` df1 = pd.DataFrame({'ID': [1, 1, 1, 1, 2, 2, 2, 2], 'Date': ['2017-01-07', '2017-01-08', '2017-01-09', '2017-01-23', '2017-01-05', '2017-01-06', '2017-01-10', '2017-01-11'] }) df1['Date'] = pd.to_datetime(df1['Date']) df1 ``` #### PYTHON - Method 1: using diff for datetime datatype ``` # In order for SHIFT to work properly, we would probably need to sort the dataframe # 1. Is there more than 1 day difference with the previous day? (use the not equal method ne(1)) df1['is_there_more_than_one_day_difference'] = df1.groupby('ID')['Date'].diff().dt.days.ne(1) # 2. Group the booleans by using cumsum() df1['streak_id'] = df1['is_there_more_than_one_day_difference'].cumsum() # Calculate the size of each grouped_streaks by ID df1['streak_size_days'] = df1.groupby(['ID', 'streak_id'])['streak_id'].transform('size') # With this we could extract, for each ID, what is the longest streak df1['longest_streak_rank'] = df1.groupby('ID')['streak_size_days'].rank(method='dense', ascending=False) df1 df1[['ID', 'streak_size_days', 'longest_streak_rank']].drop_duplicates().sort_values(['ID','longest_streak_rank']) ``` #### SQL ``` df1 = pd.DataFrame({'ID': [1, 1, 1, 1, 2, 2, 2, 2], 'Date': ['2017-01-07', '2017-01-08', '2017-01-09', '2017-01-23', '2017-01-05', '2017-01-06', '2017-01-10', '2017-01-11'] }) df1['Date'] = pd.to_datetime(df1['Date']) # In order for LAG to work properly, we would probably need to sort the dataframe query = "WITH previous_date_df AS (" \ "SELECT ID, " \ " Date AS first_date, " \ " COALESCE(LAG(Date) OVER (PARTITION BY ID ORDER BY Date), Date) AS previous_date " \ "FROM df1), " \ "date_difference_is_not_one_df AS (" \ "SELECT *, " \ " CASE WHEN (julianday(first_date) - 
(julianday(previous_date))) != 1 THEN True ELSE False END AS is_there_more_than_one_day_difference " \ "FROM previous_date_df), " \ "grouped_streaks_df AS (" \ "SELECT *, " \ " SUM(is_there_more_than_one_day_difference) OVER (ORDER BY ID, first_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as streak_id " \ "FROM date_difference_is_not_one_df) " \ "SELECT ID, streak_id, COUNT(*) AS streak_size_days FROM grouped_streaks_df GROUP BY ID, streak_id " mysql(query) ``` # Groupby an ID by consecutive events For example, wins and losses ``` df2 = pd.DataFrame({'Group':['A','A', 'A','A','A','A','B','B','B','B','B','B','B'], 'Score':['win', 'loss', 'loss', 'loss', 'win', 'win', 'win', 'win', 'win', 'loss', 'win', 'loss', 'loss']}) df2 ``` #### PYTHON - Overall win streak ``` # 1. Extract previous score by using the shift() method df2['previous_score'] = df2['Score'].shift(periods=1) # 2. Compare if they are not equal df2['is_score_not_equal_to_previous'] = df2['Score'] != df2['previous_score'] # 3. Calculate the grouped scores streaks by using cumsum() and the booleans from is_score_equal_to_previous df2['streak_id'] = df2['is_score_not_equal_to_previous'].cumsum() # 4. 
Calculate the streaks df2['cumulative_streaks'] = df2.groupby('streak_id')['Score'].cumcount()+1 df2 ``` #### SQL - Overall win streak ``` df2 = pd.DataFrame({'ID': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 'Group_':['A','A', 'A','A','A','A','B','B','B','B','B','B','B'], 'Score':['win', 'loss', 'loss', 'loss', 'win', 'win', 'win', 'win', 'win', 'loss', 'win', 'loss', 'loss']}) # In order for LAG to work properly, we would probably need to sort the dataframe query = "WITH previous_score_df AS (" \ "SELECT ID, " \ " Score AS first_score, " \ " COALESCE(LAG(Score) OVER (ORDER BY ID, Score), Score) AS previous_score " \ "FROM df2), " \ "are_scores_equal_df AS (" \ "SELECT *, " \ " CASE WHEN first_score != previous_score THEN True ELSE False END AS is_previous_score_equal " \ "FROM previous_score_df), " \ "grouped_streaks_df AS (" \ "SELECT *, " \ " SUM(is_previous_score_equal) OVER (ORDER BY ID ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as streak_id " \ "FROM are_scores_equal_df) " \ "SELECT first_score, streak_id, COUNT(*) AS streak_size_days " \ "FROM grouped_streaks_df " \ "GROUP BY first_score, streak_id " \ "ORDER BY streak_id" mysql(query) ``` #### PYTHON - Win streak by group ``` df2 = pd.DataFrame({'Group':['A','A', 'A','A','A','A','B','B','B','B','B','B','B'], 'Score':['win', 'loss', 'loss', 'loss', 'win', 'win', 'win', 'win', 'win', 'loss', 'win', 'loss', 'loss']}) # 1. Extract previous score by using the shift() method df2['previous_score'] = df2.groupby(['Group'])['Score'].shift(periods=1) # 2. Compare if they are equal df2['is_score_equal_to_previous'] = df2['Score'] != df2['previous_score'] # 3. Calculate the grouped scores streaks by using cumsum() and the booleans from is_score_equal_to_previous df2['equal_grouped_scores'] = df2['is_score_equal_to_previous'].cumsum() # 4. 
Calculate the streaks df2['streaks'] = df2.groupby('equal_grouped_scores')['Score'].cumcount()+1 df2 ``` #### SQL - Win streak by group ``` df2 = pd.DataFrame({'ID': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], 'Group_':['A','A', 'A','A','A','A','B','B','B','B','B','B','B'], 'Score':['win', 'loss', 'loss', 'loss', 'win', 'win', 'win', 'win', 'win', 'loss', 'win', 'loss', 'loss']}) # In order for LAG to work properly, we would probably need to sort the dataframe query = "WITH previous_score_df AS (" \ "SELECT ID, " \ " Group_, " \ " Score AS first_score, " \ " COALESCE(LAG(Score) OVER (PARTITION BY Group_ ORDER BY ID, Score), Score) AS previous_score " \ "FROM df2), " \ "are_scores_equal_df AS (" \ "SELECT *, " \ " CASE WHEN first_score != previous_score THEN True ELSE False END AS is_previous_score_equal " \ "FROM previous_score_df), " \ "grouped_streaks_df AS (" \ "SELECT *, " \ " SUM(is_previous_score_equal) OVER (ORDER BY ID, Group_ ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as streak_id " \ "FROM are_scores_equal_df) " \ "SELECT Group_, first_score, streak_id, COUNT(*) AS streak_size_days " \ "FROM grouped_streaks_df " \ "GROUP BY Group_, first_score, streak_id " \ "ORDER BY streak_id" mysql(query) ```
github_jupyter
<a href="https://colab.research.google.com/github/MHadavand/Lessons/blob/master/ML/NearestNeighbour/Nearest_neighbor_spine.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Nearest neighbor for spine injury classification In this homework notebook we use **nearest neighbor classification** to classify back injuries for patients in a hospital, based on measurements of the shape and orientation of their pelvis and spine. The data set contains information from **310** patients. For each patient, there are: six measurements (the x i.e. features) and a label (the y). The label has **3** possible values, `’NO’` (normal), `’DH’` (herniated disk), or `’SL’` (spondilolysthesis). Credits: Edx Machine Learning Fundamentals # 1. Setup notebook ``` import numpy as np ``` Load the data set and divide the data into a training set of 248 patients and a separate test set of 62 patients. The following arrays are created: * **`trainx`** : The training data's features, one point per row. * **`trainy`** : The training data's labels. * **`testx`** : The test data's features, one point per row. * **`testy`** : The test data's labels. We will use the training set (`trainx` and `trainy`), with nearest neighbor classification, to predict labels for the test data (`testx`). We will then compare these predictions with the correct labels, `testy`. Notice that we code the three labels as `0. = ’NO’, 1. = ’DH’, 2. = ’SL’`. 
def get_l_dist(x, y, p=2):
    """Return the L-p (Minkowski) distance between two arrays.

    p=2 gives the Euclidean distance, p=1 the Manhattan distance.
    """
    return (np.sum(np.abs(x - y) ** p)) ** (1 / p)


def _nn_label(trainx, trainy, test_point, p):
    """Label of the training row closest to test_point under the L-p metric.

    The 1/p root is skipped: it is monotonic on the non-negative distances,
    so it cannot change which row attains the minimum.  Distances to all
    training rows are computed in one vectorized pass instead of a Python
    loop over rows.
    """
    distances = np.sum(np.abs(trainx - test_point) ** p, axis=1)
    return trainy[np.argmin(distances)]


def _nn_predict(trainx, trainy, testx, p):
    """Predict labels for a single test point (1-D) or a batch (2-D)."""
    if len(testx.shape) > 1:
        return np.array([_nn_label(trainx, trainy, row, p) for row in testx])
    return _nn_label(trainx, trainy, testx, p)


def NN_Euclidean(trainx, trainy, testx):
    """1-nearest-neighbor classification under the L2 (Euclidean) metric.

    Returns a single label for a 1-D test point, or an array of labels
    (one per row) for a 2-D batch of test points.
    """
    return _nn_predict(trainx, trainy, testx, p=2)


def NN_Manhattan(trainx, trainy, testx):
    """1-nearest-neighbor classification under the L1 (Manhattan) metric.

    Same input/output contract as NN_Euclidean; only the metric differs.
    """
    return _nn_predict(trainx, trainy, testx, p=1)
def confusion_matrix(testy, testy_fit):
    """Build the 3x3 confusion matrix for the spine labels.

    Rows index the true label, columns the predicted label, using the
    coding 0 = 'NO', 1 = 'DH', 2 = 'SL'.

    Args:
        testy: true labels, one per test point.
        testy_fit: predicted labels, same length as testy.

    Returns:
        A (3, 3) float array where entry [t, p] counts test points whose
        true label was t and predicted label was p.

    Raises:
        ValueError: if the two label sequences differ in length.
    """
    dict_label = {0: 'NO', 1: 'DH', 2: 'SL'}
    n_label = len(dict_label)
    if len(testy) != len(testy_fit):
        raise ValueError('The two data sets must have the same length')

    matrix = np.zeros((n_label, n_label))
    for true_label, predicted_label in zip(testy, testy_fit):
        matrix[int(true_label), int(predicted_label)] += 1
    return matrix
github_jupyter
# Getting Started In this tutorial, you will know how to - use the models in **ConvLab-2** to build a dialog agent. - build a simulator to chat with the agent and evaluate the performance. - try different module combinations. - use analysis tool to diagnose your system. Let's get started! ## Environment setup Run the command below to install ConvLab-2. Then restart the notebook and skip this commend. ``` # first install ConvLab-2 and restart the notebook ! git clone https://github.com/thu-coai/ConvLab-2.git && cd ConvLab-2 && pip install -e . # installing en_core_web_sm for spacy to resolve error in BERTNLU !python -m spacy download en_core_web_sm ``` ## build an agent We use the models adapted on [Multiwoz](https://www.aclweb.org/anthology/D18-1547) dataset to build our agent. This pipeline agent consists of NLU, DST, Policy and NLG modules. First, import some models: ``` # common import: convlab2.$module.$model.$dataset from convlab2.nlu.jointBERT.multiwoz import BERTNLU from convlab2.nlu.milu.multiwoz import MILU from convlab2.dst.rule.multiwoz import RuleDST from convlab2.policy.rule.multiwoz import RulePolicy from convlab2.nlg.template.multiwoz import TemplateNLG from convlab2.dialog_agent import PipelineAgent, BiSession from convlab2.evaluator.multiwoz_eval import MultiWozEvaluator from pprint import pprint import random import numpy as np import torch ``` Then, create the models and build an agent: ``` # go to README.md of each model for more information # BERT nlu sys_nlu = BERTNLU() # simple rule DST sys_dst = RuleDST() # rule policy sys_policy = RulePolicy() # template NLG sys_nlg = TemplateNLG(is_user=False) # assemble sys_agent = PipelineAgent(sys_nlu, sys_dst, sys_policy, sys_nlg, name='sys') ``` That's all! 
Let's chat with the agent using its response function: ``` sys_agent.response("I want to find a moderate hotel") sys_agent.response("Which type of hotel is it ?") sys_agent.response("OK , where is its address ?") sys_agent.response("Thank you !") sys_agent.response("Try to find me a Chinese restaurant in south area .") sys_agent.response("Which kind of food it provides ?") sys_agent.response("Book a table for 5 , this Sunday .") ``` ## Build a simulator to chat with the agent and evaluate In many one-to-one task-oriented dialog system, a simulator is essential to train an RL agent. In our framework, we doesn't distinguish user or system. All speakers are **agents**. The simulator is also an agent, with specific policy inside for accomplishing the user goal. We use `Agenda` policy for the simulator, this policy requires dialog act input, which means we should set DST argument of `PipelineAgent` to None. Then the `PipelineAgent` will pass dialog act to policy directly. Refer to `PipelineAgent` doc for more details. ``` # MILU user_nlu = MILU() # not use dst user_dst = None # rule policy user_policy = RulePolicy(character='usr') # template NLG user_nlg = TemplateNLG(is_user=True) # assemble user_agent = PipelineAgent(user_nlu, user_dst, user_policy, user_nlg, name='user') ``` Now we have a simulator and an agent. we will use an existed simple one-to-one conversation controller BiSession, you can also define your own Session class for your special need. We add `MultiWozEvaluator` to evaluate the performance. It uses the parsed dialog act input and policy output dialog act to calculate **inform f1**, **book rate**, and whether the task is **success**. ``` evaluator = MultiWozEvaluator() sess = BiSession(sys_agent=sys_agent, user_agent=user_agent, kb_query=None, evaluator=evaluator) ``` Let's make this two agents chat! The key is `next_turn` method of `BiSession` class. 
def set_seed(r_seed):
    """Seed Python's, NumPy's and PyTorch's RNGs for reproducible dialogs.

    Args:
        r_seed: integer seed applied to all three generators.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(r_seed)
import UserPolicyVHUS from convlab2.policy.mdrg.multiwoz import MDRGWordPolicy from convlab2.policy.hdsa.multiwoz import HDSA from convlab2.policy.larl.multiwoz import LaRL # available NLG models from convlab2.nlg.template.multiwoz import TemplateNLG from convlab2.nlg.sclstm.multiwoz import SCLSTM # available E2E models from convlab2.e2e.sequicity.multiwoz import Sequicity from convlab2.e2e.damd.multiwoz import Damd ``` NLU+RuleDST or Word-DST: ``` # NLU+RuleDST: sys_nlu = BERTNLU() # sys_nlu = MILU() # sys_nlu = SVMNLU() sys_dst = RuleDST() # or Word-DST: # sys_nlu = None # sys_dst = SUMBT() # sys_dst = TRADE() ``` Policy+NLG or Word-Policy: ``` # Policy+NLG: sys_policy = RulePolicy() # sys_policy = PPOPolicy() # sys_policy = PGPolicy() # sys_policy = MLEPolicy() # sys_policy = GDPLPolicy() sys_nlg = TemplateNLG(is_user=False) # sys_nlg = SCLSTM(is_user=False) # or Word-Policy: # sys_policy = LaRL() # sys_policy = HDSA() # sys_policy = MDRGWordPolicy() # sys_nlg = None ``` Assemble the Pipeline system agent: ``` sys_agent = PipelineAgent(sys_nlu, sys_dst, sys_policy, sys_nlg, 'sys') ``` Or Directly use an end-to-end model: ``` # sys_agent = Sequicity() # sys_agent = Damd() ``` Config an user agent similarly: ``` user_nlu = BERTNLU() # user_nlu = MILU() # user_nlu = SVMNLU() user_dst = None user_policy = RulePolicy(character='usr') # user_policy = UserPolicyVHUS(load_from_zip=True) user_nlg = TemplateNLG(is_user=True) # user_nlg = SCLSTM(is_user=True) user_agent = PipelineAgent(user_nlu, user_dst, user_policy, user_nlg, name='user') ``` ## Use analysis tool to diagnose the system We provide an analysis tool presents rich statistics and summarizes common mistakes from simulated dialogues, which facilitates error analysis and system improvement. The analyzer will generate an HTML report which contains rich statistics of simulated dialogues. For more information, please refer to `convlab2/util/analysis_tool`. 
``` from convlab2.util.analysis_tool.analyzer import Analyzer # if sys_nlu!=None, set use_nlu=True to collect more information analyzer = Analyzer(user_agent=user_agent, dataset='multiwoz') set_seed(20200131) analyzer.comprehensive_analyze(sys_agent=sys_agent, model_name='sys_agent', total_dialog=100) ``` To compare several models: ``` set_seed(20200131) analyzer.compare_models(agent_list=[sys_agent1, sys_agent2], model_name=['sys_agent1', 'sys_agent2'], total_dialog=100) ```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from matplotlib import rcParams rcParams['figure.figsize'] = 11.7,8.27 # figure size in inches pd.options.mode.chained_assignment = None # default='warn' pd.set_option('display.max_colwidth', None) pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 30) from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" %config Completer.use_jedi = False from sklearn.impute import KNNImputer from sklearn.preprocessing import LabelEncoder ``` # Note * Aggregate data by every 180 days ``` df_creatinine = pd.read_csv('CSV/T_creatinine.csv'); df_creatinine.rename(columns = {'value': 'creatinine'}, inplace=True) df_dbp = pd.read_csv('CSV/T_DBP.csv'); df_dbp.rename(columns = {'value': 'dbp'}, inplace=True) df_glucose = pd.read_csv('CSV/T_glucose.csv'); df_glucose.rename(columns = {'value': 'glucose'}, inplace=True) df_hgb = pd.read_csv('CSV/T_HGB.csv'); df_hgb.rename(columns = {'value': 'hgb'}, inplace=True) df_ldl = pd.read_csv('CSV/T_ldl.csv'); df_ldl.rename(columns = {'value': 'ldl'}, inplace=True) df_meds = pd.read_csv('CSV/T_meds.csv') df_sbp = pd.read_csv('CSV/T_sbp.csv'); df_sbp.rename(columns = {'value': 'sbp'}, inplace=True) ``` # Compute maximum time point (day) for each subject ``` df_creatinine_d = df_creatinine.groupby(['id'])['time'].max() df_dbp_d = df_dbp.groupby(['id'])['time'].max() df_glucose_d = df_glucose.groupby(['id'])['time'].max() df_hgb_d = df_hgb.groupby(['id'])['time'].max() df_ldl_d = df_ldl.groupby(['id'])['time'].max() df_sbp_d = df_sbp.groupby(['id'])['time'].max() df_meds_d = df_meds.groupby(['id'])['end_day'].max() df_meds_d = df_meds_d.rename('time') df_d_merge = pd.DataFrame(pd.concat([df_creatinine_d, df_dbp_d, df_glucose_d, df_hgb_d, df_ldl_d, df_sbp_d, df_meds_d])).reset_index() df_d_merge = df_d_merge.groupby(['id']).max().reset_index() df_d_merge = 
df_d_merge.sort_values('time') print('Minimum = ' + str(df_d_merge['time'].min()) + ', Maximum = ' + str(df_d_merge['time'].max())) print('Mean = ' + str(df_d_merge['time'].mean()) + ', Median = ' + str(df_d_merge['time'].median())) plt.plot(list(range(df_d_merge.shape[0])), df_d_merge['time'], '-p', markersize=1) plt.xlabel("Subject") plt.ylabel("Days") plt.title("Days of record") df_d_merge.to_csv('CSV/days_of_record.csv', index=False) ``` # Process med data ``` # Ignore medication ended before day 0 df_meds = df_meds[df_meds['end_day'] >= 0] df_meds.head(10) period_bin = 180 def generate_bin(n_start, n_end): global period_bin start_count = period_bin n = 1 token = 0 # keep trying until a code is assigned while token == 0: if n_end <= start_count: # start and end within period if n_start <= (start_count + 1): return int(start_count / period_bin) token = 1 else: # the "end of period" is within start and end (e.g.: 90 < 180 < 280) if n_start <= start_count: # set a code for processing later return 99 token = 1 # start and end are both outside of the period else: # try the next period n += 1 start_count *= n df_meds['days_bin'] = df_meds.apply(lambda x: generate_bin(x['start_day'], x['end_day']), axis=1) # Fix the in-between MID = df_meds['days_bin'] == 99 # Replicate the error part to be fixed and concat with the main one df_temp = df_meds[MID] # Bin months based on end_day df_temp['days_bin'] = (df_temp['end_day'] / period_bin).astype(int) + 1 # Value to be used to replace start (+1) or end v = (np.floor(df_meds.loc[MID, 'end_day'] / period_bin) * period_bin).astype(int) df_meds.loc[MID, 'end_day'] = v # Bin months based on end_day df_meds['days_bin'] = (df_meds['end_day'] / period_bin).astype(int) + 1 df_temp['start_day'] = (v + 1).astype(int) df_meds = pd.concat([df_meds, df_temp], axis=0) df_meds['days_bin'].value_counts().sort_index() df_meds['end_day'].max() # Get the total dosage during the period df_meds['total_day'] = df_meds['end_day'] - 
df_meds['start_day'] + 1 df_meds['total_dosage'] = df_meds['total_day'] * df_meds['daily_dosage'] # Bin the data by days_bin df_med_binned = df_meds.groupby(['id', 'days_bin', 'drug'])['total_dosage'].sum().reset_index() df_med_binned.head() # Convert df to wide format, with each column = dosage of one med # If drug not taken, assumed it's 0 df_med_wide = df_med_binned.pivot(index=['id', 'days_bin'],columns='drug',values='total_dosage').reset_index().fillna(0) df_med_wide.head() ``` # Merge the raw measurements ``` # Check how many is between day 699 and day 720 df_hgb[(df_hgb['time']> 699) & (df_hgb['time'] <= 720)].shape[0] # Sort columns to id, time, value first # First values are blood pressure, and systolic comes before diastolic df_sbp = df_sbp[['id', 'time', 'sbp']] df_merged = df_sbp.merge(df_dbp, on = ['id','time'], how='outer') df_merged = df_merged.merge(df_creatinine, on = ['id','time'], how='outer') df_merged = df_merged.merge(df_glucose, on = ['id','time'], how='outer') df_merged = df_merged.merge(df_ldl, on = ['id','time'], how='outer') df_merged = df_merged.merge(df_hgb, on = ['id','time'], how='outer') df_merged = df_merged.sort_values(['id','time']) df_merged.head() # bin time df_merged['days_bin'] = (df_merged['time'] / period_bin).astype(int) + 1 df_merged = df_merged.drop('time', axis=1) df_merged['days_bin'].value_counts().sort_index() # Aggregate data by days_bin and get median df_merged = df_merged.groupby(['id', 'days_bin']).median().reset_index() df_merged.head() # Merge with med df_merged = df_merged.merge(df_med_wide, on = ['id','days_bin'], how='outer') df_merged.head() # Save output for modelling df_merged.to_csv('CSV/df_daybin.csv', index=False) # Only first 4 bins (720 days) df_merged_4 = df_merged[df_merged['days_bin'] <= 4] # Change NA to 0 for drugs df_merged_4.iloc[:, 8:29] = df_merged_4.iloc[:, 8:29].fillna(0) # Use KNNImputer to fill continuous missing values imputer = KNNImputer(n_neighbors=3) for day in range(1,5): DID = 
df_merged_4['days_bin'] == day df_day = df_merged_4[DID] # Remove id from imputation df_day.iloc[:,2:8] = pd.DataFrame(imputer.fit_transform(df_day.iloc[:,2:8]), index = df_day.index, columns = df_day.columns[2:8]) df_merged_4[DID] = df_day # Merge with demographic df_demo = pd.read_csv('CSV/T_demo.csv') # Change the unknown in df_demo race to the mode (White) df_demo.loc[df_demo['race'] == 'Unknown','race'] = 'White' df_merged_4 = df_merged_4.merge(df_demo, on='id') # Merge with output df_stage = pd.read_csv('CSV/T_stage.csv') # Change state to 0, 1 df_stage['Stage_Progress'] = np.where(df_stage['Stage_Progress'] == True, 1, 0) df_merged_4 = df_merged_4.merge(df_stage, on='id') # Save output for modelling df_merged_4.to_csv('CSV/df_daybin_4.csv', index=False) df_merged_4.head() ``` # Aggregated data ``` df_agg = df_merged_4.copy() # Take out demographic and outcome df_agg.drop( ['race', 'gender', 'age', 'Stage_Progress'], axis=1, inplace=True) df_agg_mean = df_agg.groupby('id').mean().reset_index() df_agg_mean.head() # Mean sbp, dbp, creatinine, glucose, ldl, hgb df_agg_mean = df_agg.groupby('id').mean().reset_index() df_agg_mean = df_agg_mean.iloc[:, np.r_[0, 2:8]] df_agg_mean.head() df_agg_mean.shape # Sum drugs df_agg_sum = df_agg.groupby('id').sum().reset_index() df_agg_sum = df_agg_sum.iloc[:, 8:] df_agg_sum.head() df_agg_sum.shape df_agg_fixed = pd.concat([df_agg_mean, df_agg_sum], axis=1) df_agg_fixed.shape # Put back demo df_agg_fixed = df_agg_fixed.merge(df_demo, on = 'id') # Put back outcome df_agg_fixed = df_agg_fixed.merge(df_stage, on = 'id') df_agg_fixed.head() df_agg_fixed.shape df_agg_fixed.to_csv('CSV/df_agg.csv', index=False) ``` # Temporal data * Only use first 2 years of data (most measurements stop at day 699) ``` df_temporal = df_merged_4.copy() df_temporal.head() # Take out demographic and outcome df_temporal.drop( ['race', 'gender', 'age', 'Stage_Progress'], axis=1, inplace=True) # Convert to wide format df_temporal = 
df_temporal.set_index(['id','days_bin']).unstack() df_temporal.columns = df_temporal.columns.map(lambda x: '{}_{}'.format(x[0], x[1])) # Some subjects don't have data in a time_bin, KNNImpute again df_temporal = pd.DataFrame(imputer.fit_transform(df_temporal), index = df_temporal.index, columns = df_temporal.columns) df_temporal = df_temporal.reset_index() # Put back demo df_temporal = df_temporal.merge(df_demo, on = 'id') # Put back outcome df_temporal = df_temporal.merge(df_stage, on = 'id') df_temporal.head() # Save output for modelling df_temporal.to_csv('CSV/df_temporal.csv', index=False) ``` # Categorize measurements * Set continuous readings to 1=low, 2=normal, 3=high * Categorize medicine by tertile split total dosage to categorize severity (1=low, 2=normal, 3=high) * Categorize medicine by the treatment target, sum binary code ``` # Remove 0, get 75th percentile as threshold for high dosage # Set normal as 1, high as 2 def categorize_drug(df): NID = df > 0 if sum(NID) > 0: threshold = np.percentile(df[NID], 75) df[NID] = np.where(df[NID] > threshold, 2, 1) return df ``` ## Day_bin ``` df_merged_4_cat = df_merged_4.copy() df_merged_4_cat.head() names = ['1', '2', '3'] bins = [0, 90, 120, np.inf] df_merged_4_cat['sbp'] = pd.cut(df_merged_4['sbp'], bins, labels=names) bins = [0, 60, 80, np.inf] df_merged_4_cat['dbp'] = pd.cut(df_merged_4['dbp'], bins, labels=names) bins = [0, 3.9, 7.8, np.inf] df_merged_4_cat['glucose'] = pd.cut(df_merged_4['glucose'], bins, labels=names) bins = [0, 100, 129, np.inf] df_merged_4_cat['ldl'] = pd.cut(df_merged_4['ldl'], bins, labels=names) MID = df_merged_4['gender'] == 'Male' bins = [0, 0.74, 1.35, np.inf] df_merged_4_cat.loc[MID, 'creatinine'] = pd.cut(df_merged_4.loc[MID, 'creatinine'], bins, labels=names) bins = [0, 0.59, 1.04, np.inf] df_merged_4_cat.loc[~MID, 'creatinine'] = pd.cut(df_merged_4.loc[~MID, 'creatinine'], bins, labels=names) bins = [0, 14, 17.5, np.inf] df_merged_4_cat.loc[MID, 'hgb'] = 
pd.cut(df_merged_4.loc[MID, 'hgb'], bins, labels=names) bins = [0, 12.3, 15.3, np.inf] df_merged_4_cat.loc[~MID, 'hgb'] = pd.cut(df_merged_4.loc[~MID, 'hgb'], bins, labels=names) df_merged_4_cat.head() # Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2 # Need to compute separately for different days_bin for day in range(1, 5): DID = df_merged_4_cat['days_bin'] == day df_day = df_merged_4_cat[DID] df_merged_4_cat = df_merged_4_cat[~DID] df_day.iloc[:, 8:29] = df_day.iloc[:, 8:29].apply(lambda x: categorize_drug(x)).astype(int) df_merged_4_cat = pd.concat([df_merged_4_cat, df_day]) # Label encode race and gender le = LabelEncoder() df_merged_4_cat['race'] = le.fit_transform(df_merged_4_cat['race']) df_merged_4_cat['gender'] = le.fit_transform(df_merged_4_cat['gender']) # Group age to young-old (≤74 y.o.) as 1, middle-old (75 to 84 y.o.) as 2, and old-old (≥85 y.o.) as 3 df_merged_4_cat['age'] = pd.qcut(df_merged_4['age'], 3, labels=[1,2,3]) df_merged_4_cat['age'].value_counts() df_merged_4_cat.to_csv('CSV/df_merged_4_cat.csv', index=False) # Group drug by treatment (sum the binary code) df_merged_4_cat_drug = df_merged_4_cat.copy() glucose_col = ['canagliflozin', 'dapagliflozin', 'metformin'] df_merged_4_cat_drug['glucose_treatment'] = df_merged_4_cat_drug[glucose_col].sum(axis=1).astype(int) df_merged_4_cat_drug.drop(glucose_col, axis=1, inplace=True) bp_col = ['atenolol','bisoprolol','carvedilol','irbesartan','labetalol','losartan','metoprolol','nebivolol','olmesartan','propranolol','telmisartan','valsartan'] df_merged_4_cat_drug['bp_treatment'] = df_merged_4_cat_drug[bp_col].sum(axis=1).astype(int) df_merged_4_cat_drug.drop(bp_col, axis=1, inplace=True) cholesterol_col = ['atorvastatin','lovastatin','pitavastatin','pravastatin','rosuvastatin','simvastatin'] df_merged_4_cat_drug['cholesterol_treatment'] = df_merged_4_cat_drug[cholesterol_col].sum(axis=1).astype(int) df_merged_4_cat_drug.drop(cholesterol_col, axis=1, 
inplace=True) df_merged_4_cat_drug.head() df_merged_4_cat_drug.to_csv('CSV/df_merged_4_cat_drug.csv', index=False) ``` ## Aggregated ``` df_agg_cat = df_agg_fixed names = ['1', '2', '3'] bins = [0, 90, 120, np.inf] df_agg_cat['sbp'] = pd.cut(df_agg_fixed['sbp'], bins, labels=names) bins = [0, 60, 80, np.inf] df_agg_cat['dbp'] = pd.cut(df_agg_fixed['dbp'], bins, labels=names) bins = [0, 3.9, 7.8, np.inf] df_agg_cat['glucose'] = pd.cut(df_agg_fixed['glucose'], bins, labels=names) bins = [0, 100, 129, np.inf] df_agg_cat['ldl'] = pd.cut(df_agg_fixed['ldl'], bins, labels=names) MID = df_agg_fixed['gender'] == 'Male' bins = [0, 0.74, 1.35, np.inf] df_agg_cat.loc[MID, 'creatinine'] = pd.cut(df_agg_fixed.loc[MID, 'creatinine'], bins, labels=names) bins = [0, 0.59, 1.04, np.inf] df_agg_cat.loc[~MID, 'creatinine'] = pd.cut(df_agg_fixed.loc[~MID, 'creatinine'], bins, labels=names) bins = [0, 14, 17.5, np.inf] df_agg_cat.loc[MID, 'hgb'] = pd.cut(df_agg_fixed.loc[MID, 'hgb'], bins, labels=names) bins = [0, 12.3, 15.3, np.inf] df_agg_cat.loc[~MID, 'hgb'] = pd.cut(df_agg_fixed.loc[~MID, 'hgb'], bins, labels=names) df_agg_cat.head() # Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2 df_agg_cat.iloc[:,7:28] = df_agg_fixed.iloc[:,7:28].apply(lambda x: categorize_drug(x)).astype(int) # Label encode race and gender le = LabelEncoder() df_agg_cat['race'] = le.fit_transform(df_agg_cat['race']) df_agg_cat['gender'] = le.fit_transform(df_agg_cat['gender']) # Group age to young-old (≤74 y.o.) as 1, middle-old (75 to 84 y.o.) as 2, and old-old (≥85 y.o.) 
as 3 df_agg_cat['age'] = pd.qcut(df_agg_cat['age'], 3, labels=[1,2,3]) df_agg_cat['age'].value_counts() df_agg_cat.to_csv('CSV/df_agg_cat.csv', index=False) # Group drug by treatment (sum the binary code) df_agg_cat_drug = df_agg_cat.copy() glucose_col = ['canagliflozin', 'dapagliflozin', 'metformin'] df_agg_cat_drug['glucose_treatment'] = df_agg_cat_drug[glucose_col].sum(axis=1).astype(int) df_agg_cat_drug.drop(glucose_col, axis=1, inplace=True) bp_col = ['atenolol','bisoprolol','carvedilol','irbesartan','labetalol','losartan','metoprolol','nebivolol','olmesartan','propranolol','telmisartan','valsartan'] df_agg_cat_drug['bp_treatment'] = df_agg_cat_drug[bp_col].sum(axis=1).astype(int) df_agg_cat_drug.drop(bp_col, axis=1, inplace=True) cholesterol_col = ['atorvastatin','lovastatin','pitavastatin','pravastatin','rosuvastatin','simvastatin'] df_agg_cat_drug['cholesterol_treatment'] = df_agg_cat_drug[cholesterol_col].sum(axis=1).astype(int) df_agg_cat_drug.drop(cholesterol_col, axis=1, inplace=True) df_agg_cat_drug.head() df_agg_cat_drug.to_csv('CSV/df_agg_cat_drug.csv', index=False) ``` ## Temporal ``` df_temporal_cat = df_temporal.copy() names = ['1', '2', '3'] bins = [0, 90, 120, np.inf] for colname in ['sbp_1', 'sbp_2', 'sbp_3', 'sbp_4']: df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names) bins = [0, 60, 80, np.inf] for colname in ['dbp_1', 'dbp_2', 'dbp_3', 'dbp_4']: df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names) bins = [0, 3.9, 7.8, np.inf] for colname in ['glucose_1', 'glucose_2', 'glucose_3', 'glucose_4']: df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names) bins = [0, 100, 129, np.inf] for colname in ['ldl_1', 'ldl_2', 'ldl_3', 'ldl_4']: df_temporal_cat[colname] = pd.cut(df_temporal_cat[colname], bins, labels=names) MID = df_temporal_cat['gender'] == 'Male' bins = [0, 0.74, 1.35, np.inf] for colname in ['creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4']: 
df_temporal_cat.loc[MID, colname] = pd.cut(df_temporal_cat.loc[MID, colname], bins, labels=names) bins = [0, 0.59, 1.04, np.inf] for colname in ['creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4']: df_temporal_cat.loc[~MID, colname] = pd.cut(df_temporal_cat.loc[~MID, colname], bins, labels=names) bins = [0, 14, 17.5, np.inf] for colname in ['hgb_1', 'hgb_2', 'hgb_3', 'hgb_4']: df_temporal_cat.loc[MID, colname] = pd.cut(df_temporal_cat.loc[MID, colname], bins, labels=names) bins = [0, 12.3, 15.3, np.inf] for colname in ['hgb_1', 'hgb_2', 'hgb_3', 'hgb_4']: df_temporal_cat.loc[~MID, colname] = pd.cut(df_temporal_cat.loc[~MID, colname], bins, labels=names) df_temporal_cat.head() # Remove 0, get 75th percentile as threshold for high dosage, set normal as 1, high as 2 df_temporal_cat.iloc[:,25:109] = df_temporal_cat.iloc[:,25:109].apply(lambda x: categorize_drug(x)).astype(int) # Label encode race and gender le = LabelEncoder() df_temporal_cat['race'] = le.fit_transform(df_temporal_cat['race']) df_temporal_cat['gender'] = le.fit_transform(df_temporal_cat['gender']) # Group age to young-old (≤74 y.o.) as 1, middle-old (75 to 84 y.o.) as 2, and old-old (≥85 y.o.) 
as 3 df_temporal_cat['age'] = pd.qcut(df_temporal_cat['age'], 3, labels=[1,2,3]) df_temporal_cat['age'].value_counts() df_temporal_cat.to_csv('CSV/df_temporal_cat.csv', index=False) # Group drug by treatment (sum the binary code) df_temporal_cat_drug = df_temporal_cat.copy() for i in range(1,5): glucose_col = ['canagliflozin_' + str(i), 'dapagliflozin_' + str(i), 'metformin_' + str(i)] df_temporal_cat_drug['glucose_treatment_'+ str(i)] = df_temporal_cat_drug[glucose_col].sum(axis=1).astype(int) df_temporal_cat_drug.drop(glucose_col, axis=1, inplace=True) bp_col = ['atenolol_' + str(i),'bisoprolol_' + str(i),'carvedilol_' + str(i),'irbesartan_' + str(i),'labetalol_' + str(i),'losartan_' + str(i),'metoprolol_' + str(i),'nebivolol_' + str(i),'olmesartan_' + str(i),'propranolol_' + str(i),'telmisartan_' + str(i),'valsartan_' + str(i)] df_temporal_cat_drug['bp_treatment_'+ str(i)] = df_temporal_cat_drug[bp_col].sum(axis=1).astype(int) df_temporal_cat_drug.drop(bp_col, axis=1, inplace=True) cholesterol_col = ['atorvastatin_' + str(i),'lovastatin_' + str(i),'pitavastatin_' + str(i),'pravastatin_' + str(i),'rosuvastatin_' + str(i),'simvastatin_' + str(i)] df_temporal_cat_drug['cholesterol_treatment_'+ str(i)] = df_temporal_cat_drug[cholesterol_col].sum(axis=1).astype(int) df_temporal_cat_drug.drop(cholesterol_col, axis=1, inplace=True) df_temporal_cat_drug.head() df_temporal_cat_drug.to_csv('CSV/df_temporal_cat_drug.csv', index=False) ``` # Compute GFR * CKD-EPI equations ``` def computeGFR(df): gender = df['gender'] f_constant = 1 if gender == 'Male': k = 0.9 a = -0.411 else: k = 0.7 a = -0.329 f_constant = 1.018 race = df['race'] b_constant = 1 if race == 'Black': b_constant = 1.159 gfr = 141 * min(df['creatinine'] / k, 1) * (max(df['creatinine'] / k, 1)**(-1.209)) * (0.993**df['age']) * f_constant * b_constant return gfr ``` ## 180-day bin ``` col_gfr = ['id', 'days_bin', 'creatinine', 'race', 'gender', 'age', 'Stage_Progress'] df_merged_4_gfr = 
df_merged_4[col_gfr].copy() df_merged_4_gfr['gfr'] = df_merged_4_gfr.apply(lambda x: computeGFR(x), axis=1) df_merged_4_gfr.drop(['creatinine', 'race', 'gender', 'age'], axis=1, inplace=True) # Categorize GFR df_merged_4_gfr['gfr_cat'] = np.where(df_merged_4_gfr['gfr'] < 60, 1, 2) df_merged_4_gfr['gfr_cat'].value_counts() df_merged_4_gfr.to_csv('CSV/df_merged_4_gfr.csv', index=False) df_merged_4.head() df_merged_4_gfr.head() ``` ## Aggregated ``` df_agg_fixed = pd.read_csv('CSV/df_agg.csv') col_gfr = ['id', 'creatinine', 'race', 'gender', 'age', 'Stage_Progress'] df_agg_gfr = df_agg_fixed[col_gfr].copy() df_agg_gfr['gfr'] = df_agg_gfr.apply(lambda x: computeGFR(x), axis=1) df_agg_gfr.drop(['creatinine', 'race', 'gender', 'age'], axis=1, inplace=True) # Categorize GFR df_agg_gfr['gfr_cat'] = np.where(df_agg_gfr['gfr'] < 60, 1, 2) df_agg_gfr['gfr_cat'].value_counts() df_agg_gfr.to_csv('CSV/df_agg_gfr.csv', index=False) ``` ## Temporal ``` def computeGFR_temporal(df, i): gender = df['gender'] f_constant = 1 if gender == 'Male': k = 0.9 a = -0.411 else: k = 0.7 a = -0.329 f_constant = 1.018 race = df['race'] b_constant = 1 if race == 'Black': b_constant = 1.159 gfr = 141 * min(df['creatinine_' + str(i)] / k, 1) * (max(df['creatinine_' + str(i)] / k, 1)**(-1.209)) * (0.993**df['age']) * f_constant * b_constant return gfr col_gfr = ['id', 'creatinine_1', 'creatinine_2', 'creatinine_3', 'creatinine_4', 'race', 'gender', 'age', 'Stage_Progress'] df_temporal_gfr = df_temporal[col_gfr].copy() for i in range(1, 5): df_temporal_gfr['gfr_' + str(i)] = df_temporal_gfr.apply(lambda x: computeGFR_temporal(x, i), axis=1) df_temporal_gfr.drop('creatinine_' + str(i), axis=1, inplace=True) df_temporal_gfr.drop(['race', 'gender', 'age'], axis=1, inplace=True) # Categorize GFR for i in range(1, 5): df_temporal_gfr['gfr_cat_' + str(i)] = np.where(df_temporal_gfr['gfr_' + str(i)] < 60, 1, 2) df_temporal_gfr.to_csv('CSV/df_temporal_gfr.csv', index=False) ```
github_jupyter
Google Maps, for example, the de facto mapping and navigation platform (as of 2020/2021), has a monopoly on information about the location of businesses across the globe. Getting access to this information is very costly (a problem of monopoly rule), therefore scraping the data is one of the only options for students.
- [BeautifulSoup](https://en.wikipedia.org/wiki/Beautiful_Soup_(HTML_parser)): a package to parse HTML elements - [Shapely](https://shapely.readthedocs.io/en/stable/manual.html): a library for manipulating and analysing geometry
``` searches = ['Restaurants', 'bakery', 'coffee', 'gym', 'yoga', 'clothing', 'electronics', 'beauty', 'hardware', 'galleries', 'museums', 'Hotels', 'deli', 'liquor', 'bar', 'Groceries', 'Takeout', 'Banks', 'Pharmacies'] searches = ['Restaurants'] ``` ### Sending a GET request The next step is to send a request to Google's servers about the information we would like returned. This type of request is called a [GET request](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods) in HTTP programming. Below we'll send a single test request to the servers to see what information we get back. We insert a single lat/lng pair into the **base_url** variable. ``` url = base_url.format(searches[0], xs[0], ys[0]) ``` <br> The next portion of code starts a selenium web driver (the vehicle that powers automated web browsing) and specifies a few browsing options. Specifically we state the web browser should run *headless*, meaning it should not open up a new browser window, and install a Chrome browser. ``` delay = 10 chrome_options = Options() chrome_options.add_argument("--headless") driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options) ``` <br> ### Intentionally slowing down Although web scraping is not [illegal](https://twitter.com/OrinKerr/status/1171116153948626944), researchers should be careful when scraping private websites. Not due to legality issues, but due to being blocked by the company you're trying to scrape from. Web scraping tools like Selenium browse pages in light speed, way faster than any human, therefore it is easy for company's servers to spot when individuals are trying to scrape their sites. This next section will introduce a few options to intentionally slow down our browsing. Specifically we tell the driver object to wait until certain conditions have been met before moving forward. Once conditions have been met we download the HTML information returned by Google. 
Below we ask Beautiful Soup to find an H3 tag element and then get the text from the subsequent span element.
We also add Python's built-in **try** and **except** objects to the HTML parsing logic so that the loop continues even when data is not returned as expected (a common thing in web scraping).
Now that we have the scraped results from Google Maps, we merge all the CSVs into a single DataFrame for easier analysis.
<br> <br> <br> ### (optional) Code to Geocode Results The below loop uses the HERE API to geocode the above results to get latitude and longitude for each result ``` app_id = os.environ['here_app_id'] code = os.environ['here_app_code'] lats = [] lngs = [] for idx, address in enumerate(frame['complete']): try: search = frame['complete'].iloc[idx] url = "https://geocoder.api.here.com/6.2/geocode.json?app_id=%s&app_code=%s&searchtext=%s&country=USA" url = url % (app_id, code, search) r = requests.get(url) features = r.json() view = features['Response']['View'][0] res = view['Result'][0] loc = res['Location'] pt = loc['DisplayPosition'] lat, lng = pt['Latitude'], pt['Longitude'] lats.append(lat) lngs.append(lng) except: lats.append(None) lngs.append(None) ``` <br> Finally we add the points to the DataFrame to create a GeoDataFrame and export it as a GeoJSON file. ``` frame['lat'] = lats frame['lng'] = lngs frame['coords'] = frame.apply(lambda x: Point([x['lng'], x['lat']]), axis=1) gdf_frame = gpd.GeoDataFrame(frame, geometry='coords', crs="EPSG:4326") print(gdf_frame.dropna(subset=['lat']).shape) gdf_frame.head() gdf_frame.dropna(subset=['lat']).to_file("nyc_poi.geojson", driver='GeoJSON') ```
github_jupyter
# Processes and how to use them Processes in Nengo can be used to describe general functions or dynamical systems, including those with randomness. They can be useful if you want a `Node` output that has a state (like a dynamical system), and they're also used for things like injecting noise into Ensembles so that you can not only have "white" noise that samples from a distribution, but can also have "colored" noise where subsequent samples are correlated with past samples. This notebook will first present the basic process interface, then demonstrate some of the built-in Nengo processes and how they can be used in your code. It will also describe how to create your own custom process. ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np import nengo ``` ## Interface We will begin by looking at how to run an existing process instance. The key functions for running processes are `run`, `run_steps`, and `apply`. The first two are for running without an input, and the third is for applying the process to an input. There are also two helper functions, `trange` and `ntrange`, which return the time points corresponding to a process output, given either a length of time or a number of steps, respectively. ### `run`: running a process for a length of time The `run` function runs a process for a given length of time, without any input. Many of the random processes in `nengo.processes` will be run this way, since they do not require an input signal. ``` # Create a process (details on the FilteredNoise process below) process = nengo.processes.FilteredNoise( synapse=nengo.synapses.Alpha(0.1), seed=0) # run the process for two seconds y = process.run(2.0) # get a corresponding two-second time range t = process.trange(2.0) plt.figure() plt.plot(t, y) plt.xlabel('time [s]') plt.ylabel('process output'); ``` ### `run_steps`: running a process for a number of steps To run the process for a number of steps, use the `run_steps` function. 
The length of the generated signal will depend on the process's `default_dt`. ``` process = nengo.processes.FilteredNoise( synapse=nengo.synapses.Alpha(0.1), seed=0) # run the process for 1000 steps y = process.run_steps(1000) # get a corresponding 1000-step time range t = process.ntrange(1000) plt.figure() plt.plot(t, y) plt.xlabel('time [s]') plt.ylabel('process output'); ``` ### `apply`: running a process with an input To run a process with an input, use the `apply` function. ``` process = nengo.synapses.Lowpass(0.2) t = process.trange(5) x = np.minimum(t % 2, 2 - (t % 2)) # sawtooth wave y = process.apply(x) # general to all Processes z = process.filtfilt(x) # specific to Synapses plt.figure() plt.plot(t, x, label='input') plt.plot(t, y, label='output') plt.plot(t, z, label='filtfilt') plt.xlabel('time [s]') plt.ylabel('signal') plt.legend(); ``` Note that Synapses are a special kind of process, and have the additional functions `filt` and `filtfilt`. `filt` works mostly the same as `apply`, but with some additional functionality such as the ability to filter along any axis. `filtfilt` provides zero-phase filtering. ### Changing the time-step (`dt` and `default_dt`) To run a process with a different time-step, you can either pass the new time step (`dt`) when calling the functions, or change the `default_dt` property of the process. ``` process = nengo.processes.FilteredNoise( synapse=nengo.synapses.Alpha(0.1), seed=0) y1 = process.run(2.0, dt=0.05) t1 = process.trange(2.0, dt=0.05) process = nengo.processes.FilteredNoise( synapse=nengo.synapses.Alpha(0.1), default_dt=0.1, seed=0) y2 = process.run(2.0) t2 = process.trange(2.0) plt.figure() plt.plot(t1, y1, label='dt = %s' % 0.05) plt.plot(t2, y2, label='dt = %s' % 0.1) plt.xlabel('time [s]') plt.ylabel('output'); ``` ## `WhiteSignal` The `WhiteSignal` process is used to generate band-limited white noise, with only frequencies below a given cutoff frequency. 
``` with nengo.Network() as model: a = nengo.Node(nengo.processes.WhiteSignal(1.0, high=5, seed=0)) b = nengo.Node(nengo.processes.WhiteSignal(1.0, high=10, seed=0)) c = nengo.Node(nengo.processes.WhiteSignal(1.0, high=5, rms=0.3, seed=0)) d = nengo.Node(nengo.processes.WhiteSignal(0.5, high=5, seed=0)) ap = nengo.Probe(a) bp = nengo.Probe(b) cp = nengo.Probe(c) dp = nengo.Probe(d) with nengo.Simulator(model) as sim: sim.run(1.0) plt.figure() plt.plot(sim.trange(), sim.data[ap], label='5 Hz cutoff') plt.plot(sim.trange(), sim.data[bp], label='10 Hz cutoff') plt.plot(sim.trange(), sim.data[cp], label='5 Hz cutoff, 0.3 RMS amplitude') plt.plot(sim.trange(), sim.data[dp], label='5 Hz cutoff, 0.5 s period') plt.xlabel("time [s]") plt.legend(loc=2); ``` Note that the 10 Hz signal (green) has similar low frequency characteristics as the 5 Hz signal (blue), but with additional higher-frequency components. The 0.3 RMS amplitude 5 Hz signal (red) is the same as the original 5 Hz signal (blue), but scaled down (the default RMS amplitude is 0.5). Finally, the signal with a 0.5 s period (instead of a 1 s period like the others) is completely different, because changing the period changes the spacing of the random frequency components and thus creates a completely different signal. Note how the signal with the 0.5 s period repeats itself; for example, the value at $t = 0$ is the same as the value at $t = 0.5$, and the value at $t = 0.4$ is the same as the value at $t = 0.9$. ## `WhiteNoise` The `WhiteNoise` process generates white noise, with equal power across all frequencies. By default, it is scaled so that the integral process (Brownian noise) will have the same standard deviation regardless of `dt`. ``` process = nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 1)) t = process.trange(0.5) y = process.run(0.5) plt.figure() plt.plot(t, y); ``` One use of the `WhiteNoise` process is to inject noise into neural populations. 
Here, we create two identical ensembles, but add a bit of noise to one and no noise to the other. We plot the membrane voltages of both. ``` process = nengo.processes.WhiteNoise( dist=nengo.dists.Gaussian(0, 0.01), seed=1) with nengo.Network() as model: ens_args = dict(encoders=[[1]], intercepts=[0.01], max_rates=[100]) a = nengo.Ensemble(1, 1, **ens_args) b = nengo.Ensemble(1, 1, noise=process, **ens_args) a_voltage = nengo.Probe(a.neurons, 'voltage') b_voltage = nengo.Probe(b.neurons, 'voltage') with nengo.Simulator(model) as sim: sim.run(0.15) plt.figure() plt.plot(sim.trange(), sim.data[a_voltage], label="deterministic") plt.plot(sim.trange(), sim.data[b_voltage], label="noisy") plt.xlabel('time [s]') plt.ylabel('voltage') plt.legend(loc=4); ``` We see that the neuron without noise (blue) approaches its firing threshold, but never quite gets there. Adding a bit of noise (green) causes the neuron to occasionally jitter above the threshold, resulting in two spikes (where the voltage suddenly drops to zero). ## `FilteredNoise` The `FilteredNoise` process takes a white noise signal and passes it through a filter. Using any type of lowpass filter (e.g. `Lowpass`, `Alpha`) will result in a signal similar to `WhiteSignal`, but rather than being ideally filtered (i.e. no frequency content above the cutoff), the `FilteredNoise` signal will have some frequency content above the cutoff, with the amount depending on the filter used. Here, we can see how an `Alpha` filter (a second-order lowpass filter) is much better than the `Lowpass` filter (a first-order lowpass filter) at removing the high-frequency content. 
``` process1 = nengo.processes.FilteredNoise( dist=nengo.dists.Gaussian(0, 0.01), synapse=nengo.Alpha(0.005), seed=0) process2 = nengo.processes.FilteredNoise( dist=nengo.dists.Gaussian(0, 0.01), synapse=nengo.Lowpass(0.005), seed=0) tlen = 0.5 plt.figure() plt.plot(process1.trange(tlen), process1.run(tlen)) plt.plot(process2.trange(tlen), process2.run(tlen)); ``` The `FilteredNoise` process with an `Alpha` synapse (blue) has significantly lower high-frequency components than a similar process with a `Lowpass` synapse (green). ## `PresentInput` The `PresentInput` process is useful for presenting a series of static inputs to a network, where each input is shown for the same length of time. Once all the images have been shown, they repeat from the beginning. One application is presenting a series of images to a classification network. ``` inputs = [[0, 0.5], [0.3, 0.2], [-0.1, -0.7], [-0.8, 0.6]] process = nengo.processes.PresentInput(inputs, presentation_time=0.1) tlen = 0.8 plt.figure() plt.plot(process.trange(tlen), process.run(tlen)) plt.xlim([0, tlen]) plt.ylim([-1, 1]); ``` ## Custom processes You can create custom processes by inheriting from the `nengo.Process` class and overloading the `make_step` and `make_state` methods. As an example, we'll make a simple custom process that implements a two-dimensional oscillator dynamical system. The `make_state` function defines a `state` variable to store the state. The `make_step` function uses that state and a fixed `A` matrix to determine how the state changes over time. One advantage to using a process over a simple function is that if we reset our simulator, `make_step` will be called again and the process state will be restored to the initial state. 
``` class SimpleOscillator(nengo.Process): def make_state(self, shape_in, shape_out, dt, dtype=None): # return a dictionary mapping strings to their initial state return {"state": np.array([1., 0.])} def make_step(self, shape_in, shape_out, dt, rng, state): A = np.array([[-0.1, -1.], [1., -0.1]]) s = state["state"] # define the step function, which will be called # by the node every time step def step(t): s[:] += dt * np.dot(A, s) return s return step # return the step function with nengo.Network() as model: a = nengo.Node(SimpleOscillator(), size_in=0, size_out=2) a_p = nengo.Probe(a) with nengo.Simulator(model) as sim: sim.run(20.0) plt.figure() plt.plot(sim.trange(), sim.data[a_p]) plt.xlabel('time [s]'); ``` We can generalize this process to one that can implement arbitrary linear dynamical systems, given `A` and `B` matrices. We will overload the `__init__` method to take and store these matrices, as well as check the matrix shapes and set the default size in and out. The advantage of using the default sizes is that when we then create a node using the process, or run the process using `apply`, we do not need to specify the sizes. 
``` class LTIProcess(nengo.Process): def __init__(self, A, B, **kwargs): A, B = np.asarray(A), np.asarray(B) # check that the matrix shapes are compatible assert A.ndim == 2 and A.shape[0] == A.shape[1] assert B.ndim == 2 and B.shape[0] == A.shape[0] # store the matrices for `make_step` self.A = A self.B = B # pass the default sizes to the Process constructor super().__init__( default_size_in=B.shape[1], default_size_out=A.shape[0], **kwargs) def make_state(self, shape_in, shape_out, dt, dtype=None): return {"state": np.zeros(self.A.shape[0])} def make_step(self, shape_in, shape_out, dt, rng, state): assert shape_in == (self.B.shape[1],) assert shape_out == (self.A.shape[0],) A, B = self.A, self.B s = state["state"] def step(t, x): s[:] += dt * (np.dot(A, s) + np.dot(B, x)) return s return step # demonstrate the LTIProcess in action A = [[-0.1, -1], [1, -0.1]] B = [[10], [-10]] with nengo.Network() as model: u = nengo.Node(lambda t: 1 if t < 0.1 else 0) # we don't need to specify size_in and size_out! a = nengo.Node(LTIProcess(A, B)) nengo.Connection(u, a) a_p = nengo.Probe(a) with nengo.Simulator(model) as sim: sim.run(20.0) plt.figure() plt.plot(sim.trange(), sim.data[a_p]) plt.xlabel('time [s]'); ```
github_jupyter
# Jetsoncar Rosey V2 Tensorflow 2.0, all in notebook, optimized with RT ``` import tensorflow as tf print(tf.__version__) tf.config.experimental.list_physical_devices('GPU') # If device does not show and using conda env with tensorflow-gpu then try restarting computer # verify the image data directory import os data_directory = "/media/michael/BigMemory/datasets/jetsoncar/training_data/data/dataset" os.listdir(data_directory)[:10] import matplotlib.pyplot as plt img = plt.imread(os.path.join(data_directory + "/color_images", os.listdir(data_directory + "/color_images")[0])) print(img.shape) plt.imshow(img) ``` ## Create the datagenerator and augmentation framework ``` # Include the custom utils.py and perform tests import importlib utils = importlib.import_module('utils') import numpy as np print(utils.INPUT_SHAPE) img = utils.load_image(os.path.join(data_directory, 'color_images'),os.listdir(data_directory + "/color_images")[0]) print(img.shape) fig = plt.figure(figsize=(20,20)) fig.add_subplot(1, 3, 1) plt.imshow(img) img, _ = utils.preprocess_data(last_color_image=img) print(img.shape) fig.add_subplot(1, 3, 2) plt.imshow(np.squeeze(img)) plt.show() # Load the steering angles and image paths from labels.csv import csv, random import seaborn as sns # these will be 2D arrays where each row represents a dataset x = [] # images y = [] # steering z = [] # speed with open(os.path.join(data_directory, "tags.csv")) as csvfile: reader = csv.DictReader(csvfile) for row in reader: # print(row['Time_stamp'] + ".jpg", row['Steering_angle']) if not float(row['raw_speed']) == 0: x.append(row['time_stamp'] + ".jpg",) # get image path y.append(float(row['raw_steering']),) # get steering value z.append(float(row['raw_speed'])) print("Number of data samples is " + str(len(y))) data = list(zip(x,y)) random.shuffle(data) x,y = zip(*data) # plot of steering angle distribution without correction sns.distplot(y) # plot of speed distribution sns.distplot(z) # Split the training data 
validation_split = 0.2 train_x = x[0:int(len(x)*(1.0-validation_split))] train_y = y[0:int(len(y)*(1.0-validation_split))] print("Training data shape: " + str(len(train_x))) test_x = x[int(len(x)*(1.0-validation_split)):] test_y = y[int(len(y)*(1.0-validation_split)):] print("Validation data shape: " + str(len(test_x)) + "\n") # Define and test batch generator def batch_generator(data_dir, image_paths, steering_angles, batch_size, is_training): """ Generate training image give image paths and associated steering angles """ images = np.empty([batch_size, utils.IMAGE_HEIGHT, utils.IMAGE_WIDTH, utils.IMAGE_CHANNELS], dtype=np.float32) steers = np.empty(batch_size) while True: i = 0 for index in np.random.permutation(len(image_paths)): img = image_paths[index] steering_angle = steering_angles[index] # argumentation if is_training and np.random.rand() < 0.8: image, steering_angle = utils.augument(data_dir, os.path.join("color_images",img), steering_angle) else: image, _ = utils.preprocess_data(utils.load_image(data_dir, os.path.join("color_images",img))) # add the image and steering angle to the batch images[i] = image steers[i] = steering_angle i += 1 if i == batch_size: break yield images, steers train_generator = batch_generator(data_directory, train_x, train_y, 32, True) validation_generator = batch_generator(data_directory, test_x, test_y, 32, False) train_image = next(train_generator) # returns tuple with steering and throttle print(train_image[0].shape) print(train_image[1][0]) plt.imshow(train_image[0][0]) ``` ## Define the model and start training ``` model = tf.keras.models.Sequential([ tf.keras.Input((utils.IMAGE_HEIGHT, utils.IMAGE_WIDTH, utils.IMAGE_CHANNELS)), tf.keras.layers.Conv2D(32, (11,11), padding='same', kernel_initializer='lecun_uniform'), tf.keras.layers.BatchNormalization(), tf.keras.layers.ELU(), tf.keras.layers.MaxPool2D((2,2)), tf.keras.layers.Conv2D(32, (7,7), padding='same', kernel_initializer='lecun_uniform'), 
## Save the model as TensorRT and export to Jetson format
model_path = 'model.h5' export_path = "/home/michael/Desktop/model" import shutil if not os.path.isdir(export_path): os.mkdir(export_path) else: response = input("Do you want to delete existing export_path directory? y/n") if response == 'y': shutil.rmtree(export_path) os.mkdir(export_path) loaded_model = tf.keras.models.load_model(model_path) shutil.copy("./utils.py", os.path.join(export_path, "utils.py")) shutil.copy("./__init__.py", os.path.join(export_path, "__init__.py")) shutil.copy("./notes.txt", os.path.join(export_path, "notes.txt")) shutil.copy("./config.yaml", os.path.join(export_path, "config.yaml")) # Save as tf saved_model (faster than h5) tf.saved_model.save(loaded_model, export_path) from tensorflow.python.compiler.tensorrt import trt_convert as trt conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS conversion_params = conversion_params._replace(max_workspace_size_bytes=(1 << 32)) conversion_params = conversion_params._replace(precision_mode="INT8") conversion_params = conversion_params._replace(maximum_cached_engines=100) conversion_params = conversion_params._replace(use_calibration=True) def my_calibration_input_fn(): for i in range(20): image, _ = utils.preprocess_data(utils.load_image(data_directory, os.path.join("color_images",x[i]))) yield image.astype(np.float32), converter = tf.experimental.tensorrt.Converter(input_saved_model_dir=export_path,conversion_params=conversion_params) gen = my_calibration_input_fn() converter.convert(calibration_input_fn=my_calibration_input_fn) converter.build(my_calibration_input_fn) if not os.path.isdir(os.path.join(export_path, "rt")): os.mkdir(os.path.join(export_path, "rt")) converter.save(os.path.join(export_path, "rt")) # Test normal saved model saved_model = tf.saved_model.load(export_path) # normal saved model image, _ = next(validation_generator) import time output = saved_model(image.astype(np.float32)) # load once to get more accurate representation of speed start = time.time() output = 
saved_model(image.astype(np.float32)) stop = time.time() print("inference time: " + str(stop - start)) print("Output: %.20f"%output[8,0]) # Test TRT optimized saved model saved_model = tf.saved_model.load(os.path.join(export_path, "rt")) # normal saved model image, _ = next(validation_generator) import time output = saved_model(image) # load once to get more accurate representation of speed start = time.time() output = saved_model(image) stop = time.time() print("inference time: " + str(stop - start)) print("Output: %.20f"%output[8,0]) # Run many samples through and save distribution validation_generator = batch_generator(data_directory, test_x, test_y, 32, False) test = [] for i in range(50): img, _ = next(validation_generator) test.append(saved_model(img.astype(np.float32))[0][0]) print(str(i), end="\r") sns.distplot(test) ```
github_jupyter
``` from __future__ import print_function import os import time import logging import argparse from visdom import Visdom import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable from torch.utils.data import DataLoader from torchvision import datasets, transforms from utils import * import dataset # Teacher models import models # Student models start_time = time.time() # os.makedirs('./checkpoint', exist_ok=True) # Training settings parser = argparse.ArgumentParser(description='PyTorch original KD') parser.add_argument('--dataset', choices=['CIFAR10', 'CIFAR100', 'tinyimagenet' ], default='CIFAR10') parser.add_argument('--teacher', choices=['ResNet32', 'ResNet50', 'ResNet56', 'ResNet110' ], default='ResNet110') parser.add_argument('--student', choices=[ 'ResNet8', 'ResNet15', 'ResNet16', 'ResNet20', 'myNet' ], default='ResNet20') parser.add_argument('--n_class', type=int, default=10, metavar='N', help='num of classes') parser.add_argument('--T', type=float, default=20.0, metavar='Temputure', help='Temputure for distillation') parser.add_argument('--batch_size', type=int, default=128, metavar='N', help='input batch size for training') parser.add_argument('--test_batch_size', type=int, default=128, metavar='N', help='input test batch size for training') parser.add_argument('--epochs', type=int, default=20, metavar='N', help='number of epochs to train (default: 20)') parser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.01)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.5)') parser.add_argument('--device', default='cuda:1', type=str, help='device: cuda or cpu') parser.add_argument('--print_freq', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') config = ['--dataset', 'CIFAR100', '--epochs', '200', '--n_class', '100', 
'--teacher', 'ResNet110', '--student', 'ResNet8', '--T', '5.0', '--device', 'cuda:0'] args = parser.parse_args(config) device = args.device if torch.cuda.is_available() else 'cpu' load_dir = './checkpoint/' + args.dataset + '/' teacher_model = getattr(models, args.teacher)(args.n_class) teacher_model.load_state_dict(torch.load(load_dir + args.teacher + '.pth')) teacher_model.to(device) st_model = getattr(models, args.student)(args.n_class) # args.student() st_model.to(device) # logging logfile = load_dir + 'KD_' + st_model.model_name + '.log' if os.path.exists(logfile): os.remove(logfile) def log_out(info): f = open(logfile, mode='a') f.write(info) f.write('\n') f.close() print(info) # visualizer vis = Visdom(env='distill') loss_win = vis.line( X=np.array([0]), Y=np.array([0]), opts=dict( title=args.student + ' KD Loss', xtickmin=0, # xtickmax=1, # xtickstep=5, ytickmin=0, # ytickmax=10, # ytickstep=5, # markers=True, # markersymbol='dot', # markersize=5, ), name="loss" ) acc_win = vis.line( X=np.column_stack((0, 0)), Y=np.column_stack((0, 0)), opts=dict( title=args.student + ' KD ACC', xtickmin=0, # xtickstep=5, ytickmin=0, ytickmax=100, # markers=True, # markersymbol='dot', # markersize=5, legend=['train_acc', 'test_acc'] ), name="acc" ) # data normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_transform = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, 4), transforms.ToTensor(), normalize, ]) test_transform = transforms.Compose([transforms.ToTensor(), normalize]) if args.dataset == 'tinyimagenet': train_set = dataset.TinyImageNet(root='../data/tiny-imagenet-200', transform=train_transform) test_set = dataset.TinyImageNet(root='../data/tiny-imagenet-200', transform=test_transform) else: train_set = getattr(datasets, args.dataset)(root='../data', train=True, download=True, transform=train_transform) test_set = getattr(datasets, args.dataset)(root='../data', train=False, download=False, 
transform=test_transform) train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True) test_loader = DataLoader(test_set, batch_size=args.test_batch_size, shuffle=False) # optimizer = optim.SGD(st_model.parameters(), lr=args.lr, momentum=args.momentum) optimizer_sgd = optim.SGD(st_model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4) lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer_sgd, milestones=[100, 150]) def distillation(y, labels, teacher_scores, T, alpha): return nn.KLDivLoss()(F.log_softmax(y/T), F.softmax(teacher_scores/T)) * (T*T * 2.0 * alpha) + F.cross_entropy(y, labels) * (1. - alpha) def train(epoch, model, loss_fn): print('Training:') # switch to train mode model.train() batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() end = time.time() for i, (input, target) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) input, target = input.to(device), target.to(device) optimizer_sgd.zero_grad() # compute outputs _,_,_,_, output = model(input) with torch.no_grad(): _,_,_,_, t_output = teacher_model(input) # print(output.size(), target.size(), teacher_output.size()) # compute gradient and do SGD step loss = loss_fn(output, target, t_output, T=args.T, alpha=0.7) loss.backward() optimizer_sgd.step() output = output.float() loss = loss.float() # measure accuracy and record loss train_acc = accuracy(output.data, target.data)[0] losses.update(loss.item(), input.size(0)) top1.update(train_acc, input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: log_out('[{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format( i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1)) return losses.avg, 
train_acc.cpu().numpy() def test(model): print('Testing:') # switch to evaluate mode model.eval() batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() end = time.time() with torch.no_grad(): for i, (input, target) in enumerate(test_loader): input, target = input.to(device), target.to(device) # compute output _,_,_,_, output = model(input) loss = F.cross_entropy(output, target) output = output.float() loss = loss.float() # measure accuracy and record loss test_acc = accuracy(output.data, target.data)[0] losses.update(loss.item(), input.size(0)) top1.update(test_acc, input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: log_out('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format( i, len(test_loader), batch_time=batch_time, loss=losses, top1=top1)) log_out(' * Prec@1 {top1.avg:.3f}'.format(top1=top1)) return losses.avg, test_acc.cpu().numpy(), top1.avg.cpu().numpy() print('StudentNet:\n') print(st_model) best_acc = 0 for epoch in range(1, args.epochs + 1): log_out("\n===> epoch: {}/{}".format(epoch, args.epochs)) log_out('current lr {:.5e}'.format(optimizer_sgd.param_groups[0]['lr'])) lr_scheduler.step() train_loss, train_acc = train(epoch, st_model, loss_fn=distillation) # visaulize loss vis.line(np.array([train_loss]), np.array([epoch]), loss_win, update="append") _, test_acc, top1 = test(st_model) best_acc = max(top1, best_acc) vis.line(np.column_stack((train_acc, top1)), np.column_stack((epoch, epoch)), acc_win, update="append") # torch.save(st_model.state_dict(), load_dir + args.teacher + '_distill_' + args.student + '.pth') # release GPU memory torch.cuda.empty_cache() log_out("@ BEST ACC = {:.4f}%".format(best_acc)) log_out("--- {:.3f} mins ---".format((time.time() - start_time)/60)) ```
github_jupyter
Click on the <b>[miniWeather_openacc.cpp](../source_code/lab2/miniWeather_openacc.cpp)</b> and <b>[Makefile](../source_code/lab2/Makefile)</b> and inspect the code before running the cells below.
You can see that the changes actually slowed down the code, and it now runs slower than the non-accelerated, CPU-only version. Let's check out the profiler's report.
**Screenshots represent the profiler report for the values of 400,200,1500.** <img src="images/nsys_slow.png" width="80%" height="80%"> Hover your mouse over the kernels (blue boxes) one by one from each row and check out the provided information. <img src="images/occu-1.png" width="60%" height="60%"> **Note**: In the next two exercises, we start optimizing the application by improving the occupancy and reducing data movements. ## Post-Lab Summary If you would like to download this lab for later viewing, it is recommended you go to your browser's File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below. ``` %%bash cd .. rm -f openacc_profiler_files.zip zip -r openacc_profiler_files.zip * ``` **After** executing the above zip command, you should be able to download the zip file [here](../openacc_profiler_files.zip). ----- # <p style="text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em"> <a href=../../profiling_start.ipynb>HOME</a>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<span style="float:center"> <a href=profiling-c-lab3.ipynb>NEXT</a></span> </p> ----- # Links and Resources [OpenACC API Guide](https://www.openacc.org/sites/default/files/inline-files/OpenACC%20API%202.6%20Reference%20Guide.pdf) [NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/) [CUDA Toolkit Download](https://developer.nvidia.com/cuda-downloads) **NOTE**: To be able to see the Nsight System profiler output, please download Nsight System latest version from [here](https://developer.nvidia.com/nsight-systems). Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community. 
--- ## Licensing This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
github_jupyter
# Transfer learning & fine-tuning **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2020/04/15<br> **Last modified:** 2020/05/12<br> **Description:** Complete guide to transfer learning & fine-tuning in Keras. ## Setup ``` import numpy as np import tensorflow as tf from tensorflow import keras ``` ## Introduction **Transfer learning** consists of taking features learned on one problem, and leveraging them on a new, similar problem. For instance, features from a model that has learned to identify racoons may be useful to kick-start a model meant to identify tanukis. Transfer learning is usually done for tasks where your dataset has too little data to train a full-scale model from scratch. The most common incarnation of transfer learning in the context of deep learning is the following workflow: 1. Take layers from a previously trained model. 2. Freeze them, so as to avoid destroying any of the information they contain during future training rounds. 3. Add some new, trainable layers on top of the frozen layers. They will learn to turn the old features into predictions on a new dataset. 4. Train the new layers on your dataset. A last, optional step, is **fine-tuning**, which consists of unfreezing the entire model you obtained above (or part of it), and re-training it on the new data with a very low learning rate. This can potentially achieve meaningful improvements, by incrementally adapting the pretrained features to the new data. First, we will go over the Keras `trainable` API in detail, which underlies most transfer learning & fine-tuning workflows. Then, we'll demonstrate the typical workflow by taking a model pretrained on the ImageNet dataset, and retraining it on the Kaggle "cats vs dogs" classification dataset. 
This is adapted from [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python) and the 2016 blog post ["building powerful image classification models using very little data"](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html). ## Freezing layers: understanding the `trainable` attribute Layers & models have three weight attributes: - `weights` is the list of all weights variables of the layer. - `trainable_weights` is the list of those that are meant to be updated (via gradient descent) to minimize the loss during training. - `non_trainable_weights` is the list of those that aren't meant to be trained. Typically they are updated by the model during the forward pass. **Example: the `Dense` layer has 2 trainable weights (kernel & bias)** ``` layer = keras.layers.Dense(3) layer.build((None, 4)) # Create the weights print("weights:", len(layer.weights)) print("trainable_weights:", len(layer.trainable_weights)) print("non_trainable_weights:", len(layer.non_trainable_weights)) ``` In general, all weights are trainable weights. The only built-in layer that has non-trainable weights is the `BatchNormalization` layer. It uses non-trainable weights to keep track of the mean and variance of its inputs during training. To learn how to use non-trainable weights in your own custom layers, see the [guide to writing new layers from scratch](https://keras.io/guides/making_new_layers_and_models_via_subclassing/). **Example: the `BatchNormalization` layer has 2 trainable weights and 2 non-trainable weights** ``` layer = keras.layers.BatchNormalization() layer.build((None, 4)) # Create the weights print("weights:", len(layer.weights)) print("trainable_weights:", len(layer.trainable_weights)) print("non_trainable_weights:", len(layer.non_trainable_weights)) ``` Layers & models also feature a boolean attribute `trainable`. Its value can be changed. 
Setting `layer.trainable` to `False` moves all the layer's weights from trainable to non-trainable. This is called "freezing" the layer: the state of a frozen layer won't be updated during training (either when training with `fit()` or when training with any custom loop that relies on `trainable_weights` to apply gradient updates). **Example: setting `trainable` to `False`** ``` layer = keras.layers.Dense(3) layer.build((None, 4)) # Create the weights layer.trainable = False # Freeze the layer print("weights:", len(layer.weights)) print("trainable_weights:", len(layer.trainable_weights)) print("non_trainable_weights:", len(layer.non_trainable_weights)) ``` When a trainable weight becomes non-trainable, its value is no longer updated during training. ``` # Make a model with 2 layers layer1 = keras.layers.Dense(3, activation="relu") layer2 = keras.layers.Dense(3, activation="sigmoid") model = keras.Sequential([keras.Input(shape=(3,)), layer1, layer2]) # Freeze the first layer layer1.trainable = False # Keep a copy of the weights of layer1 for later reference initial_layer1_weights_values = layer1.get_weights() # Train the model model.compile(optimizer="adam", loss="mse") model.fit(np.random.random((2, 3)), np.random.random((2, 3))) # Check that the weights of layer1 have not changed during training final_layer1_weights_values = layer1.get_weights() np.testing.assert_allclose( initial_layer1_weights_values[0], final_layer1_weights_values[0] ) np.testing.assert_allclose( initial_layer1_weights_values[1], final_layer1_weights_values[1] ) ``` Do not confuse the `layer.trainable` attribute with the argument `training` in `layer.__call__()` (which controls whether the layer should run its forward pass in inference mode or training mode). For more information, see the [Keras FAQ]( https://keras.io/getting_started/faq/#whats-the-difference-between-the-training-argument-in-call-and-the-trainable-attribute). 
## Recursive setting of the `trainable` attribute If you set `trainable = False` on a model or on any layer that has sublayers, all children layers become non-trainable as well. **Example:** ``` inner_model = keras.Sequential( [ keras.Input(shape=(3,)), keras.layers.Dense(3, activation="relu"), keras.layers.Dense(3, activation="relu"), ] ) model = keras.Sequential( [keras.Input(shape=(3,)), inner_model, keras.layers.Dense(3, activation="sigmoid"),] ) model.trainable = False # Freeze the outer model assert inner_model.trainable == False # All layers in `model` are now frozen assert inner_model.layers[0].trainable == False # `trainable` is propagated recursively ``` ## The typical transfer-learning workflow This leads us to how a typical transfer learning workflow can be implemented in Keras: 1. Instantiate a base model and load pre-trained weights into it. 2. Freeze all layers in the base model by setting `trainable = False`. 3. Create a new model on top of the output of one (or several) layers from the base model. 4. Train your new model on your new dataset. Note that an alternative, more lightweight workflow could also be: 1. Instantiate a base model and load pre-trained weights into it. 2. Run your new dataset through it and record the output of one (or several) layers from the base model. This is called **feature extraction**. 3. Use that output as input data for a new, smaller model. A key advantage of that second workflow is that you only run the base model once on your data, rather than once per epoch of training. So it's a lot faster & cheaper. An issue with that second workflow, though, is that it doesn't allow you to dynamically modify the input data of your new model during training, which is required when doing data augmentation, for instance. Transfer learning is typically used for tasks when your new dataset has too little data to train a full-scale model from scratch, and in such scenarios data augmentation is very important. 
So in what follows, we will focus on the first workflow. Here's what the first workflow looks like in Keras: First, instantiate a base model with pre-trained weights. ```python base_model = keras.applications.Xception( weights='imagenet', # Load weights pre-trained on ImageNet. input_shape=(150, 150, 3), include_top=False) # Do not include the ImageNet classifier at the top. ``` Then, freeze the base model. ```python base_model.trainable = False ``` Create a new model on top. ```python inputs = keras.Input(shape=(150, 150, 3)) # We make sure that the base_model is running in inference mode here, # by passing `training=False`. This is important for fine-tuning, as you will # learn in a few paragraphs. x = base_model(inputs, training=False) # Convert features of shape `base_model.output_shape[1:]` to vectors x = keras.layers.GlobalAveragePooling2D()(x) # A Dense classifier with a single unit (binary classification) outputs = keras.layers.Dense(1)(x) model = keras.Model(inputs, outputs) ``` Train the model on new data. ```python model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=[keras.metrics.BinaryAccuracy()]) model.fit(new_dataset, epochs=20, callbacks=..., validation_data=...) ``` ## Fine-tuning Once your model has converged on the new data, you can try to unfreeze all or part of the base model and retrain the whole model end-to-end with a very low learning rate. This is an optional last step that can potentially give you incremental improvements. It could also potentially lead to quick overfitting -- keep that in mind. It is critical to only do this step *after* the model with frozen layers has been trained to convergence. If you mix randomly-initialized trainable layers with trainable layers that hold pre-trained features, the randomly-initialized layers will cause very large gradient updates during training, which will destroy your pre-trained features. 
It's also critical to use a very low learning rate at this stage, because you are training a much larger model than in the first round of training, on a dataset that is typically very small. As a result, you are at risk of overfitting very quickly if you apply large weight updates. Here, you only want to readapt the pretrained weights in an incremental way. This is how to implement fine-tuning of the whole base model: ```python # Unfreeze the base model base_model.trainable = True # It's important to recompile your model after you make any changes # to the `trainable` attribute of any inner layer, so that your changes # are taken into account model.compile(optimizer=keras.optimizers.Adam(1e-5),  # Very low learning rate loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=[keras.metrics.BinaryAccuracy()]) # Train end-to-end. Be careful to stop before you overfit! model.fit(new_dataset, epochs=10, callbacks=..., validation_data=...) ``` **Important note about `compile()` and `trainable`** Calling `compile()` on a model is meant to "freeze" the behavior of that model. This implies that the `trainable` attribute values at the time the model is compiled should be preserved throughout the lifetime of that model, until `compile` is called again. Hence, if you change any `trainable` value, make sure to call `compile()` again on your model for your changes to be taken into account. **Important notes about `BatchNormalization` layer** Many image models contain `BatchNormalization` layers. That layer is a special case on every imaginable count. Here are a few things to keep in mind. - `BatchNormalization` contains 2 non-trainable weights that get updated during training. These are the variables tracking the mean and variance of the inputs. - When you set `bn_layer.trainable = False`, the `BatchNormalization` layer will run in inference mode, and will not update its mean & variance statistics. 
This is not the case for other layers in general, as [weight trainability & inference/training modes are two orthogonal concepts]( https://keras.io/getting_started/faq/#whats-the-difference-between-the-training-argument-in-call-and-the-trainable-attribute). But the two are tied in the case of the `BatchNormalization` layer. - When you unfreeze a model that contains `BatchNormalization` layers in order to do fine-tuning, you should keep the `BatchNormalization` layers in inference mode by passing `training=False` when calling the base model. Otherwise the updates applied to the non-trainable weights will suddenly destroy what the model has learned. You'll see this pattern in action in the end-to-end example at the end of this guide. ## Transfer learning & fine-tuning with a custom training loop If instead of `fit()`, you are using your own low-level training loop, the workflow stays essentially the same. You should be careful to only take into account the list `model.trainable_weights` when applying gradient updates: ```python # Create base model base_model = keras.applications.Xception( weights='imagenet', input_shape=(150, 150, 3), include_top=False) # Freeze base model base_model.trainable = False # Create new model on top. inputs = keras.Input(shape=(150, 150, 3)) x = base_model(inputs, training=False) x = keras.layers.GlobalAveragePooling2D()(x) outputs = keras.layers.Dense(1)(x) model = keras.Model(inputs, outputs) loss_fn = keras.losses.BinaryCrossentropy(from_logits=True) optimizer = keras.optimizers.Adam() # Iterate over the batches of a dataset. for inputs, targets in new_dataset: # Open a GradientTape. with tf.GradientTape() as tape: # Forward pass. predictions = model(inputs) # Compute the loss value for this batch. loss_value = loss_fn(targets, predictions) # Get gradients of loss wrt the *trainable* weights. gradients = tape.gradient(loss_value, model.trainable_weights) # Update the weights of the model. 
optimizer.apply_gradients(zip(gradients, model.trainable_weights)) ``` Likewise for fine-tuning. ## An end-to-end example: fine-tuning an image classification model on a cats vs. dogs dataset To solidify these concepts, let's walk you through a concrete end-to-end transfer learning & fine-tuning example. We will load the Xception model, pre-trained on ImageNet, and use it on the Kaggle "cats vs. dogs" classification dataset. ### Getting the data First, let's fetch the cats vs. dogs dataset using TFDS. If you have your own dataset, you'll probably want to use the utility `tf.keras.preprocessing.image_dataset_from_directory` to generate similar labeled dataset objects from a set of images on disk filed into class-specific folders. Transfer learning is most useful when working with very small datasets. To keep our dataset small, we will use 40% of the original training data (25,000 images) for training, 10% for validation, and 10% for testing. ``` import tensorflow_datasets as tfds tfds.disable_progress_bar() train_ds, validation_ds, test_ds = tfds.load( "cats_vs_dogs", # Reserve 10% for validation and 10% for test split=["train[:40%]", "train[40%:50%]", "train[50%:60%]"], as_supervised=True, # Include labels ) print("Number of training samples: %d" % tf.data.experimental.cardinality(train_ds)) print( "Number of validation samples: %d" % tf.data.experimental.cardinality(validation_ds) ) print("Number of test samples: %d" % tf.data.experimental.cardinality(test_ds)) ``` These are the first 9 images in the training dataset -- as you can see, they're all different sizes. ``` import matplotlib.pyplot as plt plt.figure(figsize=(10, 10)) for i, (image, label) in enumerate(train_ds.take(9)): ax = plt.subplot(3, 3, i + 1) plt.imshow(image) plt.title(int(label)) plt.axis("off") ``` We can also see that label 1 is "dog" and label 0 is "cat". ### Standardizing the data Our raw images have a variety of sizes. 
In addition, each pixel consists of 3 integer values between 0 and 255 (RGB level values). This isn't a great fit for feeding a neural network. We need to do 2 things: - Standardize to a fixed image size. We pick 150x150. - Normalize pixel values between -1 and 1. We'll do this using a `Normalization` layer as part of the model itself. In general, it's a good practice to develop models that take raw data as input, as opposed to models that take already-preprocessed data. The reason being that, if your model expects preprocessed data, any time you export your model to use it elsewhere (in a web browser, in a mobile app), you'll need to reimplement the exact same preprocessing pipeline. This gets very tricky very quickly. So we should do the least possible amount of preprocessing before hitting the model. Here, we'll do image resizing in the data pipeline (because a deep neural network can only process contiguous batches of data), and we'll do the input value scaling as part of the model, when we create it. Let's resize images to 150x150: ``` size = (150, 150) train_ds = train_ds.map(lambda x, y: (tf.image.resize(x, size), y)) validation_ds = validation_ds.map(lambda x, y: (tf.image.resize(x, size), y)) test_ds = test_ds.map(lambda x, y: (tf.image.resize(x, size), y)) ``` Besides, let's batch the data and use caching & prefetching to optimize loading speed. ``` batch_size = 32 train_ds = train_ds.cache().batch(batch_size).prefetch(buffer_size=10) validation_ds = validation_ds.cache().batch(batch_size).prefetch(buffer_size=10) test_ds = test_ds.cache().batch(batch_size).prefetch(buffer_size=10) ``` ### Using random data augmentation When you don't have a large image dataset, it's a good practice to artificially introduce sample diversity by applying random yet realistic transformations to the training images, such as random horizontal flipping or small random rotations. 
This helps expose the model to different aspects of the training data while slowing down overfitting. ``` from tensorflow import keras from tensorflow.keras import layers data_augmentation = keras.Sequential( [layers.RandomFlip("horizontal"), layers.RandomRotation(0.1),] ) ``` Let's visualize what the first image of the first batch looks like after various random transformations: ``` import numpy as np for images, labels in train_ds.take(1): plt.figure(figsize=(10, 10)) first_image = images[0] for i in range(9): ax = plt.subplot(3, 3, i + 1) augmented_image = data_augmentation( tf.expand_dims(first_image, 0), training=True ) plt.imshow(augmented_image[0].numpy().astype("int32")) plt.title(int(labels[0])) plt.axis("off") ``` ## Build a model Now let's build a model that follows the blueprint we've explained earlier. Note that: - We add a `Rescaling` layer to scale input values (initially in the `[0, 255]` range) to the `[-1, 1]` range. - We add a `Dropout` layer before the classification layer, for regularization. - We make sure to pass `training=False` when calling the base model, so that it runs in inference mode, so that batchnorm statistics don't get updated even after we unfreeze the base model for fine-tuning. ``` base_model = keras.applications.Xception( weights="imagenet",  # Load weights pre-trained on ImageNet. input_shape=(150, 150, 3), include_top=False, )  # Do not include the ImageNet classifier at the top. # Freeze the base_model base_model.trainable = False # Create new model on top inputs = keras.Input(shape=(150, 150, 3)) x = data_augmentation(inputs)  # Apply random data augmentation # Pre-trained Xception weights require that the input be scaled # from (0, 255) to a range of (-1., +1.), the rescaling layer # outputs: `(inputs * scale) + offset` scale_layer = keras.layers.Rescaling(scale=1 / 127.5, offset=-1) x = scale_layer(x) # The base model contains batchnorm layers. 
We want to keep them in inference mode # when we unfreeze the base model for fine-tuning, so we make sure that the # base_model is running in inference mode here. x = base_model(x, training=False) x = keras.layers.GlobalAveragePooling2D()(x) x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout outputs = keras.layers.Dense(1)(x) model = keras.Model(inputs, outputs) model.summary() ``` ## Train the top layer ``` model.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=[keras.metrics.BinaryAccuracy()], ) epochs = 20 model.fit(train_ds, epochs=epochs, validation_data=validation_ds) ``` ## Do a round of fine-tuning of the entire model Finally, let's unfreeze the base model and train the entire model end-to-end with a low learning rate. Importantly, although the base model becomes trainable, it is still running in inference mode since we passed `training=False` when calling it when we built the model. This means that the batch normalization layers inside won't update their batch statistics. If they did, they would wreak havoc on the representations learned by the model so far. ``` # Unfreeze the base_model. Note that it keeps running in inference mode # since we passed `training=False` when calling it. This means that # the batchnorm layers will not update their batch statistics. # This prevents the batchnorm layers from undoing all the training # we've done so far. base_model.trainable = True model.summary() model.compile( optimizer=keras.optimizers.Adam(1e-5),  # Low learning rate loss=keras.losses.BinaryCrossentropy(from_logits=True), metrics=[keras.metrics.BinaryAccuracy()], ) epochs = 10 model.fit(train_ds, epochs=epochs, validation_data=validation_ds) ``` After 10 epochs, fine-tuning gains us a nice improvement here.
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Sandford+-2020,-Section-3:-Methods" data-toc-modified-id="Sandford+-2020,-Section-3:-Methods-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Sandford+ 2020, Section 3: Methods</a></span><ul class="toc-item"><li><span><a href="#Imports" data-toc-modified-id="Imports-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Imports</a></span></li><li><span><a href="#Plotting-Configs" data-toc-modified-id="Plotting-Configs-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Plotting Configs</a></span></li><li><span><a href="#Figure-2:-HR-&amp;-Kiel-Diagrams" data-toc-modified-id="Figure-2:-HR-&amp;-Kiel-Diagrams-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Figure 2: HR &amp; Kiel Diagrams</a></span></li></ul></li></ul></div> # Sandford+ 2020, Section 3: Methods ## Imports ``` import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec %matplotlib inline ``` ## Plotting Configs ``` output_dir = './figures/' mpl.rc('axes', grid=True, lw=2) mpl.rc('ytick', direction='in', labelsize=14) mpl.rc('ytick.major', size=5, width=1) mpl.rc('xtick', direction='in', labelsize=14) mpl.rc('xtick.major', size=5, width=1) mpl.rc('ytick', direction='in', labelsize=14) mpl.rc('ytick.major', size=5, width=1) mpl.rc('grid', lw=0) mpl.rc('figure', dpi=300) ``` ## Figure 2: HR & Kiel Diagrams ``` log_age = 10 # Select 10 Gyr old isochrones metallicities = [-0.5, -1.0, -1.5, -2.0, -2.5] # Select isochrone metallicities c = plt.cm.get_cmap('plasma', len(metallicities) + 1) # Initialize figure fig = plt.figure(figsize=(5, 9)) gs = GridSpec(2, 1, hspace=0) ax1 = plt.subplot(gs[0, 0]) ax2 = plt.subplot(gs[1, 0]) # Loop through metallicities for i, feh in enumerate(metallicities): iso = pd.read_hdf('./isochrones.h5', f'{feh:1.1f}') # Load isochrone iso = iso[(iso['log10_isochrone_age_yr'] == log_age) 
& (10**iso['log_Teff'] >= 3500)] # Select on age and effective temperature rgb_idx = (np.abs(iso['Bessell_V'] + 0.5)).idxmin() # Find RGB star w/ M_V = -0.5 # Plot Isochrones ax1.plot(10**iso['log_Teff'], iso['Bessell_V'], c=c(i), lw=2, zorder=-1, label=r'$\log(Z)=$'+f'{feh:1.1f}') ax2.plot(10**iso['log_Teff'], iso['log_g'], c=c(i), lw=2, zorder=-1, label=r'$\log(Z)=$'+f'{feh:1.1f}') # Plot Reference Stars ax1.scatter(10**iso['log_Teff'][rgb_idx], iso['Bessell_V'][rgb_idx], marker='*', c=[c(i)], edgecolor='k', lw=0.5, s=150) ax2.scatter(10**iso['log_Teff'][rgb_idx], iso['log_g'][rgb_idx], marker='*', c=[c(i)], edgecolor='k', lw=0.5, s=150) if feh == -1.5: trgb_idx = (np.abs(iso['Bessell_V'] + 2.5)).idxmin() # Find TRGB star w/ M_V = -2.5 msto_idx = (np.abs(iso['Bessell_V'] - 3.5)).idxmin() # Find MSTO star w/ M_V = +3.5 ax1.scatter(10**iso['log_Teff'][trgb_idx], iso['Bessell_V'][trgb_idx], marker='o', c=[c(2)], edgecolor='k', lw=0.5, s=100, label=f'TRGB') ax2.scatter(10**iso['log_Teff'][trgb_idx], iso['log_g'][trgb_idx], marker='o', c=[c(2)], edgecolor='k', lw=0.5, s=100, label=f'TRGB') ax1.scatter(10**iso['log_Teff'][rgb_idx], iso['Bessell_V'][rgb_idx], marker='*', c=[c(2)], edgecolor='k', lw=0.5, s=150, label=f'RGB') ax2.scatter(10**iso['log_Teff'][rgb_idx], iso['log_g'][rgb_idx], marker='*', c=[c(2)], edgecolor='k', lw=0.5, s=150, label=f'RGB') ax1.scatter(10**iso['log_Teff'][msto_idx], iso['Bessell_V'][msto_idx], marker='s', c=[c(2)], edgecolor='k', lw=0.5, s=100, label=f'MSTO') ax2.scatter(10**iso['log_Teff'][msto_idx], iso['log_g'][msto_idx], marker='s', c=[c(2)], edgecolor='k', lw=0.5, s=100, label=f'MSTO') # Axes ax1.set_ylabel(r'$M_V$', size=24) ax2.set_ylabel(r'$\log(g)$', size=24) ax2.set_xlabel(r'$T_{eff}$', size=24) ax1.set_ylim(-3.5, 7) ax1.invert_xaxis() ax1.invert_yaxis() ax2.invert_xaxis() ax2.invert_yaxis() # Legend ax1.legend(fontsize=10, loc='upper left') plt.tight_layout() fig.savefig('./figures/hr_kiel.png') plt.show() ```
github_jupyter
CER024 - Create Controller certificate ====================================== This notebook creates a certificate for the Controller endpoint. It creates a controller-privatekey.pem as the private key and controller-signingrequest.csr as the signing request. The private key is a secret. The signing request (CSR) will be used by the CA to generate a signed certificate for the service. Steps ----- ### Parameters ``` import getpass app_name = "controller" scaledset_name = "control" container_name = "controller" prefix_keyfile_name = "controller" common_name = "controller-svc" country_name = "US" state_or_province_name = "Illinois" locality_name = "Chicago" organization_name = "Contoso" organizational_unit_name = "Finance" email_address = f"{getpass.getuser().lower()}@contoso.com" ssl_configuration_file = "service.openssl.cnf" days = "398" # the number of days to certify the certificate for test_cert_store_root = "/var/opt/secrets/test-certificates" extendedKeyUsage = "extendedKeyUsage = critical, clientAuth, serverAuth" ``` ### Common functions Define helper functions used in this notebook. ``` # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. 
Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) 
to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportability, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) # Display an install HINT, so the user can click on a SOP to install the missing binary # if which_binary == None: if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: if base64_decode: import base64 return base64.b64decode(output).decode('utf-8') else: return output elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: if base64_decode: import base64 return base64.b64decode(output).decode('utf-8') else: return output def load_json(filename): """Load a json file from disk and return the contents""" with 
open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable""" # Load this notebook as json to get access to the expert rules in the notebook metadata. # try: j = load_json("cer024-create-controller-cert.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "expanded_rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["expanded_rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. # print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! 
if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond'], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']], 'azdata': [['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', 
'../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. 
Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ["[Errno 2] No such file or directory: '..\\\\", 'TSG053 - ADS Provided Books must be saved before use', '../repair/tsg053-save-book-first.ipynb'], ["NameError: name 'azdata_login_secret_name' is not defined", 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', "TSG124 - 'No credentials were supplied' error from azdata login", '../repair/tsg124-no-credentials-were-supplied.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb'], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']} ``` ### Get the Kubernetes namespace for the big data cluster Get the namespace of the Big Data Cluster use the kubectl command line interface . **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster. - set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio. ``` # Place Kubernetes namespace name for BDC into 'namespace' variable if "AZDATA_NAMESPACE" in os.environ: namespace = os.environ["AZDATA_NAMESPACE"] else: try: namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True) except: from IPython.display import Markdown print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. 
SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.") display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.')) raise print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}') ``` ### Create a temporary directory to stage files ``` # Create a temporary directory to hold configuration files import tempfile temp_dir = tempfile.mkdtemp() print(f"Temporary directory created: {temp_dir}") ``` ### Helper function to save configuration files to disk ``` # Define helper function 'save_file' to save configuration files to the temporary directory created above import os import io def save_file(filename, contents): with io.open(os.path.join(temp_dir, filename), "w", encoding='utf8', newline='\n') as text_file: text_file.write(contents) print("File saved: " + os.path.join(temp_dir, filename)) print("Function `save_file` defined successfully.") ``` ### Get endpoint hostname ``` import json import urllib endpoint_name = "sql-server-master" if app_name == "master" else app_name endpoint = run(f'azdata bdc endpoint list --endpoint="{endpoint_name}"', return_output=True) endpoint = json.loads(endpoint) endpoint = endpoint['endpoint'] print(f"endpoint: {endpoint}") hostname = urllib.parse.urlparse(endpoint).hostname print(f"hostname: {hostname}") ``` ### Get name of the ‘Running’ `controller` `pod` ``` # Place the name of the 'Running' controller pod in variable `controller` controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True) 
print(f"Controller pod name: {controller}") ``` ### Create the DNS alt\_names for data plane in secure clusters Get the cluster configuration from the Big Data Cluster using `azdata bdc config`, and pull the Active Directory DNS names out of it, and place them into the certificate configuration file as DNS alt\_names ``` import json alt_names = "" bdc_fqdn = "" hdfs_vault_svc = "hdfsvault-svc" bdc_config = run("azdata bdc config show", return_output=True) bdc_config = json.loads(bdc_config) dns_counter = 3 # DNS.1 and DNS.2 are already in the certificate template. if app_name == "gateway" or app_name == "master": alt_names += f'DNS.{str(dns_counter)} = {pod}.{common_name}\n' dns_counter = dns_counter + 1 alt_names += f'DNS.{str(dns_counter)} = {pod}.{common_name}.{namespace}.svc.cluster.local\n' dns_counter = dns_counter + 1 if "security" in bdc_config["spec"] and "activeDirectory" in bdc_config["spec"]["security"]: domain_dns_name = bdc_config["spec"]["security"]["activeDirectory"]["domainDnsName"] sub_domain_name = bdc_config["spec"]["security"]["activeDirectory"]["subdomain"] alt_names += f"DNS.{str(dns_counter)} = {common_name}.{domain_dns_name}\n" dns_counter = dns_counter + 1 if app_name == "gateway" or app_name == "master": alt_names += f'DNS.{str(dns_counter)} = {pod}.{domain_dns_name}\n' dns_counter = dns_counter + 1 if sub_domain_name: bdc_fqdn = f"{sub_domain_name}.{domain_dns_name}" else: bdc_fqdn = domain_dns_name if app_name in bdc_config["spec"]["resources"]: app_name_endpoints = bdc_config["spec"]["resources"][app_name]["spec"]["endpoints"] for endpoint in app_name_endpoints: if "dnsName" in endpoint: alt_names += f'DNS.{str(dns_counter)} = {endpoint["dnsName"]}\n' dns_counter = dns_counter + 1 # Special case for the controller certificate # if app_name == "controller": alt_names += f"DNS.{str(dns_counter)} = localhost\n" dns_counter = dns_counter + 1 # Add hdfsvault-svc host for key management calls. 
# alt_names += f"DNS.{str(dns_counter)} = {hdfs_vault_svc}\n" dns_counter = dns_counter + 1 # Add hdfsvault-svc FQDN for key management calls. # if bdc_fqdn: alt_names += f"DNS.{str(dns_counter)} = {hdfs_vault_svc}.{bdc_fqdn}\n" dns_counter = dns_counter + 1 print("DNS alt_names (data plane):") print(alt_names) ``` ### Create the DNS alt\_names for control plane in secure clusters Get the cluster configuration from the Big Data Cluster using `azdata bdc endpoint list`, and pull the Active Directory DNS names out of it for the control plane expternal endpoints (Controller and Management Proxy), and place them into the certificate configuration file as DNS alt\_names ``` import json from urllib.parse import urlparse if app_name == "controller" or app_name == "mgmtproxy": bdc_endpoint_list = run("azdata bdc endpoint list", return_output=True) bdc_endpoint_list = json.loads(bdc_endpoint_list) # Parse the DNS host name from: # # "endpoint": "https://monitor.aris.local:30777" # for endpoint in bdc_endpoint_list: if endpoint["name"] == app_name: url = urlparse(endpoint["endpoint"]) alt_names += f"DNS.{str(dns_counter)} = {url.hostname}\n" dns_counter = dns_counter + 1 print("DNS alt_names (control plane):") print(alt_names) ``` ### Create alt\_names If the Kuberenetes service is of “NodePort” type, then the IP address needed to validate the cluster certificate could be for any node in the Kubernetes cluster, so here all node IP addresses in the Big Data Cluster are added as alt\_names. Otherwise (if not NodePort, and therefore LoadBalancer), add just the hostname as returned from `azdata bdc endpoint list` above. 
``` service_type = run(f"kubectl get svc {common_name}-external -n {namespace} -o jsonpath={{.spec.type}}", return_output=True) print(f"Service type for '{common_name}-external' is: '{service_type}'") print("") if service_type == "NodePort": nodes_ip_address = run("kubectl ""get nodes -o jsonpath={.items[*].status.addresses[0].address}""", return_output=True) nodes_ip_address = nodes_ip_address.split(' ') counter = 1 for ip in nodes_ip_address: alt_names += f"IP.{counter} = {ip}\n" counter = counter + 1 else: alt_names += f"IP.1 = {hostname}\n" print("All (DNS and IP) alt_names:") print(alt_names) ``` ### Generate Certificate Configuration file NOTE: There is a special case for the `controller` certificate, that needs to be generated in PKCS\#1 format. ``` certificate = f""" [ req ] # Options for the `req` tool (`man req`). default_bits = 2048 default_keyfile = {test_cert_store_root}/{app_name}/{prefix_keyfile_name}-privatekey{".pkcs8" if app_name == "controller" else ""}.pem distinguished_name = req_distinguished_name string_mask = utf8only # SHA-1 is deprecated, so use SHA-2 instead. default_md = sha256 req_extensions = v3_req [ req_distinguished_name ] countryName = Country Name (2 letter code) countryName_default = {country_name} stateOrProvinceName = State or Province Name (full name) stateOrProvinceName_default = {state_or_province_name} localityName = Locality Name (eg, city) localityName_default = {locality_name} organizationName = Organization Name (eg, company) organizationName_default = {organization_name} organizationalUnitName = Organizational Unit (eg, division) organizationalUnitName_default = {organizational_unit_name} commonName = Common Name (e.g. 
server FQDN or YOUR name) commonName_default = {common_name} emailAddress = Email Address emailAddress_default = {email_address} [ v3_req ] subjectAltName = @alt_names subjectKeyIdentifier = hash basicConstraints = CA:FALSE keyUsage = digitalSignature, keyEncipherment {extendedKeyUsage} [ alt_names ] DNS.1 = {common_name} DNS.2 = {common_name}.{namespace}.svc.cluster.local # Use the namespace applicable for your cluster {alt_names} """ print(certificate) save_file(ssl_configuration_file, certificate) ``` ### Copy certificate configuration to `controller` `pod` ``` import os cwd = os.getcwd() os.chdir(temp_dir) # Use chdir to workaround kubectl bug on Windows, which incorrectly processes 'c:\' on kubectl cp cmd line run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "mkdir -p {test_cert_store_root}/{app_name}"') run(f'kubectl cp {ssl_configuration_file} {controller}:{test_cert_store_root}/{app_name}/{ssl_configuration_file} -c controller -n {namespace}') os.chdir(cwd) ``` ### Generate certificate Use openssl req to generate a certificate in PKCS\#10 format. See: - https://www.openssl.org/docs/man1.0.2/man1/req.html ``` cmd = f"openssl req -config {test_cert_store_root}/{app_name}/service.openssl.cnf -newkey rsa:2048 -sha256 -nodes -days {days} -out {test_cert_store_root}/{app_name}/{prefix_keyfile_name}-signingrequest.csr -outform PEM -subj '/C={country_name}/ST={state_or_province_name}/L={locality_name}/O={organization_name}/OU={organizational_unit_name}/CN={common_name}'" run(f'kubectl exec {controller} -n {namespace} -c controller -- bash -c "{cmd}"') ``` ### Convert the private key to PKCS12 format The private key for controller needs to be converted to PKCS12 format. 
``` cmd = f'openssl rsa -in {test_cert_store_root}/{app_name}/{prefix_keyfile_name}-privatekey.pkcs8.pem -out {test_cert_store_root}/{app_name}/{prefix_keyfile_name}-privatekey.pem' run(f'kubectl exec {controller} -n {namespace} -c controller -- bash -c "{cmd}"') ``` ### Clean up temporary directory for staging configuration files ``` # Delete the temporary directory used to hold configuration files import shutil shutil.rmtree(temp_dir) print(f'Temporary directory deleted: {temp_dir}') print('Notebook execution complete.') ``` Related ------- - [CER030 - Sign Management Proxy certificate with generated CA](../cert-management/cer030-sign-service-proxy-generated-cert.ipynb) - [CER034 - Sign Controller certificate with cluster Root CA](../cert-management/cer034-sign-controller-generated-cert.ipynb) - [CER044 - Install signed Controller certificate](../cert-management/cer044-install-controller-cert.ipynb)
github_jupyter
<img style="float: center;" src="images/CI_horizontal.png" width="600"> <center> <span style="font-size: 1.5em;"> <a href='https://www.coleridgeinitiative.org'>Website</a> </span> </center> Ghani, Rayid, Frauke Kreuter, Julia Lane, Adrianne Bradford, Alex Engler, Nicolas Guetta Jeanrenaud, Graham Henke, Daniela Hochfellner, Clayton Hunter, Brian Kim, Avishek Kumar, Jonathan Morgan, and Benjamin Feder. _source to be updated when notebook added to GitHub_ # Table of Contents JupyterLab contains a dynamic Table of Contents that can be accessed by clicking the last of the six icons on the left-hand sidebar. # Dataset Preparation ---------- In this notebook, we will walk through preparing our data for machine learning. In practice, the data preparation should take some time as you will need to think deeply about the question at the heart of your project. ## The Machine Learning Process The Machine Learning Process is as follows: - [**Understand the problem and goal.**](#problem-formulation) *This sounds obvious but is often nontrivial.* Problems typically start as vague descriptions of a goal - improving health outcomes, increasing graduation rates, understanding the effect of a variable *X* on an outcome *Y*, etc. It is really important to work with people who understand the domain being studied to dig deeper and define the problem more concretely. What is the analytical formulation of the metric that you are trying to optimize? - [**Formulate it as a machine learning problem.**](#problem-formulation) Is it a classification problem or a regression problem? Is the goal to build a model that generates a ranked list prioritized by risk, or is it to detect anomalies as new data come in? Knowing what kinds of tasks machine learning can solve will allow you to map the problem you are working on to one or more machine learning settings and give you access to a suite of methods. - **Data exploration and preparation.** Next, you need to carefully explore the data you have. 
What additional data do you need or have access to? What variable will you use to match records for integrating different data sources? What variables exist in the data set? Are they continuous or categorical? What about missing values? Can you use the variables in their original form, or do you need to alter them in some way? - [**Feature engineering.**](#feature-generation) In machine learning language, what you might know as independent variables or predictors or factors or covariates are called "features." Creating good features is probably the most important step in the machine learning process. This involves doing transformations, creating interaction terms, or aggregating over data points or over time and space. - **Method selection.** Having formulated the problem and created your features, you now have a suite of methods to choose from. It would be great if there were a single method that always worked best for a specific type of problem. Typically, in machine learning, you take a variety of methods and try them, empirically validating which one is the best approach to your problem. - [**Evaluation.**](#evaluation) As you build a large number of possible models, you need a way to choose the best among them. We'll cover methodology to validate models on historical data and discuss a variety of evaluation metrics. The next step is to validate using a field trial or experiment. - [**Deployment.**](#deployment) Once you have selected the best model and validated it using historical data as well as a field trial, you are ready to put the model into practice. You still have to keep in mind that new data will be coming in, and the model might change over time. Here, to reiterate, we will work through all the steps we can accomplish querying directly from our Athena database, and then in the following notebook, we will bring the table we created in this notebook into python and complete the machine learning process. 
## Problem Formulation First, you need to turn something into a real objective function. What do you care about? Do you have data on that thing? What action can you take based on your findings? Do you risk introducing any bias based on the way you model something? ### Four Main Types of ML Tasks for Policy Problems - **Description**: [How can we identify and respond to the most urgent online government petitions?](https://dssg.uchicago.edu/project/improving-government-response-to-citizen-requests-online/) - **Prediction**: [Which students will struggle academically by third grade?](https://dssg.uchicago.edu/project/predicting-students-that-will-struggle-academically-by-third-grade/) - **Detection**: [Which police officers are likely to have an adverse interaction with the public?](https://dssg.uchicago.edu/project/expanding-our-early-intervention-system-for-adverse-police-interactions/) - **Behavior Change**: [How can we prevent juveniles from interacting with the criminal justice system?](https://dssg.uchicago.edu/project/preventing-juvenile-interactions-with-the-criminal-justice-system/) ### Our Machine Learning Problem > Out of low-income households, can we predict which ones did not purchase a 100% whole wheat product in a year's time? If so, what are the most important household features? This is an example of a *binary prediction classification problem*. Note the time windows are completely arbitrary. You could use an outcome window of 5, 3, 1 years or 1 day. The outcome window will depend on how often you receive new data, how accurate your predictions are for a given time period, or on what time-scale you can use the output of the data. > By low-income households, we're referring to only those who are WIC participants or WIC-eligible. ## Access the Data As always, we will bring in the python libraries that we need to use, as well as set up our connection to the database. 
``` # pandas-related imports import pandas as pd # database interaction imports from pyathenajdbc import connect conn = connect(s3_staging_dir = 's3://usda-iri-2019-queryresults/', region_name = 'us-gov-west-1', LogLevel = '0', workgroup = 'workgroup-iri_usda') ``` ## Define our Cohort Since the machine learning problem focuses on finding the features most important in predicting if a low-income household will not purchase 100% whole wheat product at least once in a year, we will focus just on households that were either WIC-eligible or participants in a given year. Here, we will train our models on data from low-income households in 2014 and their presence of 100% whole wheat purchases in 2015 and test on low-income households in 2015 buying 100% whole wheat product(s) in 2016. Let's first see how many of these households we will have in our testing and training datasets. > We already created our 2014 and 2015 household tables, `init_train` and `init_test` in the `iri_usda_2019_db` database, by changing the years from the code used to create `project_q2_cohort` in the [Second Data Exploration](02_02_Data_Exploration_Popular_Foods.ipynb) notebook. We also subsetted the `panid` to only include households who had static purchasing data (`projection61k` > 0) the year we're predicting on and the year prior (i.e. 2014 and 2015 for our training set). `init_train` and `init_test` contain the exact same variables as the `demo_all` table in the `iri_usda` Athena database. 
``` # get count for 2014 low-income households qry = ''' select count(*) as num_2014 from iri_usda_2019_db.init_train ''' pd.read_sql(qry, conn) # get count for 2015 low-income households qry = ''' select count(*) as num_2015 from iri_usda_2019_db.init_test ''' pd.read_sql(qry, conn) ``` ## Create Foundation for Training and Testing Datasets Now that we have defined our cohorts for our testing and training datasets, we need to combine our available datasets so that each low-income household is a row containing demographic data from the previous year, whether they purchased a 100% whole wheat product in the following calendar year, and aggregate purchasing data from the prior year. For the purchasing data, we want to aggregate the amount the household spent and their total amount of trips. To do this, we will first find all households that purchased any 100% whole wheat product in our given prediction years (2015 and 2016), and then we will join it to our low-income household datasets from the previous year. Because we will be relying on the table of households who purchased any 100% whole wheat product to create our desired table in Athena, we will save it as a permanent table. Then, we will join this table with our low-income cohort and one containing aggregate purchasing data for the prior year for these households. > Note: It is possible to do this process in one step. However, for your understanding and ease in reproducibility, we broke it down into multiple steps to avoid a larger subquerying process. 
``` # see existing table list table_list = pd.read_sql('show tables IN iri_usda_2019_db;', conn) print(table_list) # get a series of tab_name values s = pd.Series(list(table_list['tab_name'])) # create table to find households that bought 100% whole wheat products in 2015 or 2016 if('ml_aggregate' not in s.unique()): print('creating table') qry = ''' create table iri_usda_2019_db.ml_aggregate with( format = 'Parquet', parquet_compression = 'SNAPPY' ) as select t.panid, t.year, sum(t.dollarspaid) as dollarspaid from iri_usda.pd_pos_all p, iri_usda.trip_all t where p.upc = t.upc and (t.year = '2016' or t.year = '2015') and p.upcdesc like '%100% WHOLE WHEAT%' and p.year = t.year group by t.panid, t.year ; ''' with conn.cursor() as cursor: cursor.execute(qry) else: print('table already exists') ``` <font color = red><h2> Checkpoint 1: What question are we asking?</h2> </font> Above, we are creating an aggregated table of all purchases in which a product with "100% Whole Wheat" in the description was purchased. However, we might want to broaden the definition to include other whole grains. For example, you might want to include corn tortillas or oatmeal, to make sure you're catching as many of the different types of whole grains that people may purchase. How would you include these other whole grain items in your table? ## Creating Train and Test Sets Now that we've created the aggregated table for households that purchased any 100% whole wheat products, we can combine that with `init_train` and `init_test` to get demographic data and define our label. Let's first take a look at the `ml_aggregate` table to see how it looks. Remember, this is a table that contains each household that purchased a 100% whole wheat product along with the total dollars paid in that year for 100% whole wheat products. 
``` # view ml_aggregate qry = ''' select * from iri_usda_2019_db.ml_aggregate limit 10 ''' pd.read_sql(qry, conn) ``` We can now join `ml_aggregate` with `init_train` and `init_test` to grab the demographic data. Since we would like to match households that purchased 100% whole wheat products in either 2015 or 2016 to low-income households in `init_train` and `init_test` (those with no 100% whole wheat product purchases the following year will have NAs), we will left join `ml_aggregate` to `init_train` and `init_test`. Also, we will add our dependent variable, `label`, using a `case when` statement that is `yes` when the household purchased 100% whole wheat products in the following calendar year. ``` # match ml_aggregate with demographic data for just our training cohort # left join so that we maintain all low-income households who didn't buy any 100% whole wheat products if('ml_combined_train' not in s.unique()): qry = ''' create table iri_usda_2019_db.ml_combined_train with( format = 'Parquet', parquet_compression = 'SNAPPY' ) as select c.panid, c.hhsize, c.hhinc, c.race, c.hisp, c.ac, c.fed, c.femp, c.med, c.memp, c.mocc, c.marital, c.rentown, c.cats, c.dogs, c.hhtype, c.region, c.wic_june, c.snap_june, c.projection61k, case when a.dollarspaid > 0 then 0 else 1 end as label from iri_usda_2019_db.init_train c left join ( select * from iri_usda_2019_db.ml_aggregate a where year = '2015' ) a on c.panid = a.panid ''' with conn.cursor() as cursor: cursor.execute(qry) else: print('table already exists') # match ml_aggregate with demographic data for just our testing cohort # left join so that we maintain all low-income households who didn't buy any 100% whole wheat products if('ml_combined_test' not in s.unique()): qry = ''' create table iri_usda_2019_db.ml_combined_test with( format = 'Parquet', parquet_compression = 'SNAPPY' ) as select c.panid, c.hhsize, c.hhinc, c.race, c.hisp, c.ac, c.fed, c.femp, c.med, c.memp, c.mocc, c.marital, c.rentown, c.cats, c.dogs, 
c.hhtype, c.region, c.wic_june, c.snap_june, c.projection61k, case when a.dollarspaid > 0 then 0 else 1 end as label from iri_usda_2019_db.init_test c left join ( select * from iri_usda_2019_db.ml_aggregate a where year = '2016' ) a on c.panid = a.panid ''' with conn.cursor() as cursor: cursor.execute(qry) else: print('table already exists') # verify ml_combined_train is what we want qry = ''' select * from iri_usda_2019_db.ml_combined_train limit 5 ''' pd.read_sql(qry, conn) # verify ml_combined_test is what we want qry = ''' select * from iri_usda_2019_db.ml_combined_test limit 5 ''' pd.read_sql(qry, conn) ``` Finally, we want to add in the amount spent and number of trips in 2014 or 2015 for these households in the IRI database. We will first confirm that we can find the amount spent and number of trips a household took according to the `trip_all` table in either 2014 or 2015 for households in `ml_combined_train` and `ml_combined_test`. > Recall that to calculate the amount spent, you can subtract `coupon` from `dollarspaid`. The number of trips per household is the distinct value of `tripnumber` and `purdate`. ``` # find aggregate purchasing information by households in 2014 and 2015 qry = ''' select panid, year, round(sum(dollarspaid) - sum(coupon),2) as total, count(distinct(purdate, tripnumber)) as num_trips from iri_usda.trip_all where year in ('2014', '2015') and panid in ( select distinct panid from iri_usda_2019_db.ml_combined ) group by year, panid limit 5 ''' pd.read_sql(qry, conn) ``` Now that we can find aggregate purchasing data in 2014 and 2015 for households in `ml_combined_train` and `ml_combined_test`, we can perform another left join using this query. We just need to make sure that we are matching based on `panid`, and making sure that we are selecting the purchasing data from the year prior for each row in `ml_combined_train` and `ml_combined_test`. 
This will be our final table we create before moving onto the [Machine Learning](04_02_Machine_Learning.ipynb) notebook. ``` if('ml_model_train' not in s.unique()): qry = ''' create table iri_usda_2019_db.ml_model_train with( format = 'Parquet', parquet_compression = 'SNAPPY' ) as select a.*, b.total, b.num_trips from iri_usda_2019_db.ml_combined_train a left join (select panid, round(sum(dollarspaid) - sum(coupon),2) as total, count(distinct(purdate, tripnumber)) as num_trips from iri_usda.trip_all where year in ('2014') and panid in ( select distinct panid from iri_usda_2019_db.ml_combined_train ) group by panid ) b on a.panid = b.panid ''' with conn.cursor() as cursor: cursor.execute(qry) else: print('table already exists') if('ml_model_test' not in s.unique()): qry = ''' create table iri_usda_2019_db.ml_model_test with( format = 'Parquet', parquet_compression = 'SNAPPY' ) as select a.*, b.total, b.num_trips from iri_usda_2019_db.ml_combined_test a left join (select panid, round(sum(dollarspaid) - sum(coupon),2) as total, count(distinct(purdate, tripnumber)) as num_trips from iri_usda.trip_all where year in ('2015') and panid in ( select distinct panid from iri_usda_2019_db.ml_combined_test ) group by panid ) b on a.panid = b.panid ''' with conn.cursor() as cursor: cursor.execute(qry) else: print('table already exists') # verify ml_model_train is what we want qry = ''' select * from iri_usda_2019_db.ml_model_train limit 5 ''' pd.read_sql(qry, conn) # verify ml_model_test is what we want qry = ''' select * from iri_usda_2019_db.ml_model_test limit 5 ''' pd.read_sql(qry, conn) # and that tables have unique PANID values, ie a row is a household in the given year qry = ''' select count(*) recs, count(distinct panid) from iri_usda_2019_db.ml_model_train ''' pd.read_sql(qry, conn) # same for test set qry = ''' select count(*) recs, count(distinct panid) from iri_usda_2019_db.ml_model_test ''' pd.read_sql(qry, conn) ``` Now we should have everything we need from our 
Athena data tables to run some machine learning models to tackle our guiding question.
github_jupyter
# Introduction to sharing interactive Jupyter notebooks ## From the workshop '[Getting Started with Reproducible and Open Research](https://escience-academy.github.io/2020-02-11-Reproducible-and-Open-Research/)' _Date: 11-12 February 2020_ _Author: Sam Nooij_ --- In this example notebook, I will: 1. Load data from [gapminder](https://www.gapminder.org) 2. Use the Python library `pandas` to explore and visualise the data 3. Create (interactive) figures with `matplotlib`, `plotnine` and `ipywidgets` ## Loading data ``` import pandas as pd from pathlib import Path #this function helps use paths in a platform-independent way pd.__version__ data_path = Path("./data/") ``` ### Check which files were downloaded ``` list(data_path.glob('*')) population = pd.read_csv(data_path / "population_total.csv") population["country"] population["2089"] ``` ### Open all three csv files downloaded from gapminder.org as DataFrames ``` population = pd.read_csv(data_path / "population_total.csv", index_col="country") life_expectancy = pd.read_csv(data_path / "life_expectancy_years.csv", index_col="country") income = pd.read_csv(data_path / "income_per_person_gdppercapita_ppp_inflation_adjusted.csv", index_col="country") population.head() population.index population.T.head() #transposes the table ``` ## Visualise population sizes per year in a few countries **1 Portugal** ``` population.T["Portugal"].plot() ``` **2 The Netherlands** ``` population.T["Netherlands"].plot() ``` **3 Japan** ``` population.T["Japan"].plot() ``` **4 Kenya** ``` population.T["Kenya"].plot() ``` ### Now start looking into the life expectancy data ``` life_expectancy.info() ``` **For which countries do we have population size data, but not the life expectancy?** ``` # Index ^/&/| set operators were deprecated in pandas 1.x and removed in 2.0 — use the explicit method life_expectancy.index.symmetric_difference(population.index) set(population.index) - set(life_expectancy.index) ``` **And for which countries do we have income data, but no life expectancy?** ``` income.index.symmetric_difference(life_expectancy.index) ``` ### As an example, visualise income 
vs. life expectancy in Yemen ``` yemen = pd.concat([income.T["Yemen"].rename("income"), life_expectancy.T["Yemen"].rename("life_expectancy")], axis = 1) from plotnine import ggplot, geom_point, aes ggplot(yemen, aes("income", "life_expectancy")) + geom_point() ``` ### Now try out matplotlib ``` from matplotlib import pyplot as plt import numpy as np fig, ax = plt.subplots() fig2, ax2 = plt.subplots(2, 2) ``` **As an example, we will look at the income vs. life expectancy again, but now for all countries, whose population sizes are represented as circle size.** ``` example_df = pd.concat([ population["1900"].rename("population"), life_expectancy["1900"].rename("life_expectancy"), income["1900"].rename("income") ], axis = 1, join = "inner") fig, ax = plt.subplots() ax.scatter(example_df["income"], example_df["life_expectancy"], s=np.sqrt(example_df["population"])/50, alpha=0.3) ax.set_xlabel("annual income per capita, inflation corrected in USD") ax.set_ylabel("life expectancy") fig.savefig("bubbleplot.svg") plt.show() ``` Generally, life expectancy is higher in countries with a higher annual income (Figure 1). ![My first bubble plot](bubbleplot.svg) **Figure 1. My first bubble plot.** Saved as svg image. 
--- ### Now modularise the code for easier re-use of the same function **Also increase the plot size for more convenient viewing in the notebook.** ``` def gapminder_bubble(year: int): year_str = str(year) x = pd.concat([ population[year_str].rename("population"), life_expectancy[year_str].rename("life_expectancy"), income[year_str].rename("income")], axis = 1, join = "inner") fig, ax = plt.subplots(figsize=(12, 8)) ax.scatter(x["income"], x["life_expectancy"], s=np.sqrt(x["population"])/50, alpha=0.3) ax.set_xlabel("annual income per capita, inflation corrected in USD") ax.set_ylabel("life expectancy") ax.set_xlim(300, 1e5) ax.set_ylim(0, 90) ax.set_xscale("log") return fig, ax #We are going to make a function for plotting different years fig, ax = gapminder_bubble(1980) ``` **These are the life expectancies per annual income for all listed countries in the year 1980.** --- _Notes to self:_ > Now you could lookup ipywidgets to make interactive figures with a slider for years. > Look at the [ipywidgets documentation](https://ipywidgets.readthedocs.io/en/stable/user_install.html) for installation instructions. (It involves installing `ipywidgets` and `nodejs` with conda, and then running another command in the shell to install the extension.) The figure below only works in interactive environments, e.g. by running the notebook on [MyBinder](https://mybinder.org/v2/gh/samnooij/reproducible_science_workshop-20200211/master?filepath=analysis%2FGapminder.ipynb). It is an interactive view of the above figure, using a slider to select the year (between 1800 and 2018). ``` from ipywidgets import interact interact(gapminder_bubble, year=(1800, 2018)) ```
github_jupyter
## Part-1: Introduction ``` ## Loading the libraries import spacy # open-source NLP library in Python with several pre-trained models from spacy import displacy # spacy's built-in library to visualise the behavior of the entity recognition model interactively nlp = spacy.load("en_core_web_sm") # English pipeline optimized for CPU ## Sample sentence doc = nlp('European authorities fined Google a record $5.1 billion on Wednesday for abusing its power in the mobile phone market and ordered the company to alter its practices') print([(X.text, X.label_) for X in doc.ents]) ``` In the above code, the arguments passed are: * doc.ents: entity token spans * X.text: the original entity text * X.label_: the entity type's string description ``` ## Visualizing the entities displacy.render(doc, jupyter=True, style='ent') print(spacy.explain("NORP")) print(spacy.explain("ORG")) ## Visualizing the dependency tree displacy.render(doc, jupyter = True, style='dep', options = {'distance': 50}) ``` ## Part-2: Example ``` import requests target_url = "https://raw.githubusercontent.com/sharvitomar/text-file/main/temp.txt" response = requests.get(target_url) data = response.text data article = nlp(data) print(article) ## 1. Number of entities in the article len(article.ents) from collections import Counter ## 2. Number of unique labels of the entities labels = [x.label_ for x in article.ents] Counter(labels) ## 3. The 3 most frequent tokens items = [x.text for x in article.ents] Counter(items).most_common(3) ## 4. Visualise 1 sentence sentences = [x for x in article.sents] displacy.render(sentences[2], jupyter=True, style='ent') ## 5. Visualizating the entire article displacy.render(article, jupyter=True, style='ent') ``` ## Part-3: Adding custom tokens as NE ``` doc = nlp('Tesla to build a U.K. factory for $6 million') print([(X.text, X.label_) for X in doc.ents]) ``` Right now, spaCy does not recognize "Tesla" as a company. 
``` from spacy.tokens import Span # Get the hash value of the ORG entity label ORG = doc.vocab.strings['ORG'] # Create a Span for the new entity new_ent = Span(doc, 0, 1, label=ORG) # Add the entity to the existing Doc object doc.ents = list(doc.ents) + [new_ent] ``` In the code above, the arguments passed to Span() are: * doc - the name of the Doc object * 0 - the start index position of the token in the doc * 1 - the stop index position(exclusive) of the token in the doc * label = ORG - the label assigned to our entity ``` print([(X.text, X.label_) for X in doc.ents]) displacy.render(doc, jupyter=True, style='ent') ```
github_jupyter
``` from datetime import datetime import backtrader as bt import pandas as pd import numpy as np import vectorbt as vbt df = pd.DataFrame(index=[datetime(2020, 1, i + 1) for i in range(9)]) df['open'] = [1, 1, 2, 3, 4, 5, 6, 7, 8] df['high'] = df['open'] + 0.5 df['low'] = df['open'] - 0.5 df['close'] = df['open'] data = bt.feeds.PandasData(dataname=df) size = np.array([5, 5, -5, -5, -5, -5, 5, 5, 0]) class CommInfoFloat(bt.CommInfoBase): """Commission schema that keeps size as float.""" params = ( ('stocklike', True), ('commtype', bt.CommInfoBase.COMM_PERC), ('percabs', True), ) def getsize(self, price, cash): if not self._stocklike: return self.p.leverage * (cash / self.get_margin(price)) return self.p.leverage * (cash / price) class CashValueAnalyzer(bt.analyzers.Analyzer): """Analyzer to extract cash and value.""" def create_analysis(self): self.rets = {} def notify_cashvalue(self, cash, value): self.rets[self.strategy.datetime.datetime()] = (cash, value) def get_analysis(self): return self.rets class TestStrategy(bt.Strategy): def __init__(self): self.i = 0 def log(self, txt, dt=None): dt = dt or self.data.datetime[0] dt = bt.num2date(dt) print('%s, %s' % (dt.isoformat(), txt)) def notify_order(self, order): if order.status in [bt.Order.Submitted, bt.Order.Accepted]: return # Await further notifications if order.status == order.Completed: if order.isbuy(): buytxt = 'BUY COMPLETE {}, size = {:.2f}, price = {:.2f}'.format( order.data._name, order.executed.size, order.executed.price) self.log(buytxt, order.executed.dt) else: selltxt = 'SELL COMPLETE {}, size = {:.2f}, price = {:.2f}'.format( order.data._name, order.executed.size, order.executed.price) self.log(selltxt, order.executed.dt) elif order.status in [order.Expired, order.Canceled, order.Margin]: self.log('%s ,' % order.Status[order.status]) pass # Simply log # Allow new orders self.orderid = None def next(self): if size[self.i] > 0: self.buy(size=size[self.i]) elif size[self.i] < 0: 
self.sell(size=-size[self.i]) self.i += 1 def bt_simulate(shortcash): cerebro = bt.Cerebro() comminfo = CommInfoFloat(commission=0.01) cerebro.broker.addcommissioninfo(comminfo) cerebro.addstrategy(TestStrategy) cerebro.addanalyzer(CashValueAnalyzer) cerebro.broker.setcash(100.) cerebro.broker.set_checksubmit(False) cerebro.broker.set_shortcash(shortcash) cerebro.adddata(data) return cerebro.run()[0] strategy = bt_simulate(True) strategy.analyzers.cashvalueanalyzer.get_analysis() portfolio = vbt.Portfolio.from_orders(df.close, [np.nan] + size[:-1].tolist(), fees=0.01) print(portfolio.cash(free=False)) print(portfolio.value()) strategy = bt_simulate(False) strategy.analyzers.cashvalueanalyzer.get_analysis() print(portfolio.cash(free=True)) print(portfolio.value()) ```
github_jupyter
## Plot difference between 2 30yr means of zonal mean zonal wind in : #### HadGEM3-GC31-MM for selected season (DJF) and over selected latitude range (0-90) ##### Created as part of PAMIP group during CMIP6 hackathon 2021 ##### Created by : Phoebe Hudson / Colin Manning ``` from itertools import chain from glob import glob import matplotlib import matplotlib.pyplot as plt import xarray as xr import numpy as np # Set figsize and resolution plt.rcParams['figure.figsize'] = (10, 6) plt.rcParams['figure.dpi'] = 100 ### Set data directory where data structure follows # /badc/cmip6/data/<mip_era>/<activity_id>/<institution_id>/<source_id>/<experiment_id>/<variant_label>/<table_id>/<variable_id>/<grid_label>/<version> # and experiment id follows # r = realization, i = initialization, p = physics, f = forcing data_dir = "/badc/cmip6/data/CMIP6/CMIP/MOHC/HadGEM3-GC31-MM/1pctCO2/r1i1p1f3/Amon/ua/gn/latest" #data_dir = "/badc/cmip6/data/CMIP6/CMIP/*/*/1pctCO2/*/Amon/ua/gn/latest" !ls {data_dir} %%time ds = xr.open_mfdataset(data_dir + '/*.nc') ds ### Calculate zonal mean of zonal wind (mean over all longitudes) and sub-select by latitude (N hem = 0,90) ds_lonmean = ds.mean(dim='lon') ds_lonmean = ds_lonmean.sel(lat=slice(0, 90)) ds_lonmean.time ### Select winter months (DJF=Dec-Feb) from zonal mean zonal wind is_winter = ds_lonmean['time'].dt.season == 'DJF' ds_lonmean_winter = ds_lonmean.isel(time=is_winter) ds_lonmean_winter ### Calculate difference between two 30-yr means of zonal mean zonal winds for # 1850-01-16-1880-12-16 and # 1969-01-16-1999-12-16 ds_lonmean_endyrs = ds_lonmean_winter.sel(time=slice('1969-01-16', '1999-12-16')).mean(dim='time') ds_lonmean_startyrs = ds_lonmean_winter.sel(time=slice('1850-01-16', '1880-12-16')).mean(dim='time') ds_lonmean_yrsdiff = ds_lonmean_endyrs - ds_lonmean_startyrs ds_lonmean_yrsdiff = ds_lonmean_yrsdiff.ua.values ds_lonmean_yrsdiff ### Plot zonal mean zonal wind difference for selected season in selected latitude region 
#ds_lonmean.ua.sel(time='1850-01-16').plot() fig, axes = plt.subplots(nrows=1, ncols=1) #, figsize=(15,15)) ax = plt.subplot(1,1,1) cmap = plt.get_cmap('RdBu_r') plt.contourf(ds_lonmean.lat, ds_lonmean.plev, ds_lonmean_yrsdiff, cmap='RdBu_r', levels=np.arange(-1,1,0.1), extend='both') #plt.pcolormesh(ds_lonmean.lat, ds_lonmean.plev, ds_lonmean_yrsdiff.ua.values, cmap='RdBu_r') plt.xlabel('Lat') # plev values here are in Pa (ylim spans 100000-10000 Pa, i.e. 1000-100 hPa), so label the axis in Pa plt.ylabel('Pressure Level (Pa)') plt.ylim([100000, 10000]) plt.title('HadGEM3-GC31-MM') #axs=np.append(axs,ax) normalize=matplotlib.colors.Normalize(vmin=-1, vmax=1) cax, _ = matplotlib.colorbar.make_axes(ax, location = "bottom", pad=0.05) cbar = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap, norm=normalize,orientation="horizontal") #cbar.set_label('Zonal windspeed ',size=16) cbar.ax.tick_params(labelsize=16) plt.show() ```
github_jupyter