code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div style='color: #690027;' markdown="1"> # <h1>From leaf to label: stomata detection</h1> # </div> # This notebook allows you to upload and test your own photomicrographs with the deep learning system discussed in the paper *From leaf to label: a robust automated workflow for stomata detection* by *<NAME>, <NAME> and <NAME>*. # <img src="https://drive.google.com/uc?export=view&id=1g1wIt37A07yDi7w9uCza3eMt1oEPLJ5v" alt="Overview" style="width:600px;"/> # As illustrated above, your photomicrograph (A) will be divided in small overlapping patches (B) by using a sliding window approach. A deep neural network (VGG19) is trained to label these patches (C). Positively labeled patches of a photomicrograph (D) are clustered which results in the detection (E) depending on the threshold of your choice. # # To start, please run the following cell by clicking the button "run" or by using shift-enter. # # Before starting, if you run this notebook on Colab you might want GPU acceleration. 
Therefore click: *Edit* > *Notebook Settings* > *Hardware accelerator GPU* > *Save* # ### Load libraries # # We start by loading a few python libraries: # # - [PIL](https://pillow.readthedocs.io/en/stable/): a handy Python imaging library # - [numpy](https://numpy.org): the fundamental package for scientific computing with Python # - [sklearn](https://scikit-learn.org/stable/): the scikit-learn machine learning package, more specifically the clustetring functionality # - [os](https://docs.python.org/3/library/os.html): a Python library for using operating system dependent functionality, e.g., reading, writing, listing files # - [matplotlib](https://matplotlib.org): a Python library for making graphs from PIL import Image, ImageEnhance, ImageOps import numpy as np from sklearn.cluster import MeanShift, estimate_bandwidth import os import matplotlib.pyplot as plt # In order to specify which NVidia GPU we will use we need to run the following commands. This assumes that Keras and Tensorflow are GPU enabled. See the [TensorFlow](https://www.tensorflow.org/install/gpu) documentation for more information. # If there is a NVidia enable GPU run this os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = '0' # The code that produced the results presented in the manuscript is based on Tensorflow and Keras. At the time of the writing, TensorFlow 2.0 was not yet available. However, since we relied on a TF 1.0 and Keras implementation. However, the principles for training your own deep learning model stay the same and should be sufficient to get started with [Keras](https://keras.io/getting_started/intro_to_keras_for_researchers/) yourself. 
from keras import backend as K from keras.layers import Input, Convolution2D, Conv2D, MaxPooling2D, Activation, concatenate, Dropout, GlobalAveragePooling2D, Flatten, Dense from keras.models import Model, load_model from keras.engine.topology import get_source_inputs from keras.utils import get_file from keras.utils import layer_utils import keras from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.preprocessing.image import ImageDataGenerator import tensorflow as tf # ### Set data # In order to train a deep learning model you need data. As discussed before, the deep learning model we discuss here will be trained based on rectangular patches. In order to get a robust model both, positive and negative examples must be presented to the system. # # The data needs to be split into three parts: # - The training set, i.e., the data which is used for changing the weights of the (deep) neural network; # - The validation, i.e., the data which is used to see how well the learning process goes and to tune the model's hyperparameters; # - The test data, i.e., the data you feed to the trained system afterwards when the detector is deployed. # # This notebook concerns the training and validation of the deep learning system for stomata detection, and a small dataset limited to *Carapa procera* is used for didactic purposes. This also restrains the computational needs (a full training with multiple species (cf. the paper) needs more patience. # We start by downloading and unzipping the dataset: # !wget https://zenodo.org/record/3902280/files/data.zip # !unzip 'data.zip' train_dir = "./data/training/" val_dir = "./data/validation/" # The training and validation data consists of patches of 120 by 120 pixels. 
A positive patch shows a stomata: # # <img src="https://drive.google.com/uc?export=view&id=11oZG14b8ZnbzooeeQpYaQeUe8wSbErmV" width="120" /> # # A negative patch of *Carapa procera* has no stomata (or only partially) within the patch: # # <img src="https://drive.google.com/uc?export=view&id=1vwcYiZDJffjQ6gXCpThwvbcCoPhgseBS" width="120" /> # # In order to obtain these patches you will need labeled microphotographs (i.e., microphotographs of which you have the x,y coordinates of the center position of the stomata that are shown). Based on these labels, the patches can be cropped by using the [crop function of PIL](https://pillow.readthedocs.io/en/stable/reference/Image.html) or simply by [matrix slicing](https://numpy.org/doc/1.18/reference/arrays.indexing.html) in Python Numpy. # The preprocessor defines the data augmentation that will be applied to the dataset. This consists of random rotations, horizontal and vertical flips of the patches: train_datagen = keras.preprocessing.image.ImageDataGenerator(rotation_range=180, horizontal_flip=True, vertical_flip=True, rescale=1/255.) # Apart from the data augmentation that will be applied, we need to set a [ImageDataGenerator](https://keras.io/api/preprocessing/image/#imagedatagenerator-class) that defines the size of the patches (120 x 120 pixels), the color mode, the batch size (i.e., the number of samples used in one training iteration), the classification mode of the task (i.e., binary classification: a patch can be positive or negative), whether the data needs to be shuffled or not and the seed (starting point) of the random number generator. Additionally, you need to provide a path to the directory with the training data. batch_size = 128 train_generator = train_datagen.flow_from_directory( directory=r"./data/training/", target_size=(120, 120), color_mode="rgb", batch_size=batch_size, class_mode="binary", shuffle=True, seed=53 ) # For the purpose of validation you also need to provide a ImageDataGenerator. 
This is defined with the same properties as the one for training but without data augmentation. # + test_datagen = ImageDataGenerator(rescale=1/255.) validation_generator = test_datagen.flow_from_directory( r"./data/validation/", target_size=(120, 120), color_mode="rgb", batch_size=batch_size, class_mode='binary') # - # ### Set model parameters # We start from a [VGG19](https://arxiv.org/abs/1409.1556) with two dense layers on top. The convolutional neural layers are pre-trained on [ImageNet](https://ieeexplore.ieee.org/abstract/document/5206848). Consequently, only the dense layers are trained. These pre-trained weights can be downloaded from Keras by adding the keyword "imagenet". number_dense_neurons = 2048 # + # VGG19 Fine tuned from keras.applications import VGG19 # We start from a VGG19 base (convolutional neural layers) with weights pre-trained on ImageNet vgg19_base = VGG19(weights='imagenet',include_top=False,input_shape=(120,120,3)) x = vgg19_base.output x = Flatten()(x) # We add our own dense layered classifier on top x = Dense(2*number_dense_neurons,activation='relu')(x) x = Dropout(0.5)(x) x = Dense(number_dense_neurons,activation='relu')(x) x = Dropout(0.5)(x) # Output layer x = Dense(1,activation='sigmoid')(x) model = Model(inputs=vgg19_base.input, outputs=x) # Only the dense layers are trained, hence, the convolutional neural layers are set not trainable: for layer in vgg19_base.layers: layer.trainable = False # How does the network looks like model.summary() # - # ### Train model # These parameters were optimized by using the [Adam](https://arxiv.org/pdf/1412.6980.pdf) learning rule for which the learning rate was tuned and finally set to 0.000005. Aditionally, we need to configure the training loss and metrics for validation. 
learning_rate = 0.000005 # Initiate Stochastic Gradient Descent with momentum, learning rate to tune opt = keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, amsgrad=False) # Define the losses and metrics for validation model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['binary_accuracy']) # Training can be done by using the function *fit*. We train for 50 epochs. Note that we configured our architecture in a way that only the weights of the dense layers are adjusted. epochs = 50 history = model.fit( train_generator, epochs=epochs, validation_data=validation_generator) # Now we are ready, our network is trained and can be deployed for use. In order to be able to use your system, you need to save the model parameters. This can be done by calling *model.save(path)* with *path*, the path to the file in which you want to save the parameters. Additionally, the *fit* returns a *history* object. This records the training and validation progress over the epochs. Consequently, this is useful to keep track of the training process, for example, when comparing multiple hyperparameter settings. # + # Save the Carapa procera deep learning model model.save('my_carapa_procera_model') # Plot the training and validation losses over time plt.plot(history.history['loss'], label='Training loss') plt.plot(history.history['val_loss'], label='Validation loss') # - # ### Load a deep learning model # # Now we trained a first deep learning model on the *Carapa procera*. This is stored into the object *model*. If you want to start from a saved deep learning model you just load it from a file by calling: *model = load_model(path_to_model)*. # ### Image and detection parameters # # The model makes use of a sliding window approach. Although not the most (computational) efficient, it's very simple to understand. The window has a size of 120 by 120 pixels and we use a step of 10 pixels. 
We start by loading our image: demo_image = './data/Carapa_procero_demo.jpg' # you can use any other Carapa procero microphotograph image = Image.open(demo_image) fig, ax = plt.subplots(figsize=(20, 10)) image = np.array(image) # conversion to a Numpy array ax.imshow(image) shift = 10 patch_size = 120 # Also part of the detection parameters is the number of slides we will do: no_x_shifts = (np.shape(image)[0] - patch_size) // shift no_y_shifts = (np.shape(image)[1] - patch_size) // shift print("We will do "+str(no_x_shifts*no_y_shifts)+" slides. Consequently, the deep learning model will be applied to "+str(no_x_shifts*no_y_shifts)+" windows.") # ### Classification with a deep learning model # # Now that we identified all the windows, we apply the deep learning model. This is done by calling the *predict* function. The image we apply needs to be converted and normalized before applying the deep learning model. Since the output of the deep learning model is between 0 and 1, we also have to set a threshold from which we accept the output as a positive classification. The higher this threshold, the more precise the system will be in detecting stomata. However, if the threshold is too high, the system won't be able to detect stomata at all. 
Here we use the same threshold as determined in the paper: threshold = 0.7 stomata = [] offset = patch_size // 2 for x in np.arange(no_x_shifts + 1): for y in np.arange(no_y_shifts + 1): # center of the window x_c = x * shift + offset y_c = y * shift + offset # extraction of the window and conversion before applying the deep learning model patch = image[x_c - offset:x_c + offset, y_c - offset:y_c + offset, :] patch = patch.astype('float32') patch /= 255 # applying the deep learning model y_model = model.predict(np.expand_dims(patch, axis=0)) # if the output of the network is above the if y_model[0] > threshold: stomata.append([x_c, y_c]) # ### Clustering the detected stomata # # All positively labeled patches are clustered by using mean shift clustering. This technique groups neighboring (or even overlapping) positively labeled patches from which the resulting stoma coordinates are derived. Therefore, we can rely on the package [MeanShift](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.MeanShift.html) which is available in [scikit-learn](https://scikit-learn.org). # + bandwidth = patch_size // 2 ms = MeanShift(bandwidth=bandwidth, bin_seeding=True) ms.fit(stomata) stomata = np.array([[x[1], x[0]] for x in ms.cluster_centers_]) # cluster_centers_ is inverted # - # ### Plotting the results fig, ax = plt.subplots(figsize=(20, 10)) ax.imshow(image) ax.plot(stomata[:,0], stomata[:,1], 'xr', alpha=0.75, markeredgewidth=3, markersize=12)
LeafToLabelStomata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Theil Sen Regression # + [markdown] nteract={"transient": {"deleting": false}} # Theil Sen Regression is an algorithm that choose the median of the slopes of all lines through pairs of points. Also, it involves in fitting multiple regression models on subsets of the training data and combining the coefficients together in the end. # + outputHidden=false inputHidden=false import numpy as np import matplotlib.pyplot as plt import pandas as pd import warnings warnings.filterwarnings("ignore") # yahoo finance is used to fetch data import yfinance as yf yf.pdr_override() # + outputHidden=false inputHidden=false # input symbol = 'AMD' start = '2014-01-01' end = '2018-08-27' # Read data dataset = yf.download(symbol,start,end) # View Columns dataset.head() # + outputHidden=false inputHidden=false # Create more data dataset['Increase_Decrease'] = np.where(dataset['Volume'].shift(-1) > dataset['Volume'],1,0) dataset['Buy_Sell_on_Open'] = np.where(dataset['Open'].shift(-1) > dataset['Open'],1,-1) dataset['Buy_Sell'] = np.where(dataset['Adj Close'].shift(-1) > dataset['Adj Close'],1,-1) dataset['Return'] = dataset['Adj Close'].pct_change() dataset = dataset.dropna() dataset.head() # + outputHidden=false inputHidden=false dataset.shape # + outputHidden=false inputHidden=false X = dataset['Open'] y = dataset['Adj Close'] # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} X = np.array(X).reshape(1170,-1) y = np.array(y).reshape(1170,-1) # + outputHidden=false inputHidden=false from sklearn.linear_model import TheilSenRegressor from sklearn.model_selection import cross_val_score from sklearn.model_selection import RepeatedKFold from numpy import absolute, arange # + 
outputHidden=false inputHidden=false # Define the model model = TheilSenRegressor() # Define model evaluation cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1) # Evaluate model scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1) # Change scores to be positive scores = absolute(scores) # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} model.fit(X, y) plt.scatter(X,y) xaxis = arange(X.min(), X.max(), 0.01) y_pred = model.predict(xaxis.reshape((len(xaxis), 1))) plt.plot(xaxis, y_pred, color='r') plt.title('Theil Sen Regression for Stock Price') plt.show() # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} def evaluate_model(X, y, model): # Define model evaluation cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1) # Evaluate model scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1) # Scores to be positive return absolute(scores) # + outputHidden=false inputHidden=false results = evaluate_model(X, y, model) print('Average MAE: %.3f (%.3f)' % (results.mean(), results.std())) # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} model.coef_ # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} model.intercept_ # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} model.predict(y) # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} from sklearn.metrics import r2_score r2_score(X,y)
Stock_Algorithms/Theil_Sen_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="UEBilEjLj5wY" # STAT 453: Deep Learning (Spring 2020) # Instructor: <NAME> (<EMAIL>) # # Course website: http://pages.stat.wisc.edu/~sraschka/teaching/stat453-ss2020/ # GitHub repository: https://github.com/rasbt/stat453-deep-learning-ss20 # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 119} colab_type="code" executionInfo={"elapsed": 536, "status": "ok", "timestamp": 1524974472601, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-cxK6yOSQ6uE/AAAAAAAAAAI/AAAAAAAAIfw/P9ar_CHsKOQ/s50-c-k-no/photo.jpg", "userId": "118404394130788869227"}, "user_tz": 240} id="GOzuY8Yvj5wb" outputId="c19362ce-f87a-4cc2-84cc-8d7b4b9e6007" # %load_ext watermark # %watermark -a '<NAME>' -v -p torch # + [markdown] colab_type="text" id="rH4XmErYj5wm" # # LeNet-5 CIFAR10 Classifier # - # This notebook implements the classic LeNet-5 convolutional network [1] and applies it to the CIFAR10 object classification dataset. The basic architecture is shown in the figure below: # # ![](lenet-5_1.jpg) # # # LeNet-5 is commonly regarded as the pioneer of convolutional neural networks, consisting of a very simple architecture (by modern standards). In total, LeNet-5 consists of only 7 layers. 3 out of these 7 layers are convolutional layers (C1, C3, C5), which are connected by two average pooling layers (S2 & S4). The penultimate layer is a fully connexted layer (F6), which is followed by the final output layer. The additional details are summarized below: # # - All convolutional layers use 5x5 kernels with stride 1. # - The two average pooling (subsampling) layers are 2x2 pixels wide with stride 1. 
# - Throughrout the network, tanh sigmoid activation functions are used. (**In this notebook, we replace these with ReLU activations**) # - The output layer uses 10 custom Euclidean Radial Basis Function neurons for the output layer. (**In this notebook, we replace these with softmax activations**) # # **Please note that the original architecture was applied to MNIST-like grayscale images (1 color channel). CIFAR10 has 3 color-channels. I found that using the regular architecture results in very poor performance on CIFAR10 (approx. 50% ACC). Hence, I am multiplying the number of kernels by a factor of 3 (according to the 3 color channels) in each layer, which improves is a little bit (approx. 60% Acc).** # # ### References # # - [1] <NAME>, <NAME>, <NAME>, and <NAME>. Gradient-based learning applied to document recognition. Proceedings of the IEEE, november 1998. # + [markdown] colab_type="text" id="MkoGLH_Tj5wn" # ## Imports # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="ORj09gnrj5wp" import os import time import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader from torchvision import datasets from torchvision import transforms import matplotlib.pyplot as plt from PIL import Image if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True # + [markdown] colab_type="text" id="I6hghKPxj5w0" # ## Model Settings # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 23936, "status": "ok", "timestamp": 1524974497505, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-cxK6yOSQ6uE/AAAAAAAAAAI/AAAAAAAAIfw/P9ar_CHsKOQ/s50-c-k-no/photo.jpg", "userId": "118404394130788869227"}, "user_tz": 240} id="NnT0sZIwj5wu" outputId="55aed925-d17e-4c6a-8c71-0d9b3bde5637" ########################## ### SETTINGS 
########################## # Hyperparameters RANDOM_SEED = 1 LEARNING_RATE = 0.001 BATCH_SIZE = 128 NUM_EPOCHS = 10 # Architecture NUM_FEATURES = 32*32 NUM_CLASSES = 10 # Other if torch.cuda.is_available(): DEVICE = "cuda:0" else: DEVICE = "cpu" GRAYSCALE = False # - # ### MNIST Dataset # + ########################## ### CIFAR-10 Dataset ########################## train_mean = (0.5, 0.5, 0.5) train_std = (0.5, 0.5, 0.5) resize_transform = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize(train_mean, train_std)]) # Note transforms.ToTensor() scales input images # to 0-1 range train_dataset = datasets.CIFAR10(root='data', train=True, transform=resize_transform, download=True) test_dataset = datasets.CIFAR10(root='data', train=False, transform=resize_transform) train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, num_workers=8, shuffle=True) test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, num_workers=8, shuffle=False) # Checking the dataset for images, labels in train_loader: print('Image batch dimensions:', images.shape) print('Image label dimensions:', labels.shape) break # Checking the dataset for images, labels in train_loader: print('Image batch dimensions:', images.shape) print('Image label dimensions:', labels.shape) break # + device = torch.device(DEVICE) torch.manual_seed(0) for epoch in range(2): for batch_idx, (x, y) in enumerate(train_loader): print('Epoch:', epoch+1, end='') print(' | Batch index:', batch_idx, end='') print(' | Batch size:', y.size()[0]) x = x.to(device) y = y.to(device) break # + ########################## ### MODEL ########################## class LeNet5(nn.Module): def __init__(self, num_classes, grayscale=False): super(LeNet5, self).__init__() self.grayscale = grayscale self.num_classes = num_classes if self.grayscale: in_channels = 1 else: in_channels = 3 self.features = nn.Sequential( nn.Conv2d(in_channels, 6*in_channels, kernel_size=5), nn.Tanh(), 
nn.MaxPool2d(kernel_size=2), nn.Conv2d(6*in_channels, 16*in_channels, kernel_size=5), nn.Tanh(), nn.MaxPool2d(kernel_size=2) ) self.classifier = nn.Sequential( nn.Linear(16*5*5*in_channels, 120*in_channels), nn.Tanh(), nn.Linear(120*in_channels, 84*in_channels), nn.Tanh(), nn.Linear(84*in_channels, num_classes), ) def forward(self, x): x = self.features(x) x = torch.flatten(x, 1) logits = self.classifier(x) probas = F.softmax(logits, dim=1) return logits, probas # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="_lza9t_uj5w1" torch.manual_seed(RANDOM_SEED) model = LeNet5(NUM_CLASSES, GRAYSCALE) model.to(DEVICE) optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE) # + [markdown] colab_type="text" id="RAodboScj5w6" # ## Training # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 1547} colab_type="code" executionInfo={"elapsed": 2384585, "status": "ok", "timestamp": 1524976888520, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-cxK6yOSQ6uE/AAAAAAAAAAI/AAAAAAAAIfw/P9ar_CHsKOQ/s50-c-k-no/photo.jpg", "userId": "118404394130788869227"}, "user_tz": 240} id="Dzh3ROmRj5w7" outputId="5f8fd8c9-b076-403a-b0b7-fd2d498b48d7" def compute_accuracy(model, data_loader, device): correct_pred, num_examples = 0, 0 for i, (features, targets) in enumerate(data_loader): features = features.to(device) targets = targets.to(device) logits, probas = model(features) _, predicted_labels = torch.max(probas, 1) num_examples += targets.size(0) correct_pred += (predicted_labels == targets).sum() return correct_pred.float()/num_examples * 100 start_time = time.time() for epoch in range(NUM_EPOCHS): model.train() for batch_idx, (features, targets) in enumerate(train_loader): features = features.to(DEVICE) targets = targets.to(DEVICE) ### FORWARD AND BACK PROP logits, probas = model(features) cost = F.cross_entropy(logits, targets) optimizer.zero_grad() 
cost.backward() ### UPDATE MODEL PARAMETERS optimizer.step() ### LOGGING if not batch_idx % 50: print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f' %(epoch+1, NUM_EPOCHS, batch_idx, len(train_loader), cost)) model.eval() with torch.set_grad_enabled(False): # save memory during inference print('Epoch: %03d/%03d | Train: %.3f%%' % ( epoch+1, NUM_EPOCHS, compute_accuracy(model, train_loader, device=DEVICE))) print('Time elapsed: %.2f min' % ((time.time() - start_time)/60)) print('Total Training Time: %.2f min' % ((time.time() - start_time)/60)) # + [markdown] colab_type="text" id="paaeEQHQj5xC" # ## Evaluation # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 6514, "status": "ok", "timestamp": 1524976895054, "user": {"displayName": "<NAME>", "photoUrl": "//lh6.googleusercontent.com/-cxK6yOSQ6uE/AAAAAAAAAAI/AAAAAAAAIfw/P9ar_CHsKOQ/s50-c-k-no/photo.jpg", "userId": "118404394130788869227"}, "user_tz": 240} id="gzQMWKq5j5xE" outputId="de7dc005-5eeb-4177-9f9f-d9b5d1358db9" with torch.set_grad_enabled(False): # save memory during inference print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader, device=DEVICE))) # + class UnNormalize(object): def __init__(self, mean, std): self.mean = mean self.std = std def __call__(self, tensor): """ Parameters: ------------ tensor (Tensor): Tensor image of size (C, H, W) to be normalized. Returns: ------------ Tensor: Normalized image. 
""" for t, m, s in zip(tensor, self.mean, self.std): t.mul_(s).add_(m) return tensor unorm = UnNormalize(mean=train_mean, std=train_std) # + test_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True) for features, targets in test_loader: break _, predictions = model.forward(features[:8].to(DEVICE)) predictions = torch.argmax(predictions, dim=1) d = {0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'} fig, ax = plt.subplots(1, 8, figsize=(20, 10)) for i in range(8): img = unorm(features[i]) ax[i].imshow(np.transpose(img, (1, 2, 0))) ax[i].set_xlabel(d[predictions[i].item()]) plt.show()
L12-cnns/code/lenet-5/cnn-lenet5-cifar10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Website Phishing (scikit-learn) # --- # #### <NAME> # <EMAIL> # Universidad Nacional de Colombia, Sede Medellín # Facultad de Minas # Medellín, Colombia # # #### Contenido # * [Comprensión del problema real](#Comprensión-del-problema-real) # * [Comprensión de los datos](#Comprensión-de-los-datos) # * [Preparación de los datos](#Preparación-de-los-datos) # * [Modelado y Evaluación](#Modelado-y-Evaluación) # * [Análisis de resultados](#Análisis-de-resultados) # # Comprensión del problema real # --- # # ### Objetivos del negocio # El problema de suplantación de identidad (phishing) se considera un problema vital en la industria .COM, especialmente en la banca electrónica y el comercio electrónico. Cuando un sitio web se considera **Sospechoso**, significa que puede ser **Phishy** o **Legítimo**, lo que implica que el sitio web tiene características legítimas y de phishing. El problema requiere realizar la clasificación de cuando un sitio web es legítimo, phishing o sospechoso. # # ### Objetivos de la míneria de datos # El autor del conjunto de datos a identificado diferentes características relacionadas con sitios web legítimos y phishy. Se recopiló 1353 sitios web diferentes de diferentes fuentes. Los sitios web de prostitución fueron obtenidos del conjunto de datos [Phishtank](www.phishtank.com), sitio gratuito donde los usuarios pueden enviar, verificar, rastrear y compartir datos de phishing. Los sitios web legítimos se recopilaron de Yahoo y los directorios mediante un script web desarrollado en PHP. El script PHP fue conectado con un navegador y se recolectaron 548 sitios web legítimos de 1353 sitios web. Hay 702 URL de phishing y 103 URL sospechosas. 
Se pretende por tanto modelar y evaluar con que precisión es posible clasificar sitios web como phishing. # # ### Referencias # Abdelhamid et al.,(2014a) Phishing Detection based Associative Classification Data Mining. Expert Systems With Applications (ESWA), 41 (2014) 5948-5959. # - Fuente: [UCI](https://archive.ics.uci.edu/ml/datasets/Website+Phishing) # # Comprensión de los datos # --- # # Las características o atributos recopilados tienen los valores categóricos: Legitimate, Suspicious y Phishy. Estos valores han sido reemplazados con los valores numéricos 1, 0 y -1 respectivamente. Los detalles de cada característica se mencionan en el documento de investigación del autor. # # **Características del conjunto de datos** # - SFH: manejador de formulario en servidor (si esta vacío o "about:blank" es phishing) # - popUpWindow: ventana emergente (si contiene campos de texto es phishing) # - SSLfinal_State: estado final SSL (no tiene certificados de confianza es phishing) # - Request_URL: URL de la solicitud (> 66% de contenido de otro dominio es phishing) # - URL_of_Anchor: enlace anclado en el sitio (> 67% cantidad de etiquetas "a" fuera del dominio o ningún link es phishing) # - web_traffic: popularidad del sitio web (no tiene tráfico o no es reconocido por Alexa es phishing) # - URL_Length: longitud de la URL del sitio (> 54 catacteres es phishing) # - age_of_domain: tiempo de vida del sitio (> a 6 meses es phishing) # - having_IP_Address: ¿el sitio utiliza dirección IP? (es phishing cuando utiliza) # # **Variable objetivo del conjunto de datos** # - Result: indicador de sitio legítimo (1), sospechoso (0) y phishing (-1) # # **Nota:** Cada una de las características puede tener el valor 0, 1 y -1 según como se haya interpretado en el sitio web analizado. El valor 0 indica que la característica es sospechosa, un valor de 1 representa que el atributo es legítimo y -1 indica que es malicioso o phishing. 
# ### Carga de los datos

# +
##
## Eliminamos salida de warnings
##
import warnings as ws
ws.filterwarnings("ignore")

##
## Importamos las librerias requeridas
##
import time as tm
import itertools
from itertools import chain
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
sns.set(style="darkgrid")
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
# NOTE: jaccard_similarity_score was removed in scikit-learn 0.23;
# the equivalent (and correctly defined) metric is jaccard_score.
from sklearn.metrics import jaccard_score
from sklearn.metrics import classification_report
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC


##
## Función para colorear fondo en un Data Frame
## http://pandas.pydata.org/pandas-docs/stable/style.html
##
def highlight_max(s):
    """Style helper: cyan background on the max value(s) of a Series."""
    is_max = s == s.max()
    return ['background-color: cyan' if v else '' for v in is_max]


# +
##
## Realizamos la lectura del archivo con el conjunto de datos
##
data = pd.read_csv('PhishingData.csv', sep=",")

##
## Creamos una variable con la cantidad de registros total
##
N, M = data.shape

##
## Imprimimos una muestra de los datos
##
data.head()

# +
##
## Agrego una nueva variable tipo texto representando la variable objetivo
##
if not 'Class' in data.columns:
    data["Class"] = ""
# .loc avoids pandas chained assignment (data["Class"][mask] = ...), which
# raises SettingWithCopyWarning and is a silent no-op under copy-on-write
# in pandas >= 2.0.
data.loc[data["Result"] == 1, "Class"] = "Legitimate"
data.loc[data["Result"] == 0, "Class"] = "Suspicious"
data.loc[data["Result"] == -1, "Class"] = "Phishing"

##
## Imprimimos una muestra de los datos
##
data.head()
# -

# ### Exploración de los datos
#
# #### Validación balance de la variable objetivo

##
## Confirmamos la densidad de registros
##
f, ax = plt.subplots(figsize=(10, 6))
sns.countplot(x="Class", data=data, ax=ax)

##
## Mostramos balance de registros en porcentaje por categoria
##
# pd.Categorical.from_array was removed from pandas; value_counts(normalize=True)
# yields the same per-class proportions directly.
freqs = data["Class"].value_counts(normalize=True)
class_name = ["Suspicious", "Legitimate", "Phishing"]
class_freq = [[str(int(round(freqs[c], 2) * 100)) + "%" for c in class_name]]
pd.DataFrame(data=class_freq, columns=class_name)

# El resultado anterior indica que el **41%** de los datos corresponde a sitios legítimos y el **52%** a sitios categorizados como phishing, dejando tan solo un **8%** de sitios que son sospechosos. Podemos considerar que la muestra de registros se encuentra balanceada.

# #### Estadísticos del conjunto de datos

##
## Verificamos la lectura de los datos con estadísticos básicos
##
data.describe()

# La tabla anterior permite deducir que todas las variables del conjunto de datos se encuentran **a una misma escala**, por lo tanto, no se requiere realizar procesos de normalización o estandarización. También deducimos que los datos no siguen ninguna distribución dado el rango de valores que toman: {-1, 0, 1}

# #### Análisis de correlaciones entre atributos

##
## Graficamos histogramas por variable y dispersión para cada combinación par
##
sns.pairplot(data)

##
## Correlaciones para las variables del conjunto de datos
##
# The string column "Class" must be excluded: DataFrame.corr() on a mixed
# frame raises on modern pandas (numeric_only no longer defaults to True).
f, ax = plt.subplots(figsize=(12, 8))
sns.heatmap(data.drop(columns=["Class"]).corr(method='pearson'), fmt="d", linewidths=.5, ax=ax)

# Las gráficas anteriores permiten concluir que los atributos **no siguen una distribución normal**. Igualmente, entendemos que **no pueden correlacionarse**, es decir, ningún par de características sigue una tendencia proporcional o inversa que este cercana a 1 o -1. Por lo tanto, no se procederá con análisis de componentes principales (PCA) ni con selección de características (Feature Selection).

# ### Verificación de calidad de datos

# Realizamos un chequeo de los tipos de dato que tiene cada variable de la muestra, debemos asegurarnos que tanto las características como la variable objetivo tengan tipos numéricos para asegurar un adecuado procesamiento de las metodologías de modelado.

data.info()

# El primer indicador es que ninguno de los atributos tiene datos **Nulos**. Por otro lado, confirmamos que los tipos de datos para las características y la variable objetivo son correctos: **int64**. La variable *Class* no se tiene en cuenta en este análisis ya que será retirada en la fase de preparación de datos.

# # Preparación de los datos
# ---
#
# Considerando que no se requiere realizar eliminación de características por correlaciones, transformación de tipos de datos, estandarización o normalización, selección de características y análisis de componentes principales, procederemos con el retiro de la variable **Class** del conjunto de datos, dado que no se requieren más tareas exploratorias. Igualmente, separaremos la variable objetivo de las características para pronóstico y finalmente se generará el conjunto de entrenamiento y prueba.
# ### Selección de los datos

# +
##
## Drop the helper text column: it duplicates "Result" and must not be
## fed to the classifiers.
##
if 'Class' in data.columns:
    data.drop(columns=["Class"], inplace=True)

##
## Show a sample of the data without the dropped column
##
data.head()

# +
##
## Split the target variable away from the modelling features.
##
X_columns = list(data.columns.drop("Result"))  # feature names used for modelling
y = data["Result"]                             # target values
X = data[X_columns]                            # feature values
X_names = X.columns                            # feature names (Index)

##
## Cast the target to plain integers so fit() does not complain.
##
y = y.astype(int)

##
## Keep pristine copies of the original features and target.
##
X_org = X.copy()
y_org = y.copy()
# -

# ### Construcción de datos

##
## Sanity-check that all modelling features share the same value scale.
##
f, ax = plt.subplots(figsize=(14, 6))
sns.boxplot(data=X, ax=ax)

# Como se expresó anteriormente, **no se realizará escalado** de los datos, dado que la gráfica anterior evidencia que las unidades de los atributos esta dentro del mismo rango.
# # Modelado y Evaluación # --- # # ### Selección de la técnica de modelado # # Para realizar el modelado predictivo se utilizaran las siguientes metodologías de clasificación: # - [Nearest Neighbors](http://scikit-learn.org/stable/modules/neighbors.html) # - [Decision Tree](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html) # - [Random Forest](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier) # - [C-Support Vector](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC) # # Para cada una se realizará un proceso de modelado y evaluación utilizando [cross-validation](http://scikit-learn.org/stable/modules/cross_validation.html#computing-cross-validated-metrics) e [hyper-parameters tuning](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV). Al final del proceso y luego de examinar los scores (accuracy, confusion matrix) será seleccionado el modelo con mejor **desempeño**. Los parámetros generales del ajuste de hiperparametros son los siguientes: # - Cross Validation: 5-Folds # - Scoring: macro # - Number of Jobs: 3 # # Para comenzar, realizamos partición de los datos en **Entrenamiento** y **Prueba** utilizando una proporción del conjunto de datos 70% y 30%, respectivamente y un valor único semilla para que siempre escoja los mismos valores aleatoriamente (44). 
# + ## ## Definimos variables para el estándar de partición 70% - 30% ## test_size = 0.3 random_state = 44 ## ## Partición de los datos para probar todos los modelos ## X_train, X_test, y_train_true, y_test_true = train_test_split( X, y, # datos originales test_size=test_size, # float/int, tamaño de la muestra de prueba random_state=random_state) # semilla del generador aleatorio # - ## ## Construimos una función personalizada para ejecutar cada metodología ## def grid_search_custom(classifier, tuned_params, scores): scores_result = [] for score in scores: print() print("Parámetros ajustados del score: %s" % score) print() # Entrenamiento utilizando cross-validation (folds = 5) clf = GridSearchCV(classifier, tuned_params, cv=5, scoring='%s_macro' % score, n_jobs=3) clf.fit(X_train, y_train_true) print("Mejores parámetros encontrados:") print(clf.best_params_) print() # Predicción con datos de prueba para validar metricas y_true, y_pred = y_test_true, clf.predict(X_test) print("Reporte de clasificación detallado:") print() print(classification_report(y_true, y_pred)) print() # Calculo de las métricas de precisión accuracy_result = accuracy_score(y_true, y_pred) conf_matrix_result = confusion_matrix(y_true, y_pred) scores_result.append([score, round(accuracy_result, 4), round(1-accuracy_result, 4), clf.best_params_, conf_matrix_result]) return(scores_result) # ### Metodología K-Nearest Neighbors # + print(__doc__) # Ajuste de parámetros para la metodología tuned_params = [{'n_neighbors': [11, 31, 51, 71, 91]}] # Metricas seleccionadas para validar metodología scores = ['precision'] # Ejecutamos grid search para la metodología knn_result = grid_search_custom(KNeighborsClassifier(), tuned_params, scores) # - # ### Metodología Decision Tree # + print(__doc__) # Ajuste de parámetros para la metodología tuned_params = [{'max_depth': [10, 20, 30, 40, 50]}] # Metricas seleccionadas para validar metodología scores = ['precision'] # Ejecutamos grid search para la metodología 
tree_result = grid_search_custom(DecisionTreeClassifier(), tuned_params, scores) # - # ### Metodología Random Forest # + print(__doc__) # Ajuste de parámetros para la metodología tuned_params = [{'n_estimators': [20, 40, 60, 80, 100]}] # Metricas seleccionadas para validar metodología scores = ['precision'] # Ejecutamos grid search para la metodología forest_result = grid_search_custom(RandomForestClassifier(), tuned_params, scores) # - # ### Metodología C-Support Vector Machine # + print(__doc__) # Ajuste de parámetros para la metodología tuned_params = [{'kernel': ['rbf'], 'gamma': [0.1, 0.01, 0.001], 'C': [1, 10, 100]}, {'kernel': ['linear'], 'C': [1, 10, 100]}] # Metricas seleccionadas para validar metodología scores = ['precision'] # Ejecutamos grid search para la metodología svm_result = grid_search_custom(SVC(), tuned_params, scores) # - # ### Evaluación del modelado # A continuación se presentará un reporte de los resultados obtenidos del proceso de modelado con las 4 metodologías seleccionadas: K-Nearest Neighbors, Decision Tree, Random Forest y C-Support Vector Machine. Este reporte muestra las métricas de accuracy y error rate. Se evaluaran los resultados y se tomará una decisión respecto del modelo con **mejor desempeño**. 
# +
##
## Construimos un Data Frame con todos los resultados de la evaluación
##
data_indexes = ["KNearestNeighbors", "DecisionTree", "RandomForest", "SupportVectorMachine"]
data_columns = ["score", "accuracy", "error_rate", "best_params", "confusion_matriz"]
data_values = [list(chain.from_iterable(knn_result)),
               list(chain.from_iterable(tree_result)),
               list(chain.from_iterable(forest_result)),
               list(chain.from_iterable(svm_result))]
data_result = pd.DataFrame(data=data_values, columns=data_columns, index=data_indexes)

##
## Mostramos los resultados de la evaluación con todos los scores
## Con un fondo de color se muestra la mejor estimación
##
data_result.iloc[:, :-1].style.apply(highlight_max, subset=['accuracy'])
# -

# Los resultados de **desempeño** anteriores muestran con claridad que el algorítmo que mejor se comporta, dadas las medidas de Accuracy es **C-Support Vector Machine** y los mejores parámetros del clasificador con *'C': 10, 'gamma': 0.1, 'kernel': 'rbf'*; sin embargo, realizaremos un análisis más detallado de los resultados tomando como base los algorítmos con mayor desempeño: **Random Forest** y **C-Support Vector Machine**.

# +
##
## Nueva matrix de confusión solo con los registros de los clasificadores seleccionados
##
conf_matrix = list(chain.from_iterable(data_result.iloc[:, 4:5].values))
conf_matrix_new = [conf_matrix[2], conf_matrix[3]]
data_index_new = [data_indexes[2], data_indexes[3]]

##
## Drop the "Suspicious" class (row/column index 1) from each 3x3 matrix,
## leaving a 2x2 Phishing-vs-Legitimate matrix. np.delete replaces the
## original hand-rolled triple loop; values and int dtype are identical.
##
conf_matrix_new = [np.delete(np.delete(cm, 1, axis=0), 1, axis=1).astype(int)
                   for cm in conf_matrix_new]
# -

# # Análisis de resultados
# ---
#
# Tomando como referencia los resultados anteriores, con las diferentes evaluaciones de desempeño de los modelos seleccionados, se procederá a realizar un análisis de los resultados de las predicciones utilizando **matrices de confusión** y los indicadores de **Sensibilidad** y **Especifidad**.
#
# #### Matriz de confusión:
#
#          | Pronostico
#          |   P     N
# ---------|------------
#        P |  TP    FN
# Real     |
#        N |  FP    TN
#
# - TP: Verdadero positivo (correcto)
# - TN: Verdadero negativo (correcto)
# - FP: Falso positivo (mal clasificado)
# - FN: Falso negativo (mal clasificado)
#
# #### Sensibilidad o tasa verdadera positiva:
# * Mide la proporción de ejemplos positivos que fueron correctamente clasificados.
#
# $$\text{sensitibity} = \frac{\text{TP}}{\text{TP} + \text{FN}}$$
#
# #### Especifidad o tasa verdadera negativa:
# * Mide la proporción de ejemplos negativos correctamente clasificados.
#
# $$\text{specifity} = \frac{\text{TN}}{\text{TN} + \text{FP}}$$
#
# #### Precisión o valor predictivo positivo:
# * Proporción de casos positivos que fueron verdaderamente positivos.
#
# $$\text{precision} = \frac{\text{TP}}{\text{TP} + \text{FP}}$$
#
# #### Valor predictivo negativo:
# * Proporción de casos negativos que fueron verdaderamente negativos.
#
# $$\text{negative predictive value} = \frac{\text{TN}}{\text{TN} + \text{FN}}$$

##
## Graficamos las matrices de confusión para los mejores estimadores
##
pred, true = ('predicted', 'true')
f, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 5))
for i in range(len(axes)):
    axes[i].set_title(data_index_new[i])
    axes[i].set_xlabel(pred); axes[i].set_ylabel(true)
    sns.heatmap(conf_matrix_new[i], annot=True, fmt="d", linewidths=.5, ax=axes[i])

# +
##
## Calculamos los indicadores requeridos para análisis
##
# With "Suspicious" removed, positive = Phishing: ravel() of the 2x2 matrix
# yields [TP, FN, FP, TN] (true class along rows).
data_ind_values = []
for i in range(len(conf_matrix_new)):
    tp, fn, fp, tn = conf_matrix_new[i].ravel()
    sensitivity = tp/(tp+fn)
    specifity = tn/(tn+fp)
    precision = tp/(tp+fp)
    neg_pred_val = tn/(tn+fn)
    data_ind_values.append([data_index_new[i],
                            round(sensitivity, 4),
                            round(specifity, 4),
                            round(precision, 4),
                            round(neg_pred_val, 4)])

data_ind_columns = ['estimator', 'sensitivity', 'specifity', 'precision', 'negative pred value']
data_ind = pd.DataFrame(columns=data_ind_columns, data=data_ind_values)
data_ind.style.apply(highlight_max, subset=['sensitivity', 'specifity', 'precision', 'negative pred value'])
Phishing Website.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import torch # $\textbf{Definitions:}$ $\\$ # # $\mbox{---Gradient---}$ $\\$ $\\$ # Vector formed by partial derivatives of scalar function, f(x) in which $x = \begin{bmatrix} x_1 \\ x_2 \\ \vdots \\x_n \end{bmatrix}$ $\\$ # Gradient maps $\mathbb{R}^n \rightarrow \mathbb{R}$ $\\$ # # $$\nabla f(\mathbf{x})=\frac{\partial f(\mathbf{x})}{\partial x_1}\hat{x}_1+\frac{\partial f(\mathbf{x})}{\partial x_2}\hat{x}_2+\ldots+\frac{\partial f(\mathbf{x})}{\partial x_n}\hat{x}_n$$ # # $$\nabla f(x) = \left[\frac{\partial f}{\partial x_1}\frac{\partial f}{\partial x_2}\dots\frac{\partial f}{\partial x_n}\right]$$ # # Note: Input is a column vector, outputs a row vector. $\\$ # Gradient is the rate of change wrt each dimension/component and corresponds to steepest slope due to linear independence used for gradient descent. $\\$ # # $\mbox{---Jacobian---}$ $\\$ # Matrix formed by partial derivatives of vector function of scalar functions, maps $\mathbb{R}^n \rightarrow \mathbb{R}^m$ $\\$ # # $$J_\mathbf{f} = \frac{\partial (f_1,\ldots,f_m)}{\partial(x_1,\ldots,x_n)} = \left[ # \begin{matrix} # \frac{\partial f_1}{\partial x_1} &amp; \cdots &amp; \frac{\partial f_1}{\partial x_n} \\ # \vdots &amp; \ddots &amp; \vdots \\ # \frac{\partial f_m}{\partial x_1} &amp; \cdots &amp; \frac{\partial f_m}{\partial x_n} # \end{matrix} # \right]$$ # The Jacobian is the gradient applied to multiple rows, commonly used as a change of basis/unit conversion: # # $$\iiint_R f(x,y,z) \,dx\,dy\,dz = \iiint_S f(x(u,v,w),y(u,v,w),z(u,v,w))\left|\frac{\partial (x,y,z)}{\partial(u,v,w)}\right|\,du\,dv\,dw$$ # Note: Gradient = Jacobian if $m = 1$. 
# # $\mbox{---Hessian---}$ $\\$ # Gradient applied to Gradient, Double Gradient: $\\$ # $$\begin{align}D[\nabla f(\mathbf x)] &amp;= D[D[f(\mathbf x)]]\\ # &amp;=\left(D\left[\frac{\partial f}{\partial x_1}\right]^T, \ldots, D\left[\frac{\partial f}{\partial x_n}\right]^T\right)\end{align}$$ # Which expands to give us the Hessian matrix: # $$D^2[f(\mathbf x)]=\left(\begin{matrix}\frac{\partial^2 f}{\partial x_1^2} &amp; \ldots &amp; \frac{\partial^2 f}{\partial x_1\partial x_n}\\ # \vdots &amp; \ddots &amp; \vdots \\ # \frac{\partial^2 f}{\partial x_n\partial x_1}&amp; \ldots &amp; \frac{\partial^2 f}{\partial x_n^2}\end{matrix}\right)$$ # # Note: Inputs are column vectors, outputs are row vectors. (Transposed first because the first gradient outputs a row vector) $\\$ # The Hessian represents the rate of change of gradient, analogous to curvature. Used to computationally determine the position of a min/max point in optimization, which is darn impossible to visualize past 2 dimensions. # # $\textbf{Analytic Gradient:}$ $\\$ # $\mbox{---Linear Form---}$ $\\$ # $f(x) = a^T x$ $\\$ # Component-wise derivative yields corresponding dot-product coefficent of each component k. 
$\\$
# Assemble the partial derivatives into a vector: $\\$
# $\nabla f(x) = \begin{bmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{bmatrix} = a$
#
# -General Linear Form: $\\$
# $f(x) = a^Tx + b$ $\\$
# $\nabla f(x) = a$
#
#
# $\mbox{---Quadratic Form---}$ $\\$
# $f(x) = x^T A x$ $\\$
# Tracing through 2x2 example: $\\$
# $\nabla f(x) = (A + A^T)x$ $\\$
# For pd matrices, $A = A^T$ so: $\\$
# $\nabla f(x) = 2Ax$
#
# -General Quadratic Form, which builds from gradient of general linear form: $\\$
# $f(x) = \frac{1}{2}x^T A x + b^Tx + c$ $\\$
# $\nabla f(x) = \frac{1}{2}(A^T + A)x + b$ $\\$
# For symmetric matrix A: $\\$
# $\nabla f(x) = Ax + b$
#
# -Mixed Quadratic Form: $\\$
# $f(x,y) = x^T A y$ $\\$
# Wrt x: $\nabla_x f(x,y) = Ay$ $\\$
# Wrt y: $\nabla_y f(x,y) = A^Tx$ $\\$
# Differentiating with respect to the right-hand argument transposes the matrix. $\\$
# Matrices are pd (hence symmetric), so wrt y: $\nabla_y f(x,y) = Ax$
#
# $\textbf{Analytic Hessian:}$ $\\$
# Tracing through 2x2 example again: $\\$
# $\mbox{---Linear Form---}$ $\\$
# $f(x) = a^T x$ $\\$
# $\nabla f(x)$ does not depend on x, so $\nabla^2 f(x) = 0$.
# # $\mbox{---Quadratic Form---}$ $\\$
# $f(x) = x^T A x$ $\\$
# $\nabla^2 f(x) = A + A^T$ $\\$
# For symmetric matrix A: $\\$
# $\nabla^2 f(x) = 2A$
#
# Mixed Quadratic Form: $\\$
# $f(x,y) = x^T A y$ $\\$
# Wrt xx, yy: $H_{xx} = H_{yy} = 0$ $\\$
# Wrt xy, yx: $H_{xy} = A,\ H_{yx} = A^T$ (equal when $A$ is symmetric)
#
# Simultaneous gradient descent (continuous time):
# $\dot x = -D_1f_1(x,y),\ \dot y = -D_2f_2(x,y)$, simgrad Jacobian
# $J(x,y) = \begin{bmatrix} D_1^2f_1(x,y) & D_{12}f_1(x,y) \\ D_{21}f_2(x,y) & D_2^2f_2(x,y) \end{bmatrix}$
#
# (discrete time):
# $x^+ = x - \gamma_x D_1f_1(x,y),\
# y^+ = y - \gamma_y D_2f_2(x,y)$

# +
m = 2
n = 2

# Random positive-definite matrices via the Gram trick: A.T @ A is symmetric
# PSD, and almost surely PD for a random square A. Fixed seed => reproducible.
np.random.seed(0)
A1 = np.random.randn(n, n)
A1 = A1.T @ A1
A2 = np.random.randn(n, n)
A2 = A2.T @ A2

# Random coupling matrices; NOT symmetric, NOT pd:
B1 = np.random.randn(n, m)
B2 = np.random.randn(n, m)
C1 = np.random.randn(m, m)
C2 = np.random.randn(m, m)

# Linear-term vectors e (own player) and h (other player):
e1 = np.random.randn(n)
e2 = np.random.randn(m)
h1 = np.random.randn(m)
h2 = np.random.randn(n)

# Convert matrices/vectors to float tensors so autograd can see them.
A1 = torch.tensor(A1, dtype=torch.float)
A2 = torch.tensor(A2, dtype=torch.float)
B1 = torch.tensor(B1, dtype=torch.float)
B2 = torch.tensor(B2, dtype=torch.float)
C1 = torch.tensor(C1, dtype=torch.float)
C2 = torch.tensor(C2, dtype=torch.float)
e1 = torch.tensor(e1, dtype=torch.float)
e2 = torch.tensor(e2, dtype=torch.float)
h1 = torch.tensor(h1, dtype=torch.float)
h2 = torch.tensor(h2, dtype=torch.float)

# Decision variables of the two players (column vectors, leaf tensors):
x1 = torch.ones((n, 1), requires_grad=True)
x2 = torch.ones((m, 1), requires_grad=True)


# Generic quadratic cost of each player:
# f1 = 1/2 x1'A1 x1 + x1'B1 x2 + 1/2 x2'C1 x2 + e1'x1 + h1'x2  (and symmetrically f2)
def f1(x1, x2):
    return (0.5 * x1.t() @ A1 @ x1) + (x1.t() @ B1 @ x2) + (0.5 * x2.t() @ C1 @ x2) \
           + (e1.t() @ x1) + (h1.t() @ x2)

def f2(x1, x2):
    return (0.5 * x2.t() @ A2 @ x2) + (x2.t() @ B2 @ x1) + (0.5 * x1.t() @ C2 @ x1) \
           + (e2.t() @ x2) + (h2.t() @ x1)


# +
# Analytical gradients.
# Fixes versus the original cell:
#  * D1f1 referenced an undefined name `A` (NameError) -> A1.
#  * The bilinear terms x1'B x2 carry NO 1/2 factor in f1/f2, so their
#    gradients are B @ x2 (resp. B.t() @ x1), not 0.5 * B @ x2.
#  * 1/2 x'Cx with a NON-symmetric C has gradient 0.5*(C + C.t()) @ x, not C @ x.
#  * e/h are 1-D tensors; reshaping to a column keeps the result (n,1)
#    instead of broadcasting to (n,n).

# D wrt x1:
def D1f1(x1, x2):
    return (A1 @ x1) + (B1 @ x2) + e1.reshape(-1, 1)

def D1f2(x1, x2):
    return (B2.t() @ x2) + 0.5 * (C2 + C2.t()) @ x1 + h2.reshape(-1, 1)

# D wrt x2:
def D2f1(x1, x2):
    return (B1.t() @ x1) + 0.5 * (C1 + C1.t()) @ x2 + h1.reshape(-1, 1)

def D2f2(x1, x2):
    return (A2 @ x2) + (B2 @ x1) + e2.reshape(-1, 1)


# Analytical Hessians.
# For 1/2 x'Mx the Hessian is 0.5*(M + M.t()): A1/A2 are symmetric by
# construction so this reduces to A1/A2, but C1/C2 must be symmetrized.

# H wrt x1:
def H11f1(x1, x2):
    return A1

def H11f2(x1, x2):
    return 0.5 * (C2 + C2.t())

# H wrt x2:
def H22f1(x1, x2):
    return 0.5 * (C1 + C1.t())

def H22f2(x1, x2):
    return A2


# +
# Computational gradient via autograd (the original printed
# f1(x1,x2).grad, which is always None for a non-leaf tensor).
g1 = torch.autograd.grad(f1(x1, x2).sum(), x1)[0]
g2 = torch.autograd.grad(f2(x1, x2).sum(), x2)[0]
print(g1)
print(g2)

# Computational Hessian via torch.autograd.functional.hessian:
H11 = torch.autograd.functional.hessian(lambda z: f1(z, x2).sum(), x1).reshape(n, n)
print(H11)
# -
Derivatives/Derivative Tests.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Gaussian Transformation

# #### Machine learning algorithms like linear and logistic regression assume that the variables are normally distributed. So, gaussian distributed variables may boost the machine learning algorithm performance.

# ### <span style="color:red">So, gaussian transformation is applied to logistic and linear regression</span>.

# If variables are not normally distributed, sometimes it is possible to find a mathematical transformation so that the transformed variable is gaussian.

# #### The remaining machine learning models, including Neural Networks, SVM, Tree Based methods and PCA do not make any assumption over the distribution of the independent variables. Usually we get a better model performance from a gaussian distribution.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy.stats as stats

dataset = pd.read_csv('data/train.csv', usecols=['Age', 'Fare', 'Survived'])
dataset.head()

dataset.isnull().sum()


def impute_na(data, variable):
    """Random-sample imputation: fill NaNs in data[variable] with values
    drawn (seeded, hence reproducible) from the observed values of the
    same column, and return the imputed Series.

    Fix: the original ignored its `data` parameter and always copied the
    global `dataset`.
    """
    df = data.copy()
    df[variable + '_random'] = df[variable]
    # Draw exactly as many donor values as there are missing entries.
    random_sample = df[variable].dropna().sample(df[variable].isnull().sum(),
                                                 random_state=0)
    # Re-index the donors onto the positions of the missing entries so the
    # assignment below aligns row-by-row.
    random_sample.index = df[df[variable].isnull()].index
    df.loc[df[variable].isnull(), variable + '_random'] = random_sample
    return df[variable + '_random']


dataset['Age'] = impute_na(dataset, 'Age')

dataset.isnull().sum()


# ### Q-Q Plot

def diagnostic_plots(df, variable):
    """Plot a histogram and a normal Q-Q plot of df[variable] side by side."""
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    df[variable].hist()
    plt.subplot(1, 2, 2)
    stats.probplot(df[variable], dist='norm', plot=plt)
    plt.show()


diagnostic_plots(dataset, 'Age')

diagnostic_plots(dataset, 'Fare')

# ### Logarithmic Transformation

# The +1 shift guards against Fare values equal to zero, for which log(0)
# is undefined.
dataset['log_fare'] = np.log(dataset['Fare'] + 1)

diagnostic_plots(dataset, 'log_fare')

# #### The logarithmic distribution does a good job in making Fare variable look gaussian distributed.

# ### Reciprocal Transformation

dataset['Rec_fare'] = 1 / (dataset['Fare'] + 1)

diagnostic_plots(dataset, 'Rec_fare')

# ### Square Root Transformation

dataset['sqr_fare'] = dataset['Fare'] ** (1 / 2)

diagnostic_plots(dataset, 'sqr_fare')

# ### Exponential Transformation

# NOTE(review): despite the heading, x**(1/5) is a fractional power
# transform (a fifth root), kept as in the original notebook.
dataset['exp_fare'] = dataset['Fare'] ** (1 / 5)

diagnostic_plots(dataset, 'exp_fare')

# ### BoxCox Transformation

dataset['fare_boxcox'], param = stats.boxcox(dataset.Fare + 1)
print('Optimal Lambda : ', param)

diagnostic_plots(dataset, 'fare_boxcox')
Fitting Data in Gaussian Distribution (Gaussian Transformation)/.ipynb_checkpoints/Gaussian Transformation-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: myvenv # language: python # name: .myvenv # --- # # EB Factorization # We compute the unique EB factorization of an inversed TN matrix. # See the book Totally TN matrices, and specifically the factorization in Theorem 2.2.2, for more details. # # A matrix $A\in \mathbb R^{n\times n}$ is an Invertibel TN matrix (ITN) if and only if it can be expressed as: # \begin{equation*} # A = L D U, # \end{equation*} # Where # \begin{align*} # L:=&[L_n(\ell_k)\cdots L_2(\ell_{k-n+2})] [L_n(\ell_{k-n+1})\cdots L_3(\ell_{k-2n+4})]\cdots [L_n(\ell_1)],\\ # U:=&[U_n(u_1)][U_{n-1}(u_2)U_n(u_3)]\cdots [U_2(u_{k-n+2})\cdots U_n(u_k)], # \end{align*} # $D$ is a diagonal matrix with positive entries, $\ell_i,u_i \ge 0$ for all $i$, # and $k:=\frac{(n-1)n}{2}$. # ## TP/TN/OSC/EB Functions # + # #%%writefile TP_TN_OSC_funcs.py import numpy as np import matplotlib.pyplot as plt from scipy.special import comb import networkx as nx import functools as fnt from itertools import combinations import MC # my multiplicative compound matrix computation '''Implementation here of the EB factorization on an invertible TN matrix is based on chapter 2 of the book "Totally Nonnegative Matrices", Fallat & Johnson.''' def E(n, i, j): '''Returns the E_{ij} matrix''' mat = np.zeros((n,n)) mat[i-1,j-1] = 1 return mat def L(n, i, a): '''Returns the L_i(a) matrix''' return np.identity(n) + a*E(n, i, i-1) def U(n, i, a): '''Returns the U_i(a) matrix''' return np.identity(n) + a*E(n, i-1, i) def compute_L_factorization( A, abs_thres=0 ): '''This function computes the L factorization of a square matrix''' n = A.shape[0] k = comb(n, 2, exact=True) Lmat = [] vals = [] Um = A for j in range(n-1): for i in range(n-1,j,-1): val = Um[i,j] / Um[i-1,j] if Um[i-1,j] != 0 else 0 if np.abs(val) < abs_thres: # < 10*np.finfo(np.float).eps: val = 0 
vals.append(val) Lmat.append(L(n,i+1, val)) Um = np.matmul( L(n,i+1, -val), Um ) return Lmat, Um, np.asarray(vals) def EB_factorization_ITN( A, abs_thres=0 ): '''This function computes the EB factorization of an inversed TN matrix. See Theorem 2.2.2 for more details. Given an inversed TN matrix A, the following holds: A = Lmat{1}*Lmat{2}*...*Lmat{end}*Dmat*Umat{end}*Umat{end-1}*...*Umat{1}. For example, for n=4: A = L_4(l_1)*L_3(l_2)*L_2(l_3)*L_4(l_4)*L_3(l_5)*L_4(l_6)*D* U_4(u_6)*U_3(u_5)*U_4(u_4)*U_2(u_3)*U_3(u_2)*U_4(l_1), Returned parameters: Lmat - a list of the L matrices in the order as in the multiplication. For example, for n=4: [L_4(valsL(1)),L_3(valsL(2)),L_2(valsL(3)),L_4(valsL(4)),L_3(valsL(5)),L_4(valsL(6))]. Dmat - the diagonal matrix. Umat - a list of the U matrices in the REVERSED order of the multiplication. For example, for n=4: [U_4(valsU(1)),U_3(valsU(2)),U_2(valsU(3)),U_4(valsU(4)),U_3(valsU(5)),U_4(valsU(6))]. valsL - the l_i values corresponding to the order of the L matrices in the multiplication: L_n*..*L_2*L_n*...L_3*...L_n valsU - the u_i values corresponding to the REVERSED order of the U matrices in the multiplication: U_n*U_{n-1}*U_n*...*U_2*U_3*...*U_n. For example, for a 4x4 matrix A we have A = Lmat{1}(valsL(1))*Lmat{2}(valsL(2))*...*Lmat{6}(valsL(6))*Dmat* Umat{6}(valsU(6))*Umat{5}(valsU(5))*...*Umat{1}(valsU(1)). ''' if A.shape[0] != A.shape[1]: print('Error: input matrix must be square for EB factorization of an ITN matrix !!') return Lmat, Um, valsL = compute_L_factorization( A, abs_thres ) Umat_tmp, Dmat, valsU = compute_L_factorization( Um.transpose(), abs_thres ) Umat = [x.transpose() for x in Umat_tmp] return Lmat, Dmat, Umat, Um, valsL, valsU def compute_L_indexes( n ): '''This function computes the L matrix indexes. 
For example, for n=4, the indexes are [4 3 2 4 3 4]''' xbase = np.array(range(n,1,-1)) x = xbase for i in range(1,n-1): x = np.concatenate((x,xbase[:-i])) return x def display_EB_factorization( Lmat, Dmat, Umat, valsL, valsU ): '''This function displays the factorization matrices in the order of the factorization multiplication (left to right). For the exact order of each input parameter, see the function EB_factorization_ITN() ''' n = Lmat[0].shape[0] idxs = compute_L_indexes( n ) k = idxs.shape[0] print("Factorization matrices in the order as in the factorization form (left-most to right-most matrix):") # L matrices for i in range(k): print("L{0}({1:4.2f})=\n{2}".format(idxs[i], valsL[i], Lmat[i])) # D matrix print("D=\n{}".format(Dmat)) # U matrices idxs = np.flip( idxs ) valsu = np.flip( valsU ) for i in range(k): print("U{0}({1:4.2f})=\n{2}".format(idxs[i], valsu[i], Umat[(k-1)-i])) def EB_factorization_k2n(k): '''This function returns the n value given k. k is the number of L and U parameters in the EB factorization on a square I-TN matrix of size n. n = (1+sqrt(1+8*k))/2.''' return int((1 + np.sqrt(1+8*k))/2) def EB_factorization_n2k(n): '''This function returns the k value given n. k is the number of L and U parameters in the EB factorization on a square I-TN matrix of size n. k = ((n-1)*n)/2''' return int(((n-1)*n)/2) def lexichog_order(n, p): '''This function returns the p'th order lexicography indxes array based on the array 0, ..., n-1. For example, for n=4 and p=2, the function returns: np.array[[0,1], [0,2], [0,3], [1,2], [1,3], [2,3]]''' return np.array(list(combinations(np.arange(n, dtype=int), p))) # lexicography order of the p inxedes in 0, ..., n-1 def lexicog2linear(n, r, c): '''This function converts a lexicography matrix index to a linear index. The function assumes that all indexes starts from 0. Inputs: r = [r_1, r_2,..., r_p] c = [c_1, c_2,..., c_p] where r_i, c_i get values between 0 to n-1. 
The function returns the tuple (i, j) correponding to row i and column j of r and c, respectively (where 0 in the first row/column). For example, for n=4, r=[0,3], c=[1,3] we get i=3, j=4. ''' if len(r) != len(c): print('Error: r and c length missmatch !!') return lp = lexichog_order(n, len(r)) # np array of lexicography order kvec = np.arange(len(lp)) return kvec[(lp==r).all(axis=1)][0], kvec[(lp==c).all(axis=1)][0] def linear2lexicog(n, p, i, j): '''This function converts a linear index to a lexicography index. For example, for n=4, p=3, i=2, and j=0 we get r=[0,2,3], c=[0,1,2] ''' lp = lexichog_order(n, p) # np array of lexicography order if (i>=len(lp)) or (j>=len(lp)): print('Error: i and/or j larger than {} !!'.format(len(lp-1))) return return lp[i], lp[j] def old_draw_EB_factorization_ITN( valsL, d, valsU, ax, font_size=34, font_color='r', perc_round=4, base_weight=1, tol=10*np.finfo(np.float).eps ): '''Obsolete. See the function below instead. This function draws the graph corresponding to the given EB factorization (in the form of the L matrix parameters, the digonal of the diagonal natrix and the U matrix parameters.''' k = valsL.shape[0] n = EB_factorization_k2n(k) #int((1 + np.sqrt(1+8*k))/2) idxs = compute_L_indexes( n ) G = nx.Graph() # all nodes in the graph (total of n rows and 2*(k+1) columns) for j in range(2*(k+1)): for i in range(n): G.add_node(j*n+i,pos=(j,i)) # edges corresponding to the L matrices for j in range(k): #if( valsL[j] != 0): # L_k(m) adds an edge from node k to node k-1 of weight m if(np.abs(valsL[j]) > tol): # L_k(m) adds an edge from node k to node k-1 of weight m G.add_edge(j*n+idxs[j]-1,(j+1)*n+idxs[j]-2, weight=valsL[j]) for i in range(n): # all horizontal edges of weight 1 G.add_edge(i+j*n,(j+1)*n+i, weight=base_weight) # horizontal edges corresponding to the D matrix for i in range(n): G.add_edge(i+k*n,i+(k+1)*n, weight=d[i]) # edges corresponding to the U matrices valsu = np.flip(valsU) idxs = np.flip(idxs) for j in 
range(k+1,2*(k+1)-1): m = j-(k+1) # the corresponding index in idxs and valsU #if( valsu[m] != 0 ): # U_k(m) adds an edge from k-1 to k of weight m if(np.abs(valsu[m]) > tol): # U_k(m) adds an edge from k-1 to k of weight m G.add_edge(j*n+idxs[m]-2,(j+1)*n+idxs[m]-1, weight=valsu[m]) for i in range(n): # all horizontal edges of weight 1 G.add_edge(j*n+i,(j+1)*n+i, weight=base_weight) nn = np.array(range(1,n+1)) lnames = {k:v for (k,v) in enumerate(nn)} rnames = {k:v for (k,v) in zip( range((2*k+1)*n,((2*(k+1))*n)), nn)} nnames = {**lnames, **rnames} # node names pos = nx.get_node_attributes(G,'pos') nx.draw(G, pos, ax=ax) #edge_labels=dict([((u,v,),round(d['weight'],perc_round)) for u,v,d in G.edges(data=True)]) edge_labels={(u,v,):round(d['weight'],perc_round) for u,v,d in G.edges(data=True)} nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, ax=ax); # node labels (names) nx.draw_networkx_labels(G, pos, ax=ax, labels=nnames, font_size=font_size, font_color=font_color); def draw_EB_factorization_ITN( valsL, d, valsU, ax, compress_f = True, font_size=24, font_color='r', perc_round=4, base_weight=1, tol=10*np.finfo(np.float).eps, noffset=0.2 ): '''This function draws the graph corresponding to the given EB factorization (in the form of the L matrix parameters, the digonal of the diagonal natrix and the U matrix parameters). The function supports compressing the graph in the sense of removing L and/or U matrices with parameters equal to zero. Inputs: valsL, valsU - see the output parameters of the function EB_factorization_ITN() d - the diagonal of the diagonal matrix D, i.e. 
    [d_{11},d_{22},...,d_{nn}] '''
    n = EB_factorization_k2n(valsL.shape[0]) #int((1 + np.sqrt(1+8*k))/2)
    idxs = compute_L_indexes( n )
    if compress_f: # remove L/U matrices with zero parameters
        locl = valsL!=0
        locu = valsU!=0
    else:
        # keep all k factors, including those with zero parameters
        locl = np.ones(valsL.size, dtype=bool)
        locu = np.ones(valsU.size, dtype=bool)
    vL = valsL[locl]
    lidxs = idxs[locl] # indexes corresponding to vL
    nvL = vL.size
    vU = valsU[locu]
    uidxs = idxs[locu] # indxes corresponding to vU
    nvU = vU.size
    num_h_nodes = nvL+nvU+2 # total number of horizontal nodes (one column per factor + D)
    #G = nx.Graph() # undirected graph
    G = nx.DiGraph() # directed graph
    # all nodes in the graph (total of n rows and num_h_nodes columns);
    # node id j*n+i encodes column j, row i
    for j in range(num_h_nodes):
        for i in range(n):
            G.add_node(j*n+i,pos=(j,i))
    # edges corresponding to the L matrices
    for j in range(nvL):
        if(np.abs(vL[j]) > tol): # L_k(m) adds an edge from node k to node k-1 of weight m
            G.add_edge(j*n+lidxs[j]-1,(j+1)*n+lidxs[j]-2, weight=vL[j])
        for i in range(n): # all horizontal edges of weight 1
            G.add_edge(i+j*n,(j+1)*n+i, weight=base_weight)
    # horizontal edges corresponding to the D matrix (weights are the diagonal entries)
    for i in range(n):
        G.add_edge(i+nvL*n,i+(nvL+1)*n, weight=d[i])
    # edges corresponding to the U matrices (parameters are consumed in reversed order)
    vu = np.flip(vU)
    uidxs = np.flip(uidxs)
    for j in range(nvL+1,num_h_nodes-1):
        m = j-(nvL+1) # the corresponding index in uidxs and vu
        if(np.abs(vu[m]) > tol): # U_k(m) adds an edge from k-1 to k of weight m
            G.add_edge(j*n+uidxs[m]-2,(j+1)*n+uidxs[m]-1, weight=vu[m])
        for i in range(n): # all horizontal edges of weight 1
            G.add_edge(j*n+i,(j+1)*n+i, weight=base_weight)
    # node names: label only the leftmost (source) and rightmost (sink) columns with 1..n
    nn = np.array(range(1,n+1))
    lnames = {k:v for (k,v) in enumerate(nn)}
    rnames = {k:v for (k,v) in zip( range((num_h_nodes-1)*n,(num_h_nodes*n)), nn)}
    nnames = {**lnames, **rnames} # node names
    pos = nx.get_node_attributes(G,'pos')
    nx.draw(G, pos, ax=ax)
    edge_labels={(u,v,):round(d['weight'],perc_round) for u,v,d in G.edges(data=True)}
    nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, ax=ax);
    # node labels (names) - we shift the position of the names of the source and sink nodes to the left and
    # right, respectively.
    pos_n = pos.copy()
    for k in range(n):
        for (o,v) in zip([0,n*(num_h_nodes-1)], [-noffset, noffset]):
            pos_n[k+o] = (pos_n[k+o][0]+v, pos_n[k+o][1])
    nx.draw_networkx_labels(G, pos_n, ax=ax, labels=nnames, font_size=font_size, font_color=font_color);

def compute_matrix_from_EB_factorization( valsL, valsD, valsU ):
    '''This function multiplies all factorization matrices corresponding to the factorization
    parameters given to the function, to obtain the original matrix. Basically, the function computes:
    A = (L_n(valsL_1)*..*L_2(valsL_{n-2}))*(L_n(valsL_{n-1})*..)..*(L_n(valsL_k))*diag(valsD)*
        (U_n(valsU_k))*(U_{n-1}(valsU_{k-1})*U_n(valsU_{k-2}))*.....*U_n(valsU_1).
    For example, for n=4, the function computes:
    A = L_4(valsL_1)*L_3(valsL_2)*L_2(valsL_3)*L_4(valsL_4)*L_3(valsL_5)*L_4(valsL_6)*diag(valsD)*
        U_4(valsU_6)*U_3(valsU_5)*U_4(valsU_4)*U_2(valsU_3)*U_3(valsU_2)*U_4(valsU_1).
    '''
    k = valsL.shape[0]
    n = EB_factorization_k2n(k) #int((1 + np.sqrt(1+8*k))/2)
    idxs = compute_L_indexes( n )
    # product of all L matrices, multiplied by D, multiplied by the product of all U matrices
    return fnt.reduce(np.matmul, [L(n, idxs[i], valsL[i]) for i in range(k)]) @ \
           np.diag(valsD) @ \
           fnt.reduce(np.matmul, [U(n, idxs[i], valsU[i]) for i in reversed(range(k))])

def show_EB_config( valsL, valsU, valsD=0, mode=False ):
    '''This function returns the EB factorization configuration, in a form of a string, given
    the L and U matrices parameters. If mode==False (default), the L and U parameters are not
    displayed, otherwise they are displayed together with the diagonal entries of the matrix D
    (valsD). For the exact order of valsL and valsU parameters, see the function EB_factorization_ITN().
For example, show_EB_config( np.array([1,0,5,0,9,0]), np.array([0,.1,0.3,0.7,0,0]), np.array([1,2,3,4]), True ) yields: 'L4(1)*L2(5)*L3(9)*D([1 2 3 4])*U4(0.7)*U2(0.3)*U3(0.1)', and show_EB_config( np.array([1,0,5,0,9,0]), np.array([0,.1,0.3,0.7,0,0])) yields: 'L4*L2*L3*U4*U2*U3'. ''' idxs = compute_L_indexes( EB_factorization_k2n(valsL.shape[0]) ) sr = '' loc = valsL!=0 vl = valsL[loc] ids = idxs[loc] for i in range(len(vl)): # the L matrices sr += 'L'+str(ids[i]) if mode: sr += '('+str(vl[i])+')' sr += '*' if mode: # The D matrix sr += 'D('+str(valsD)+')*' loc = valsU!=0 vl = np.flip(valsU[loc]) ids = np.flip(idxs[loc]) for i in range(len(vl)): # the U matrices sr += 'U'+str(ids[i]) if mode: sr += '('+str(vl[i])+')' sr += '*' return sr[:-1] def is_TP( A, tol=10*np.finfo(np.float).eps ): '''This function returns True [False] if A is [is not] a TP matrix. A matrix is TP is all MC are > tol''' return all([(MC.compute_MC_matrix(A, p)[0]>tol).all() for p in range(1,A.shape[0]+1)]) def is_TN( A ): '''This function returns True [False] if A is [is not] a TN matrix.''' return all([(MC.compute_MC_matrix(A, p)[0]>=0).all() for p in range(1,A.shape[0]+1)]) def is_invertible( A, tol=10*np.finfo(np.float).eps ): '''This function returns True [False] if A is [is not] an invertible matrix. 
A matrix is invertible if det(A)>tol''' return (A.shape[0]==A.shape[1]) and (np.abs(np.linalg.det(A))>tol) def is_ITN( A, tol=10*np.finfo(np.float).eps ): '''This function returns True [False] if A is [is not] an inversible TN matrix.''' return is_TN(A) and is_invertible(A, tol) def is_OSC( A, tol ): '''This function returns True [False] if A is [is not] an oscillatory matrix.''' return is_ITN(A, tol) and is_TP(np.linalg.matrix_power(A, A.shape[0]-1), tol) def is_factorization_osc(lvals, uvals, dvals, lindxs = None): '''This function checks if the given factorization (given by the l, u, and d parameters) results in an oscillatory matrix.''' n = EB_factorization_k2n(lvals.shape[0]) if lindxs is None: lindxs = compute_L_indexes(n) return (dvals>0).all() and all([ (lvals[j]>0).any() and (uvals[j]>0).any() for i in lindxs for j in np.where(lindxs==i)]) def is_factorization_TP(lvals, uvals, dvals, lindxs = None): '''This function checks if the given factorization (given by the l, u, and d parameters) results in a TP matrix.''' return (dvals>0).all() and (lvals>0).all() and (uvals>0).all() def show_mat_latex_format(A, fmt='4f'): '''This function prints a matrix in a latex format to the screen.''' print('\\begin{bmatrix}') for j, row in enumerate(A,1): for x in row[:-1]: print(f'{x:.{fmt}}', end=' & ') print(f"{row[-1]:.{fmt}}", end='') if j < A.shape[0]: print(" \\\\") print('\n\\end{bmatrix}') def osc_exp(A, tol=0): '''Returns the exponent of the oscillatory matrix A. It is assumed that A is oscillatory (i.e. 
    no checking is done).'''
    # r is the smallest power such that A^r is TP (guaranteed to exist for oscillatory A)
    for r in range(1,A.shape[0]):
        if(is_TP(np.linalg.matrix_power(A,r), tol)):
            break
    return r
# -

# ### Basic Example

# +
V = np.array([[1,1,1],[1,2,4],[1,3,9]]) # define the matrix V
# ==============================================================
print("V = \n{}".format(V))
_, ax = plt.subplots(figsize=(16,6))
Lmat, Dmat, Umat, _, valsL, valsU = EB_factorization_ITN( V )
display_EB_factorization( Lmat, Dmat, Umat, valsL, valsU )
draw_EB_factorization_ITN( valsL, np.diagonal(Dmat), valsU, ax )
ax.set_title("EB factorization graph of V")
# show the plot in a pylustrator window
# check factorization: rebuilding V from its EB parameters should reproduce it
VV = compute_matrix_from_EB_factorization( valsL, np.diagonal(Dmat), valsU )
print("VV (should be equal to V) = \n{}".format(VV))
# compute MC matrix
Q, lp = MC.compute_MC_matrix(V, 2)
print('Q=\n{}'.format(Q))
print('MC order=\n{}'.format(lp+1)) # the indexes in lp starts from 0
# -

# ## Few paper examples

# +
# example 1
n = 3
tol = 1e-10
# ===========
D = np.diag([1,2,3])
A = L(n,3,1)@L(n,2,2)@L(n,3,3)@D@U(n,3,2)@U(n,2,1)@U(n,3,1)
print(A, is_TP(A, tol))
# peeling the L factors off A recovers D times the U product
show_mat_latex_format(L(n,3,-3)@L(n,2,-2)@L(n,3,-1)@A)
show_mat_latex_format(D@U(n,3,2)@U(n,2,1)@U(n,3,1))

# the diagram of example 1
savefig_flag1 = 0;
#savefig_name1 = 'EB_diag_exp1.png'
savefig_name1 = 'EB_diag_exp1.eps'
#########################################
plt1, ax1 = plt.subplots(figsize=(16,6))
Lmat1, Dmat1, Umat1, Um1, valsL1, valsU1 = EB_factorization_ITN( A )
display_EB_factorization( Lmat1, Dmat1, Umat1, valsL1, valsU1 )
draw_EB_factorization_ITN(valsL1, np.diagonal(Dmat1), valsU1, ax1)
#ax1.set_title("EB factorization graph of A1")
if savefig_flag1:
    plt1.savefig(savefig_name1, dpi=150)
    print("Figure saved in {}.".format(savefig_name1))

# examples of two matrices with the same s value
# ==============================================
print('\nExample of two matrices with the same s value:')
print('================================================')
n = 4
D1 = np.diag([2,1,3,5])
D2 = np.diag([1,2,3,4])
#A1 = fnt.reduce(np.matmul, [L(n,4,2.5),L(n,3,1.4),L(n,2,4.3),L(n,4,5.1),L(n,3,6.5),D1,U(n,3,2.4),U(n,4,1.2),U(n,2,5.4),U(n,3,4.1),U(n,4,3.5)])
#A2 = fnt.reduce(np.matmul, [L(n,4,5.1),L(n,3,3),L(n,2,2.2),L(n,4,7),L(n,3,2.4),D2,U(n,3,1.2),U(n,4,3.6),U(n,2,2.3),U(n,3,6.1),U(n,4,5.2)])
A1 = fnt.reduce(np.matmul, [L(n,4,2.5),L(n,3,1),L(n,2,4),L(n,4,5.5),L(n,3,6.5),D1,U(n,3,3),U(n,4,2),U(n,2,4),U(n,3,2),U(n,4,3)])
A2 = fnt.reduce(np.matmul, [L(n,4,5),L(n,3,3),L(n,2,2),L(n,4,6),L(n,3,2.5),D2,U(n,3,1.5),U(n,4,3),U(n,2,2),U(n,3,4),U(n,4,3)])
print(is_TP(A1@A2, tol), is_TP(A2@A1, tol), is_TP(A1@A1, tol), is_TP(A2@A2, tol))
print('A1:')
show_mat_latex_format(A1, fmt='2f')
print('A2:')
show_mat_latex_format(A2, fmt='2f')
print('================================================')

# examples of two matrices with the same s_\ell and s_u values
# ============================================================
print('\nExample of two matrices with the same s_ell and s_u value:')
n = 4
D1 = np.diag([2,1,3,5])
D2 = np.diag([1,2,3,4])
A1 = fnt.reduce(np.matmul, [L(n,4,2.5),L(n,3,1),L(n,2,4),L(n,4,5.5),L(n,3,6.5),L(n,4,1),D1,U(n,3,1),U(n,4,2),U(n,2,4),U(n,3,2),U(n,4,3)])
A2 = fnt.reduce(np.matmul, [L(n,4,5),L(n,3,3),L(n,2,2),L(n,4,6),L(n,3,2.5),L(n,4,2),D2,U(n,3,2),U(n,4,1),U(n,2,2),U(n,3,4),U(n,4,3)])
print(is_TP(A1@A2, tol), is_TP(A2@A1, tol), is_TP(A1@A1, tol), is_TP(A2@A2, tol))
print('A1:')
show_mat_latex_format(A1, fmt='2f')
print('A2:')
show_mat_latex_format(A2, fmt='2f')

# example 5
# =============
print('\nExample 5\n')
n=5
tol = 1e-9
#A1=L(n,5,0.5)@L(n,4,0.4)@L(n,3,0.3)@L(n,2,0.2)@L(n,5,0.5)@L(n,4,0.4)@L(n,3,0.3);
#A2=U(n,3,0.3)@U(n,4,0.4)@U(n,5,0.5)@U(n,2,0.2)@U(n,3,0.3)@U(n,4,0.4)@U(n,5,0.5);
#A1=L(n,5,1)@L(n,4,3)@L(n,3,2)@L(n,2,2)@L(n,5,3)@L(n,4,4)@L(n,3,2);
#A2=U(n,3,3)@U(n,4,4)@U(n,5,5)@U(n,2,2)@U(n,3,3)@U(n,4,4)@U(n,5,5);
A1=L(n,5,1)@L(n,4,3)@L(n,3,2)@L(n,2,2)@L(n,5,3)@L(n,4,4)@L(n,3,2);
A2=U(n,3,3)@U(n,4,4)@U(n,5,5)@U(n,2,2)@U(n,3,3)@U(n,4,4)@U(n,5,5);
A = A1@A2;
print(A)
print('A:')
show_mat_latex_format(A, fmt='0f')
#print('A^2:')
#show_mat_latex_format(A@A)
print(is_TP(A@A,tol), is_TP(A@A@A,tol), is_TP(A@A@A@A,tol))

# New example 5 (example of s_\ell and s_u)
# ==========================================
print('\nNew Example 5 (with s_ell and s_u)\n')
n=4
tol = 1e-9
A1 = L(n,4,1)@L(n,3,3)@L(n,2,2)
A2 = U(n,3,1)@U(n,4,2)@U(n,2,1)@U(n,3,2)@U(n,4,1)
A = A1@A2;
print('A:')
show_mat_latex_format(A, fmt='0f')
print(f'A is TP: {is_TP(A, tol)}, A^2 is TP: {is_TP(A@A,tol)}, A^3 is TP: {is_TP(A@A@A,tol)}')
print('A^2=')
show_mat_latex_format(A@A, fmt='0f')
print(f'(A^2)^(3)={MC.compute_MC_matrix(A@A, 3)[0]}')

# example of q_\ell and q_u
# ===========================
print('\nExample with q_ell and q_u\n')
n = 5
A1=L(n,2,2)@L(n,5,3)@L(n,4,4)@L(n,3,2)@L(n,5,2)@L(n,4,1)@L(n,5,2)
A2=U(n,5,1)@U(n,4,2)@U(n,5,3)@U(n,3,3)@U(n,2,2)
A = A1@A2;
print('A:')
show_mat_latex_format(A, fmt='0f')
print('A^2:')
show_mat_latex_format(A@A, fmt='0f')
print(f'A is TP: {is_TP(A, tol)}, A^2 is TP: {is_TP(A@A,tol)}, A^3 is TP: {is_TP(A@A@A,tol)}, A^4 is TP: {is_TP(A@A@A@A,tol)}')

# example of a basic oscillatory (example 2)
# ==========================================
print('\nExample basic oscillatory\n')
n=4
A=L(n,3,1)@L(n,2,2)@L(n,4,3)@U(n,3,4)@U(n,4,5)@U(n,2,6)
print('A=')
show_mat_latex_format(A, fmt='0f')

# example if adding one more term but r(A) deosn't change
# =======================================================
print('\nExample additional terms in prop. 9\n')
n = 3
D = np.array([[1,0,0], [0,1,0], [0,0,1]])
A1 = L(n,2,1)@L(n,3,2)@D@U(n,3,2)@U(n,2,1)
print('A1:')
show_mat_latex_format(A1, fmt='0f')
A2 = A1@U(n,3,4)
print('A2:')
show_mat_latex_format(A2, fmt='0f')

# Last example of adding a term that reduces r from n-1
# =======================================================
print('\nExample for adding a term that reduces r from n-1\n')
n = 4
A = fnt.reduce(np.matmul, [L(n,4,2),L(n,3,1),L(n,2,3),L(n,4,1),L(n,3,2),U(n,2,1),U(n,3,2),U(n,4,3)])
print('A:')
show_mat_latex_format(A, fmt='0f')
print(f'A is TP: {is_TP(A,tol)}, A^2 is TP: {is_TP(A@A, tol)}. A^3 is TP: {is_TP(A@A@A, tol)}')
B = fnt.reduce(np.matmul, [L(n,4,2),L(n,3,1),L(n,2,3),L(n,4,1),L(n,3,2),U(n,4,1),U(n,2,1),U(n,3,2),U(n,4,3)])
print('B:')
show_mat_latex_format(B, fmt='0f')
print(f'B is TP: {is_TP(B,tol)}, B^2 is TP: {is_TP(B@B, tol)}. B^3 is TP: {is_TP(B@B@B, tol)}')
print(MC.compute_MC_matrix(B, 2)[0])
#print(nx.__version__) # shows networkx version
# +
# Future research
# =================
n, tol = 4, 1e-9
A1 = fnt.reduce(np.matmul, [L(n,4,2),L(n,3,1),L(n,2,3),U(n,3,1),U(n,4,1),U(n,2,1),U(n,3,2),U(n,4,3)])
A2 = fnt.reduce(np.matmul, [L(n,4,1),L(n,3,3),L(n,2,2),L(n,4,1),L(n,3,2),U(n,2,3),U(n,3,1),U(n,4,2)])
print('A1:')
show_mat_latex_format(A1, fmt='0f')
print('A2:')
show_mat_latex_format(A2, fmt='0f')
print(f'A1^2 is TP: {is_TP(A1@A1,tol)}, A2^2 is TP: {is_TP(A2@A2, tol)}. A1A2 is TP: {is_TP(A1@A2, tol)}, \
A2A1 is TP: {is_TP(A2@A1, tol)}')
Q = A2@A1
print(f'Q^(2) = {MC.compute_MC_matrix(Q, 2)[0]}\nQ2^(3)={MC.compute_MC_matrix(Q, 3)[0]}')

# checking python version
from platform import python_version
print(f'python version={python_version()}')
# +
# Future research 2
# ==================
'''Trying to deduce r from an arbitrary factorizarion'''
n, tol = 6, 1e-9
#l_order = [ 3, 2, 3, 5,4, 5]
#l_order = [ 3,2, 5,3, 6,5,4, 6,5, 6]
#l_order = [ 2, 7,6,4,3, 7,6,5,4, 7,6,5, 7,6, 7]
l_order = [6,5,4,3,2, 3, 4, 5,6]
D = np.eye(n)
q = 1
u_order = list(reversed(compute_L_indexes(n)))
#print(u_order)
LL = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,q) for i in u_order])
A = LL @ D @ UU
for i in range(n-1,0,-1):
    print(f"A^{i} is TP") if is_TP(np.linalg.matrix_power(A,i), tol) else print(f"A^{i} is not TP")
# +
# the case of Z_5
n, tol = 6, 1e-9
l_order = [6,5,4,3,2]
D = np.eye(n)
q = 1
u_order = list(reversed(compute_L_indexes(n)))
#print(u_order)
LL = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,q) for i in u_order])
A = LL @ D @ UU
for i in range(n-1,0,-1):
    print(f"A^{i} is TP") if is_TP(np.linalg.matrix_power(A,i), tol) else print(f"A^{i} is not TP")
# +
# Examples in the updated version of the paper
# =============================================
''' The case where L = Z_3(1) and U^T=Z_4(1)'''
n, tol = 5, 1e-9
#l_order = [5,4,3,2,5,4,5]
#u_order = [4,5,2,3,4,5]
l_order = [5,4,2,3,4]
u_order = [2,3,4,5]
D = np.eye(n)
ql, qu = 1, 2
LL = fnt.reduce(np.matmul, [L(n,i,ql) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,qu) for i in u_order])
A = LL @ D @ UU
print('A=')
#p=3
#np.set_printoptions(precision=4)
#print(f'A^({p})=\n{MC.compute_MC_matrix(A, p)[0]}')
show_mat_latex_format(A, fmt='0f')
print('A^2=')
show_mat_latex_format(A@A, fmt='0f')
print(f'AA^(2) = {MC.compute_MC_matrix(A@A, 2)[0]}\nAA^(3)={MC.compute_MC_matrix(A@A, 3)[0]}\nAA^(4)={MC.compute_MC_matrix(A@A, 4)[0]}')
print('A^3=')
show_mat_latex_format(A@A@A, fmt='0f')
for i in range(n-1,0,-1):
    print(f"A^{i} is TP") if is_TP(np.linalg.matrix_power(A,i), tol) else print(f"A^{i} is not TP")
# -

# Future research 3
# ==================
n, tol = 6, 1e-9
l_order = [2, 6,5, 4,3, 6,5,4, 6, 5, 6]
D = np.eye(n)
q = 1
u_order = l_order # recall that this is not a real assignment but a reference to the same value of l_order
LL = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,q) for i in u_order])
A = LL @ D @ UU
for i in range(n-1,0,-1):
    print(f"A^{i} is TP") if is_TP(np.linalg.matrix_power(A,i), tol) else print(f"A^{i} is not TP")

# # Trying to find exponent of arbitrary osc

# +
def sebi(n,val=2,absnt_i={}):
    '''Returns a list with elements in [n,n-1,...,val] that are not in the set absnt_i'''
    return [i for i in range(n,val-1,-1) if i not in absnt_i]

def sebi_all(n):
    '''Returns all L indexes'''
    return list(compute_L_indexes(n))

def say_if_power_TP(A,tol=0,pre=''):
    '''Prints if A^i is TP or not for all i=1,...,n-1'''
    for i in range(A.shape[0]-1,0,-1):
        print(f"{pre}A^{i} is", end=' ')
        print("TP") if is_TP(np.linalg.matrix_power(A,i), tol) else print("not TP")

# -------------------------------------------------------------------------------------------------
n,tol,q = 5,1e-9,1
#l_order = sebi(n,2,{3,4})+sebi(n,3,{5})+sebi(n,4,{4})+sebi(n,5,{5})
#l_order = sebi(n,2,{3})+sebi(n,3,{5,4})+sebi(n,4,{4,5})+sebi(n,5)
l_order = sebi(n,2,{4,5})+sebi(n,3,{5})+sebi(n,4,{5})+sebi(n,5)
print(f'{l_order = }')
u_order = l_order[::-1] #sebi_all(n)[::-1]
print(f"{u_order = }")
D = np.eye(n)
A = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order]) @ D @ fnt.reduce(np.matmul, [U(n,i,q) for i in u_order])
#for i in range(n-1,0,-1):
#    print(f"A^{i} is TP") if is_TP(np.linalg.matrix_power(A,i), tol) else print(f"A^{i} is not TP")
say_if_power_TP(A, tol)
print(f"r(A) = {osc_exp(A,tol)}")
# -

# ## Analyzing the minimum exponent of oscillatory matrices

# +
'''Here we try to evaluate the configurations that yield in exponent that is n-2
(or more general less than n-1)'''
n = 4
D = np.eye(n)
q = 1
tol = 1e-10
# =============
l_order = [2,3,4]
u_order = l_order # recall that this is not a real assignment but a reference to the same value of l_order
LL = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,q) for i in u_order])
A = LL @ D @ UU
print(f"A^2 is TP: {is_TP(A@A,tol)}")
show_mat_latex_format(A)
# -

# ## Analyzing MC of L and U matrices
# Here we compute $(L_i(q))^{(p)}$ for all $i=2,\dots,n$, given $n$ and $q$.

# + tags=["parameters"]
n = 4;
p = 2;
q = 1;

# Evaluating (L_i(q))^(p) for all i=2,...,n.
print('p =', p)
for i in range(2, n+1):
    Lmat = L(n, i, q)
    Q, _ = MC.compute_MC_matrix(Lmat, p)
    print('{}. L =\n {}\nL^({}) =\n{}'.format(i,Lmat, p, Q))
# -

# ## Evaluating $A^{(p)}$, where $A\in \mathbb R_+^{4\times 4}$ is given by its EB factorization

# +
n = 4;
p = 2;
q = 5;
# We ignore D in the EB factorization as it only scales rows and columns
# example of an oscillatory matrix
L4p = MC.compute_MC_matrix(L(n,4,q),p)[0]
L3p = MC.compute_MC_matrix(L(n,3,q),p)[0]
L2p = MC.compute_MC_matrix(L(n,2,q),p)[0]
U4p = MC.compute_MC_matrix(U(n,4,q),p)[0]
U3p = MC.compute_MC_matrix(U(n,3,q),p)[0]
U2p = MC.compute_MC_matrix(U(n,2,q),p)[0]
Ap = fnt.reduce(np.matmul, [L4p, L3p, L2p, L4p, L3p, L4p, U4p, U3p, U2p])
print('p = ', p)
#print('test =\n{}'.format(L4p @ L4p))
#print('L4p=\n{}\nL4p*L3p=\n{}\nL4p*L3p*L2p=\n{}'.format(L4p, L4p @ L3p, L4p @ L3p @ L2p))
#print('U4p=\n{}\nU4p*U3p=\n{}\nU4p*U3p*U2p=\n{}'.format(U4p, U4p @ U3p, U4p @ U3p @ U2p))
print('Ap=\n{}'.format(Ap))
# -

# ## Evaluating $A^{(p)}$, where $A\in \mathbb R_+^{3\times 3}$ is given by its EB factorization

# +
n = 3
p = 2
q = 2
L3p = MC.compute_MC_matrix(L(n,3,q),p)[0]
L2p = MC.compute_MC_matrix(L(n,2,q),p)[0]
L3pL2p = L3p @ L2p @ L3p # can also use fnt.reduce(np.matmul, [L3p, L2p, L3p])
U3p = MC.compute_MC_matrix(U(n,3,q),p)[0]
U2p = MC.compute_MC_matrix(U(n,2,q),p)[0]
U2pU3p = U3p @ U2p @ U3p #np.matmul(U3p, U2p)
Ap = L3pL2p @ U2pU3p #np.matmul(L3pL2p, U2pU3p)
print('L3p=\n{}\nL3p*L2p=\n{}'.format(L3p, L3pL2p))
print('U2p=\n{}\nU2p*U3p=\n{}'.format(U2p, U2pU3p))
print('Ap=\n{}'.format(Ap))
# -

# ## Evaluating for $A\in\mathbb R^{4\times 4}$ OSC, if $A^2$ is TP

# +
# valsL corresponds to L4, L3, L2, L4, L3, L4 (as the order in the factorization)
# valsU corresponds to U4, U3, U2, U4, U3, U4 (the inverse order in the factorization)
valsL = np.array([1,0,1,0,1,0]) # parameters based on factorization order (l_1 to l_k)
valsU = np.array([0,1,1,1,0,0]) # parameters based on factorization order (u_1 to u_k)
valsD = np.array([1,1,1,1])
A = compute_matrix_from_EB_factorization( valsL, valsD, valsU )
n = A.shape[0]
print('A=\n{}'.format(A))
#for p in range(1,n+1):
#    print('A^({})=\n{}'.format(p, MC.compute_MC_matrix(A, p)[0]))
isosc = is_OSC( A, 1e-9 )
print('A is OSC.') if isosc else print('A is not osc.')
A2 = A @ A
print('A^2=\n{}'.format(A2))
isTP = is_TP( A2 )
print('A^2 is TP.') if isTP else print('A^2 is not TP.')
#print('A^2=\n{}'.format(A2))
#for p in range(1,n+1):
#    print('(A^2)^({})=\n{}'.format(p, MC.compute_MC_matrix(A2, p)[0]))
# -

# ## Osc cases where $A^{w}$ is TP, w=(n-1)/m, where $m$ is the number od STEPs

'''Here we start from the first STEP = L_n L_{n-1} ...L_2, and every time add another STEP
with one less matrix on the right (i.e.
the second STEP is L_n...L_3, the third STEP is L_n...L_4, etc.)'''
n = 5
q = 1
num_steps = 2
tol = 1e-10
# =============
base_l = list(range(n,1,-1))
l_order = base_l.copy()
for i in range(num_steps):
    # i=0 appends nothing (base_l[:0]); i>=1 appends the STEP with i fewer matrices
    l_order += base_l[:-i]
print(l_order)
u_order = l_order
LL = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,q) for i in u_order])
A = LL @ UU
w = np.ceil((n-1)/num_steps).astype('int')
print(f'A^{w} is TP: {is_TP(np.linalg.matrix_power(A, w), tol)}')

'''Here we start with L_2 L_3...L_n, i.e. from each STEP take only the last matrix.
Then, for num_adds=1 we replace the STEp before the last (i.e. L_{n-1}) with the full STEP,
i.e. L_n L_{n-1}. For num_adds = 2, we replace the second STEP from the last (i.e. L_{n-2})
with its full STEP, i.e. L_n L_{n-1} L_{n-2}. etc.
Note, due to numerical computations, there could be wrong results for large n
(e.g., n=9 yields wrong result for num_adds = 3, 4) '''
n = 9
q = 1
num_adds = 5 # a value between 0 and n-2
tol = 1e-10
# =============
l_order = []
base_l = list(range(n,1,-1))
for i in range(n-1):
    cur_step = base_l[:-i] if i>0 else base_l
    if n-2-i <= num_adds:
        l_order += cur_step
    else:
        l_order.append(cur_step[-1])
    #print(cur_step, l_order)
print(l_order)
w=n-1-num_adds
u_order = l_order
LL = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,q) for i in u_order])
A = LL @ UU
print(f'A^{w} is TP: {is_TP(np.linalg.matrix_power(A, w), tol)}')

# ## Generating Example

# +
n=5
np.set_printoptions(precision=4,suppress=True)
l_order = [5,4,3,2,5,4,3]
u_order = l_order
#li = l_order
li = [1, 1.5, 0.8, 2.1, 1.6, 1.3, 1.8]
ui = [lii/2+i/10 for i, lii in enumerate(li)]
print(li, ui, sep='\n')
# NOTE(review): li[i]/ui[i] below are indexed by the matrix index i taken from l_order
# (values 2..5), not by position in the list as the enumerate above suggests —
# looks like a positional index may have been intended; verify before changing,
# since the printed matrices may be the ones used in the paper.
LL = fnt.reduce(np.matmul, [L(n,i,li[i]/10) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,ui[i]/10) for i in u_order])
A = LL @ UU
print(f'A=\n{A}\nA^2=\n{A@A}')
print(is_TP(A@A))
# in a latex format:
print('A=')
show_mat_latex_format(A)
print('A^2=')
show_mat_latex_format(A@A)
# -

# ## Evaluating all $A\in\mathbb R^{5\times 5}$ OSC, where $A^2$ is TP

# +
import itertools
from tqdm import tqdm

'''Evaluating all n=5 oscillatory matrix configuration possibilities such that A^2 or A^3 is TP.'''
n = 5
tol = 1e-10
rslt_file = 'osc_n'+str(n)+'_pwr_TP.txt' # save a list of all configurations resulting in A^2 TP and A^3 TP
# ======================================================================================
usr = input("This may take an hour to run. Do you want to continue [Y|N]?")
if usr.upper() == 'N':
    raise KeyboardInterrupt
    #assert(False)
    #raise
D = np.eye(n)
k = EB_factorization_n2k(n)
# all 0/1 parameter patterns for the k L-factors (and, below, the k U-factors)
lst = list(itertools.product([0, 1], repeat=k))
l_indxs = compute_L_indexes(n)
len_lst = len(lst)
cases2 = []
cases3 = []
dvals = np.diag(D)
for x in tqdm(range(len_lst)):
    # debug
    #if x == 140:
    #    break
    if not is_factorization_osc(np.array(lst[x]), np.ones(k), dvals, l_indxs): # here we check only the l values
        continue
    LL = fnt.reduce(np.matmul, [L(n, i, q) for i, q in zip(l_indxs, lst[x])])
    for y in range(len_lst):
        if not is_factorization_osc(np.array(lst[x]), np.array(lst[y]), dvals, l_indxs):
            continue
        UU = fnt.reduce(np.matmul, [U(n, i, q) for i, q in zip(np.flip(l_indxs), np.flip(lst[y]))])
        A = LL @ D @ UU
        if is_TP( A @ A, tol ):
            #print(f"A={show_EB_config(np.array(lst[x]), np.flip(lst[y]))} => A^2 is TP")
            cases2.append(show_EB_config(np.array(lst[x]), np.array(lst[y])))
        elif is_TP( A @ A @ A, tol ):
            cases3.append(show_EB_config(np.array(lst[x]), np.array(lst[y])))

# write results to file
f = open(rslt_file, "w+")
f.write(f"n={n}, {len(cases2)} cases of A^2 TP:\n=========================================\n")
for i in range(len(cases2)):
    f.write(f"{i}. {cases2[i]}\n")
f.write(f"\n n={n}, {len(cases3)} cases of A^3 TP:\n=============================================\n")
for i in range(len(cases3)):
    f.write(f"{i}. {cases3[i]}\n")
f.write("\nDone.\n")
f.close()
print(f"Found {len(cases2)} cases of A^2 TP and {len(cases3)} cases of A^3 TP (out of {np.power(2,2*k)} cases)")
print(f"Results stored in {rslt_file}")
# -

# ## Evaluating all possibilities where for $A\in\mathbb R^{4\times 4}$ basic OSC, $A^2$ is TP

# +
# testing all possibilities
''' The order of indxl (x=4,3) is l_1 to l_k in the order of L matrix multiplication (L4,L3,L2,L4,L3,L4)
The order of indxu (x=4,3) is u_1 to u_k, which is IN THE REVRESED ORDER of the U multiplication
in the factorization. That is, the indexes are based on the order U4,U3,U2,U4,U3,U4, where in the
multiplication the order is U4*U3*U4*U2*U3*U4. So, e.g., ind4u=[0,3,5] means that the parameters
for U4 are in indexes (0, 3, 5) when counting in the order U4,U3,U2,U4,U3,U4.
This means that indxl=indxu, x=4,3. '''
ind4l = [0, 3, 5];
ind3l = [1, 4];
ind4u = [0, 3, 5]
ind3u = [1, 4];
tol = 1e-6
valsD = np.array([1,1,1,1])
#valsD = np.array([5,.1,3,2])
# ================================
vl = np.array([0,0,1,0,0,0])
vu = np.array([0,0,1,0,0,0])
for i4l in range(len(ind4l)):
    for i3l in range(len(ind3l)):
        valsL = vl.copy()
        valsL[ind4l[i4l]] = 1
        valsL[ind3l[i3l]] = 1
        #print('valL = {}'.format(valsL))
        for i4u in range(len(ind4u)):
            for i3u in range(len(ind3u)):
                valsU = vu.copy()
                valsU[ind4u[i4u]] = 1
                valsU[ind3u[i3u]] = 1
                #print('valsL={}, valsU={}'.format(valsL, valsU))
                A = compute_matrix_from_EB_factorization( valsL, valsD, valsU )
                #if (A@A>tol).all():
                #    print('valsL={}, valsU={} (A={}) => A^2 > 0'.format(valsL, valsU, show_EB_config(valsL, valsU)))
                if( is_TP( A @ A, tol ) ):
                    print('valsL={}, valsU={} (A={}) => A^2 is TP'.format(valsL, valsU, show_EB_config(valsL, valsU)))
# -

# ## Evaluating all possibilities where for $A\in\mathbb R^{5\times 5}$ basic OSC, $A^3$ is TP

''' testing all possibilities
For the exact order of the indexes, see above.
''' ind5l = [0, 4, 7, 9] ind4l = [1, 5, 8] ind3l = [2, 6] ind5u = ind5l # order is reversed from multiply order, i.e. U5 U4, U3, U2, U5, U4,... ind4u = ind4l ind3u = ind3l tol = 1e-7 valsD = np.array([1,1,1,1,1]) #valsD = np.array([2.5, 5,.1,3,2]) # ============================================= tot_num = 0 vl = np.array([0,0,0,2,0,0,0,0,0,0]) vu = vl.copy() #np.array([0,0,0,1,0,0,0,0,0,0]) for i5l in range(len(ind5l)): for i4l in range(len(ind4l)): for i3l in range(len(ind3l)): valsL = vl.copy() valsL[ind5l[i5l]] = 1 valsL[ind4l[i4l]] = 1 valsL[ind3l[i3l]] = 1 #print('valL = {}'.format(valsL)) for i5u in range(len(ind5u)): for i4u in range(len(ind4u)): for i3u in range(len(ind3u)): valsU = vu.copy() valsU[ind5u[i5u]] = 1 valsU[ind4u[i4u]] = 1 valsU[ind3u[i3u]] = 1 #print('valsL={}, valsU={}'.format(valsL, valsU)) A = compute_matrix_from_EB_factorization( valsL, valsD, valsU ) if is_TP( A @ A @ A, tol ): print('valsL={}, valsU={} (A={}) => A^3 is TP'.format(valsL, valsU, show_EB_config(valsL, valsU))) tot_num += 1 #if (valsL==np.array([0,0,0,1,1,1,1,0,0,0])).all():# and (valsU==np.array([0,0,1,1,0,0,0,1,1,0])).all(): # print(is_TP( A @ A @ A, tol )) #print('\nTotal of {} configurations (possibly with repetition) resulting in A^3 TP'.format(tot_num)) print(f'\nTotal of {tot_num} configurations (possibly with repetition) resulting in A^3 TP') # ## Analyzing the case n=4 basic oscillatory with $A^2$ TP # + n = 4 q = 1 tol = 1e-12 # ============= LL = fnt.reduce(np.matmul, [L(n,3,q),L(n,2,q),L(n,4,q)]) UU = fnt.reduce(np.matmul, [U(n,4,q),U(n,2,q),U(n,3,q)]) A = LL @ UU Lmat, Dmat, Umat, _, valsL, valsU = EB_factorization_ITN( A ) #print('l={}, u={},\n d={}'.format(valsL, valsU, np.diag(Dmat))) _, ax = plt.subplots(figsize=(16,6)) draw_EB_factorization_ITN( valsL, np.diagonal(Dmat), valsU, ax ) B = A @ A isTP = is_TP( B, tol) print('B is TP =>', isTP) #print('A^2=\n{}'.format(MC.compute_MC_matrix(A@A, 3)[0])) #Lmat, Dmat, Umat, _, valsL, valsU = EB_factorization_ITN( A 
# (continuation of the commented-out call split at the excerpt boundary:) @ A )
#print('l={}, u={},\n d={}'.format(valsL, valsU, np.diag(Dmat)))
#print(LL, UU, LL@UU, sep='\n')
# B = fnt.reduce(np.matmul, [LL, UU, LL, UU])
# Lmat, Dmat, Umat, _, valsL, valsU = EB_factorization_ITN( B )
# print('l={}, u={},\n d={}'.format(valsL, valsU, np.diag(Dmat)))
# print(show_EB_config( valsL, valsU, np.diag(Dmat), True ))
# C = fnt.reduce(np.matmul, [LL, LL, UU, UU])
# Lmat, Dmat, Umat, _, valsL, valsU = EB_factorization_ITN( C )
# print('l={}, u={},\n d={}'.format(valsL, valsU, np.diag(Dmat)))
#print(MC.compute_MC_matrix(B, p)[0], MC.compute_MC_matrix(C,p)[0], sep='\n')
#print(MC.compute_MC_matrix(L(n,2,q), 2)[0])
#print(fnt.reduce(np.matmul, [L(n,4,q), L(n,2,q), L(n,3,q)]))
# -

# ## Analyzing the case n=5 basic oscillatory with $A^3$ TP

# Build a basic oscillatory 5x5 matrix from explicit L/U factor orders and
# check whether A^3 (and A^2) are totally positive.
n = 5
q = 1
tol = 1e-10 # seems to change results !!!!
# =========
l_order = [2,5,4,3,5,4,5] #[5,4,2,3]
u_order = [4,3,2,4,5] #[5,4,2,3]
LL = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order])
UU = fnt.reduce(np.matmul, [U(n,i,q) for i in u_order])
# valsL=[1 1 1 1 0 0 0 0 0 0], valsU=[0 0 1 1 0 1 0 1 0 0] (A=L5*L4*L3*L2*U5*U4*U2*U3) => A^3 is TP
A = LL @ UU
Lmat, Dmat, Umat, _, valsL, valsU = EB_factorization_ITN( A )
print('l={}, u={},\n d={}'.format(valsL, valsU, np.diag(Dmat)))
_, ax = plt.subplots(figsize=(16,6))
draw_EB_factorization_ITN( valsL, np.diagonal(Dmat), valsU, ax )
B = A @ A @ A
isTP = is_TP( B, tol)
print('A^3 is TP =>', isTP)
print('A^2 is TP =>', is_TP(A@A, tol))

# ## Paper Example

# +
# Two concrete 4x4 matrices used as the running example in the paper; for each
# of A1, A2, A1*A2, A2*A1 and A1^2 we compute and draw the EB factorization,
# optionally saving the figures (savefig_flag* toggles).
A1 = np.array([[3, 1, 0, 0],
               [1, 4, 1, 0.1],
               [0.1, 1, 5, 3],
               [0, 0, 2, 7]])
A2 = np.array([[3, 1, 0.1, 0],
               [1, 4, 1, 0],
               [0, 3, 6, 1],
               [0, 0.1, 5, 6]])
savefig_flag1, savefig_flag2, savefig_flag12, savefig_flag21 = 1, 0, 0, 0;
savefig_name1, savefig_name2, savefig_name12, savefig_name21 = 'A1.eps', 'A2.png', 'A12.png', 'A21.png'
# =========================================================================================================
print("A1 = {}".format(A1))
plt1, ax1 = plt.subplots(figsize=(16,6))
Lmat1, Dmat1, Umat1, Um1, valsL1, valsU1 = EB_factorization_ITN( A1 )
print('valsL1={}, valsU1={}'.format(valsL1, valsU1))
display_EB_factorization( Lmat1, Dmat1, Umat1, valsL1, valsU1 )
draw_EB_factorization_ITN(valsL1, np.diagonal(Dmat1), valsU1, ax1)
#ax1.set_title("EB factorization graph of A1")
if savefig_flag1:
    plt1.savefig(savefig_name1, dpi=150)
    print("Figure saved in {}.".format(savefig_name1))

print("A2 = {}".format(A2))
plt2, ax2 = plt.subplots(figsize=(16,6))
Lmat2, Dmat2, Umat2, Um2, valsL2, valsU2 = EB_factorization_ITN( A2 )
print('valsL2={}, valsU2={}'.format(valsL2, valsU2))
draw_EB_factorization_ITN(valsL2, np.diagonal(Dmat2), valsU2, ax2)
ax2.set_title("EB factorization graph of A2");
if savefig_flag2:
    plt2.savefig(savefig_name2, dpi=150)
    print("Figure saved in {}.".format(savefig_name2))

A12 = np.matmul( A1, A2 )
plt12, ax12 = plt.subplots(figsize=(16,6))
print("A1*A2 = {}".format(A12))
Lmat12, Dmat12, Umat12, Um12, valsL12, valsU12 = EB_factorization_ITN( A12 )
draw_EB_factorization_ITN(valsL12, np.diagonal(Dmat12), valsU12, ax12)
ax12.set_title("EB factorization graph of A1*A2");
if savefig_flag12:
    plt12.savefig(savefig_name12, dpi=150)
    print("Figure saved in {}.".format(savefig_name12))

A21 = np.matmul( A2, A1 )
plt21, ax21 = plt.subplots(figsize=(16,6))
print("A2*A1 = {}".format(A21))
Lmat21, Dmat21, Umat21, Um21, valsL21, valsU21 = EB_factorization_ITN( A21 )
draw_EB_factorization_ITN(valsL21, np.diagonal(Dmat21), valsU21, ax21)
ax21.set_title("EB factorization graph of A2*A1");
if savefig_flag21:
    plt21.savefig(savefig_name21, dpi=150)
    print("Figure saved in {}.".format(savefig_name21))

# A1^2
A1p = np.matmul( A1, A1 )
plt1p, ax1p = plt.subplots(figsize=(16,6))
print("A1*A1 = {}".format(A1p))
Lmat1p, Dmat1p, Umat1p, Um1p, valsL1p, valsU1p = EB_factorization_ITN( A1p )
draw_EB_factorization_ITN(valsL1p, np.diagonal(Dmat1p), valsU1p, ax1p)
ax1p.set_title("EB factorization graph of A1*A1");

# check: round-trip — rebuild A1^2 from its extracted EB parameters.
AA1p = compute_matrix_from_EB_factorization( valsL1p, np.diagonal(Dmat1p), valsU1p )
print("AA1p =\n {}".format(AA1p))
# -

# ## Generating Example

# +
# generating OSC matrix size 4
# the indexes in the factorization are: 4,3,2,4,3,4
#
# The values of the l parameters and the u parameters defined below are ordered
# based on the factorization indexes, e.g. valsL[2] and valsU[2] correspond to
# factorization index 2.
#valsL = np.array([1,0,1,0,1,0]) # parameters based on factorization order
#valsU = np.array([1,1,1,0,0,0]) # parameters based on factorization order
#valsD = np.array([1,1,1,1])
valsL = np.array([2, 3, 0]) # parameters based on factorization order
valsU = np.array([5, 10, 0]) # parameters based on factorization order
valsD = np.array([1,1,1])
#valsL = np.array([1, 1/3, 3/2, 1/3, 2, 1/6]) # parameters based on factorization order
#valsU = np.flip(np.array([0, 4/15, 5/14, 5/2, 7/5, 8/7])) # parameters based on factorization order
#valsD = np.array([2, 3/2, 2/3, 1/2])
# =====================================================
k = valsL.shape[0]
mat = compute_matrix_from_EB_factorization( valsL, valsD, valsU )
print("mat = \n{}".format(mat))
#matdet = np.linalg.det(mat)
#print("|mat| = {}".format(matdet))
_, ax = plt.subplots(figsize=(16,6))
draw_EB_factorization_ITN(valsL, valsD, valsU, ax)
ax.set_title("EB factorization graph of mat")

# checking mat^2
mat2 = mat @ mat #np.matmul(mat, mat)
print("mat^2 = \n{}".format(mat2))
Lmat2, Dmat2, Umat2, _, valsL2, valsU2 = EB_factorization_ITN(mat2, 0.0001)
display_EB_factorization( Lmat2, Dmat2, Umat2, valsL2, valsU2 )
_, ax1 = plt.subplots(figsize=(16,6))
draw_EB_factorization_ITN(valsL2, np.diagonal(Dmat2), valsU2, ax1)
ax1.set_title("EB factorization graph of mat^2");

# checking mat^3
mat3 = mat @ mat2 #np.matmul(mat, mat2)
print("mat^3 = \n{}".format(mat3))
Lmat3, Dmat3, Umat3, _, valsL3, valsU3 = EB_factorization_ITN(mat3, 0.0001)
#display_EB_factorization( Lmat3, Dmat3, Umat3, valsL3, valsU3 )
_, ax2 = plt.subplots(figsize=(16,6))
draw_EB_factorization_ITN(valsL3, np.diagonal(Dmat3), valsU3, ax2)
ax2.set_title("EB factorization graph of mat^3");
# -

# # Scratch Pad

# +
# n=5
# r = np.array([1,2, 3,4])
# c = np.array([0,1, 2,4])
# i,j = lexicog2linear(n, r, c)
# rr, cc = linear2lexicog(n, len(r), i, j)
# print(i, j)
# print(rr,cc)
# rrr, ccc = linear2lexicog(8, 5, 3, 5)
# print(rrr, ccc)

# idxs = compute_L_indexes( 4 )
# a = np.array([1, 2, 3, 0, 5, 0])
# loc = a!=0
# print(np.ones(a.size, dtype=bool))
# print(a.size, a[loc].size, a[loc], idxs[loc])

# n = 4
# q = 2.5
# a1 = L(n,2,q)@L(n,4,q+2)
# a2 = L(n,4,q+2)@L(n,2,q)
# print(a1,a2,a1-a2,sep='\n')

# n=5
# llmc = MC.compute_MC_matrix(L(n,5,q),n-1)[0]
# print(L(n,5,q),llmc, L(n,2,q),sep='\n')

# Products of two (n=4) and three (n=5) fully-parameterized L*U matrices:
# check whether the product is totally positive.
n=4
tol = 1e-10
# =============
A1 = fnt.reduce(np.matmul, [L(n,4,1),L(n,2,2),L(n,3,3),U(n,4,4),U(n,2,5),U(n,3,6)])
A2 = fnt.reduce(np.matmul, [L(n,4,.1),L(n,2,.2),L(n,3,.8),U(n,4,2.5),U(n,2,2),U(n,3,1.5)])
B = A2 @ A1
print(f'n=4: A1*A2 is TP: {is_TP(B,tol)}')

n=5
# =============
A1 = fnt.reduce(np.matmul, [L(n,3,1),L(n,2,2),L(n,4,3),L(n,5,3.5),U(n,4,4),U(n,2,5),U(n,3,6),U(n,5,9)])
A2 = fnt.reduce(np.matmul, [L(n,3,2.5),L(n,2,5),L(n,4,11),L(n,5,1.4),U(n,4,2.4),U(n,2,1.1),U(n,3,1.6),U(n,5,4)])
A3 = fnt.reduce(np.matmul, [L(n,3,1.4),L(n,2,3),L(n,4,6),L(n,5,2.2),U(n,4,2),U(n,2,2.5),U(n,3,2.6),U(n,5,6)])
B = A1 @ A2 @ A3
print(f'n=5: A1*A2*A3 is TP: {is_TP(B,tol)}')

# Do these particular L-factor orders commute? Compare the two products elementwise.
q1 = L(n,3,1)@L(n,4,2)@L(n,5,3)@L(n,2,4)
q2 = L(n,3,1)@L(n,2,4)@L(n,4,2)@L(n,5,3)
print(f'q1= {q1}, \nq2={q2}, \nq1==q2: {(q1==q2).all()}')

# Multiplicative-compound of a random matrix vs. of its transpose.
Q = np.random.rand(4,4)
Q2 = MC.compute_MC_matrix(Q, 2)[0]
Qt2 = MC.compute_MC_matrix(Q.transpose(), 2)[0]
print(f'Q2 similar to Qt2: {np.allclose(Q2, Qt2.transpose())}, \ndiff=\n{(Q2-Qt2.transpose())}')

# +
# n = 6;
# a = 1;
# i = 4;
# Lm = L(n, 3, a)
# mcLm, lidx = MC.compute_MC_matrix( Lm, 3)
# print(mcLm, lidx.transpose(), sep='\n')

# all combination of n binary digits
# import itertools
# lst = list(itertools.product([0, 1], repeat=n))
#
print(np.array(lst)) # lvals1 = np.array([0,1,1,0,1,1]) # uvals1 = np.array([0,0,1,1,1,1]) # dvals1 = np.array([1,1,1,1]); # A1 = compute_matrix_from_EB_factorization( lvals1, dvals1, uvals1 ) # print(is_TP(A1@A1)) # lvals2 = np.array([0,0,1,1,1,1]) # uvals2 = np.array([0,1,1,0,1,1]) # dvals2 = np.array([1,1,1,1]); # A2 = compute_matrix_from_EB_factorization( lvals2, dvals2, uvals2 ) # print(is_TP(A2@A2)) # print(is_TP(A1@A2), is_TP(A2@A1)) # n = 6; # C = fnt.reduce(np.matmul, [L(n,2,1),L(n,3,2),L(n,4,3)][::1]) # #print(C, end='\n\n') # R = L(n,4,4)@L(n,5,5)@L(n,6,6) # #print(R) # Q1 = fnt.reduce(np.matmul, [L(n,i,i) for i in range(n,n-5,-1)]) # print(Q1) #lvals = np.array([4,3,2,4,3,4]) #id = np.where(lvals == 3)[0] #q = np.array([0,0,1,0,1,1]) #q1 = (q[id]>0).any() #print(q1) lindxs = None #np.array([4,3,2,4,3,4]) dvals = np.array([4,1,1,1]) lvals = np.array([1,1,1,1,0.1,3]) uvals = np.array([1,1,1,4,2,1]) print(is_factorization_osc(lvals, uvals, dvals, lindxs)) print(is_factorization_TP(lvals, uvals, dvals, lindxs)) #b = [print(j) for j in np.wprint(np.where(lindxs==3)) #a = all([(lvals[j]>0).any() and (uvals[j]>0).any() for i in lindxs for j in np.where(lindxs==i)]) #print(a) # n = 6 # q = 1 # tol = 1e-10 # seems to change results !!!! 
# # ========= # l_order = [i for i in range(2,n+1,1)] # u_order = reversed(l_order) # LL = fnt.reduce(np.matmul, [L(n,i,q) for i in l_order]) # UU = fnt.reduce(np.matmul, [U(n,i,q) for i in u_order]) # A = LL @ UU # print(A) # This show a mutable example def increase_by_one(array): array += 1 data = np.ones((100,1)) increase_by_one(data) print(data[0]) x = np.array([1,2,3,4,5,6,7,8]) idx = np.array([0,3,4]) print(x[idx]) print('\n') y = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9,10,11,12], [13,14,15,16]]) r_indx = np.array([0,2]) c_indx = np.array([1,3]) #print(r_indx, r_indx[:, None], sep=',') print(y[r_indx[:, None],c_indx]) # or: print(y[np.ix_(r_indx, c_indx)]) ####################### nn = 3 num_h_nodes = 3 d = {0:(0,0), 1:(0,1), 2:(0,2), 3:(1,0), 4:(1,1), 5:(1,2), 6:(2,0), 7:(2,1), 8:(2,2)} print(d) d1 = d.copy() offset = nn*(num_h_nodes-1) for k in range(nn): for (o,v) in zip([0,offset], [-0.2, 0.2]): d[k+o] = (d[k+o][0]+v, d[k+o][1]) #d[k] = (d[k][0]-0.2, d[k][1]) #d[k+offset] = (d[k+offset][0]+0.2, d[k+offset][1]) print(d) print(d1) print('\n======================') n = 5 stop = n-1 a = 1 A = fnt.reduce(np.matmul, [L(n,i,a) for i in range(n,stop-1,-1)]) print(A) show_mat_latex_format(A, fmt='0f') # - # ## Future Research # + # Jacobi matrix n = 8 ql, qu = 1, 2 A = fnt.reduce(np.matmul, [L(n,i,ql) for i in range(2,n+1)])@fnt.reduce(np.matmul, [U(n,i,qu) for i in range (n,1,-1)]) print(A)
old_eb_factorization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.10.2 64-bit # language: python # name: python3 # --- type(3.14) 3.14e-3 3.14e-2 3.14e-1 3.14e0 3.14e1 3.14e2 0.001 * 6 5e4 float('nan') float('nAn') float('inF') float('inFiniTy')
Week 2/jupyter notebook/Section/Section_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to Python # # <br/> # # <section class="objectives panel panel-warning"> # <div class="panel-heading"> # <h2><span class="fa fa-certificate"></span> Learning Objectives </h2> # </div> # <ul> # <li> Get used to the Jupyter Notebook. </li> # <li> Get used to common operations in Python </li> # <li> Understand what a library is and how to use it.</li> # <li> Analyse some data and plots some graphs of those data </li> # </ul> # # </section> # # # # ## Jupyter Notebook # # Jupyter notebook is a web based interactive computational environment with code, text, mathematics, plot and exectution all in the same place. It's how we'll be doing the whole course, though we wouldn't use it for more script based programming. # # A few shortcuts to make your life easier: # # - crtl + enter - Run the contents of this cell # - B - moves out from inside this cell and selects the entire cell # - esc - moves from this window to select the whole box # - enter - will move you back in # - M - changes the whole box to markdown # - Y - changes the whole box to code # # ## Python scripts # # The Jupyter notebook is great for teaching but may not be completely suitable when you come to do more extensive work with Python; for that we suggest you use Spyder or your pet text editor. # # Spyder is an Interactive Development Environment (IDE) that allows you to edit a Python script and then run it locally in an interpreter console, all in one window. If you prefer working in the command line, you can edit your script in whatever editor you prefer, then run it with the command # # ``` # python name_of_your_python_script.py # ``` # # # ## Libraries # # Words are useful, but what’s more useful are the sentences and stories we build with them. 
Similarly, while a lot of powerful tools are built into languages like Python, even more live in the libraries they are used to build. # # Numpy is one of the essential libraries we use in python. We can import it like so: # We're going to demonstrate how to use the NumPy library with some statistics on arthritis patient inflammation. We can import csv files, where the data currently is, like so: # So let's examine this. By typing `numpy.loadtxt()` we're saying, from the numpy library use the loadtxt function. It is the equivalent of selecting a file from a directory, or saying thing.component, for example, car.tyres might say, from the car I would like to inspect the tyres. # # `loadtxt()` is a function, which in this case we call with two _keyword arguments_. The filename and the delimiter both need to be input as character strings. # <br/> # # <section class="objectives panel panel-warning"> # <div class="panel-heading"> # <h2><span class="fa fa-certificate"></span> Strings and Print Formatting </h2> # </div> # <br/> # <p>Note that above we defined strings with single quote marks, `'`, but we could have used double quote marks, `"`, instead.</p> # # <p>Python strings have some built-in functionality which can be useful for printing information. We use `.format` at the end of a string to insert variables which are not strings into a string. If we have multiple items to insert then we use `{0}, {1}, {2}... etc` and order them accordingly in the format `()`.</p> # </section> # # <section class="objectives panel panel-warning"> # <div class="panel-heading"> # <h2><span class="fa fa-certificate"></span> Tip </h2> # </div> # <br/> # You can use the `%whos` command at any time to see what variables you have created and what modules you have loaded into the computer's memory. As this is an IPython command, it will only work if you are in an IPython terminal or the Jupyter Notebook. # </section> # ## Arrays # # Now back to the patients. 
We have the patient information stored in memory as the variable `p_data`, and we can check it's still there with `type`; let's find out more about the array. # These two commands tell us that the variable is a NumPy array and then the extent of the array. In this case the rows are individual patients and the columns are their daily inflammation measurements. In this case we have 60 rows and 40 columns, known as the dimensions of the array. Using these attributes we can index the data to extract single data values: # We can also index a specific row or column. This can be done by using a colon in our indexing. For example, `p_data[:,15]` will give us column 15: # and `p_data[15,:]` gives us the 15th row. The `:` says give me all the elements in this domain. # Thus we can make the domain smaller and essentially crop the array, by using the colon between the two limits: # Arrays also know how to perform common mathematical operations on their values. The simplest operations with data are arithmetic: add, subtract, multiply, and divide. When you do such operations on arrays, the operation is done on each individual element of the array. Thus: # This will create a new array whose elements have the value of two times the value of the corresponding elements in `crop_arr`. However, if instead of taking an array and doing arithmetic with a single value (as above) you do the arithmetic operation with another array of the same shape, the operation will be done on corresponding elements of the two arrays. Thus: # ### Arrays and Statistics # # Often, we want to do more than add, subtract, multiply, and divide values of data. Arrays also know how to do more complex operations on their values. If we want to find the average inflammation for all patients on all days, for example, we can just ask the array for its mean value # `mean` is a method of the array, i.e., a function that belongs to it in the same way that the member `shape` does. 
If variables are nouns, methods are verbs: they are what the thing in question knows how to do. We need empty parentheses for `data.mean()`, even when we’re not passing in any parameters, to tell Python to go and do something for us. `data.shape` doesn’t need `()` because it is just a description but `data.mean()` requires the `()` because it is an action. # When analyzing data, though, we often want to look at partial statistics, such as the maximum value per patient or the average value per day. One way to do this is to create a new temporary array of the data we want, then ask it to do the calculation: # We don’t actually need to store the row in a variable of its own. Instead, we can combine the selection and the method call: # What if we need the maximum inflammation for all patients, or the average for each day? In this case we need to average across an 'axis' of the array i.e. in x or y if we consider the data as a 2D array. # # To support this, most array methods allow us to specify the axis we want to work on. If we ask for the average across axis 0 (rows in our 2D example), we get: # As a quick check, we can ask this array what its shape is: # The expression `(40,)` tells us we have an N×1 vector, so this is the average inflammation per day for all patients. If we average across axis 1 (columns in our 2D example), we get: # which is the average inflammation per patient across all days. # # ## Matplotlib # # The mathematician <NAME> once said, “The purpose of computing is insight, not numbers,” and the best way to develop insight is often to visualize data. Visualization deserves an entire lecture (or course) of its own, but we can explore a few features of Python’s `matplotlib` library here. While there is no “official” plotting library, this package is the de facto standard. 
First, we will import the `pyplot` module from `matplotlib` and use two of its functions to create and display a heat map of our data: # <section class="objectives panel panel-warning"> # <div class="panel-heading"> # <h2><span class="fa fa-certificate"></span> Inline with Jupyter Notebooks </h2> # </div> # <br/> # If you’re using an Jupyter notebook, you’ll need to execute the following command in order for your matplotlib images to appear in the notebook when `show()` is called: # # <pre> # % matplotlib inline # </pre> # # The `%` indicates an IPython magic function - a function that is only valid within the notebook environment. Note that you only have to execute this function once per notebook. # </section> # Let’s take a look at the average inflammation over time: # Here, we have put the average per day across all patients in the variable `ave_inflammation`, then asked `matplotlib.pyplot` to create and display a line graph of those values. The result is roughly a linear rise and fall, which is suspicious: based on other studies, we expect a sharper rise and slower fall. Let’s have a look at two other statistics: # The maximum value rises and falls perfectly smoothly, while the minimum seems to be a step function. Neither result seems particularly likely, so either there’s a mistake in our calculations or something is wrong with our data. # <section class="objectives panel panel-warning"> # <div class="panel-heading"> # <h2><span class="fa fa-certificate"></span> Scientists dislike typing </h2> # </div> # <br/> # We will always use the syntax `import numpy` to import NumPy. However, in order to save typing, it is often suggested to make a shortcut like so: `import numpy as np`. If you ever see Python code online using a NumPy function with `np` (for example, `np.loadtxt(...)`), it’s because they’ve used this shortcut. Similarly, for the rest of the workshop, we will abbreviate `matplotlib.pyplot` to `plt`. 
# # </section> # ### Subplots # # You can group similar plots in a single figure using subplots. The script below uses a number of new commands. # # The function `matplotlib.pyplot.figure()` creates a space into which we will place all of our plots. # The parameter `figsize` tells Python how big to make this space. Each subplot is placed into the figure using the `subplot` command. The `subplot` command takes 3 parameters. The first denotes how many total rows of subplots there are, the second parameter refers to the total number of subplot columns, and the final parameter denotes which subplot your variable is referencing. # # Each `subplot` is stored in a different variable (axes1, axes2, axes3). Once a subplot is created, the axes can be titled using the `set_xlabel()` command (or `set_ylabel()`). Here are our three plots side by side: # The call to `loadtxt` reads our data, and the rest of the program tells the plotting library how large we want the figure to be, that we're creating three sub-plots, what to draw for each one, and that we want a tight layout. (Perversely, if we leave out that call to `fig.tight_layout()`, the graphs will actually be squeezed together more closely.) # ### `subplots()` # # Adding multiple axes to a figure with `plt.subplot()` is great, but can become tedious if you want to add several. Fortunately, there is also a function that allows you to define several axes and their arrangement at the same time as the figure, `plt.subplots()`. # # This function returns two variables - the figure and an array of axes objects with the specified shape. Referencing the axes array allows things to be plotted on the individual subplots. So the code from above can be rewritten: # Create figure and all axes at once # <section class="objectives panel panel-success"> # <div class="panel-heading"> # <h2><span class="fa fa-pencil"></span> Sorting Out References </h2> # </div> # <br/> # What does the following program print out? 
# # <pre> # # first, second = 'Grace', 'Hopper' # third, fourth = second, first # print(third, fourth) # # </pre> # # </section> # <section class="objectives panel panel-success"> # <div class="panel-heading"> # <h2><span class="fa fa-pencil"></span> Slicing strings </h2> # </div> # <br/> # A section of an array is called a slice. We can take slices of character strings as well: # # <pre> # element = 'oxygen' # print('first three characters:', element[0:3]) # print('last three characters:', element[3:6]) # </pre> # What is the value of element[:4]? What about element[4:]? Or element[:]? # # What is element[-1]? What is element[-2]? Given those answers, explain what element[1:-1] does. # # # </section> # <section class="objectives panel panel-success"> # <div class="panel-heading"> # <h2><span class="fa fa-pencil"></span> Thin Slices </h2> # </div> # <br/> # The expression element[3:3] produces an empty string, i.e., a string that contains no characters. If data holds our array of patient data, what does data[3:3, 4:4] produce? What about data[3:3, :]? # # </section> # <section class="objectives panel panel-success"> # <div class="panel-heading"> # <h2><span class="fa fa-pencil"></span> Make your own plot </h2> # </div> # <br/> # Create a plot showing the standard deviation (`numpy.std`) of the inflammation data for each day across all patients. # # </section> # Calling subplots() with no arguments defaults to returning the figure and a single axes # <section class="objectives panel panel-success"> # <div class="panel-heading"> # <h2><span class="fa fa-pencil"></span> Moving plots around </h2> # </div> # <br/> # Modify the program to display the three plots on top of one another instead of side by side. # # </section> # Create figure and all axes at once
02b-Python-Intro-short/02b-Python-Intro-Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab={"base_uri": "https://localhost:8080/"} id="b4fce2ad" outputId="c70dac3a-0bfb-4323-c938-654962d596f4"
# #!pip install -U transformers
# #!pip install -U datasets
# #!pip install optuna
import os
import sys

# Make the repository root the working directory so the relative 'data/...'
# and 'models/...' paths below resolve regardless of where the notebook lives.
HOME = os.path.abspath('..')
sys.path.append(HOME)
os.chdir(HOME)

import pandas as pd
# #!pip install transformers
from transformers import RobertaConfig, RobertaModel,RobertaForSequenceClassification, Trainer,AutoModelForSequenceClassification, EarlyStoppingCallback
from transformers import AutoTokenizer
from transformers.models.roberta import RobertaPreTrainedModel
import torch
from torch import nn
from transformers import TrainingArguments
import glob
import optuna
from itertools import product
import numpy as np
from pprint import pprint
import csv
from transformers import set_seed
from sklearn.preprocessing import StandardScaler
from typing import Dict, List, Optional, Set, Tuple, Union
import matplotlib.pyplot as plt
from torchvision.transforms import transforms
from PIL import Image, ImageFilter
import random

# + colab={"base_uri": "https://localhost:8080/"} id="cpUQ_Pq4f6TK" outputId="e8c80ba2-e887-4880-d924-86aa08ddd84b"
# Experiment configuration: backbone, regression target, and how the
# non-text columns (categoricals / numerics / dates) are fed to the model.
MODEL_NAME = "distilbert-base-uncased" #"roberta-base"
TARGET_COL = 'averageRating'#''
MODEL_FOLDER = 'everything_as_text_and_images'#'everything_as_text'
text_input_col = 'text_input'   # name of the concatenated-text column
CATEGORIES_AS_TEXT = True       # serialize categorical columns into the text input
NUMERIC_AS_TEXT = True          # serialize numeric columns into the text input
DATE_AS_TEXT = True             # serialize the release date into the text input
ADJUST_INFLATION = False
USE_COLUMN_NAMES = False
DEBUG = False
IMG_SIZE = 224                  # poster images are resized to IMG_SIZE x IMG_SIZE

# Derived artifact paths; the model name encodes the configuration flags.
FINAL_MODEL_NAME = f"{MODEL_NAME}-{TARGET_COL}"
if ADJUST_INFLATION:
    FINAL_MODEL_NAME+='-inflation_adjusted'
if USE_COLUMN_NAMES:
    FINAL_MODEL_NAME+='-with_column_names'
FINAL_MODEL_PATH = f'models/{MODEL_FOLDER}/{FINAL_MODEL_NAME}'
TRIALS_DF_PATH = f'models/{MODEL_FOLDER}/{FINAL_MODEL_NAME}_hparams_trials.csv'
TEST_PERFORMANCE_PATH = f'models/{MODEL_FOLDER}/{FINAL_MODEL_NAME}_test_stats_best_model.csv'
if USE_COLUMN_NAMES:
    assert CATEGORIES_AS_TEXT|NUMERIC_AS_TEXT|DATE_AS_TEXT, "can't use column names as text if there are no columns to treat as text!"
print('Final model name: ',FINAL_MODEL_NAME)
print('Saving at: ',MODEL_FOLDER)

# + id="1a1ebf2c"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)

def read_images_split(split,path = 'data/processed/posters/',id_col="imdb_id"):
    """Load the poster image for every id in `split` as a HxWx3 uint8-like array.

    Falls back to `missing.jpg` when a poster file is absent, and replicates
    grayscale images to 3 channels so downstream transforms see RGB.
    NOTE(review): `id_col` is currently unused; kept for interface stability.
    """
    split_images = []
    # Hoisted out of the loop: the fallback image is constant, no need to
    # re-read it from disk on every iteration.
    missing_image = plt.imread(os.path.join(path,'missing.jpg'))
    for row in split:
        name = f'{int(row)}.jpg'
        img_name = os.path.join(path,name)
        # Use your favourite library to load the image
        try:
            image = plt.imread(img_name)
        except FileNotFoundError:
            image = missing_image
        if len(image.shape)==2:
            # grayscale -> fake RGB by repeating the single channel
            image = np.repeat(np.expand_dims(image,-1),3,-1)
        split_images.append(image)
    return split_images

class IMDbDataset(torch.utils.data.Dataset):
    """Wraps tokenizer encodings (+ optional images / numeric features) and labels.

    `encodings` is a mapping of feature name -> per-example sequence; the
    special key 'images' holds raw arrays that are converted to PIL and passed
    through `transform` on access, everything else is tensorized as-is.
    """
    def __init__(self, encodings, labels, transform):
        self.encodings = encodings
        self.labels = labels
        self.transform = transform

    def __getitem__(self, idx):
        item = {}
        item['labels'] = torch.tensor(self.labels[idx])
        for key, val in self.encodings.items():
            if key == 'images':
                # Transform lazily so random augmentations differ per epoch.
                item['images'] = Image.fromarray(val[idx].astype(np.uint8))
                item['images'] = self.transform(item['images'])
            else:
                item[key] = torch.tensor(val[idx])
        return item

    def __len__(self):
        return len(self.labels)

def process_text_data(data_:pd.DataFrame,text_col,padding ="max_length", truncation = True, na_filler = ""):
    """Tokenize `data_[text_col]` with the module-level `tokenizer`.

    NaNs are replaced by `na_filler` first; the input frame is not modified
    (a copy is taken). Returns the tokenizer's encodings object.
    """
    data = data_.copy()
    data[text_col] = data[text_col].fillna(na_filler)
    encodings = tokenizer(data[text_col].tolist(), padding=padding, truncation=truncation)
    return encodings

def columns_to_single_text(df, cols_to_transform, new_col_name = 'text_input', sep = tokenizer.sep_token, nan_replacement = tokenizer.unk_token ):
    """Create column `new_col_name` with all `cols_to_transform` concatenated
    into a single string, separated by `sep`; NaNs become `nan_replacement`.
    Mutates `df` in place.
    """
    df[new_col_name] = df[cols_to_transform].astype(str).replace('nan',nan_replacement).agg(f' {sep} '.join, axis=1)

class NAFiller:
    """Fill NaNs in a column using group-wise medians learned from a train frame.

    Unmatched groups fall back to the train-wide median.
    """
    def __init__(self,train):
        self.train = train

    def fit(self,column = 'Budget',groupby=['top_genre','top_country']):
        """Learn per-group medians of `column` on the training frame."""
        self.mapping = self.train.groupby(groupby)[column].median().reset_index()
        self.mapping = self.mapping.rename(columns={column:'na_filler'})
        self.median = self.train[column].median()
        self.column=column

    def transform(self,test,round = False):
        """Fill NaNs of the fitted column in `test` (in place).

        NOTE(review): parameter `round` shadows the builtin; kept because
        callers may pass it by keyword.
        """
        self.na_filler = test.merge(self.mapping,how='left')['na_filler']
        self.na_filler = self.na_filler.fillna(self.median)
        test[self.column] = test[self.column].reset_index(drop=True).fillna(self.na_filler).values
        if round:
            test[self.column] = test[self.column].round().astype(int)

    def fit_transform(self,test,column = 'Budget',groupby=['top_genre','top_country']):
        """Fit on the training frame, then fill NaNs in `test` for `column`."""
        self.fit(column,groupby)
        # BUG FIX: the original called self.transform() without the required
        # `test` argument, which raised TypeError whenever fit_transform ran.
        # (The trailing self.column=column was redundant: fit() already sets it.)
        self.transform(test)

def create_dataset_split(split, text_cols, text_input_col, TARGET_COL, transform, numeric_cols = None, images = None, new_col_name = 'text_input', sep = tokenizer.sep_token, nan_replacement = tokenizer.unk_token):
    """Build an IMDbDataset from a dataframe split.

    Mutates `split` in place: may log-transform the target and always adds the
    concatenated-text column. `numeric_cols`/`images`, when given, are added
    as extra encoding entries.
    NOTE(review): `new_col_name`, `sep` and `nan_replacement` are currently
    unused; kept for interface stability.
    """
    if TARGET_COL == 'revenue_worldwide_BOM':
        # Revenue is heavy-tailed; train on log1p of it.
        split[TARGET_COL] = np.log1p(split[TARGET_COL])
        print('log transforming target')
    # All columns in text_cols are combined into a single text column.
    columns_to_single_text(split,text_cols)
    # Get split encodings
    split_encodings = process_text_data(split,text_input_col)
    # FIX: default was a mutable list literal; None is the safe equivalent.
    if numeric_cols:
        split_encodings['numeric_features'] = split[numeric_cols].values.tolist()
    if images:
        split_encodings['images'] = images
    # get labels
    split_labels = split[TARGET_COL].tolist()
    # Create dataset objects
    split_dataset = IMDbDataset(split_encodings, split_labels,transform)
    return split_dataset

def date_to_season(doy):
    """Map a pandas Timestamp to a season code (1=spring ... 4=winter),
    using northern-hemisphere day-of-year ranges.
    """
    doy = doy.dayofyear
    # "day of year" ranges for the northern hemisphere
    spring = range(80, 172)
    summer = range(172, 264)
    fall = range(264, 355)
    # winter = everything else
    if doy in spring:
        season = 1 #'spring'
    elif doy in summer:
        season = 2 #'summer'
    elif doy in fall:
        season = 3 #'fall'
    else:
        season = 4 #'winter'
    return season

def cyclical_encoding(data, col, max_val, min_val = 1, drop = True):
    """Encode a cyclical feature (hour, month, day of week, ...) as sine/cosine.

    Adds `<col>_sin` and `<col>_cos` to `data` (in place) and, when `drop` is
    True, removes the original column.

    :param data: dataframe containing the column to encode
    :param col: name of the column to encode
    :param max_val: maximum value the variable can take (e.g. 23 for hour of day)
    :param min_val: minimum value the variable can take, defaults to 1
    :param drop: whether to drop the original column afterwards
    """
    data[col] = data[col] - min_val #ensure min value is 0
    data[col + '_sin'] = np.sin(2 * np.pi * data[col] / max_val)
    data[col + '_cos'] = np.cos(2 * np.pi * data[col] / max_val)
    if drop:
        data.drop(col,axis=1,inplace=True)

# + id="6dfa9287"
from torchvision.transforms import RandAugment

class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
    # FIX: default was a mutable list; an immutable tuple is equivalent here.
    def __init__(self, sigma=(.1, 2.)):
        self.sigma = sigma

    def __call__(self, x):
        # Sample a blur radius uniformly from [sigma[0], sigma[1]].
        sigma = random.uniform(self.sigma[0], self.sigma[1])
        x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
        return x

all_cols = ['Budget', 'averageRating', 'cast', 'countries', 'director', 'genres', 'imdb_id', 'languages', 'overview', 'production companies', 'release_date', 'revenue_worldwide_BOM', 'runtimeMinutes', 'title']

# Plain train/eval preprocessing: resize + ImageNet normalization.
transform_train = transforms.Compose([
    transforms.Resize((IMG_SIZE,IMG_SIZE)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# SimCLR-style augmentation pipeline for training.
transform_train_augmented = transforms.Compose([
    transforms.RandomResizedCrop(size=IMG_SIZE, scale=(0.8, 1.0)),
    transforms.RandomRotation(degrees=15),
    transforms.RandomApply([
        transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)  # not strengthened
    ], p=0.8),
    transforms.RandomGrayscale(p=0.2),
    transforms.RandomHorizontalFlip(),
    transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
    #RandAugment(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# Train/test transforms
transform_test = transforms.Compose([
    transforms.Resize((IMG_SIZE,IMG_SIZE)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

train_ids = pd.read_csv('data/processed/train.csv',usecols=['imdb_id'])['imdb_id'].tolist()
val_ids = pd.read_csv('data/processed/val.csv',usecols=['imdb_id'])['imdb_id'].tolist()
test_ids = pd.read_csv('data/processed/test.csv',usecols=['imdb_id'])['imdb_id'].tolist()

df = pd.read_csv('data/processed/df.csv',usecols = all_cols,parse_dates=['release_date']).sample(frac=1,random_state=42) #shuffle

# Additional auxiliary columns used for grouping (e.g. by NAFiller).
# NOTE(review): these split on ', ' BEFORE the pipe->comma replacement below;
# confirm df.csv already uses comma separators at this point.
df['top_genre'] = df['genres'].apply(lambda x: x.split(', ')[0])
df['top_country'] = df['countries'].apply(lambda x: x.split(', ')[0] if isinstance(x,str) else x)

categoric_cols = ['cast', 'countries', 'director', 'genres', 'languages', 'production companies']
text_cols = ['title','overview']
date_cols = ['release_date']

if (not DATE_AS_TEXT):
    # If the date is not serialized as text, derive numeric date features.
    df['year'] = df['release_date'].dt.year
    df['month'] = df['release_date'].dt.month
    df['day'] = df['release_date'].dt.day
    df['season'] = df['release_date'].apply(date_to_season)
    df['dayofweek'] = df['release_date'].dt.dayofweek
    cyclical_encoding(df, 'month', max_val = 12, min_val = 1, drop = True)
    cyclical_encoding(df, 'day', max_val = 31, min_val = 1, drop = True) #TODO: Not exactly true
    cyclical_encoding(df, 'season', max_val = 4, min_val = 1, drop = True)
    cyclical_encoding(df, 'dayofweek', max_val = 6, min_val = 0, drop = True)

# Change pipe to comma, it's more meaningful to the tokenizer.
# FIX: pass regex=False — '|' is a regex alternation, and with pandas<2.0 the
# default regex=True would insert ', ' between every character.
df[categoric_cols] = df[categoric_cols].apply(lambda x: x.str.replace('|',', ',regex=False),axis=0)
df['runtimeMinutes'] = pd.to_numeric(df['runtimeMinutes'],errors='coerce')

numeric_cols = list(df.dtypes.index[(df.dtypes == int)|(df.dtypes == float)].drop(['imdb_id', 'averageRating', 'revenue_worldwide_BOM']))
if CATEGORIES_AS_TEXT:
    text_cols+=categoric_cols
if NUMERIC_AS_TEXT:
    text_cols+=numeric_cols
if DATE_AS_TEXT:
    text_cols+=date_cols

# + colab={"base_uri": "https://localhost:8080/"} id="b4f979b1"
outputId="a6b275dd-9901-4562-8854-3c9f19652eeb" #Create splits if DEBUG: train = df[df['imdb_id'].isin(train_ids)].sample(frac=0.2) val = df[df['imdb_id'].isin(val_ids)].sample(frac=0.2) test = df[df['imdb_id'].isin(test_ids)] else: train = df[df['imdb_id'].isin(train_ids)] val = df[df['imdb_id'].isin(val_ids)] test = df[df['imdb_id'].isin(test_ids)] #Get images per split train_images = read_images_split(train['imdb_id'].tolist()) val_images = read_images_split(val['imdb_id'].tolist()) test_images = read_images_split(test['imdb_id'].tolist()) #Fill na in some columns with statistics naf = NAFiller(train) sc = StandardScaler() cols_to_impute = [i for i in numeric_cols if ('cos' not in i)&('sin' not in i)] for col in cols_to_impute: naf.fit(column = col,groupby=['top_genre','top_country']) naf.transform(train,round=True) naf.transform(val,round=True) naf.transform(test,round=True) if not NUMERIC_AS_TEXT: train[numeric_cols] = sc.fit_transform(train[numeric_cols]) val[numeric_cols] = sc.transform(val[numeric_cols]) test[numeric_cols] = sc.transform(test[numeric_cols]) # + colab={"base_uri": "https://localhost:8080/"} id="b3cb2f40" outputId="df6bfe9e-0f23-441f-fc96-a81cd8831620" numeric_cols = numeric_cols if not NUMERIC_AS_TEXT else [] train_dataset=create_dataset_split(train,text_cols,text_input_col,TARGET_COL,transform_train,numeric_cols,images = train_images) train_dataset_augmented=create_dataset_split(train,text_cols,text_input_col,TARGET_COL,transform_train_augmented,numeric_cols,images = train_images) val_dataset=create_dataset_split(val,text_cols,text_input_col,TARGET_COL,transform_test,numeric_cols,images = val_images) test_dataset=create_dataset_split(test,text_cols,text_input_col,TARGET_COL,transform_test,numeric_cols,images = test_images) # - from torchvision import models # + tags=[] from transformers.models.distilbert.modeling_distilbert import * def set_parameter_requires_grad(model, feature_extracting): if feature_extracting: for param in 
model.parameters(): param.requires_grad = False class AugmentedDistilBertForSequenceClassification(DistilBertForSequenceClassification): def __init__(self, config): super().__init__(config) self.total_num_features = config.dim + config.num_extra_features self.num_labels = config.num_labels self.config = config self.distilbert = DistilBertModel(self.config) self.ln = nn.LayerNorm(self.total_num_features,eps=1e-12,elementwise_affine=True) output_mlp_hidden_dim = self.total_num_features if self.config.concat_mode=='cls' else config.dim self.pre_classifier = nn.Linear(output_mlp_hidden_dim, output_mlp_hidden_dim) self.classifier = nn.Linear(self.total_num_features, self.config.num_labels) self.dropout = nn.Dropout(self.config.seq_classif_dropout) self.image_model = models.resnet18(pretrained=True) #set_parameter_requires_grad(self.model, feature_extract) num_ftrs = self.image_model.fc.in_features if num_ftrs == self.config.num_image_features: #Then it doesn't make sense to add additional layer to reduce dim self.image_model.fc = nn.Sequential(nn.Dropout(self.config.resnet_dropout), ) else: self.image_model.fc = nn.Sequential(nn.Dropout(self.config.resnet_dropout), nn.Linear(num_ftrs, self.config.num_image_features), ) # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, numeric_features: Optional[torch.Tensor] = None, images: Optional[torch.Tensor] = None ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. 
Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict distilbert_output = self.distilbert( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) images_features = self.image_model(images) if images is not None else None hidden_state = distilbert_output[0] # (bs, seq_len, dim) cls_embeds = hidden_state[:, 0] # (bs, dim) THIS IS THE CLS EMBEDDING features = cls_embeds if self.config.concat_mode == 'cls': features = [features,numeric_features,images_features] features = torch.cat([f for f in features if f is not None], dim=-1) #TODO: Include image features here #features = self.ln(features) features = self.pre_classifier(features) # (bs, dim) features = nn.ReLU()(features) # (bs, dim) features = self.dropout(features) # (bs, dim) if self.config.concat_mode == 'dropout': features = [features,numeric_features,images_features] features = torch.cat([f for f in features if f is not None], dim=-1) #TODO: Include image features here logits = self.classifier(features) # (bs, num_labels) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = 
CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + distilbert_output[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions, ) def get_model(model_name, seed, num_numeric_features, resnet_dropout, seq_classif_dropout, concat_mode, num_image_features = 0, problem_type = 'regression', num_labels = 1, combine_method = 'weighted_feature_sum_on_transformer_cat_and_numerical_feats'): set_seed(seed) config = DistilBertConfig.from_pretrained(model_name, problem_type = problem_type, num_labels = num_labels) tabular_config = TabularConfig(num_labels=num_labels, cat_feat_dim=0, numerical_feat_dim=5, combine_feat_method=combine_method, column_info=column_info_dict, task='regression') config.num_extra_features = num_numeric_features + num_image_features config.resnet_dropout = resnet_dropout config.num_image_features = num_image_features config.concat_mode = concat_mode config.seq_classif_dropout = seq_classif_dropout config.tabular_config = tabular_config return AugmentedDistilBertForSequenceClassification(config) # - #Example of input to language model train['text_input'].iloc[15] class DictWriter: def __init__(self,file_path,field_names): self.field_names = field_names self.file_path = file_path self.create_file() #Crerate file if it doesnt exist. def create_file(self): if not os.path.exists(self.file_path): print('creating file') f = open(self.file_path, 'w') w = csv.DictWriter(f, field_names) w.writeheader() f.close() else: print('file already exist. 
Will append rows to it.') def add_rows(self,rows): with open(self.file_path, 'a') as f: w = csv.DictWriter(f,self.field_names) for r in rows: w.writerow(r) FINAL_MODEL_PATH # # Grid search # + epochs = 15 num_evals = 20 patience = 2 if DEBUG else 30 callbacks=[EarlyStoppingCallback(early_stopping_patience=patience)] eval_steps = 50 if DEBUG else 100 hparams = {'batch_size' : [8,16,32], 'augment_images':[True,False], 'learning_rate' : [1e-5, 2e-5, 3e-5,5e-5], 'weight_decay' : [0.1,0.01], 'resnet_dropout':[0.5], 'concat_mode':['dropout'], 'num_image_features':[2**9], 'repeats': range(1)} combs = list(product(*[range(len(i)) for i in list(hparams.values())])) scores = np.zeros([len(i) for i in list(hparams.values())]) #trials_df_rows = [] field_names = list(hparams.keys()) + ['score'] dw = DictWriter(TRIALS_DF_PATH,field_names) currernt_trials_df = pd.read_csv(TRIALS_DF_PATH) #This can be empty or not. done_trials = currernt_trials_df.drop('score',axis=1).to_dict(orient='records') #empty list or not best_score = min(float('inf'),currernt_trials_df['score'].min()) print(f'current best val score = {best_score}') for idx,comb_indexes in enumerate(combs): comb_values = {name:val[idx] for name,val,idx in zip(hparams.keys(),hparams.values(),comb_indexes)} if comb_values not in done_trials: #Check if trial alrready exists. If it does, skip. 
print('training with following hparams:') pprint(comb_values) training_args = TrainingArguments(output_dir=f"{MODEL_NAME}-{TARGET_COL}", per_device_train_batch_size = comb_values['batch_size'], learning_rate=comb_values['learning_rate'], weight_decay=comb_values['weight_decay'], seed = 42, fp16=True, per_device_eval_batch_size = 16, warmup_ratio=0.06, num_train_epochs = epochs, evaluation_strategy = "steps", save_strategy = "steps", load_best_model_at_end=True, eval_steps = eval_steps, save_steps = eval_steps, save_total_limit = 1, log_level = 'error', disable_tqdm = True ) multi_modal_model = get_model(model_name = MODEL_NAME, seed = training_args.seed, num_numeric_features = len(numeric_cols), resnet_dropout = comb_values['resnet_dropout'], concat_mode = comb_values['concat_mode'], num_image_features = comb_values['num_image_features']) trainer = Trainer( model=multi_modal_model, args=training_args, train_dataset=train_dataset, eval_dataset=val_dataset, callbacks = callbacks ) trainer.train() score = trainer.evaluate()['eval_loss'] comb_values['score'] = score dw.add_rows([comb_values]) #Append to dataframe #trials_df_rows.append(comb_values) if score<best_score: print(f'got a better model, with score {np.round(score,4)} saving...') best_score = score trainer.save_model(FINAL_MODEL_PATH) print('saved') else: print('skipping trial because already exists') # - # # Random Search # + class RandomSearch: def __init__(self,tried_hparams = []): self.tried_hparams = tried_hparams def get_rand_comb_value(self): space = {'batch_size' : int(np.random.choice([8,16,32])), 'augment_images':bool(np.random.choice([True,False])), 'learning_rate' : float(np.random.choice([1e-5, 2e-5, 3e-5,5e-5,7e-5,1e-4])),#10**(-np.random.uniform(4,5.5)),# 'weight_decay' : float(np.random.choice([0.1,0.01])), 'resnet_dropout':float(np.random.choice(np.arange(0,.6,.1))), 'seq_classif_dropout':0.2, 'concat_mode':str(np.random.choice(['dropout','cls'])), 
'num_image_features':int(np.random.choice(2**np.arange(4,10))), 'repeats': 0} if space not in self.tried_hparams: self.tried_hparams.append(space) return space return self.get_rand_comb_value() def get_current_trials(trials_df_path = TRIALS_DF_PATH): currernt_trials_df = pd.read_csv(trials_df_path) #This can be empty or not. done_trials = currernt_trials_df.drop('score',axis=1).to_dict(orient='records') #empty list or not best_score = min(float('inf'),currernt_trials_df['score'].min()) return done_trials,best_score # + tags=[] epochs = 1 if DEBUG else 15 num_evals = 20 patience = 1 if DEBUG else 30 callbacks=[EarlyStoppingCallback(early_stopping_patience=patience)] eval_steps = 50 if DEBUG else 100 num_trials = 200 field_names = list(RandomSearch().get_rand_comb_value().keys()) + ['score'] dw = DictWriter(TRIALS_DF_PATH,field_names) done_trials,best_score = get_current_trials() RS = RandomSearch(tried_hparams = list(done_trials)) print(f'current best val score = {best_score}') remaining_trials = range(len(done_trials),num_trials) all_combs = [RS.get_rand_comb_value() for _ in range(len(done_trials),num_trials)] for trial,comb_values in zip(remaining_trials,all_combs): print(f'Trial {trial+1}:\n') #comb_values = RS.get_rand_comb_value() if comb_values not in done_trials: #Check if trial alrready exists. If it does, skip. 
print('training with following hparams:') pprint(comb_values) print('\n') training_args = TrainingArguments(output_dir=f"{MODEL_NAME}-{TARGET_COL}", per_device_train_batch_size = comb_values['batch_size'], learning_rate=comb_values['learning_rate'], weight_decay=comb_values['weight_decay'], seed = 42, fp16=True, per_device_eval_batch_size = 16, warmup_ratio=0.06, num_train_epochs = epochs, evaluation_strategy = "steps", save_strategy = "steps", load_best_model_at_end=True, eval_steps = eval_steps, save_steps = eval_steps, save_total_limit = 1, log_level = 'error', disable_tqdm = True ) multi_modal_model = get_model(model_name = MODEL_NAME, seed = training_args.seed, num_numeric_features = len(numeric_cols), resnet_dropout = comb_values['resnet_dropout'], seq_classif_dropout= comb_values['seq_classif_dropout'], concat_mode = comb_values['concat_mode'], num_image_features = comb_values['num_image_features']) trainer = Trainer( model=multi_modal_model, args=training_args, train_dataset=train_dataset_augmented if comb_values['augment_images'] else train_dataset, eval_dataset=val_dataset, callbacks = callbacks ) trainer.train() score = trainer.evaluate()['eval_loss'] comb_values['score'] = score if not DEBUG: dw.add_rows([comb_values]) #Append to dataframe #trials_df_rows.append(comb_values) if score<best_score: print(f'got a better model, with score {np.round(score,4)} saving...') best_score = score if not DEBUG: trainer.save_model(FINAL_MODEL_PATH) print('saved') else: print('skipping trial because already exists') # + #Test set performance best_model = AugmentedDistilBertForSequenceClassification.from_pretrained(FINAL_MODEL_PATH, problem_type='regression', num_labels=1) trainer_best_model = Trainer(model=best_model) predictions = trainer_best_model.predict(test_dataset) preds = predictions.predictions.flatten() labels = predictions.label_ids if TARGET_COL == 'revenue_worldwide_BOM': preds = np.expm1(preds) labels = np.expm1(labels) mse = ((preds-labels)**2).mean() 
mae = (np.abs(preds-labels)).mean() errors = {'MAE':mae,'MSE':mse,'RMSE':np.sqrt(mse)} pd.DataFrame([errors]).to_csv(TEST_PERFORMANCE_PATH, index=False) # - errors # + import matplotlib.pyplot as plt from scipy.stats import boxcox plt.hist(np.log1p(labels),bins=40) plt.hist(np.log1p(preds),bins=40) #plt.xscale('log') # + import matplotlib.pyplot as plt from scipy.stats import boxcox plt.hist(np.log1p(labels),bins=40) plt.hist(np.log1p(preds),bins=40) #plt.xscale('log')
notebooks/train_augmented_distilbert_V1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # GPU Accelerated Elastic Deep Learning Service in Cloud Pak for Data # #### Notebook created by <NAME>, <NAME>, <NAME> (January 2021) # # Watson Machine Learning Accelerator in Cloud Pak for Data offers GPU Accelerated Elastic Deep Learning service. This service enables multiple data scientists to accelerate deep learning model training across multiple GPUs and server, share GPUs in a dynamic fashion, and drives data scientist productivity and overall GPU utilization. # # In this notebook, you will learn how to scale PyTorch model with multiple GPUs with GPU Accelerated Elastic Deep Learning service, monitor the running job, and debug any issues seen. # # This notebook uses Watson Machine learning Accelerator 2.2 with Cloud Pak for Data 3.5. # # ### Contents # # - [The big picture](#The-big-picture) # - [Changes to your code](#Changes-to-your-code) # - [Set up API end point and log on](#Set-up-API-end-point-and-log-on) # - [Submit job via API](#Submit-job-via-API) # - [Monitor running job](#Monitor-running-job) # - [Training metrics and logs](#Training-metrics-and-logs) # - [Download trained model](#Download-trained-model) # - [Further information and useful links](#Further-information-and-useful-links) # - [Appendix](#Appendix) # # # # ## The big picture # [Back to top](#Contents) # # This notebook details the process of taking your PyTorch model and making the changes required to train the model using [IBM Watson Machine Learning GPU Accelerated Elastic Deep Learning service](https://developer.ibm.com/series/learning-path-get-started-with-watson-machine-learning-accelerator/) (WML Accelerator) # # # The image below shows the various elements required to use Elastic Deep Learning Service. 
In this notebook we will step through each of these elements in more detail. Through this process you will offload your code to a WML Accelerator cluster, monitor the running job, retrieve the output and debug any issues seen. A [static version](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/5_running_job.png) is also available.
#
# ![overall](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/5_running_job.gif)

# ## Changes to your code
# [Back to top](#Contents)
#
# In this section we will use the PyTorch ResNet50 model and make the required changes needed to use this model with the elastic distributed training engine (EDT). An overview of these changes can be seen in the diagram below. A [static version](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/2_code_adaptations.png) is also available.
#
# ![code](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/2_code_adaptations.gif)
#
#
# The key changes to your code in order to use elastic distributed training are the following:
# - Importing libraries and setting up environment variables
# - Data loading function for elastic distributed training
# - Extract parameters for training
# - Replace training and testing loops with the loop equivalents for elastic distributed training
#
# For the purpose of this tutorial we train a ResNet50 model with Elastic Distributed Training (EDT).
#
# See the blog associated with this notebook for a more detailed explanation of the above changes:
# https://developer.ibm.com/articles/elastic-distributed-training-edt-in-watson-machine-learning-accelerator/
#
# See more information about the Elastic Distributed Training API in
# [IBM Documentation](https://www.ibm.com/docs/en/wmla/2.2.0?topic=SSFHA8_2.2.0/wmla_workloads_elastic_distributed_training.html).
# #
# Your modified code should be made available in a directory which also
# contains the EDT helper scripts: `edtcallback.py`, `emetrics.py` and
# `elog.py`.
#
# ## Define helper methods
# Define the required helper methods.

# +
# import tarfile
import tempfile
import os
import json
import pprint
import pandas as pd
from IPython.display import display, FileLink, clear_output
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# NOTE: certificate verification is disabled throughout this notebook
# (verify=False); acceptable for a lab cluster only.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from matplotlib import pyplot as plt
# %pylab inline
import base64
import json
import time
import urllib
import tarfile


def query_job_status(job_id, refresh_rate=3):
    """Poll the WML Accelerator REST API until the job leaves the
    PENDING/SUBMITTED/RUNNING states, displaying the status while waiting.

    :param job_id: dict holding the job 'id' as returned by the submit call
    :param refresh_rate: seconds between polls
    :return: the last ``requests`` response object
    """
    execURL = dl_rest_url + '/execs/' + job_id['id']
    pp = pprint.PrettyPrinter(indent=2)
    keep_running = True
    res = None
    while keep_running:
        res = req.get(execURL, headers=commonHeaders, verify=False)
        monitoring = pd.DataFrame(res.json(), index=[0])
        pd.set_option('max_colwidth', 120)
        clear_output()
        print("Refreshing every {} seconds".format(refresh_rate))
        display(monitoring)
        pp.pprint(res.json())
        if res.json()['state'] not in ['PENDING_CRD_SCHEDULER', 'SUBMITTED', 'RUNNING']:
            keep_running = False
        time.sleep(refresh_rate)
    return res


def query_executor_stdout_log(job_id):
    """Print the last 1000 lines of the job's first executor stdout log."""
    execURL = dl_rest_url + '/scheduler/applications/' + job_id['id'] + '/executor/1/logs/stdout?lastlines=1000'
    # 'https://{}/platform/rest/deeplearning/v1/scheduler/applications/wmla-267/driver/logs/stderr?lastlines=10'.format(hostname)
    commonHeaders2 = {'accept': 'text/plain', 'X-Auth-Token': access_token}
    print(execURL)
    res = req.get(execURL, headers=commonHeaders2, verify=False)
    print(res.text)


def query_train_metric(job_id):
    """Print the training-metric log of the given job."""
    execURL = dl_rest_url + '/execs/' + job_id['id'] + '/log'
    # 'https://{}/platform/rest/deeplearning/v1/scheduler/applications/wmla-267/driver/logs/stderr?lastlines=10'.format(hostname)
    commonHeaders2 = {'accept': 'text/plain', 'X-Auth-Token': access_token}
    print(execURL)
    res = req.get(execURL, headers=commonHeaders2, verify=False)
    print(res.text)


# save result file
def download_trained_model(job_id):
    """Download the trained-model archive of ``job_id`` into ``model_dir``."""
    from IPython.display import display, FileLink
    # save result file
    commonHeaders3 = {'accept': 'application/octet-stream', 'X-Auth-Token': access_token}
    # BUG FIX: the URL and the file name used the *global* response ``r``
    # instead of the ``job_id`` argument, so the wrong job's model could be
    # fetched when ``r`` pointed at an older submission.
    execURL = dl_rest_url + '/execs/' + job_id['id'] + '/result'
    res = req.get(execURL, headers=commonHeaders3, verify=False, stream=True)
    print(execURL)
    tmpfile = model_dir + '/' + job_id['id'] + '.zip'
    print('Save model: ', tmpfile)
    # The with-statement closes the file; the previous explicit close() was
    # redundant.
    with open(tmpfile, 'wb') as f:
        f.write(res.content)


def make_tarfile(output_filename, source_dir):
    """Create a gzipped tar of ``source_dir`` at ``output_filename``."""
    with tarfile.open(output_filename, "w:gz") as tar:
        tar.add(source_dir, arcname=os.path.basename(source_dir))


# +
import os

model_dir = f'./resnet-wmla'
model_main = f'elastic-main.py'
model_callback = f'edtcallback.py'
model_elog = f'elog.py'
os.makedirs(model_dir, exist_ok=True)
# -

# Resnet50 model: elastic-main.py
# This is the main file that is required by the elastic distributed training
# engine. It acts as the program main entrance.
# # # + # %%writefile {model_dir}/{model_main} # #!/usr/bin/env python from __future__ import print_function import torch.nn.functional as F import torch.optim as optim import torchvision from torchvision import transforms, models from callbacks import Callback from fabric_model import FabricModel from edtcallback import EDTLoggerCallback import torch import os ## Define model and extract training parameters def get_max_worker(): import argparse parser = argparse.ArgumentParser(description='EDT Example') parser.add_argument('--numWorker', type=int, default='16', help='input the max number ') parser.add_argument('--gpuPerWorker', type=int, default='1', help='input the path of initial weight file') args, _ = parser.parse_known_args() num_worker = args.numWorker * args.gpuPerWorker print ('args.numWorker: ', args.numWorker , 'args.gpuPerWorker: ', args.gpuPerWorker) return num_worker BATCH_SIZE_PER_DEVICE = 64 NUM_EPOCHS = 3 MAX_NUM_WORKERS = get_max_worker() START_LEARNING_RATE = 0.4 LR_STEP_SIZE = 30 LR_GAMMA = 0.1 MOMENTUM = 0.9 WEIGHT_DECAY = 1e-4 ## Define dataset location DATA_DIR = os.getenv("DATA_DIR") if DATA_DIR is None: DATA_DIR = '/tmp' print("DATA_DIR: " + DATA_DIR) TRAIN_DATA = DATA_DIR + "/cifar10" TEST_DATA = DATA_DIR + "/cifar10" ## <<NAME>> Documentation of Callback function class LRScheduleCallback(Callback): def __init__(self, step_size, gamma): super(LRScheduleCallback, self).__init__() self.step_size = step_size self.gamma = gamma def on_epoch_begin(self, epoch): if (epoch != 0) and (epoch % self.step_size == 0): for param_group in self.params['optimizer'].param_groups: param_group['lr'] *= self.gamma print("LRScheduleCallback epoch={}, learning_rate={}".format(epoch, self.params['optimizer'].param_groups[0]['lr'])) ## Data loading function for EDT def getDatasets(): transform_train = transforms.Compose([ transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 
0.1994, 0.2010)), ]) transform_test = transforms.Compose([ transforms.Resize(224), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) return (torchvision.datasets.CIFAR10(root=TRAIN_DATA, train=True, download=True, transform=transform_train), torchvision.datasets.CIFAR10(root=TEST_DATA, train=False, download=True, transform=transform_test)) def custom_train(model, data, eva, train_loader, fn_args): device = 'cuda' if torch.cuda.is_available() else 'cpu' inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) opt = model.get_optimizer() opt.zero_grad() outputs = model(inputs) cri = model.get_loss_function() loss = cri(outputs, labels) loss.backward() acc = eva(outputs, labels) return acc, loss def custom_test(model, test_iter, fn_args): device = 'cuda' if torch.cuda.is_available() else 'cpu' cri = model.get_loss_function() valid_loss = 0.0 counter = 0 for(inputs, labels) in test_iter: inputs, labels = inputs.to(device), labels.to(device) output = model(inputs) loss = cri(output, labels) valid_loss += loss.item() counter += 1 valid_loss /= counter return valid_loss def main(model_type): print('==> Building model..' 
+ str(model_type)) model = models.__dict__[model_type]() optimizer = optim.SGD(model.parameters(), lr=START_LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY) loss_function = F.cross_entropy edt_m = FabricModel(model, getDatasets, loss_function, optimizer, enable_onnx=True, fn_step_train=custom_train, fn_test=custom_test, user_callback=[LRScheduleCallback(LR_STEP_SIZE, LR_GAMMA)], driver_logger=EDTLoggerCallback()) print('==> epochs:' + str(NUM_EPOCHS) + ', batchsize:' + str(BATCH_SIZE_PER_DEVICE) + ', engines_number:' + str(MAX_NUM_WORKERS)) edt_m.train(NUM_EPOCHS, BATCH_SIZE_PER_DEVICE, MAX_NUM_WORKERS, num_dataloader_threads=4, validation_freq=10, checkpoint_freq=0) if __name__ == '__main__': main("resnet50") # - # ### EDT helper scripts: edtcallback.py # The edtcallback.py scripts counts model loss and accuracy and logs them to a the driver log. # # + # %%writefile {model_dir}/{model_callback} # #! /usr/bin/env python from __future__ import print_function import sys import os from callbacks import LoggerCallback from emetrics import EMetrics from elog import ELog ''' EDTLoggerCallback class define LoggerCallback to trigger Elog. ''' class EDTLoggerCallback(LoggerCallback): def __init__(self): self.gs =0 def log_train_metrics(self, loss, acc, completed_batch, worker=0): acc = acc/100.0 self.gs += 1 with EMetrics.open() as em: em.record(EMetrics.TEST_GROUP,completed_batch,{'loss': loss, 'accuracy': acc}) with ELog.open() as log: log.recordTrain("Train", completed_batch, self.gs, loss, acc, worker) def log_test_metrics(self, loss, acc, completed_batch, worker=0): acc = acc/100.0 with ELog.open() as log: log.recordTest("Test", loss, acc, worker) # - # ### EDT helper scripts: elog.py # The elog.py script defines the path and content of the training and test log. # # + # %%writefile {model_dir}/{model_elog} import time import os ''' ELog class define the path and content of train and test log. 
'''
# NOTE(review): the quotes above close the module docstring opened in the
# %%writefile cell header on the previous lines — do not remove them.


class ELog(object):
    # Simple append-only file logger used by the EDT callbacks.
    # Writes timestamped train/test records to a "stdout" file under the
    # job's log directory.

    def __init__(self,subId,f):
        # Resolve the training id from whichever environment variable the
        # platform provides (WMLA sets TRAINING_ID or DLI_EXECID).
        if "TRAINING_ID" in os.environ:
            self.trainingId = os.environ["TRAINING_ID"]
        elif "DLI_EXECID" in os.environ:
            self.trainingId = os.environ["DLI_EXECID"]
        else:
            self.trainingId = ""
        self.subId = subId
        self.f = f          # open file handle; may be closed via close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        # Context-manager exit: always close the underlying file.
        self.close()

    @staticmethod
    def open(subId=None):
        """Factory: open (append) the log file under the job log dir and return an ELog.

        Falls back LOG_DIR -> JOB_STATE_DIR/logs -> /tmp.
        """
        if "LOG_DIR" in os.environ:
            folder = os.environ["LOG_DIR"]
        elif "JOB_STATE_DIR" in os.environ:
            folder = os.path.join(os.environ["JOB_STATE_DIR"],"logs")
        else:
            folder = "/tmp"
        if subId is not None:
            folder = os.path.join(folder, subId)
        if not os.path.exists(folder):
            os.makedirs(folder)
        f = open(os.path.join(folder, "stdout"), "a")
        return ELog(subId,f)

    def recordText(self,text):
        # Prefix each record with a local-time "[YYYY-mm-dd HH:MM:SS]" stamp
        # and flush immediately so the platform log tailer sees it.
        timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        timestr = "["+ timestr + "]"
        if self.f:
            self.f.write(timestr + " " + text + "\n")
            self.f.flush()

    def recordTrain(self,title,iteration,global_steps,loss,accuracy,worker):
        # One training record per call; loss/accuracy rounded to 5 decimals.
        # NOTE(review): the `worker` argument is accepted but not written.
        text = title
        text = text + ", Timestamp: " + str(int(round(time.time() * 1000)))
        text = text + ", Global steps: " + str(global_steps)
        text = text + ", Iteration: " + str(iteration)
        text = text + ", Loss: " + str(float('%.5f' % loss) )
        text = text + ", Accuracy: " + str(float('%.5f' % accuracy) )
        self.recordText(text)

    def recordTest(self,title,loss,accuracy,worker):
        # Same format as recordTrain but without iteration counters.
        # NOTE(review): `worker` is likewise unused here.
        text = title
        text = text + ", Timestamp: " + str(int(round(time.time() * 1000)))
        text = text + ", Loss: " + str(float('%.5f' % loss) )
        text = text + ", Accuracy: " + str(float('%.5f' % accuracy) )
        self.recordText(text)

    def close(self):
        if self.f:
            self.f.close()
# +
### Package model files for training
import requests, json
import pandas as pd
import datetime
# from IPython.display import display
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# %matplotlib inline
# plt.rcParams['figure.figsize'] = [24, 8.0]
#import seaborn as sns
pd.set_option('display.max_columns', 999)
pd.set_option('max_colwidth', 300)
import tarfile
import tempfile
import os

#Package the updated model files into a tar file ending with `.modelDir.tar`

# +
def make_tarfile(output_filename, source_dir):
    """Create a gzip-compressed tar of `source_dir` at `output_filename`."""
    with tarfile.open(output_filename, "w:gz") as tar:
        # arcname keeps only the leaf directory name inside the archive
        tar.add(source_dir, arcname=os.path.basename(source_dir))

MODEL_DIR_SUFFIX = ".modelDir.tar"
# NOTE(review): tempfile.mktemp is race-prone; kept as-is for notebook use.
tempFile = tempfile.mktemp(MODEL_DIR_SUFFIX)
make_tarfile(tempFile, model_dir)
print(" tempFile: " + tempFile)

# File handle handed to requests.post later; left open deliberately until
# the submit call has consumed it.
files = {'file': open(tempFile, 'rb')}
# -

# ## Set up API end point and log on
# [Back to top](#Contents)
#
# In this section we set up the API endpoint which will be used in this notebook.
# The following sections use the Watson ML Accelerator API to complete the various tasks required.
# We've given examples of a number of tasks but you should refer to the documentation at to see more details
# of what is possible and sample output you might expect.
#
# - https://www.ibm.com/support/knowledgecenter/SSFHA8_2.2.0/cm/deeplearning.html

# +
import requests, json
import pandas as pd
import datetime
# from IPython.display import display
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# %matplotlib inline
# plt.rcParams['figure.figsize'] = [24, 8.0]
#import seaborn as sns
pd.set_option('display.max_columns', 999)
pd.set_option('max_colwidth', 300)
import tarfile
import tempfile
import os
import base64
import urllib

# +
#hostname='wmla-console-wmla.apps.cpd35-beta.cpolab.ibm.com' # please enter Watson Machine Learning Accelerator host name
#hostname = 'wmla-console-liqbj.apps.wml1x210.ma.platformlab.ibm.com'
hostname = 'wmla-console-wmla.apps.cpd35-beta.cpolab.ibm.com'
# NOTE(review): credentials are hard-coded below; move them to a secret
# store / prompt before sharing this notebook.
login='dse_user:cpd4ever' # please enter the login and password

# hostname='wmla-console-xwmla.apps.wml1x180.ma.platformlab.ibm.com'
# login='admin:password'

# HTTP Basic auth header built from "user:password"
es = base64.b64encode(login.encode('utf-8')).decode("utf-8")
print(es)
commonHeaders={'Authorization': 'Basic '+es}
req = requests.Session()

auth_url = 'https://{}/auth/v1/logon'.format(hostname)
print(auth_url)
# NOTE(review): verify=False disables TLS certificate checking — acceptable
# only inside a lab environment.
a=requests.get(auth_url,headers=commonHeaders, verify=False)
access_token=a.json()['accessToken']
print(access_token)
# -

# ### Log on
#
#
# Obtain login session tokens to be used for session authentication within the RESTful API. Tokens are valid for 8 hours.

dl_rest_url = 'https://{}/platform/rest/deeplearning/v1'.format(hostname)
# Switch from Basic auth to the session token for all subsequent calls.
commonHeaders={'accept': 'application/json', 'X-Auth-Token': access_token}
req = requests.Session()

# ### Check deep learning framework details
#
# Check what framework plugins are available and see example execution commands. In this demonstration we will use **edtPyTorch**

r = requests.get(dl_rest_url+'/execs/frameworks', headers=commonHeaders, verify=False).json()

# Using the raw json, easier to see the examples given
print(json.dumps(r, indent=4))

# ## Submit job via API
# [Back to top](#Contents)
#
# Now we need to structure our API job submission. There are various elements to this process as seen in the diagram below. Note that **this** Jupyter notebook is the one referred to below. A [static version](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/4_api_setup.png) is also available.
#
# ![code](https://github.com/IBM/wmla-assets/raw/master/WMLA-learning-journey/shared-images/4_api_setup.gif)
#

# framework_name = 'edtPyTorch' # DL Framework to use, from list given above
# local_dir_containing_your_code = 'resnet-wmla'
# number_of_GPU = '2' # number of GPUs for elastic distribution
# name_of_your_code_file = 'elastic-main.py' # Main model file as opened locally above
#
#
# args = '--exec-start {} \
# --cs-datastore-meta type=fs\
# --model-dir {} \
# --numWorker={} \
# --model-main {} \
# '.format(framework_name, local_dir_containing_your_code, number_of_GPU, name_of_your_code_file)
#
# print ("args: " + args)

# +
framework_name = 'edtPyTorch' # DL Framework to use, from list given above
#dataset_location = 'pytorch-mnist' # relative path of your data set under $DLI_DATA_FS
local_dir_containing_your_code = 'resnet-wmla'
number_of_GPU = '2' # number of GPUs for elastic distribution
name_of_your_code_file = 'elastic-main.py' # Main model file as opened locally above

# Fully expanded command-line arguments for the /execs submission endpoint
args = '--exec-start edtPyTorch --cs-datastore-meta type=fs --numWorker 2 \
--model-main elastic-main.py --model-dir resnet-wmla'

print ("args: " + args)
# -

# ## Monitor running job
# [Back to top](#Contents)
#
# Once the job is submitted successfully we can monitor the running job.
#

# +
# Submit the packaged model tarball (`files`, opened above) with the args.
r = requests.post(dl_rest_url+'/execs?args='+args, files=files, headers=commonHeaders, verify=False)
if not r.ok:
    print('submit job failed: code=%s, %s'%(r.status_code, r.content))

# NOTE(review): query_job_status is presumably defined in an earlier cell of
# this notebook — not visible in this chunk; verify before running standalone.
job_status = query_job_status(r.json(),refresh_rate=5)
# -

# ## Training metrics and logs
#
# #### Retrieve and display the model training metrics:
# [Back to top](#Contents)
#
# After the job completes then we can retrieve the output, logs and saved models.
#
#

# NOTE(review): helper presumably defined in an earlier cell — confirm.
query_executor_stdout_log(r.json())

# ## Download trained model from Watson Machine Learning Accelerator
#
#

# NOTE(review): helper presumably defined in an earlier cell — confirm.
download_trained_model(r.json())

# ## Further information and useful links
# [Back to top](#Contents)
#
# **WML Accelerator Introduction videos:**
# - WML Accelerator overview video (1 minute): http://ibm.biz/wmla-video
# - Overview of adapting your code for Elastic Distributed Training via API: [video](https://youtu.be/RnZtYNX6meM) | [PDF](docs/wmla_api_pieces.pdf) (screenshot below)
#
# **Further WML Accelerator information & documentation**
# - [Learning path: Get started with Watson Machine Learning Accelerator](http://ibm.biz/wmla-learning-path)
# - [IBM Documentation on Watson Machine Learning Accelerator](https://www.ibm.com/docs/en/wmla/2.2.0)
# - [Blog: Expert Q&A: Accelerate deep learning on IBM Cloud Pak for Data](https://www.ibm.com/blogs/journey-to-ai/2020/10/expert-qa-accelerate-deep-learning-on-ibm-cloud-pak-for-data)
#
#
#
# ## Appendix
# [Back to top](#Contents)
#
#
# #### This is version 1.0 and its content is copyright of IBM. All rights reserved.
#
# #
dli-learning-path/tutorials-cpd-wmla/CPD35x-elastic-distributed-training-via-WMLA-api.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="kc5cIgeEmv8o" colab_type="text"
# # Exporting GPT-2
# In this notebook, we'll show how to export [OpenAI's GPT-2 text generation model](https://github.com/openai/gpt-2) for serving.

# + [markdown] id="RAWs29lAktOK" colab_type="text"
# First, we'll download the GPT-2 code repository:

# + id="gHs3aaFaLUXq" colab_type="code" colab={}
# !git clone --no-checkout https://github.com/openai/gpt-2.git
# !cd gpt-2 && git reset --hard <PASSWORD>

# + [markdown] id="A4al4P14nmni" colab_type="text"
# Next we'll specify the model size (choose one of 124M, 355M, or 774M):

# + id="3Y4bt6hkfuxY" colab_type="code" colab={} cellView="form"
import sys

MODEL_SIZE = "124M" #@param {type:"string"}

# Guard against unsupported sizes; prints in red (ANSI escapes) to stderr.
if MODEL_SIZE not in {"124M", "355M", "774M"}:
    print("\033[91m{}\033[00m".format('ERROR: MODEL_SIZE must be "124M", "355M", or "774M"'), file=sys.stderr)

# + [markdown] id="C6xRx0Monh_j" colab_type="text"
# We can use `download_model.py` to download the model:

# + id="Kb50Z6NjbJBN" colab_type="code" colab={}
# !python3 ./gpt-2/download_model.py $MODEL_SIZE

# + [markdown] id="zz2ioOcpoPjV" colab_type="text"
# Next, we'll install the required packages:

# + id="Vk4Q2RR-UZQm" colab_type="code" colab={}
# NOTE(review): pinned to TensorFlow 1.14 — this whole notebook uses the
# TF1 graph/session API and will not run on TF 2.x without migration.
# !pip install tensorflow==1.14.* numpy==1.* boto3==1.*

# + id="KkVf5FmuUMrl" colab_type="code" colab={}
import sys
import os
import time
import json
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def

# + [markdown] id="6Ay7qiQFoWRn" colab_type="text"
# Now we can export the model for serving:

# + id="GdnYXr1IKaF0" colab_type="code" colab={}
# `model` and `sample` come from the cloned gpt-2/src directory.
sys.path.append(os.path.join(os.getcwd(), 'gpt-2/src'))
import model, sample

def export_for_serving(
    model_name='124M',
    seed=None,
    batch_size=1,
    length=None,
    temperature=1,
    top_k=0,
    models_dir='models'
):
    """
    Export the model for TF Serving
    :model_name=124M : String, which model to use
    :seed=None : Integer seed for random number generators, fix seed to
     reproduce results
    :length=None : Number of tokens in generated text, if None (default), is
     determined by model hyperparameters
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :models_dir : path to parent folder containing model subfolders
     (i.e. contains the <model_name> folder)
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))

    # Load the model hyperparameters shipped with the checkpoint.
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))

    # n_ctx is the model's context window; generated length cannot exceed it.
    if length is None:
        length = hparams.n_ctx
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)

    with tf.Session(graph=tf.Graph()) as sess:
        # Placeholder for the (tokenized) prompt: [batch, variable seq len].
        context = tf.placeholder(tf.int32, [batch_size, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        # Build the sampling graph whose output is the generated token ids.
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=batch_size,
            temperature=temperature, top_k=top_k
        )

        # Restore weights from the latest checkpoint for this model size.
        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
        saver.restore(sess, ckpt)

        # Version the export directory by unix timestamp (TF Serving layout).
        export_dir=os.path.join(models_dir, model_name, "export", str(time.time()).split('.')[0])
        if not os.path.isdir(export_dir):
            os.makedirs(export_dir)

        builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
        # Single "predict" signature: context tokens in, sampled tokens out.
        signature = predict_signature_def(inputs={'context': context},
                                          outputs={'sample': output})

        builder.add_meta_graph_and_variables(sess,
                                             [tf.saved_model.SERVING],
                                             signature_def_map={"predict": signature},
                                             strip_default_attrs=True)
        builder.save()

export_for_serving(top_k=40, length=256, model_name=MODEL_SIZE)

# + [markdown] id="hGfSohMrowmg" colab_type="text"
# ## Upload the model to AWS
#
# Cortex loads models from AWS, so we need to upload the exported model.

# + [markdown] id="BfB5QZ82ozj9" colab_type="text"
# Set these variables to configure your AWS credentials and model upload path:

# + id="B2RNuNk7o1c5" colab_type="code" colab={} cellView="form"
AWS_ACCESS_KEY_ID = "" #@param {type:"string"}
AWS_SECRET_ACCESS_KEY = "" #@param {type:"string"}
S3_UPLOAD_PATH = "s3://my-bucket/text-generator/gpt-2" #@param {type:"string"}

import sys
import re

if AWS_ACCESS_KEY_ID == "":
    print("\033[91m {}\033[00m".format("ERROR: Please set AWS_ACCESS_KEY_ID"), file=sys.stderr)
elif AWS_SECRET_ACCESS_KEY == "":
    print("\033[91m {}\033[00m".format("ERROR: Please set AWS_SECRET_ACCESS_KEY"), file=sys.stderr)
else:
    # Split "s3://bucket/key..." into bucket and key prefix.
    # NOTE(review): bare `except:` swallows everything, including
    # AttributeError from a failed match — intentional here for form UX.
    try:
        bucket, key = re.match("s3://(.+?)/(.+)", S3_UPLOAD_PATH).groups()
    except:
        print("\033[91m {}\033[00m".format("ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)"), file=sys.stderr)

# + [markdown] id="ics0omsrpS8V" colab_type="text"
# Upload the model to S3:

# + id="BnKncToppUhN" colab_type="code" colab={}
import os
import boto3

s3 = boto3.client("s3", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)

# Walk the versioned export directory and mirror it under the S3 key prefix,
# stripping the local "models/<size>/export/" prefix from each key.
for dirpath, _, filenames in os.walk("models/{}/export".format(MODEL_SIZE)):
    for filename in filenames:
        filepath = os.path.join(dirpath, filename)
        filekey = os.path.join(key, MODEL_SIZE, filepath[len("models/{}/export/".format(MODEL_SIZE)):])
        print("Uploading s3://{}/{} ...".format(bucket, filekey), end = '')
        s3.upload_file(filepath, bucket, filekey)
        print(" ✓")

print("\nUploaded model export directory to {}/{}".format(S3_UPLOAD_PATH, MODEL_SIZE))

# + [markdown] id="IIMVPhe2qkU4" colab_type="text"
# <!-- CORTEX_VERSION_MINOR x2 -->
# We also need to upload `vocab.bpe` and `encoder.json`, so that the [encoder](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/text-generator/encoder.py) in the [pre-inference request handler](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/text-generator/handler.py) can encode the input text before making a request to the model.

# + id="YdN8MtZxsO9V" colab_type="code" colab={}
print("Uploading s3://{}/{}/vocab.bpe ...".format(bucket, key), end = '')
s3.upload_file(os.path.join("models", MODEL_SIZE, "vocab.bpe"), bucket, os.path.join(key, "vocab.bpe"))
print(" ✓")
print("Uploading s3://{}/{}/encoder.json ...".format(bucket, key), end = '')
s3.upload_file(os.path.join("models", MODEL_SIZE, "encoder.json"), bucket, os.path.join(key, "encoder.json"))
print(" ✓")

# + [markdown] id="MsoxwahIpnTO" colab_type="text"
# <!-- CORTEX_VERSION_MINOR -->
# That's it! See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/text-generator) for how to deploy the model as an API.
examples/tensorflow/text-generator/gpt-2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Read Network Info from file

import os
import pickle

import numpy as np


# +
def sigmoid(s):
    """Element-wise logistic sigmoid: 1 / (1 + e**-s)."""
    return 1.0 / (np.exp(-s) + 1.0)


def forward(data, weights, biases):
    """Forward-propagate `data` through the fully-connected network.

    :param data: 1-D input vector; reshaped to a column vector internally.
    :param weights: list of per-layer weight matrices.
    :param biases: list of per-layer bias column vectors (same length as weights).
    :return: column vector of output activations after the final sigmoid.
    """
    curr = data.reshape(data.shape[0], 1)
    for weight, bias in zip(weights, biases):
        curr = sigmoid(np.dot(weight, curr) + bias)
    return curr
# -

# # Classify test data

# Georgian letter classes; the trailing '-' doubles as the "unknown" label,
# which classify() also reaches via label_names[-1] when no output fires.
label_names = ['უ', 'ყ', 'მ', 'შ', 'ძ', 'წ', 'ს', 'ხ', 'ლ', 'ჩ', '-']


def classify(data, weights, biases):
    """Return the predicted label for one input vector.

    A label is chosen only if exactly one network output exceeds 0.5;
    zero or multiple firing outputs both yield the unknown label '-'.
    (FIX: removed leftover per-call debug prints of the raw activations.)
    """
    ans = forward(data, weights, biases)
    res = [0] * len(ans)
    ind = -1
    for i in range(len(ans)):
        if ans[i] > 0.5:
            res[i] = 1
            ind = i
        else:
            res[i] = 0
    if sum(res) > 1:
        # Ambiguous: more than one output above threshold.
        return '-'
    return label_names[ind]


def main():
    """Score every image in the testing folder against the saved model.

    FIX: the scoring run (model unpickling, image I/O) used to execute at
    import time; it is now guarded so the pure functions above are importable.
    """
    # cv2 is only needed for the image-scoring run, not for the network math.
    import cv2

    # Load the trained network's weights and biases.
    # NOTE(review): pickle.load on an untrusted file can execute arbitrary
    # code — only load model files you produced yourself.
    filename = "7_model.sav"
    with open(filename, 'rb') as file:
        net_info = pickle.load(file)
    weights = net_info["weights"]
    biases = net_info["biases"]

    root_folder = "./data/ასოები/testing_data/"
    data_list = []
    for image_name in os.listdir(root_folder):
        # Grayscale -> 25x25 -> Otsu binarization -> flat 625x1 int vector,
        # matching the network's input layer.
        img = cv2.imread(root_folder + image_name, cv2.IMREAD_GRAYSCALE)
        resized = cv2.resize(img, dsize=(25, 25), interpolation=cv2.INTER_CUBIC)
        (thresh, im_bw) = cv2.threshold(resized.astype(np.uint8), 128, 255,
                                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        reshaped = im_bw.reshape((625, 1)).astype(int)
        data_list.append((reshaped, image_name))

    classified_data = []
    gamoicno = 0       # correctly classified ("recognized")
    ver_gamoicno = 0   # misclassified ("not recognized")
    for data, img_name in data_list:
        classified_label = classify(data, weights, biases)
        classified_data.append((img_name, classified_label))
        # Ground truth is encoded as the first character of the file name.
        if img_name[0] == classified_label:
            gamoicno += 1
        else:
            ver_gamoicno += 1

    print("gamoicno: " + str(gamoicno) + " ver gamoicno: " + str(ver_gamoicno))
    print(classified_data)


if __name__ == '__main__':
    main()
7_scoring.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PythonData
#     language: python
#     name: pythondata
# ---

# + 
# Dependencies
import numpy as np

# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func

# + 
# Connect to the local SQLite weather database.
engine = create_engine("sqlite:///hawaii.sqlite")

# Reflect the existing schema into automapped ORM classes.
Base = automap_base()
Base.prepare(engine, reflect=True)

# Keep handles to the two reflected tables.
Measurement = Base.classes.measurement
Station = Base.classes.station
# -

# Open an ORM session against the database.
session = Session(engine)

# ## D1: Determine the Summary Statistics for June

# + 
# 1. Import the sqlalchemy extract function (plus pandas for the summaries).
from sqlalchemy import extract
import pandas as pd

# 2. Query every Measurement row whose date falls in June (month == 6).
june_rows = session.query(Measurement).filter(extract('month', Measurement.date) == 6)
# -

# 3. Pull the observed temperatures out of the June rows.
june_temps = [row.tobs for row in june_rows]
# print(june_temps)

# 4. Wrap the June temperatures in a single-column DataFrame.
june_df = pd.DataFrame(june_temps, columns=['temps'])
# june_df.head()

# 5. Display summary statistics (count/mean/std/quartiles) for June.
june_df.describe()

# ## D2: Determine the Summary Statistics for December

# 6. Query every Measurement row whose date falls in December (month == 12).
dec_rows = session.query(Measurement).filter(extract('month', Measurement.date) == 12)

# 7. Pull the observed temperatures out of the December rows.
dec_temps = [row.tobs for row in dec_rows]

# 8. Wrap the December temperatures in a single-column DataFrame.
dec_df = pd.DataFrame(dec_temps, columns=['temps'])
# dec_df.head()

# 9. Display summary statistics for December.
dec_df.describe()
SurfsUp_Challenge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ___
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# # Recommender Systems with Python
#
# Welcome to the code notebook for Recommender Systems with Python. In this lecture we will develop basic recommendation systems using Python and pandas. There is another notebook: *Advanced Recommender Systems with Python*. That notebook goes into more detail with the same data set.
#
# In this notebook, we will focus on providing a basic recommendation system by suggesting items that are most similar to a particular item, in this case, movies. Keep in mind, this is not a true robust recommendation system, to describe it more accurately,it just tells you what movies/items are most similar to your movie choice.
#
# There is no project for this topic, instead you have the option to work through the advanced lecture version of this notebook (totally optional!).
#
# Let's get started!
#
# ## Import Libraries

import numpy as np
import pandas as pd

# ## Get the Data

# The MovieLens ratings file is tab-separated with no header row.
column_names = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv('u.data', sep='\t', names=column_names)
df.head()

# Now let's get the movie titles:

title_lookup = pd.read_csv("Movie_Id_Titles")
title_lookup.head()

# We can merge them together:

# Attach a human-readable title to every rating row via the shared item_id.
df = pd.merge(df, title_lookup, on='item_id')
df.head()

# # EDA
#
# Let's explore the data a bit and get a look at some of the best rated movies.
#
# ## Visualization Imports

import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
# %matplotlib inline

# Let's create a ratings dataframe with average rating and number of ratings:

# Per-title mean rating and rating count, highest first (for a quick look).
by_title = df.groupby('title')['rating']
by_title.mean().sort_values(ascending=False).head()

by_title.count().sort_values(ascending=False).head()

# Average rating per title as a one-column frame.
ratings = by_title.mean().to_frame()
ratings.head()

# Now set the number of ratings column:

ratings['num of ratings'] = by_title.count()
ratings.head()

# Now a few histograms:

plt.figure(figsize=(10,4))
ratings['num of ratings'].hist(bins=70)

plt.figure(figsize=(10,4))
ratings['rating'].hist(bins=70)

sns.jointplot(x='rating',y='num of ratings',data=ratings,alpha=0.5)

# Okay! Now that we have a general idea of what the data looks like, let's move on to creating a simple recommendation system:

# ## Recommending Similar Movies

# Now let's create a matrix that has the user ids on one access and the movie title on another axis. Each cell will then consist of the rating the user gave to that movie. Note there will be a lot of NaN values, because most people have not seen most of the movies.

# Users x titles matrix of ratings (mostly NaN — users rate few movies).
moviemat = df.pivot_table(index='user_id',columns='title',values='rating')
moviemat.head()

# Most rated movie:

ratings.sort_values('num of ratings',ascending=False).head(10)

# Let's choose two movies: starwars, a sci-fi movie. And <NAME>, a comedy.

ratings.head()

# Now let's grab the user ratings for those two movies:

starwars_user_ratings = moviemat['Star Wars (1977)']
liarliar_user_ratings = moviemat['Liar Liar (1997)']
starwars_user_ratings.head()

# We can then use corrwith() method to get correlations between two pandas series:

# Correlate every movie's rating column with the chosen movie's column.
similar_to_starwars = moviemat.corrwith(starwars_user_ratings)
similar_to_liarliar = moviemat.corrwith(liarliar_user_ratings)

# Let's clean this by removing NaN values and using a DataFrame instead of a series:

corr_starwars = similar_to_starwars.to_frame('Correlation')
corr_starwars.dropna(inplace=True)
corr_starwars.head()

# Now if we sort the dataframe by correlation, we should get the most similar movies, however note that we get some results that don't really make sense. This is because there are a lot of movies only watched once by users who also watched star wars (it was the most popular movie).

corr_starwars.sort_values('Correlation',ascending=False).head(10)

# Let's fix this by filtering out movies that have less than 100 reviews (this value was chosen based off the histogram from earlier).

corr_starwars = corr_starwars.join(ratings['num of ratings'])
corr_starwars.head()

# Now sort the values and notice how the titles make a lot more sense:

corr_starwars[corr_starwars['num of ratings']>100].sort_values('Correlation',ascending=False).head()

# Now the same for the comedy Liar Liar:

corr_liarliar = similar_to_liarliar.to_frame('Correlation')
corr_liarliar.dropna(inplace=True)
corr_liarliar = corr_liarliar.join(ratings['num of ratings'])
corr_liarliar[corr_liarliar['num of ratings']>100].sort_values('Correlation',ascending=False).head()

# # Great Job!
study_bdt/MachineLearning/source_code/19-Recommender-Systems/01-Recommender Systems with Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + tags=[] # IT18113600 # <NAME> # Q1) # a) import os # -----------------LIST APPROACH---------------------- # def termsAsList() : # termList = [] # os.chdir('F:\\SLIIT\Year 3\\Semester 2\\IRWA\\Assignment\\assignment\\docs') # #assign the files to a list # fileList = os.listdir(os.getcwd()) # for file in fileList : # with open(file, 'r') as f: # #get the sentences in the documents, convert them to the lower case and split # words = f.read().lower().split() # print("hello") # for word in words : # #tokenize, remove unnessesary commas and others(special symbols) # if word[-1] in [',','!','?','.']: # word = word[:-1] # print(word) # #if the word not in the dictionary keys, new key is created # if word not in termList : # termList.append(word) # return termList # print(termsAsList()) #--------------------DICTOINARY APPROACH---------------------------------- documentList = ['D1.txt', 'D2.txt', 'D3.txt'] def identifyTerms() : #dictionary initializing termDictionary = {} #get the path of the related documents directory os.chdir('F:\\SLIIT\Year 3\\Semester 2\\IRWA\\Assignment\\assignment\\docs') #assign the files to a list fileList = os.listdir(os.getcwd()) for file in fileList : with open(file, 'r') as f: #get the sentences in the documents, convert them to the lower case and split words = f.read().lower().split() for word in words : #tokenize, remove unnessesary commas and others(special symbols) if word[-1] in [',','!','?','.']: word = word[:-1] print(word) #if the word not in the dictionary keys, new key is created if word not in termDictionary.keys() : termDictionary[word] = [f.name] #if the word is already exists in the dictionary, then the relavent document name # will be added to the value list of that key else : if file not in 
termDictionary[word]: termDictionary[word] += [f.name] return termDictionary identifyTerms() # + tags=[] # b) incidentDic = {} #get the inverted index dictionary incidentDic = identifyTerms() # incidentMatrix = [] incidentMatrix = {} for i in incidentDic.keys() : internalList = [] #if the document exists as a dictionary value of a particualr key, #then incident matrix value will be updates as 1, if not 0 for document in documentList: if document in incidentDic[i]: internalList.append(1) else: internalList.append(0) # if 'D1.txt' in incidentDic[i]: # internalList.append(1) # else: # internalList.append(0) # if 'D2.txt' in incidentDic[i]: # internalList.append(1) # else: # internalList.append(0) # if 'D3.txt' in incidentDic[i]: # internalList.append(1) # else: # internalList.append(0) if i not in incidentMatrix.keys(): incidentMatrix[i] = internalList # incidentMatrix.append(internalList) #print the Column headers print("{}\t\t{}".format("Terms", ['D1', 'D2', 'D3'])) #print rows for i, j in incidentMatrix.items(): print("{}\t\t{}".format(i, j)) # + def writeToFile(dictionary, path): os.chdir(path) with open('index-file.txt', 'w') as indexFile: for word, files in dictionary.items(): indexFile.write(word + " ") for file in files: indexFile.write(file[:file.find(".txt")] + " ") indexFile.write(f'{len(files)}\n') writeToFile(identifyTerms(), 'F:\\SLIIT\Year 3\\Semester 2\\IRWA\\Assignment\\assignment\\writes') # + tags=[] # c) resultSet = identifyTerms() # intialize three lists to store the documents related to the keys listFrodo = [] listOrc = [] listSword = [] listSam = [] listBlue = [] # go through the inverted matrix and identify the specific keys for key in resultSet : if key == "frodo": listFrodo = resultSet[key] elif key == "orc": listOrc = resultSet[key] elif key == "sword": listSword = resultSet[key] elif key == "sam": listSam = resultSet[key] elif key == "blue": listBlue = resultSet[key] print(listFrodo) print(listOrc) print(listSword) print(listSam) 
print(listBlue) # + # Logical Operators for future use # AND operation for three parameters def AND_op(postingList1, postingList2, postingList3) : return set(postingList1).intersection(postingList2).intersection(postingList3) # AND operation for two parameters def AND_op2(postingList1, postingList2) : return set(postingList1).intersection(postingList2) # NOT operation for two parameters def NOT_op(listParam, notVariable) : notList = [] for i in notVariable : if i not in listParam: notList.append(i) # print("Not List {}".format(notList)) return notList # TODO: Check this again to identify how the symmetric difference works # return set(listParam).symmetric_difference(notVariable) # + tags=[] # c) # q1) #Frodo AND orc AND sword print("Answer for Frodo AND orc AND sward: {}".format(AND_op(listFrodo, listOrc, listSword))) # q2) #Sam AND blue sam_and_blue = AND_op2(listSam, listBlue) #NOT Frodo not_frodo = NOT_op(listFrodo, sam_and_blue) # Sam AND blue AND NOT Frodo output = AND_op2(sam_and_blue, not_frodo) print("Answer for Sam AND blue AND NOT Frodo: {}".format(output)) # + # Q2) import nltk nltk.download() # + tags=[] # 1) / 4) import nltk import os # def getTokenizedList(file): # tokenList = [] # invertedIndex = {} # # for file in fileList2 : # # with open(file, 'r') as f: # # #get the sentences in the documents, convert them to the lower case and split # # words = f.read().split() # #split file to sentences # sentences = file.split(".") # for sentence in sentences : # #retreive words from sentences # words = sentence.split() # for word in words : # if word[-1] in [',','!','?','.']: # word = word[:-1] # # print(word) # if word not in tokenList : # tokenList.append(word) # if word not in invertedIndex.keys(): # invertedIndex[word] = file.name # else : # if file not in invertedIndex[word]: # invertedIndex[word] += file.name # return tokenList, invertedIndex # print(tokenList) # 
getTokenizedList('C:\\Users\\User\\AppData\\Roaming\\nltk_data\\corpora\\reuters\\training') os.chdir('C:/Users/User/AppData/Roaming/nltk_data/corpora/reuters/training') #assign the files to a list fileList2 = os.listdir(os.getcwd()) print(len(fileList2)) tokenList = [] invertedIndex = {} for file in fileList2 : with open(file, 'r') as f: # for w in getTokenizedList(f.read())[0]: # retrievedList.append(w) #split file to sentences sentences = nltk.sent_tokenize(f.read().lower()) for sentence in sentences : #retreive words from sentences words = nltk.word_tokenize(sentence) for word in words : if word not in tokenList : tokenList.append(word) if word not in invertedIndex.keys(): invertedIndex[word] = [file] else : if file not in invertedIndex[word]: invertedIndex[word] += [file] # print(invertedIndex) # for file in fileList2 : # with open(file, 'r') as f: # # for w in getTokenizedList(f.read())[0]: # # retrievedList.append(w) # # for file in fileList2 : # # with open(file, 'r') as f: # # #get the sentences in the documents, convert them to the lower case and split # # words = f.read().split() # #split file to sentences # sentences = f.read().lower().split(".") # for sentence in sentences : # #retreive words from sentences # words = sentence.split() # for word in words : # if word[-1] in [',','!','?','.']: # word = word[:-1] # # print(word) # # word = nltk.word_tokenize(word) # if word not in tokenList : # tokenList.append(word) # if word not in invertedIndex.keys(): # invertedIndex[word] = [file] # else : # if file not in invertedIndex[word]: # invertedIndex[word] += [file] print(invertedIndex) def getPostingList(index): return invertedIndex[index] print(getPostingList('bahia')) # print(invertedIndex) # + tags=[] print(len(tokenList)) # + tags=[] # 2) from nltk.stem import PorterStemmer ps = PorterStemmer() def stemming(tokenList) : stemmedList = [] for word in tokenList: stemmedList.append(ps.stem(word)) return stemmedList stemmedWordList = stemming(tokenList) # 
print(len(tokenList)) # print(len(stemmedWordList)) stemmedFinalList = [] # TODO: Recheck this # Remove duplicates for word in stemmedWordList: if word not in stemmedFinalList: stemmedFinalList.append(word) # if word in stemmedFinalList: # print(word) print(stemmedFinalList) # print(stemmedWordList) # - # 5) def addPostingList(key, value): if key not in invertedIndex.keys: invertedIndex[key] = value else: if value is not in invertedIndex[key]: invertedIndex[key] += value return invertedIndex # + tags=[] invertedIndex = {} os.chdir('C:\\Users\\User\\AppData\\Roaming\\nltk_data\\corpora\\reuters\\training') #assign the files to a list fileList2 = os.listdir(os.getcwd()) print(len(fileList2)) retrievedList = [] for file in fileList2 : with open(file, 'r') as f: for w in getTokenizedList(f.read()): retrievedList.append(w) print(retrievedList) # + tags=[] # Q3) # 1) word = '<PASSWORD>' permutermWord = word + '$' changedWordList = [] # Run the loop for number of characters(including #) for w in range(0, len(permutermWord)): changingWord = '' # Get the each character and add it to a separate string, # here it will increment the starting index according to the w value for i in range(w, len(permutermWord)): changingWord += permutermWord[i] if len(changingWord) < len(permutermWord): for j in range(0, len(permutermWord) - len(changingWord)): changingWord += permutermWord[j] print(changingWord) changedWordList.append(changingWord) # added the words to a list for future use # print(changedWordList) # + tags=[] # 2) def levenshtein_distance(word1, word2): #compare the length of the words and get the difference, #that difference will be initiated that distanca value if len(word1)> len(word2): difference = len(word1) - len(word2) word1 = word1[:len(word2)] elif len(word2)> len(word1): difference = len(word2) - len(word1) word2 = word2[:len(word1)] else: difference = 0 #if the letters are not matched, then the difference will be increased by 1 for character in range(len(word1)): if 
# 3)
# Letter groups for American Soundex. Vowels plus H, W and Y map to code 0,
# which is stripped from the final code; the remaining groups map to 1-6.
soundexDic = {'AEIOUHWY': ['A', 'E', 'I', 'O', 'U', 'H', 'W', 'Y'],
              'BFPV': ['B', 'F', 'P', 'V'],
              'CGJKQSXZ': ['C', 'G', 'J', 'K', 'Q', 'S', 'X', 'Z'],
              'DT': ['D', 'T'],
              'L': ['L'],
              'MN': ['M', 'N'],
              'R': ['R']}

# Flat letter -> digit lookup built once from the groups above; insertion
# order of soundexDic gives the Soundex digit (AEIOUHWY -> '0', ..., R -> '6').
_soundex_code = {letter: str(digit)
                 for digit, letters in enumerate(soundexDic.values())
                 for letter in letters}


def soundex(word):
    """Return the 4-character Soundex code of *word* (e.g. 'herman' -> 'H655').

    Fixes in this revision:
      * The long if/elif chain is replaced by a dict lookup derived from
        ``soundexDic``; the unused ``characterVal`` counter is dropped.
      * A letter immediately following the first letter with the *same*
        Soundex code is now collapsed, per the standard rule
        ('Pfister' -> 'P236'; the old code produced 'P123').

    Characters with no Soundex code (digits, punctuation) are ignored.
    """
    word = word.upper()
    first = word[0]
    # Encode every letter, including the first, so the consecutive-duplicate
    # rule can compare the second letter against the first letter's code.
    codes = [_soundex_code.get(ch, '') for ch in word]
    result = first
    prev = codes[0]
    for code in codes[1:]:
        # Rule: consecutive identical digits collapse into one. The '0'
        # marker (vowels/H/W/Y) is kept here because it separates duplicates.
        if code and code != prev:
            result += code
        prev = code
    # Vowel markers only served as separators; drop them from the output.
    result = result.replace('0', '')
    # Pad with zeros / truncate so the code is always exactly 4 characters.
    return (result + '000')[:4]


print("Soundex Output: {}".format(soundex('herman')))
print("Soundex Output: {}".format(soundex('hermann')))
IT18113600_Assignment_1.ipynb
def json_print(inpt):
    """Pretty-print *inpt* as syntax-highlighted HTML for notebook output.

    Values whose string form starts with '{' are highlighted as JSON;
    everything else falls back to a plain-text lexer. Returns an IPython
    ``HTML`` object embedding the pygments stylesheet plus the highlighted
    markup.

    Fix: ``str.startswith`` replaces ``string[0] == '{'``, which raised
    IndexError when the rendered string was empty.
    """
    string = str(inpt)
    formatter = HtmlFormatter()
    # JSON serializations open with '{'; anything else is treated as text.
    lexer = JsonLexer() if string.startswith('{') else TextLexer()
    return HTML('<style type="text/css">{}</style>{}'.format(
        formatter.get_style_defs('.highlight'),
        highlight(string, lexer, formatter)))
# + from taxii2client import Collection collection = Collection("http://127.0.0.1:5000/trustgroup1/collections/91a7b528-80eb-42ed-a74d-c6fbd5a26116/", user="admin", password="<PASSWORD>") tc_source = TAXIICollectionSource(collection) add_data_source(tc_source) # - # Now we can get all of the indicators from the data source. response = indicators() # Similar functions are available for the other STIX Object types. See the full list [here](../api/stix2.workbench.rst#stix2.workbench.attack_patterns). # # If you want to only retrieve *some* indicators, you can pass in one or more [Filters](../api/datastore/stix2.datastore.filters.rst). This example finds all the indicators created by a specific identity: response = indicators(filters=Filter('created_by_ref', '=', 'identity--adede3e8-bf44-4e6f-b3c9-1958cbc3b188')) # The objects returned let you easily traverse their relationships. Get all Relationship objects involving that object with ``.relationships()``, all other objects related to this object with ``.related()``, and the Identity object for the creator of the object (if one exists) with ``.created_by()``. For full details on these methods and their arguments, see the [Workbench API](../api/stix2.workbench.rst) documentation. for i in indicators(): for rel in i.relationships(): print(rel.source_ref) print(rel.relationship_type) print(rel.target_ref) for i in indicators(): for obj in i.related(): print(obj) # If there are a lot of related objects, you can narrow it down by passing in one or more [Filters](../api/datastore/stix2.datastore.filters.rst) just as before. For example, if we want to get only the indicators related to a specific piece of malware (and not any entities that use it or are targeted by it): malware = get('malware--c0931cc6-c75e-47e5-9036-78fabc95d4ec') indicator = malware.related(filters=Filter('type', '=', 'indicator')) print(indicator[0]) # ### Creating STIX Data # # To create a STIX object, just use that object's class constructor. 
Once it's created, add it to the workbench with [save()](../api/stix2.workbench.rst#stix2.workbench.save). identity = Identity(name="ACME Threat Intel Co.", identity_class="organization") save(identity) # You can also set defaults for certain properties when creating objects. For example, let's set the default creator to be the identity object we just created: set_default_creator(identity) # Now when we create an indicator (or any other STIX Domain Object), it will automatically have the right ``create_by_ref`` value. # + indicator = Indicator(pattern_type="stix", pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']") save(indicator) indicator_creator = get(indicator.created_by_ref) print(indicator_creator.name) # - # Defaults can also be set for the [created timestamp](../api/stix2.workbench.rst#stix2.workbench.set_default_created), [external references](../api/stix2.workbench.rst#stix2.workbench.set_default_external_refs) and [object marking references](../api/stix2.workbench.rst#stix2.workbench.set_default_object_marking_refs). # <div class="alert alert-warning"> # # **Warning** # # The workbench layer replaces STIX Object classes with special versions of them that use "wrappers" to provide extra functionality. Because of this, we recommend that you **either use the workbench layer or the rest of the library, but not both**. In other words, don't import from both ``stix2.workbench`` and any other submodules of ``stix2``. # # </div>
docs/guide/workbench.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Dependencies import json import requests import pandas as pd # from config import petfinder_api_key, petfinder_secret_key petfinder_api_key = "<KEY>" petfinder_secret_key = "<KEY>" # + # Request token for the API data = { 'grant_type': 'client_credentials', 'client_id': petfinder_api_key, 'client_secret': petfinder_secret_key } token_response = requests.post('https://api.petfinder.com/v2/oauth2/token', data=data) token_response_json = token_response.json() # print(json.dumps(token_response_json, indent = 4)) token_response_json # - headers = { 'Authorization': f"Bearer {token_response_json['access_token']}" } headers # + # Pull data from petfinder.com API url = "https://api.petfinder.com/v2/animals?" response = requests.get(url, headers=headers) response_json = response.json() # print(json.dumps(response_json, indent = 4)) response_json # - # Extract out list of animal dictionaries (also known as records) animal_records = response_json['animals'] animal_records # Use Pandas to directly convert list of records to DataFrame animals_df = pd.DataFrame(animal_records) animals_df # Export and save json data to json file with open('petfinder_data_v2.json', 'w') as outfile: json.dump(response_json, outfile) # Put the json file to dataframe # json_file = "./petfinder_data_v2.json" petfinder_data_df = pd.read_json("./petfinder_data_v2.json") petfinder_data_df.head() # ## Web Scraping # Dependencies from bs4 import BeautifulSoup import requests # URL of page to be scraped url = "http://dog.rescueme.org/California" response = requests.get(url) print(response.text) soup = BeautifulSoup(response.text, 'html.parser') print(soup.prettify()) # + names = soup.find_all('span', class_= '_cpn') for name in names: print(name.text) # -
.ipynb_checkpoints/Pet_Adoption_Analysis_REVISED-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt # ## Read CSV files ordered_data=pd.read_csv("orderedSort.csv") particial_ordered_data=pd.read_csv("particialOrderedSort.csv") random_data=pd.read_csv("randomSort.csv") reversed_data=pd.read_csv("reverseSort.csv") # ## Draw a log/log algorithm growth graph # T=k*N*(N+1)*0.5 # 2T=K*N*(N+1) # 2T=K*N^2 # N=sqr(2T)/K # + plt.figure(figsize=(15,10)) x=np.array(random_data['N'],dtype=float) plt.plot(np.log(random_data['N']),np.log(random_data['Time']),'-o',label="random_data") plt.plot(np.log(ordered_data['N']),np.log(ordered_data['Time']),'-o',label="ordered_data") plt.plot(np.log(particial_ordered_data['N']),np.log(particial_ordered_data['Time']),'-o',label="partial_ordered_data") plt.plot(np.log(reversed_data['N']),np.log(reversed_data['Time']),'-o',label="reversed_data") plt.ylabel("Time milionsecond[log(T)]") plt.xlabel("Memory Space [log(N)]") plt.legend() # -
Assignment2/ipynb/InsertionSort_Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np old_data = pd.read_csv('prior_data.csv') new_data = pd.read_csv('eps_updated_tgts_no_bp_rp_run.csv') # + old_data = old_data.rename(columns={'KIC': 'ID'}) old_data.head() # + new_data = new_data.rename(columns={'eps_error': 'eps_err', 'teff': 'Teff', 'teff_error': 'Teff_err', 'dnu_error': 'dnu_err', 'numax_error': 'numax_err'}) new_data.head() # - fig, ax = plt.subplots(figsize=[16,9]) ax.scatter(old_data.numax, old_data.dnu, label='old_data', alpha=0.3) ax.scatter(new_data.numax, new_data.dnu, label='new_data', alpha=0.3) ax.set_xlabel('numax') ax.set_ylabel('dnu') ax.set_xscale('log') ax.set_yscale('log') ax.set_ylim([1,150]) ax.legend() fig, ax = plt.subplots(figsize=[16,9]) log_numax = np.log10(old_data.numax).values log_dnu = np.log10(old_data.dnu).values z = np.polyfit(log_numax, log_dnu, deg=1) p = np.poly1d(z) ax.scatter(np.log10(new_data.numax), np.log10(new_data.dnu) - p(np.log10(new_data.numax)), c=new_data.bp_rp, marker='D', s=82, alpha=0.3, label='new data') CS = ax.scatter(log_numax, log_dnu - p(log_numax), c=old_data.bp_rp, s=82, alpha=0.3, label='old data') cbar = fig.colorbar(CS) cbar.ax.set_ylabel('bp_rp') ax.set_xlabel('log numax') ax.set_ylabel('log dnu') ax.legend() fig, ax = plt.subplots(figsize=[16,9]) ax.scatter(old_data.numax, old_data.eps, c=old_data.bp_rp, s=35, label='old_data', alpha=0.4) ax.scatter(new_data.numax, new_data.eps, c=new_data.bp_rp, marker='D', s=35, label='new_data', alpha=0.4) ax.legend() ax.set_xlabel('numax') ax.set_ylabel('eps') ax.set_xscale('log') df = pd.concat([old_data, new_data], sort=False) df.to_csv('prior_data2.csv', index=False) fig, ax = plt.subplots(figsize=[16,9]) ax.scatter(old_data.dnu, 
old_data.eps, c=old_data.bp_rp, s=35, label='old_data', alpha=0.4) ax.scatter(new_data.dnu, new_data.eps, c=new_data.bp_rp, marker='D', s=35, label='new_data', alpha=0.4) ax.legend() ax.set_xlabel('numax') ax.set_ylabel('eps') ax.set_xscale('log')
pbjam/data/Compare Results .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 3: The math module # ### Instructions # Use the sqrt() function within the math module to assign the square root of 16.0 to a. # # Use the ceil() function within the math module to assign the ceiling of 111.3 to b. # # Use the floor() function within the math module to assign the floor of 89.9 to c. # # import math # ### Answer a = math.sqrt(16.0) b = math.ceil(111.3) c = math.floor(89.9) print(a, b, c) # ## 4: Variables within modules # ### Instructions # Assign the square root of pi to a. # # Assign the ceiling of pi to b. # # Assign the floor of pi to c. # + import math print(math.pi) # - # ### Answer PI = math.pi a = math.sqrt(PI) b = math.ceil(PI) c = math.floor(PI) print(a, b, c) # ## 5: The csv module # ### Instructions # Read in all of the data from "nfl.csv" into a list variable named nfl using the csv module. # # Answer # + import csv f = open("nfl.csv") csvreader = csv.reader(f) nfl = list(csvreader) # - print(nfl) # ## 6: Counting how many times a team won # ### Instructions # Fill in the mission code to do the following: # # Import and use the csv module to load data from our "nfl.csv" file. # # Count how many games the "New England Patriots" won from 2009-2013. To do this, set a counter to 0, and increment by 1 whenever you see a row whose winner column is equal to "New England Patriots". # # Assign the count to patriots_wins. # ### Answer # + import csv f = open("nfl.csv") reader = csv.reader(f) data = list(reader) patriots_wins = 0 for row in data: if row[2] == "New England Patriots": patriots_wins += 1 print(patriots_wins) # -
python_introduction/intermediate/.ipynb_checkpoints/Modules-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="N7qWdbGwjQsq" # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="csMC7G6s39Rh" # ## Advanced Exercise in Data Science: Kalman Fidlers # # In this notebook, we'll implement an [Extended Kalman Filter](https://en.wikipedia.org/wiki/Extended_Kalman_filter) for terrain-based navigation using GPS, an altimeter, and INS. The code below is "pseudocode" and will not run out of the box because some of the inputs (like the terrain map) need to be supplied and not all variables are explicitly defined. Variable names are descriptive to enable filling in the details... it's an exercise for the dedicated student! # + [markdown] id="2DuAw9Es5d_d" # ## Calculate Map Gradients # * Load map data # * Create an interpolation function # + id="xZFw2DQZ13lr" ### Get Gradient Jacobians (Change in h(x) i.e. 
# Predict and Update Functions
def predict(x, P, F, Q):
    """Kalman time update: propagate state x and covariance P through the
    transition model F, inflating by the process noise Q."""
    x_pred = F @ x
    P_pred = F @ P @ F.T + Q
    return x_pred, P_pred


def update(x, P, z, H, R, debug=False):
    """Kalman measurement update for observation z with model H, noise R.

    Returns the corrected (x, P); with ``debug=True`` also returns the
    innovation before and after the correction.
    """
    n = len(x)
    innovation = z - H @ x                # pre-update residual
    S = H @ P @ H.T + R                   # innovation covariance
    gain = P @ H.T @ np.linalg.pinv(S)    # Kalman gain (pinv tolerates singular S)
    x_new = x + gain @ innovation
    P_new = (np.eye(n) - gain @ H) @ P
    if debug:
        return x_new, P_new, innovation, z - H @ x_new
    return x_new, P_new
# + id="8JMJgnIz6Qa4"
# Process Model
# 8-state vector: [pos_x, vel_x, pos_y, vel_y, alt, alt_rate, ins_drift, ins_drift_rate]
# NOTE(review): this cell is declared pseudocode by the notebook; dt, c_rate,
# flag_sensor, sensor, func_map, func_map_grad_x/y and the measurement feeds
# must be supplied before it can run.
# Fixes: the original F literal was missing a comma after row 6 (two adjacent
# list literals -> TypeError at build time) and propagated the last state with
# `dt` on the diagonal instead of 1.
F = np.array([[1, dt, 0, 0, 0, 0, 0, 0],
              [0, 1, 0, 0, 0, 0, 0, 0],
              [0, 0, 1, dt, 0, 0, 0, 0],
              [0, 0, 0, 1, 0, 0, 0, 0],
              [0, 0, 0, 0, 1, dt, 0, 0],
              [0, 0, 0, 0, 0, 1, 0, 0],
              [0, 0, 0, 0, 0, 0, 1, dt],
              [0, 0, 0, 0, 0, 0, 0, 1]])

# Measurement Models
H_vel = np.array([[0, 1, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0, 0, 0]])
H_gps = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 1, 0, 0, 0, 0, 0]])

# + id="cOQjUHDu6Qe5"
# Logging arrays
x_mat, P_mat, residual_mat, grad_mat = [], [], [], []

# Initialize filter
ind_sim = 1000

# Initial Conditions
# Fix: x0 had only 6 entries while P0 (and F) are 8-dimensional.
x0 = np.zeros(8)
P0 = np.diag([100**2, 100**2, 10**2, 10**2, 10**2, 10**2, 10**2, 10**2])
x, P = x0.copy(), P0.astype(float)  # fix: the loop read x/P before assignment

# Measurement noise
R_vel = np.diag([10, 10])
R_gps = np.diag([10, 10])
R_alt = np.diag([100])

# Process Noise
Q = np.diag([10**2, 10**2, 1**2, 1**2, .1**2, .1**2, .01**2, .01**2])

for i in range(ind_sim):  # fix: `ind_test` was never defined
    z_vel = ...  # TODO: velocity data
    z_gps = ...  # TODO: gps data
    z_alt = ...  # TODO: filtered altimeter data

    # Obtain Jacobian from Terrain Map at the current position estimate
    dzdx = func_map_grad_x(x[0], x[1])
    dzdy = func_map_grad_y(x[0], x[1])
    H_map = np.array([[dzdx[0], 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, dzdy[0], 0, 0, 0, 0, 0]])

    ## Update
    # NOTE(review): the original called update(..., H_vel_bias, ...) and
    # update_terrain(..., H_alt, ...); neither name is defined anywhere.
    # H_vel and H_map are presumably intended -- confirm against the paper.
    x, P = update(x, P, z_vel, H_vel, R_vel)
    if i % c_rate == 0 and flag_sensor:
        if sensor == 'gps':
            x, P = update(x, P, z_gps, H_gps, R_gps)
        elif sensor == 'terrain':
            # fix: update_terrain returns (x, P); the old 4-value unpack
            # would raise ValueError.
            x, P = update_terrain(x, P, z_alt, H_map, R_alt, func_map)

    ## Log
    x_mat.append(x), P_mat.append(P)

    ## Predict
    x, P = predict(x, P, F, Q)

# + [markdown] id="TlhcRLa5-sgY"
# ## Plot Results

# + id="0PaoCZgE6Qix"
# fix: x_mat is a Python list (not sliceable with [:, 0]) and `ll_mat` was
# never defined; plot the logged x/y position components instead.
x_arr = np.asarray(x_mat)
plt.figure(figsize=(16, 4))
plt.subplot(1, 2, 1)
plt.plot(x_arr[:, 0], x_arr[:, 2])
plt.xlabel('x'), plt.ylabel('y')
plt.subplot(1, 2, 2)
pos_err = np.linalg.norm(x_true - x)  # TODO: x_true (truth trajectory) must be supplied
plt.plot(abs(pos_err))

# + id="665hh7uH6QnS"
colabs/Kalman Filters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="1aH_fOVolA69" outputId="bfa44d86-2430-480e-c960-1ae31467996f" import pandas as pd import os import numpy as np from sklearn.model_selection import train_test_split import gensim from gensim.models.word2vec import Word2Vec import gensim.downloader as api from tensorflow.keras.utils import to_categorical from tensorflow.keras.models import load_model import matplotlib.pyplot as plt from time import time import tensorflow.keras # #!pip install textblob # #!pip install tweepy import sys,tweepy,csv,re from textblob import TextBlob import matplotlib.pyplot as plt import nltk from nltk.tokenize import RegexpTokenizer from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords import string import unicodedata from autocorrect import Speller # + id="1OqK6EY_lLGB" test_data = pd.read_csv("data/preprocessed_train.csv") test_data.text = test_data.selected_text.map(lambda x:str(x)) test_data.sentiment = test_data.sentiment.astype("category") test_data.sentiment = test_data.sentiment.cat.codes # + id="rmJj_ZJ2lO2-" #tokenizing and padding from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences tokenizer = Tokenizer(num_words=5000) tokenizer.fit_on_texts(test_data.text.values) maxlen = 100 # + id="bh0v74AVlRsO" #Load the Model model = load_model('cnn_model.h5') # - def preprocessTweets(t): all_stopwords = stopwords.words('english') all_stopwords.remove('not') all_stopwords.remove('no') url = r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''' t = map(lambda x:str(x).lower(), t) #lower case t = 
map(lambda x:re.sub(r"\b[^\s]+@[^\s]+[.][^\s]+\b", "", x), t) #email t = map(lambda x:re.sub(url, "", x), t) #url t = map(lambda x:re.sub(r'(\d[a-zA-Z]{0,})', "", x), t) #numbers [^a-zA-z.,!?/:;\"\'\s] t = map(lambda x:re.sub(r'^\s*|\s\s*', ' ', x).strip(), t) #white space t = map(lambda x:''.join([c for c in x if c not in string.punctuation]), t) #punctuations t = map(lambda x:re.sub(r'[^a-zA-z0-9.,!?/:;\"\'\s]', '', x), t) #special char t = map(lambda x:unicodedata.normalize('NFKD', x).encode('ascii', 'ignore').decode('utf-8', 'ignore'), t) #unicode spell = Speller(lang="en") tokenizer = RegexpTokenizer(r'\w+') t = map(lambda x:tokenizer.tokenize(x), t) #remove punctuation and tokenize t = map(lambda x:[spell(i) for i in x], t) #spell check t = list(t) return [ " ".join([word for word in x[2:] if word not in all_stopwords ]) for x in list(t)] tweets = [] tweetText = [] mapping={0:"Negative",1:"Neutral",2:"Positive"} # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Ys_7gmzGjJKD" outputId="cb4c4758-55eb-477e-9049-29c75c2ee560" # class SentimentAnalysis: def DownloadData(): # authenticating consumerKey ='' consumerSecret ='' accessToken = '' accessTokenSecret = '' auth = tweepy.OAuthHandler(consumerKey, consumerSecret) auth.set_access_token(accessToken, accessTokenSecret) api = tweepy.API(auth) searchTerm = input("Enter Keyword/Tag to search about: ") NoOfTerms = int(input("Enter how many tweets to search: ")) # searching for tweets tweets = tweepy.Cursor(api.search, q=searchTerm, lang = "en").items(NoOfTerms) # Open/create a file to append data to csvFile = open('result.csv', 'a') csvWriter = csv.writer(csvFile) polarity = 0 positive = 0 negative = 0 neutral = 0 tweetText = [] for tweet in tweets: tweetText.append(tweet.text) tweetText = preprocessTweets(tweetText) print(tweetText) for tweet in tweetText: X_test1 = tokenizer.texts_to_sequences([str(cleanTweet(tweet).encode('utf-8'))]) X_test1 = pad_sequences(X_test1, padding='post', maxlen = 
# function to calculate percentage
def percentage(part, whole):
    """Return ``part / whole`` as a percentage string with two decimals.

    Fix: guarded against ``whole == 0`` (previously a ZeroDivisionError when
    the user asked to analyze zero tweets); returns '0.00' in that case.
    """
    if not whole:
        return '0.00'
    return format(100 * float(part) / float(whole), '.2f')
FINAL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + """Running basic code: Importing packages, setting working directory, printing out date""" import os as os os.chdir('C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/') import datetime as dt str(dt.datetime.now()) from sklearn.metrics import confusion_matrix import seaborn as sns #from pandas_ml import ConfusionMatrix data_path = 'C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/data/' output_path = 'C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/' from HPnex import functions as f from HPnex import classification as classify from HPnex import fitting_functions as fitt from HPnex import prediction as pred from HPnex import predict_multi as pred_multi import numpy as np import networkx as nx #np.random.seed(42) from sklearn.ensemble import RandomForestClassifier #from pandas_ml import ConfusionMatrix from matplotlib import pyplot as plt import seaborn as sns import scipy.stats as stats from sklearn import model_selection import math height = 6 font = 12 import sklearn from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.svm import SVC, LinearSVC from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV #from sklearn.cross_validation import from sklearn.model_selection import StratifiedKFold ,cross_val_score, train_test_split, cross_val_predict from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import learning_curve #from pandas_ml import 
ConfusionMatrix from textblob import TextBlob from sklearn.linear_model import SGDClassifier from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score from xgboost import XGBClassifier #### Standardize continuous variables from sklearn.preprocessing import StandardScaler from sklearn import preprocessing #from pandas_ml import ConfusionMatrix from HPnex import functions as f ### Running cross validation scores and predictions from sklearn.model_selection import StratifiedKFold ,cross_val_score, train_test_split, cross_val_predict from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix, precision_recall_fscore_support import matplotlib.style as style style.use('fivethirtyeight') plt.rcParams['font.family'] = 'Times New Roman' sns.set_context("notebook", font_scale=1.30, rc={"lines.linewidth": 0.8}) import itertools as itertools import pandas as pd import joblib IUCN = pd.read_csv(data_path+ '\IUCN Mammals, Birds, Reptiles, and Amphibians.csv',encoding='latin1') IUCN["ScientificName"] = IUCN["Genus"].map(str) +' '+IUCN["Species"] IUCN_list = set(IUCN.ScientificName.unique().tolist()) IUCN.loc[IUCN.ScientificName== 'Homo sapiens', 'Order'] = 'Humans' np.random.seed(42) # - # %%time clf_binary = joblib.load('C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/Binary_model.joblib') clf_multi = joblib.load('C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/Multiclass_model_humans.joblib') # %%time BPnx = nx.read_graphml("C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/BPnx_Order_humans.graphml") Gc_complete_Order = nx.read_graphml("C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/CompleteNetwork_Order_Humans.graphml") ### Encoding categorical variables 
# Load pre-computed virus metadata and fit a label encoder over viral families
# (used downstream by the trained classifiers).
virus_df = pd.read_pickle('C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/virus_df.pkl')
le = preprocessing.LabelEncoder()
le.fit(virus_df.viral_family.unique())

from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()

import pickle

# FIX: the original path used backslashes inside a plain string ("C:\Users\...");
# "\U" is an invalid unicode escape and a SyntaxError on Python 3. Forward
# slashes match every other path in this notebook.
dictionary = pickle.load(open("C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/dictionary_order_humans.pkl", "rb"))
# FIX: dict.iteritems() was removed in Python 3; .items() is equivalent here.
inv_dictionary = {v: k for k, v in dictionary.items()}

# PREDICT detection data; harmonise species names and orders before analysis.
P_data = pd.read_csv('C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/data/network_prediction_data.csv', low_memory=False)
P_data['ScientificName'] = P_data['ScientificNameToLowestKnownRank']
P_data.ScientificNameToLowestKnownRank.replace('piliocolobus tholloni', 'Piliocolobus tholloni', inplace=True)
P_data['Order'] = P_data.Order.str.upper()
P_data.replace('ARTIODACTYLA', 'CETARTIODACTYLA', inplace=True)
#P_data.head()
P_data.shape

# +
#P_data.IDCertainty.value_counts()
# -

# Host records whose field identification cannot be trusted.
uncertain = ['field ID certainty unknown', 'unidentified by barcoding - field ID uncertain']

# +
#P_data[P_data.IDCertainty == 'field ID certainty unknown'].ScientificName.value_counts()

# +
#P_data[P_data.IDCertainty == 'field ID uncertain'].groupby(['Order', 'ScientificName']).size()

# +
#P_data[P_data.IDCertainty == 'field ID uncertain'].groupby(['Order', 'ScientificName']).size().xs('PRIMATES', level =0)
# -

print('There were %s unique viruses detected in P1P2' % (len(P_data.VirusGroup.unique())))
print('There were %s unique novel viruses detected in P1P2' % (len(P_data[P_data.VirusStatus == 'new'].VirusGroup.unique())))
print('There were %s unique novel viruses detected wildlife (excluding humans)' % (len(P_data[(P_data.VirusStatus == 'new') & (P_data.Order != 'HUMANS')].VirusGroup.unique())))
print('There were %s unique novel viruses detected wildlife (excluding humans) correctly identified species' % (len(
    P_data[(P_data.VirusStatus == 'new') & (P_data.Order != 'HUMANS') &
           (~P_data.IDCertainty.isin(uncertain))].VirusGroup.unique())))

# +
#P_data.groupby(['Order', 'VirusStatus']).ScientificNameToLowestKnownRank.nunique()
# -

# Keep only novel viruses found in confidently identified wildlife hosts.
P_data_new_certain = P_data[(P_data.VirusStatus == 'new') & (P_data.Order != 'HUMANS') &
                            (~P_data.IDCertainty.isin(uncertain))]

# One row per novel virus, with the unique host species and orders it was found in.
P = (P_data_new_certain.groupby(['VirusGroup', 'ViralFamily', 'VirusStatus'])
     .agg({'ScientificName': 'unique', 'Order': 'unique'})).reset_index()
P['PubMed'] = P['ScientificName'].str.len()
P = P[P.ViralFamily != 'Bunyaviridae']
P.ViralFamily.replace('Novel', 'Caliciviridae', inplace=True)
new = P

len(new.VirusGroup.dropna().unique())

new.head()

new.ViralFamily.unique()

len(new.ViralFamily.unique())

new['hosts_n'] = new['ScientificName'].str.len()

len(new.VirusGroup.unique())

new['hosts_n'].mean()

new['hosts_n'].std()

# +
# %%time
# Host-range distribution per viral family (boxplot + jittered points),
# annotated with mean and sample size.
qq = new.groupby('ViralFamily').agg({'hosts_n': ['mean', 'count']})
qq.columns = ['mean', 'count']
qq.sort_values('mean', ascending=False, inplace=True)
order_y = qq.index

f, ax = plt.subplots(figsize=(4, 6))
sns.boxplot(data=new, x='hosts_n', y='ViralFamily', ax=ax, order=order_y, color="#fdcdac")
sns.stripplot(x='hosts_n', y='ViralFamily', data=new, size=3.5, jitter=True,
              alpha=0.5, color='#252525', order=order_y, ax=ax)
#for i, txt in enumerate(qq['count'].astype(str).values):
#    ax.text(s=txt, x=0.5, y=order_y[i])
qq['String'] = "mean: " + qq['mean'].round(1).astype(str) + ', n = ' + qq['count'].round(1).astype(str)
#df['bar'].astype(str)+'_'+df['foo']+'_'+df['new']
nobs = qq["String"].values
nobs = [i for i in nobs]
pos = range(len(nobs))
for tick, label in zip(pos, ax.get_yticklabels()):
    ax.text(x=15.005, y=pos[tick], s=nobs[tick])
ax.set_xlabel('Number of host species')
ax.set_ylabel('Family of novel PREDICT viruses')
plt.tight_layout()
#f.savefig(output_path + '/human_links.png', dpi=500)
#f.savefig(output_path + '/human_links.svg')
plt.show()
# -

new.ScientificName.head()

new.iloc[0]

new.iloc[0]['ScientificName']

new.iloc[0]['ViralFamily']

# Single-virus example run of the binary link-prediction model.
i = 0
a = pred.run_predictions(virus=new.iloc[i]['VirusGroup'],
                         hosts=new.iloc[i]['ScientificName'],
                         PubMed=2,
                         ViralFamily='Filoviridae',
                         BPnx=BPnx,
                         Gc_complete=Gc_complete_Order,
                         virus_df=virus_df,
                         clf_binary=clf_binary,
                         plot=True)

# %%time
i = 3
print(new.iloc[i]['VirusGroup'])
print(new.iloc[i]['ScientificName'])
print(new.iloc[i]['PubMed'])
print(new.iloc[i]['ViralFamily'])
a, n_e = pred.run_predictions(virus=new.iloc[i]['VirusGroup'],
                              hosts=new.iloc[i]['ScientificName'],
                              PubMed=new.iloc[i]['PubMed'],
                              ViralFamily=new.iloc[i]['ViralFamily'],
                              BPnx=BPnx,
                              Gc_complete=Gc_complete_Order,
                              virus_df=virus_df,
                              clf_binary=clf_binary,
                              plot=True)
if n_e == 0:
    print('NO PREDICTIONS available for %s as no new connections were made to the known virus network' % (new.iloc[i]['VirusGroup']))

P.iloc[100]['VirusGroup']

new.iloc[i]['ScientificName']

# + cell_style="center"
# %%time
# Single-virus example run of the multiclass model.
i = 31
print(new.iloc[i]['VirusGroup'])
print(new.iloc[i]['ScientificName'])
print(new.iloc[i]['PubMed'])
print(new.iloc[i]['ViralFamily'])
b, n_e = pred_multi.run_predictions(virus=new.iloc[i]['VirusGroup'],
                                    hosts=new.iloc[i]['ScientificName'],
                                    PubMed=new.iloc[i]['PubMed'],
                                    ViralFamily=new.iloc[i]['ViralFamily'],
                                    BPnx=BPnx,
                                    Gc_complete=Gc_complete_Order,
                                    virus_df=virus_df,
                                    clf_multi=clf_multi,
                                    IUCN=IUCN,
                                    inv_dictionary=inv_dictionary,
                                    plot=True)
if n_e == 0:
    print('NO PREDICTIONS available for %s as no new connections were made to the known virus network' % (new.iloc[i]['VirusGroup']))
# -

new.shape

# +
# %%time
# Run both models over every novel virus and pickle the stacked predictions.
RESULT_binary = []
RESULT_multi = []
for index, row in new.reset_index().iterrows():
    if index % 50 == 0:
        print('running predictions for ' + row['VirusGroup'] + ', virus number ' + str(index))
    #for i in range(0, 50):
    binary, n_e = pred.run_predictions(virus=row['VirusGroup'],
                                       hosts=row['ScientificName'],
                                       PubMed=row['PubMed'],
                                       ViralFamily=row['ViralFamily'],
                                       BPnx=BPnx,
                                       Gc_complete=Gc_complete_Order,
                                       virus_df=virus_df,
                                       clf_binary=clf_binary,
                                       plot=False)
    # NOTE(review): n_e from the binary run is overwritten here, so the
    # "skip" decision below is driven by the multiclass run only -- confirm
    # both models always agree on the number of new edges.
    multi, n_e = pred_multi.run_predictions(virus=row['VirusGroup'],
                                            hosts=row['ScientificName'],
                                            PubMed=row['PubMed'],
                                            ViralFamily=row['ViralFamily'],
                                            BPnx=BPnx,
                                            Gc_complete=Gc_complete_Order,
                                            virus_df=virus_df,
                                            clf_multi=clf_multi,
                                            IUCN=IUCN,
                                            inv_dictionary=inv_dictionary,
                                            plot=False)
    if n_e == 0:
        print('NO PREDICTIONS available for %s as no new connections were made to the known virus network' % (row['VirusGroup']))
    else:
        RESULT_binary.append(binary)
        RESULT_multi.append(multi)

r_binary = pd.concat(RESULT_binary, axis=0)
r_multi = pd.concat(RESULT_multi, axis=0)
r_binary.to_pickle('C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/P1_PREDICTIONS_binary.pkl')
r_multi.to_pickle('C:/Users/Falco/Desktop/directory/Missing_links_in_viral_host_communities/outputs/P1_PREDICTIONS_multi.pkl')
# -

r_binary.shape

r_multi.shape

# + active=""
# temp_Gc, to_predict, virus_df_temp, new_network_data = pred_multi.generete_temp_network(
#     virus='New_Ebolavirus',
#     hosts=['PRIMATES', 'Humans'],
#     PubMed=10,
#     ViralFamily='Flaviviridae',
#     BPnx=BPnx_order,
#     Gc_complete=Gc_complete_Order,
#     virus_df=virus_df)

# + active=""
# virus='New_Ebolavirus'
# hosts=['PRIMATES', 'Humans']
# PubMed=10
# ViralFamily='Flaviviridae'
#
# temp_BPnx = BPnx_order.copy()  ## making a copy of original Bipartite network
# #print (temp_BPnx.number_of_nodes())  ## checking number of nodes
# virus_nodes = [x for x,y in temp_BPnx.nodes(data=True) if y['type']=='virus']  # creating list of virus nodes from bipartite network
# df = pd.DataFrame({'Virus2':virus_nodes})  # converting them to a dataframe
# df['Virus1'] = virus  # dataframe with all possible combinations of new virus and viruses from BPnx
# temp_BPnx.add_node(virus, virusname=virus, type='virus', bipartite = 1)
# for h in hosts:
#     print (h)
#     temp_BPnx.add_edge(virus, h)  ## adding new edge to the Bpnxtemp
# -

# list(nx.common_neighbors(temp_BPnx, 'New_Ebolavirus','Monkeypox virus'))

# temp_BPnx.edges('Monkeypox virus')

# temp_BPnx.edges('New_Ebolavirus')

# + active=""
# df.n_shared_hosts.value_counts()

# + active=""
# %%time
# preds = []
# for i in range(0,10):
#     p = pred.run_predictions(virus ='New_Ebolavirus',
#                              hosts = ['Macaca fascicularis'],
#                              PubMed = 2,
#                              ViralFamily = 'Flaviviridae',
#                              BPnx = BPnx,
#                              Gc_complete = Gc_complete,
#                              virus_df = virus_df,
#                              clf_binary=clf_binary,
#                              clf_multi= clf_multi,
#                              inv_dictionary= inv_dictionary,
#                              plot = False)
#     preds.append(p)
#
# prediction_final = pd.concat(preds, axis=0)
# prediction_final.shape
#
# summary = prediction_final.groupby('0_pr').agg({'0_prob':['mean', 'std', 'count']})
# summary.columns = ['mean probability', 'std', 'average number of links per prediction']
# summary['average number of links per prediction'] = summary['average number of links per prediction']/50
# summary.index.rename('Link Type', inplace= True)
# summary
code/predictions_novel_viruses/01_Prediction_novel_viruses_using_both_models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.7 64-bit (''base'': conda)'
#     language: python
#     name: python37764bitbaseconda4c9e592f7d1f479687c3709091ecd74e
# ---

# # Motif densities vs disorder fraction, by category - IUPred *long*
#
# Aggregated IUpred 1.0 *long* scores and derived structural categories are used.
#
# Both Anchor and overall motif densities are plotted.

# +
import json
import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys

sys.path.append('../src/')
import aepathdisorder as aepd

# %matplotlib inline
# -


def _density_scatter(disorder_fraction, motif_density, categories, palette, y_max, title, outfile):
    """Scatter one motif-density column against the disorder fraction,
    coloured by structural category, then save the figure at 300 dpi."""
    fig = plt.figure(figsize=(4, 4))
    ax = fig.add_subplot(111)
    sns.scatterplot(x=disorder_fraction, y=motif_density, hue=categories,
                    palette=palette, ax=ax)
    plt.xlim(-0.01, 1)
    plt.ylim(-0.01, y_max)
    plt.title(title)
    plt.savefig(outfile, dpi=300)


# ---------------------------------------------------------------- EHEC
ehec_anchor = aepd.load_single_table('../data/anchor/EHEC_effectors/EHEC_anchor.table')
ehec_iupred_agg = aepd.load_multi_tables(path_to_folder='../data/iupred_agg-clas/EHEC_effectors/', regex='*long*.table')

_density_scatter(ehec_iupred_agg['disorder_fraction'],
                 ehec_anchor['anchor_motif_density'],
                 ehec_iupred_agg['disorder_category'],
                 ['#1f77b4', '#6ba5cd', '#d62728', '#e47171'],
                 1,
                 'EHEC: Anchor motif density vs disorder fraction',
                 '../figures/EHEC_motif_density_vs_iupred-long_disfrac_by_category.png')

_density_scatter(ehec_iupred_agg['disorder_fraction'],
                 ehec_anchor['overall_motif_density'],
                 ehec_iupred_agg['disorder_category'],
                 ['#1f77b4', '#6ba5cd', '#d62728', '#e47171'],
                 0.5,
                 'EHEC: Overall motif density vs disorder fraction',
                 '../figures/EHEC_overall_motif_density_vs_iupred-long-disfrac_by_category.png')

ehec_anchor[ehec_iupred_agg['disorder_category'] == 'IDP']

ehec_iupred_agg[ehec_iupred_agg['disorder_category'] == 'IDP']

# ---------------------------------------------------------------- EPEC
epec_anchor = aepd.load_single_table('../data/anchor/EPEC_effectors/EPEC_anchor.table')
epec_iupred_agg = aepd.load_multi_tables(path_to_folder='../data/iupred_agg-clas/EPEC_effectors/', regex='*long*table')

_density_scatter(epec_iupred_agg['disorder_fraction'],
                 epec_anchor['anchor_motif_density'],
                 epec_iupred_agg['disorder_category'],
                 ['#6ba5cd', '#1f77b4', '#d62728', '#e47171'],
                 1,
                 'EPEC: Anchor motif density vs disorder fraction',
                 '../figures/EPEC_anchor_motif_density_vs_iupred-long-disfrac_by_category.png')

_density_scatter(epec_iupred_agg['disorder_fraction'],
                 epec_anchor['overall_motif_density'],
                 epec_iupred_agg['disorder_category'],
                 ['#6ba5cd', '#1f77b4', '#d62728', '#e47171'],
                 0.5,
                 'EPEC: Overall motif density vs disorder fraction',
                 '../figures/EPEC_overall_motif_density_vs_iupred-long-disfrac_by_category.png')

epec_iupred_agg[epec_iupred_agg['disorder_category'] == 'IDP']

epec_anchor[epec_iupred_agg['disorder_category'] == 'IDP']

# ---------------------------------------------------------------- CR
cr_anchor = aepd.load_single_table('../data/anchor/CR_effectors/CR_anchor.table')
cr_iupred_agg = aepd.load_multi_tables(path_to_folder='../data/iupred_agg-clas/CR_effectors/', regex='*long*table')

_density_scatter(cr_iupred_agg['disorder_fraction'],
                 cr_anchor['anchor_motif_density'],
                 cr_iupred_agg['disorder_category'],
                 ['#1f77b4', '#6ba5cd', '#e47171', '#d62728'],
                 1,
                 'CR: Anchor motif density vs disorder fraction',
                 '../figures/CR_anchor_motif_density_vs_iupred-long-disfrac_by_category.png')

_density_scatter(cr_iupred_agg['disorder_fraction'],
                 cr_anchor['overall_motif_density'],
                 cr_iupred_agg['disorder_category'],
                 ['#1f77b4', '#6ba5cd', '#e47171', '#d62728'],
                 0.5,
                 'CR: Motif density vs disorder fraction',
                 '../figures/CR_overall_motif_density_vs_iupred-long-disfrac_by_category.png')

cr_iupred_agg[cr_iupred_agg['disorder_category'] == 'IDP']

cr_anchor[cr_iupred_agg['disorder_category'] == 'IDP']
notebooks/motif-density_vs_disorder-fraction_iupred-long_scatterplot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Constants
DATA_PATH = 'C:/Users/moham/Downloads/mohamed/machinefy_project/data/raw/survey_results_public.csv'
EXPORT_PATH = 'C:/Users/moham/Downloads/mohamed/machinefy_project/data/preprocessing/1_preprocessing_df.pkl'

# Free-text answers encoding ordinal values; mapped to numbers before the
# columns are cast to float32.
REPLACE_DICT = {
    'Age1stCode': {'Younger than 5 years': 4, 'Older than 85': 86},
    'YearsCode': {'Less than 1 year': 0, 'More than 50 years': 51},
    'YearsCodePro': {'Less than 1 year': 0, 'More than 50 years': 51},
}

# +
# Load packages
import pandas as pd
import numpy as np
import logging
import pickle

pd.set_option('display.max_columns', 1000)
pd.set_option('display.max_rows', 1000)
# -


# Functions
def split_answers(data_series, delimiter=';'):
    """Split multi-answer strings into lists of single answers.

    Parameters
    ----------
    data_series : pd.Series
        String series where one cell may hold several answers joined by
        ``delimiter``.
    delimiter : str
        Separator between answers. Defaults to ``';'``.

    Returns
    -------
    pd.Series
        The original series, untouched, if no cell contains the delimiter;
        otherwise a series where every cell is a list of answers and NaN
        cells become empty lists.
    """

    def is_splittable(pd_series, delim):
        """Boolean mask of cells that contain the delimiter."""
        return pd_series.str.contains(delim)

    def split_answer(pd_series, delim):
        """Split every cell on the delimiter."""
        return pd_series.str.split(delim)

    # FIX: the original defined two inner functions BOTH named
    # ``is_splittable`` (the second shadowed the first) and then called
    # ``split_answers`` recursively with unchanged arguments, which never
    # terminates. The intended flow is: check once, then split once.
    if not is_splittable(data_series, delimiter).any():
        return data_series

    modified_series = split_answer(data_series, delimiter)

    # Replace NAs with empty lists so downstream code can iterate safely.
    mask_null = modified_series.isnull()
    modified_series.loc[mask_null] = modified_series.loc[mask_null].apply(lambda x: [])
    return modified_series


def _run_preprocessing():
    """Read the raw survey CSV, normalise ordinal columns, split
    multi-answer columns and export the result as a pickle."""
    raw_df = pd.read_csv(DATA_PATH)
    df = raw_df.copy()

    # 1 - Replace free-text ordinal values and parse to float
    for col, replacement in REPLACE_DICT.items():
        df[col] = df[col].replace(replacement).astype(np.float32)

    # 2 - Split multiple answers
    object_cols = df.select_dtypes(include='object').columns.tolist()
    for col in object_cols:
        df[col] = split_answers(df[col])

    # Export data
    df.to_pickle(EXPORT_PATH)


# Guarded so the module can be imported (e.g. to test split_answers) without
# touching the filesystem; inside a notebook __name__ is '__main__', so the
# cell-by-cell behaviour is unchanged.
if __name__ == '__main__':
    _run_preprocessing()
notebooks/01_preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Hbw-GRN-lbpO" colab_type="text" # ### Steps to load Models, make predictions and view predictions # # Notice: You may encounter an error while loading the model due to multi GPU compatibility: https://github.com/keras-team/keras/issues/9562 # # If you see such an error, you can always use Colab and train a new model, for which the code is provided in the notebook. # # Step 1- Run the first 12 code cells of the notebook. # # Step 2- Go to the last code cell of the notebook. This cell is to load model weights. # # Step 3- Go to second last code cell of the notebook. This cell is to make predictions and load them. # + [markdown] id="gQZ-wrpwqkpw" colab_type="text" # Step 1 note: Don't forget to change pickle paths. This is the path your dataset is loaded from. # # Step 2 note: Don't forget to modify "model_path" variable so you can use the shared model weight files. This is the path where your model weights reside in. # # Step 3 Note: Don't forget to modify "plot_path" variable. This variable determines where to save the plot. 
# + [markdown]
# ### Links of the shared data and weight files
#
# Weight files --
# 250 epochs: https://drive.google.com/drive/folders/1xUVIkAGRHxXUhNj-V2-q6M7pnXSn_V4e?usp=sharing
# 500 epochs: https://drive.google.com/drive/folders/1nPecL3pfFkPDTQTnz3PofWWtdk-u3IZF?usp=sharing
#
# Pickle files --
# Half faces: ground truths https://drive.google.com/file/d/1-JVnG_wVJR3VgAwi6-Hhu2C-ZAyQ2-_9/view?usp=sharing
#             occluded images https://drive.google.com/file/d/1-7E0x-UGFjotUH8UJAWruM9Y0rwEzYzV/view?usp=sharing
# Unrestricted occlusions: ground truths https://drive.google.com/file/d/19li26wV60jhrf8UtUhGH6xuocDqiHqPG/view?usp=sharing
#                          occluded images https://drive.google.com/file/d/179YgtbT7A0YFJsQyFULbQPgPZzdmnySA/view?usp=sharing

# + [markdown]
# ### The Code
# Step 1 -- run the first 12 code cells of the notebook.

# +
# Keras / TensorFlow 1.x model-building imports plus plotting and pickling.
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import keras
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout, LeakyReLU, Conv2DTranspose, ReLU
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Reshape
from tensorflow.keras import layers
import datetime
from keras import initializers

# Cap GPU memory usage and pin to the first device (TF 1.x session config).
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9  # fraction of memory
config.gpu_options.visible_device_list = "0"
set_session(tf.Session(config=config))

# +
# Mount Google Drive so the pickled datasets and model weights are reachable.
from google.colab import drive
drive.mount("/content/drive", force_remount=False)

# + [markdown]
# Step 1 note: Don't forget to change the pickle paths below -- this is where
# your dataset is loaded from.

# +
# Occluded inputs (x) and their ground-truth faces (y).
path = '/content/drive/My Drive/biometrics_data/'
xname = '1_half_face_occluded.pickle'
yname = '1_half_face_labels.pickle'

# FIX: the original opened both pickle files and never closed them (the
# second open also clobbered the first handle). `with` closes each file
# deterministically.
with open(os.path.join(path, xname), "rb") as pickle_in:
    x = pickle.load(pickle_in)
with open(os.path.join(path, yname), "rb") as pickle_in:
    y = pickle.load(pickle_in)

print(x.shape)
print(y.shape)
# +
# Downsample to 64x64: at the native 200x200 resolution the GPU runs out of
# memory during training.
from skimage.transform import resize

x = resize(x, (len(x), 64, 64, 1), anti_aliasing=False)
y = resize(y, (len(y), 64, 64, 1), anti_aliasing=False)

# Shapes after the resize.
print(x.shape)
print(y.shape)

# Visual sanity check: the occluded image and its ground truth must show the
# same face.
fig = plt.figure(figsize=(6, 6))
fig.add_subplot(1, 2, 1)
plt.imshow(x[1, :, :, 0], cmap="gray")
fig.add_subplot(1, 2, 2)
plt.imshow(y[1, :, :, 0], cmap="gray")

# +
# Pixel-level check that the occluded and ground-truth arrays are aligned.
print(x[0, 0:5, 0:5, 0])
print(y[0, 0:5, 0:5, 0])

# +
def creategen():
    """Build the generator: three strided Conv2D blocks down-sample the
    occluded face, three Conv2DTranspose blocks up-sample back to a
    64x64x1 tanh image."""
    model = Sequential()
    # --- encoder ---
    model.add(Conv2D(64, (5, 5), strides=(2, 2), input_shape=x.shape[1:],
                     padding="SAME", kernel_initializer='random_normal'))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Dropout(0.3))
    model.add(Conv2D(128, (5, 5), strides=(2, 2), padding="SAME",
                     kernel_initializer='random_normal'))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Dropout(0.3))
    model.add(Conv2D(256, (5, 5), strides=(2, 2), padding="SAME",
                     kernel_initializer='random_normal'))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Dropout(0.3))
    # A Flatten/Dense bottleneck could be inserted here; the network
    # currently goes straight from the conv stack to the decoder.
    # --- decoder ---
    model.add(Conv2DTranspose(128, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                              use_bias=False, activation="tanh"))
    return model


generator = creategen()

# +
def createdisc():
    """Build the discriminator: three strided Conv2D/LeakyReLU blocks
    followed by a single-logit Dense head."""
    model = Sequential()
    model.add(Conv2D(64, (5, 5), strides=(2, 2), input_shape=x.shape[1:],
                     padding="SAME", kernel_initializer='random_normal'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.3))
    model.add(Conv2D(128, (5, 5), strides=(2, 2), padding="SAME",
                     kernel_initializer='random_normal'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.3))
    model.add(Conv2D(256, (5, 5), strides=(2, 2), padding="SAME",
                     kernel_initializer='random_normal'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.3))
    model.add(Flatten())
    model.add(Dense(1))
    return model


discriminator = createdisc()

# +
generator.summary()
# +
# The discriminator's learning rate is 4x the generator's: the usual advice
# is disc LR = 2x gen LR, and here the generator is trained twice per step
# (once on the GAN loss, once on the L2 loss).
opt_disc = Adam(lr=0.00004)
discriminator.trainable = True
discriminator.compile(loss="binary_crossentropy", optimizer=opt_disc)
discriminator.summary()

# +
opt_gen = Adam(lr=0.00001)
generator.compile(loss='mean_squared_error', optimizer=opt_gen)
generator.summary()

# +
def creategan(generator, discriminator):
    """Stack the generator and a frozen discriminator into the adversarial
    model used for the GAN loss."""
    stacked = Sequential()
    stacked.add(generator)
    discriminator.trainable = False
    stacked.add(discriminator)
    return stacked


gan = creategan(generator, discriminator)

# +
opt_gan = Adam(lr=0.00001)
gan.compile(loss="binary_crossentropy", optimizer=opt_gan)
gan.summary()

# +
def save_models(gan, discriminator, generator, path, epoch):
    """Save weights AND full models for all three networks, timestamped.

    When reloading, only the weight files are needed because the
    architectures are rebuilt by the cells above.
    """
    datenow = str(datetime.datetime.now().strftime('%m-%d-%H:%M'))
    gan.save_weights(os.path.join(path, "{0}_wgan_{1}.h5".format(epoch, datenow)))
    gan.save(os.path.join(path, "{0}_mgan_{1}.h5".format(epoch, datenow)))
    discriminator.save_weights(os.path.join(path, "{0}_wd_{1}.h5".format(epoch, datenow)))
    discriminator.save(os.path.join(path, "{0}_md_{1}.h5".format(epoch, datenow)))
    generator.save_weights(os.path.join(path, "{0}_wg_{1}.h5".format(epoch, datenow)))
    generator.save(os.path.join(path, "{0}_mg_{1}.h5".format(epoch, datenow)))


# +
def test(x, y):
    """Return (gan_loss, disc_loss) on a held-out batch.

    The discriminator is evaluated on fakes (label 0) stacked on top of the
    real images (label 1); the stacked GAN is evaluated against all-ones
    labels, i.e. "fool the discriminator".
    """
    gan_inp_t = x
    gan_label_t = np.ones([len(gan_inp_t)])
    disc_label_t = np.zeros([len(gan_inp_t) * 2])
    disc_label_t[len(gan_inp_t):] = 1
    gen_predict_t = generator.predict(gan_inp_t)
    disc_inp_t = np.concatenate((gen_predict_t, y), axis=0)
    disc_predict_t = discriminator.predict(disc_inp_t)
    d_loss_t = discriminator.test_on_batch(disc_inp_t, disc_label_t)
    gan_loss_t = gan.test_on_batch(gan_inp_t, gan_label_t)
    return (gan_loss_t, d_loss_t)
# In previous versions this pre-trained the discriminator in each training
# step; it is unused in this version and kept for reference.
def pretrain(x, y, nepoch):
    """Warm up the discriminator on fake+real double batches for `nepoch`
    epochs while only *evaluating* the GAN loss."""
    gan_inp = x
    gan_label = np.ones(64)
    disc_label = np.zeros(64 * 2)
    disc_label[64:] = 1
    for epoch in range(nepoch):
        for batch_ctr in range(65):
            gen_predict = generator.predict(gan_inp[batch_ctr * 64:(batch_ctr + 1) * 64])
            disc_inp = np.concatenate((gen_predict, y[batch_ctr * 64:(batch_ctr + 1) * 64]), axis=0)
            disc_predict = discriminator.predict(disc_inp)
            d_loss = discriminator.train_on_batch(disc_inp, disc_label)
            gan_loss = gan.test_on_batch(gan_inp[batch_ctr * 64:(batch_ctr + 1) * 64], gan_label)
        (tgan, tdisc) = test(x[4160:], y[4160:])
        print("Pretrain Epoch Gan Loss: {0} Disc Loss: {1}".format(gan_loss, d_loss))
        print("Pretrain Epoch Test Gan Loss: {0} Test Disc Loss: {1} \n\n\n".format(tgan, tdisc))


# +
# Training uses isolated mini-batches to slow the discriminator down, plus
# smooth/noisy labels proposed by Salimans et al. 2016.
# Reference: https://github.com/soumith/ganhacks
def train(x, y, nepoch, model_save_path="/content/drive/My Drive/biometrics_data/models/dummymodel"):
    """Adversarial training loop.

    Parameters
    ----------
    x, y : arrays of occluded inputs and ground-truth faces; the first
        4160 images (65 batches of 64) are training data, the rest test.
    nepoch : number of epochs to run.
    model_save_path : directory where checkpoints are written every 500th
        epoch via save_models().
    """
    gan_inp = x
    gan_label = np.ones(64)
    # Double-batch real/fake labels; superseded per-batch by the noisy
    # labels below, kept for parity with the original initialisation.
    disc_label = np.zeros(64 * 2)
    disc_label[64:] = 1
    # NOTE: the original also created an unused tf.Session() here; removed.
    for epoch in range(nepoch):
        for batch_ctr in range(65):  # 65 batches x 64 images = 4160 training images
            batch = slice(batch_ctr * 64, (batch_ctr + 1) * 64)
            # Generator prediction for this mini-batch.
            gen_predict = generator.predict(gan_inp[batch])
            # Mini-batch isolation + label smoothing: the discriminator sees
            # only fakes (noisy labels around 0) on even epochs and only
            # reals (noisy labels around 1) on odd epochs.
            if epoch % 2 == 0:
                disc_inp = gen_predict
                #disc_label = np.zeros(64)  # non-noisy alternative
                disc_label = np.random.normal(loc=0, scale=0.10, size=64)
            else:
                disc_inp = y[batch]
                #disc_label = np.ones(64)  # non-noisy alternative
                disc_label = np.random.normal(loc=1, scale=0.10, size=64)
            # Ground-truth targets for the generator's L2 step.
            gen_label = y[batch]
            # One training step per network; keep the losses for printing.
            d_loss = discriminator.train_on_batch(disc_inp, disc_label)
            gan_loss = gan.train_on_batch(gan_inp[batch], gan_label)
            gen_loss = generator.train_on_batch(gan_inp[batch], gen_label)
        # FIX: the original condition was `epoch+1%500 == 0`, which parses as
        # `epoch + (1 % 500) == 0` and is never true for epoch >= 0, so
        # checkpoints were NEVER saved. Parenthesised to save every 500th epoch.
        if (epoch + 1) % 500 == 0:
            save_models(gan, discriminator, generator, model_save_path, epoch + 1)
            print("MODEL SAVED")
        # Test images are the images after the 4160th image (311 test images).
        (tgan, tdisc) = test(x[4160:], y[4160:])
        print("Epoch: {2} Gan Loss: {0} Disc Loss: {1} Gen Loss: {3}".format(gan_loss, d_loss, epoch + 1, gen_loss))
        print("Epoch: {2} Test Gan Loss: {0} Test Disc Loss: {1} \n\n\n".format(tgan, tdisc, epoch + 1))


# +
# Sanity checks: array lengths, and value ranges to confirm normalisation.
print(len(x), len(y))
print(x.max(), x.min())
print(y.max(), y.min())

# + [markdown]
# If you would like to run trainings, don't forget to modify the
# "model_save_path" variable.
"""opt_disc = Adam(lr=0.00004) discriminator.trainable = True discriminator.compile(loss = "binary_crossentropy", optimizer = opt_disc) discriminator.summary() opt_gen = Adam(lr=0.00001) generator.compile(loss='mean_squared_error', optimizer = opt_gen) generator.summary() opt_gan = Adam(lr=0.00001) gan.compile(loss = "binary_crossentropy", optimizer = opt_gan) gan.summary()""" # + id="J4yECos4QNIe" colab_type="code" outputId="71efe16a-5609-41ec-baa6-789ab1853646" colab={"base_uri": "https://localhost:8080/", "height": 11494} # To train, don't forget to: # -compile the models # - run the necessary cells to declare the necessary functions such as "train" and "save_models" and "test" #If you train models from beginning, just run the cells of the notebook in order. #If you want to further train loaded models, use the previous cell to compile models. model_save_path = "/content/drive/My Drive/biometrics_data/models/half_face" batch_size = 64 train(x,y,504,model_save_path) # + [markdown] id="fC6S0LiJqWCW" colab_type="text" # Step 3 - Make predictions and view them. # # Step 3 Note: Don't forget to modify "plot_path" variable. This variable determines where to save the plot. # + id="lzF8GvhT1_6C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 550} outputId="8d91c2cb-3d9c-464a-984b-06d65036dd84" #Making predictions and drawing them. 
#First row: Occluded images #Second row: Ground Truth images #Third row: Predictions import datetime plot_path = "/content/drive/My Drive/biometrics_data/plots/half_face" a = 4160 b = 4170 pred=generator.predict(x[a:b]) fig = plt.figure(figsize = (20,10)) for ctr in range(10): fig.add_subplot(3,10,ctr+1) plt.imshow(np.reshape(x[a + ctr],(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(10 + ctr + 1)) plt.imshow(np.reshape(y[a + ctr]/255,(64,64)), cmap = "gray") for ctr in range(10): fig.add_subplot(3,10,(20 + ctr + 1)) plt.imshow(np.reshape(pred[ctr],(64,64)), cmap = "gray") #plt.savefig(os.path.join(plot_path,str(datetime.datetime.now().strftime('%m-%d-%H:%M')))) # + [markdown] id="ka-RX2vblDFf" colab_type="text" # Step 2- Load weights here: # (Don't forget to modify the model_path) # + [markdown] id="cRUuhUhxXeNW" colab_type="text" # You may encounter an error while loading the model due to multi GPU compatibility: https://github.com/keras-team/keras/issues/9562 # # If you see such an error, you can always use Colab and train a new model. (Don't forget to use GPU.) # # The code to train the model exists in this notebook. # + colab_type="code" id="3OxJnJn5QMLr" colab={"base_uri": "https://localhost:8080/", "height": 1302} outputId="07af0bb7-e676-4a9e-fc40-453ded49871a" model_path = "/content/drive/My Drive/biometrics_data/models/last_experiment_500/" epoch = 500 #generator = creategen() generator.load_weights(os.path.join(model_path,"{0}_wg.h5".format(epoch))) #discriminator = createdisc() discriminator.load_weights(os.path.join(model_path,"{0}_wd.h5".format(epoch))) #gan = creategan(generator,discriminator) gan.load_weights(os.path.join(model_path,"{0}_wgan.h5".format(epoch))) # + id="BM3E25Ep2zjF" colab_type="code" colab={}
Face Completion.ipynb
# --- # jupyter: # jupytext: # formats: ipynb # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Generate a RestAPI for Metadata with Django # # <NAME> # # Helmholtz Coastal Data Center (HCDC) # Helmholtz-Zentrum Geesthacht, Institute of Coastal Research # + [markdown] slideshow={"slide_type": "slide"} # ## What's the problem with CSW? # # - it's XML! # - too complicated to use for scientists or web development # # &rarr; Provide metadata through RestAPI # + [markdown] slideshow={"slide_type": "fragment"} # ### Examples: # - [CERA (DKRZ)](https://cera-www.dkrz.de/WDCC/ui/cerasearch/cerarest/entry?acronym=DKRZ_LTA_706_ds00002) # - [O2A (AWI)](https://dashboard.awi.de/de.awi.data.ws/api/) # + [markdown] slideshow={"slide_type": "slide"} # ## What is a RestAPI # # - [official definition at restfulapi.net](https://restfulapi.net/) # - common implementation: # - provide JSON-formatted data (or other formats) via `GET` request # - alter data on the server via `PUT` request # - do something via `POST` request # + [markdown] slideshow={"slide_type": "slide"} # ## Purpose of this talk # # Provide a simple and basic example from scratch to show the functionality of serving metadata via RestAPI. 
# + [markdown] slideshow={"slide_type": "fragment"} # ### Requirements to run this notebook # # - `linux` or `osx` # - `django` # - `djangorestframework`, for the rest api # - `uritemplate`, for generating an openAPI schema # # and for generating a graph of the database_ # - `graphviz` # - `django-extensions` # # Install everything via: # # ```bash # conda create -n django-metadata -c conda-forge django-extensions graphviz uritemplate djangorestframework # ``` # + slideshow={"slide_type": "skip"} # rm -r django-metadata-api # + [markdown] slideshow={"slide_type": "slide"} # ## Initialize the django-metadata-api project # + slideshow={"slide_type": "-"} # !mkdir django-metadata-api # !django-admin startproject django_metadata_api django-metadata-api # - # cd django-metadata-api # + [markdown] slideshow={"slide_type": "fragment"} # ### Create a django app in this project # # We call it `api`. Here we will do all our work. # - # !python manage.py startapp api # + [markdown] slideshow={"slide_type": "subslide"} # ## Make the first migration # # This will generate an sqlite3 database (but we could also use something else...) # - # !python manage.py migrate # + [markdown] slideshow={"slide_type": "subslide"} # ## The django project structure # - # !tree # + [markdown] slideshow={"slide_type": "slide"} # ## The project settings # - # !cat django_metadata_api/settings.py # + [markdown] slideshow={"slide_type": "subslide"} # ## Add the necessary apps to the settings # + # %%writefile -a django_metadata_api/settings.py INSTALLED_APPS += [ "api", "rest_framework", "django_extensions", ] # + [markdown] slideshow={"slide_type": "slide"} # ## Django models # # Each `Model` (inherits the `django.db.models.Model` class) defines a table in our (sqlite3) database. The `fields` of each model correspond to the columns of the database table. # + [markdown] slideshow={"slide_type": "fragment"} # Initially, there are no models defined. 
# - # !cat api/models.py # + [markdown] slideshow={"slide_type": "subslide"} # ## Creating models # # So let's define some. # + # %%writefile api/models.py from django.db import models class Institution(models.Model): """A research institution.""" name = models.CharField( max_length=250, help_text="Name of the institution", ) abbreviation = models.CharField( max_length=10, help_text="Abbreviation of the institution" ) def __str__(self): return f"{self.name} ({self.abbreviation})" # + [markdown] slideshow={"slide_type": "subslide"} # ## Django models # # and some more # + # %%writefile -a api/models.py class Person(models.Model): """A person.""" first_name = models.CharField( max_length=50, help_text="First name of the person" ) last_name = models.CharField( max_length=50, help_text="Last name of the person" ) email = models.EmailField( max_length=255, help_text="Email address of the person.", ) institution = models.ForeignKey( Institution, on_delete=models.PROTECT, help_text="Research institution of the person." ) def __str__(self): return f"{self.first_name} {self.last_name} ({self.institution.abbreviation})" # + [markdown] slideshow={"slide_type": "subslide"} # ## Django models # # and some more # + # %%writefile -a api/models.py class Project(models.Model): """A research project.""" name = models.CharField( max_length=250, help_text="Full name of the project", ) abbreviation = models.CharField( max_length=50, help_text="Abbreviation of the project." ) pi = models.ForeignKey( Person, on_delete=models.PROTECT, help_text="Principal investigator of the model." 
) def __str__(self): return f"{self.name} ({self.abbreviation})" # + [markdown] slideshow={"slide_type": "subslide"} # ## Django models # # and some more # + # %%writefile -a api/models.py class Dataset(models.Model): """A dataset output of a model.""" class DataSource(models.TextChoices): """Available data sources.""" model = "MODEL", "derived from a climate model" satellite = "SATELLITE", "derived from satellite observation" name = models.CharField( max_length=200, help_text="Name of the dataset." ) source_type = models.CharField( max_length=20, choices=DataSource.choices, help_text="How the data has been derived." ) project = models.ForeignKey( Project, on_delete=models.CASCADE, help_text="The project this dataset belongs to." ) contact = models.ForeignKey( Person, on_delete=models.PROTECT, help_text="The contact person for this dataset", ) def __str__(self): return f"{self.name} ({self.project.abbreviation})" # + [markdown] slideshow={"slide_type": "subslide"} # ## Django models # # and some more # + # %%writefile -a api/models.py class Parameter(models.Model): """A standardized parameter in our database.""" name = models.CharField( max_length=200, help_text="Name of the dataset." ) unit = models.CharField( max_length=20, help_text="Units of the parameter." ) long_name = models.CharField( max_length=250, help_text="Description of the parameter" ) dataset = models.ForeignKey( Dataset, help_text="The dataset that contains this parameter", related_name="parameters", on_delete=models.CASCADE, ) def __str__(self): return f"{self.name} ({self.unit})" # + [markdown] slideshow={"slide_type": "subslide"} # ## Getting an overview # # `django-extensions` provide the functionality to show a graph of our models. 
So let's do this # - # !python manage.py graph_models api > apigraph.dot # !dot apigraph.dot -Tsvg -o apigraph.svg # + slideshow={"slide_type": "fragment"} from IPython.display import SVG SVG(filename="apigraph.svg") # + [markdown] slideshow={"slide_type": "subslide"} # ## Update the database # # So far, we just wrote some python. Now tell Django to register our models in the (sqlite3) database: # - # !python manage.py makemigrations # creates the migration scripts # !python manage.py migrate # creates the tables in the database # + [markdown] slideshow={"slide_type": "slide"} # ## Add serializers to our models # # A serializer transforms your model into JSON (and more). # + # %%writefile api/serializers.py from rest_framework import serializers from api import models class InstitutionSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Institution fields = '__all__' # + [markdown] slideshow={"slide_type": "subslide"} # ## And serializers for the other models # + slideshow={"slide_type": "-"} # %%writefile -a api/serializers.py class PersonSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Person fields = '__all__' class ProjectSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Project fields = '__all__' class DatasetSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Dataset fields = '__all__' class ParameterSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = models.Parameter fields = '__all__' # + [markdown] slideshow={"slide_type": "slide"} # ## Generate the viewset for the models # # A viewset (comparable to an HTML webpage) tells django, you to display and update the serialized models. 
# + # %%writefile api/views.py from rest_framework import viewsets from rest_framework import permissions from api import models, serializers class InstitutionViewSet(viewsets.ModelViewSet): """View the institutions""" queryset = models.Institution.objects.all() serializer_class = serializers.InstitutionSerializer # + [markdown] slideshow={"slide_type": "subslide"} # ## And viewsets for the other models # + slideshow={"slide_type": "-"} # %%writefile -a api/views.py class PersonViewSet(viewsets.ModelViewSet): """View the institutions""" queryset = models.Person.objects.all() serializer_class = serializers.PersonSerializer class ProjectViewSet(viewsets.ModelViewSet): """View the institutions""" queryset = models.Project.objects.all() serializer_class = serializers.ProjectSerializer class DatasetViewSet(viewsets.ModelViewSet): """View the institutions""" queryset = models.Dataset.objects.all() serializer_class = serializers.DatasetSerializer class ParameterViewSet(viewsets.ModelViewSet): """View the institutions""" queryset = models.Parameter.objects.all() serializer_class = serializers.ParameterSerializer # + [markdown] slideshow={"slide_type": "slide"} # ## Define the router # # We generated the webpages, but did not tell anything about where to find them. # # This is the job of the `router`. # + # %%writefile api/urls.py from django.urls import include, path from rest_framework import routers from api import views router = routers.DefaultRouter() router.register(r'institutions', views.InstitutionViewSet) router.register(r'persons', views.PersonViewSet) router.register(r'projects', views.ProjectViewSet) router.register(r'datasets', views.DatasetViewSet) router.register(r'parameters', views.ParameterViewSet) # Wire up our API using automatic URL routing. # Additionally, we include login URLs for the browsable API. 
urlpatterns = [ path('', include(router.urls)), ] # + [markdown] slideshow={"slide_type": "slide"} # ## Add our `api` app to the main router file # # We now need to add the urls of our API to the main project. # - # cat django_metadata_api/urls.py # + [markdown] slideshow={"slide_type": "subslide"} # ## Add our api urls # + slideshow={"slide_type": "-"} # %%writefile -a django_metadata_api/urls.py from django.urls import include urlpatterns.append(path('', include("api.urls"))) # + [markdown] slideshow={"slide_type": "slide"} # ## Starting django # # Now run # # ```bash # python manage.py runserver # ``` # # in an external terminal to start the development server and head over to http://127.0.0.1:8000 # + [markdown] slideshow={"slide_type": "slide"} # ## Add the parameters to the dataset # + # %%writefile -a api/serializers.py class DatasetSerializer(serializers.HyperlinkedModelSerializer): parameters = ParameterSerializer(many=True) class Meta: model = models.Dataset fields = '__all__' # - # Checkout the changes at http://127.0.0.1:8000/datasets # + [markdown] slideshow={"slide_type": "slide"} # ## Enable the admin interface # - # !cat api/admin.py # + # %%writefile api/admin.py from django.contrib import admin from api import models class ParameterInline(admin.TabularInline): model = models.Parameter @admin.register(models.Dataset) class DatasetAdmin(admin.ModelAdmin): """Administration class for the :model:`api.Dataset` model.""" inlines = [ParameterInline] search_fields = ["name", "project"] # + [markdown] slideshow={"slide_type": "subslide"} # ## Create a user to access the admin interface # # Open a terminal and run # # ``` # python manage.py createsuperuser --email <EMAIL> --username admin # ``` # + [markdown] slideshow={"slide_type": "fragment"} # And checkout http://127.0.0.1:8000/admin # + [markdown] slideshow={"slide_type": "slide"} # ## Restrict PUT and POST to authenticated users # # Djangos Rest framework comes with a login and logout functionality 
that we need to insert into our projects `urls.py` router file. # + # %%writefile -a django_metadata_api/urls.py urlpatterns.insert( -2, path('api-auth/', include('rest_framework.urls', namespace='rest_framework')) ) # + [markdown] slideshow={"slide_type": "subslide"} # ## Add the permission to our viewsets # + # %%writefile -a api/views.py for view in [PersonViewSet, DatasetViewSet, InstitutionViewSet, ProjectViewSet, ParameterViewSet]: view.permission_classes = [permissions.IsAuthenticatedOrReadOnly] # + [markdown] slideshow={"slide_type": "fragment"} # Now you'll see that you cannot make POST requests anymore to http://127.0.0.1:8000/datasets (for instance). # # Login at http://127.0.0.1:8000/api-auth/login and it will be possible again. # + [markdown] slideshow={"slide_type": "slide"} # ## Export the schema # # Now we can export our database schema to show others, how our RestAPI is structured. For this purpose, we add a new view to our api. # + # %%writefile -a api/urls.py from rest_framework.schemas import get_schema_view urlpatterns.append( path('schema', get_schema_view( title="Metadata Portal", description="API for retrieving metadata", version="1.0.0", urlconf='api.urls', ), name='openapi-schema'), ) # + [markdown] slideshow={"slide_type": "fragment"} # Head over to http://127.0.0.1:8000/schema to see the results # - # ## The END # # That's it. Now you have a well-defined and functional RestAPI with just a few lines of code!
django-rest-metadata-demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #Importing the required libraries import pandas as pd # ## Conservative Hybrid Fund # # These mutual funds invest in both Stocks and Debt/Bonds. However focus on bonds is higher with 75-90% of investments in bonds and rest in stocks. # ## Exctracting Conservative Hybrid Mutual Fund's Historical Investment Returns Data # # Data in this table: Get Absolute historical returns for ₹1000 investment. If 1Y column value is 1234.5 that means, your ₹1000 investment 1 year back would have grown to ₹1234.5. ch_lump_sum_rtn = pd.read_html( "https://www.moneycontrol.com/mutual-funds/performance-tracker/returns/conservative-hybrid-fund.html") df1 = pd.DataFrame(ch_lump_sum_rtn[0]) #Renaming historical returns column names df1.rename({'1W': '1W_RTN(%)', '1M': '1M_RTN(%)', '3M': '3M_RTN(%)', '6M': '6M_RTN(%)', 'YTD': 'YTD_RTN(%)', '1Y': '1Y_RTN(%)', '2Y': '2Y_RTN(%)', '3Y': '3Y_RTN(%)', '5Y': '5Y_RTN(%)', '10Y': '10Y_RTN(%)' }, axis=1, inplace=True) print("Shape of the dataframe:", df1.shape) df1.head() # ## Exctracting Conservative Hybrid Mutual Fund's Monthly Returns Data # # Data in this table: Get monthly returns. If Jan month column value is 5.4% that means, fund has given 5.4% returns in Jan month. 
ch_monthly_rtn = pd.read_html(
    "https://www.moneycontrol.com/mutual-funds/performance-tracker/monthly-returns/conservative-hybrid-fund.html")
df2 = pd.DataFrame(ch_monthly_rtn[0])

# +
# Rename the monthly-return columns of df2.  The original cell renamed df1 by
# mistake, and its rename dictionary repeated a single hard-coded month key
# ("Apr'21"), so at most one column could ever be relabelled.  The month
# columns change every time the page is scraped, so match them by pattern
# instead of hard-coding them.
import re

_month_col = re.compile(r"^[A-Z][a-z]{2}'\d{2}$")  # e.g. "Apr'21"
df2.rename(columns={c: c + "(%)" for c in df2.columns if _month_col.match(str(c))},
           inplace=True)
df2.rename({'MTD': 'MTD_RTN(%)'}, axis=1, inplace=True)

print("Shape of the dataframe:", df2.shape)
df2.head()
# -

# ## Extracting Conservative Hybrid Mutual Fund's Quarterly Returns Data
#
# Data in this table: Get quarterly returns. If Q1 column value is 5.4% that means, fund has given 5.4% returns from 1st Jan to 31st Mar.

ch_quarterly_rtn = pd.read_html(
    "https://www.moneycontrol.com/mutual-funds/performance-tracker/quarterly-returns/conservative-hybrid-fund.html")
df3 = pd.DataFrame(ch_quarterly_rtn[0])

print("Shape of the dataframe:", df3.shape)
df3.head()

# ## Extracting Conservative Hybrid Mutual Fund's Annual Investment Returns Data
#
# Data in this table: Get annual returns. If 2018 year column value is 5.4% that means, fund has given 5.4% returns from 1st Jan to 31st Dec/Last date.

ch_annual_rtn = pd.read_html(
    "https://www.moneycontrol.com/mutual-funds/performance-tracker/annual-returns/conservative-hybrid-fund.html")
df4 = pd.DataFrame(ch_annual_rtn[0])

# +
# Append "_RTN(%)" to the yearly-return columns.
df4.rename({'2020': '2020_RTN(%)',
            '2019': '2019_RTN(%)',
            '2018': '2018_RTN(%)',
            '2017': '2017_RTN(%)',
            '2016': '2016_RTN(%)',
            '2015': '2015_RTN(%)',
            '2014': '2014_RTN(%)',
            '2013': '2013_RTN(%)',
            '2012': '2012_RTN(%)',
            '2011': '2011_RTN(%)',
            '2010': '2010_RTN(%)'
            }, axis=1, inplace=True)

print("Shape of the dataframe:", df4.shape)
df4.head()
# -

# ## Extracting Conservative Hybrid Mutual Fund's Rank Within Category Data
#
# Data in this table: Get performance rank within category. If 1Y column value is 3/45 that means, Fund ranked 3rd in terms of performance out of 45 funds in that category.

ch_rank_in_category = pd.read_html(
    "https://www.moneycontrol.com/mutual-funds/performance-tracker/ranks/conservative-hybrid-fund.html")
df5 = pd.DataFrame(ch_rank_in_category[0])

# +
# Append "_Rank" to the period columns so they do not clash with the
# identically named return columns when all tables are concatenated later.
df5.rename({'1W': '1W_Rank', '1M': '1M_Rank', '3M': '3M_Rank',
            '6M': '6M_Rank', 'YTD': 'YTD_Rank', '1Y': '1Y_Rank',
            '2Y': '2Y_Rank', '3Y': '3Y_Rank', '5Y': '5Y_Rank',
            '10Y': '10Y_Rank'
            }, axis=1, inplace=True)

print("Shape of the dataframe:", df5.shape)
df5.head()
# -

# ## Extracting Conservative Hybrid Mutual Fund's Risk Ratios Data
#
# Data in this table: Get values of risk ratios calculated on daily returns for last 3 years.

ch_risk_ratio = pd.read_html(
    "https://www.moneycontrol.com/mutual-funds/performance-tracker/risk-ratios/conservative-hybrid-fund.html")
df6 = pd.DataFrame(ch_risk_ratio[0])

# Dropping the 'Category' column
df6.drop('Category', inplace=True, axis=1)

print("Shape of the dataframe:", df6.shape)
df6.head()

# ## Extracting Conservative Hybrid Mutual Fund's Portfolio Data
#
# Data in this table: Compare how schemes have invested money across various asset class and number of instruments.

ch_portfolio = pd.read_html(
    "https://www.moneycontrol.com/mutual-funds/performance-tracker/portfolioassets/conservative-hybrid-fund.html")
df7 = pd.DataFrame(ch_portfolio[0])

# +
# Renaming portfolio column names (the original comment said "SIP returns").
df7.rename({'Turnover ratio': 'Turnover ratio(%)'}, axis=1, inplace=True)

print("Shape of the dataframe:", df7.shape)
df7.head()
# -

# ## Extracting Conservative Hybrid Mutual Fund's Latest NAV Data
#
# Data in this table: Get the latest values of NAV for the mutual funds.
# Scrape the latest NAV table for every conservative-hybrid scheme.
ch_nav = pd.read_html(
    "https://www.moneycontrol.com/mutual-funds/performance-tracker/navs/conservative-hybrid-fund.html")
df8 = pd.DataFrame(ch_nav[0])

# Mark the daily-change column as a percentage value.
df8.rename({'1D Change' : '1D Change(%)'}, axis=1, inplace=True)

print("Shape of the dataframe:", df8.shape)
df8.head()

# ## Extracting Conservative Hybrid Mutual Fund's SIP Returns Data
#
# Data in this table: Get absolute SIP returns. If 1Y column value is 10%, that means fund has given 10% returns on your SIP investments started 1 year back from latest NAV date.

ch_sip_rtns = pd.read_html(
    "https://www.moneycontrol.com/mutual-funds/performance-tracker/sip-returns/conservative-hybrid-fund.html")
df9 = pd.DataFrame(ch_sip_rtns[0])

# +
# Renaming SIP returns column names
df9.rename({'1Y': '1Y_SIP_RTN(%)', '2Y': '2Y_SIP_RTN(%)', '3Y': '3Y_SIP_RTN(%)',
            '5Y': '5Y_SIP_RTN(%)', '10Y': '10Y_SIP_RTN(%)', 'YTD' : 'YTD_SIP_RTN(%)'
            }, axis=1, inplace=True)

print("Shape of the dataframe:", df9.shape)
df9.head()
# -

# Combine all nine scraped tables side by side; rows are aligned by positional
# index, which assumes every table lists the schemes in the same order --
# TODO confirm against the scraped pages.
df_final = pd.concat([df1,df2,df3,df4,df5,df6,df7,df8,df9],axis=1,sort=False)
print("Shape of the dataframe:", df_final.shape)

# +
# Remove duplicate columns by name in Pandas (keeps the first occurrence,
# e.g. the scheme-name column repeated in each scraped table).
df_final = df_final.loc[:,~df_final.columns.duplicated()]

# Removing spaces in the column names
#df_final.columns = df_final.columns.str.replace(' ','_')

print("Shape of the dataframe:", df_final.shape)
df_final.head()

# +
# Exporting the consolidated Conservative Hybrid mf data as a csv file
#print("Shape of the dataframe:", df_final.shape)
#df_final.to_csv('ch_mf_data('+ str(pd.to_datetime('today').strftime('%d-%b-%Y %H:%M:%S')) + ').csv',
#               index=False)

# Exporting the Conservative Hybrid mf data columns with its datatype as a csv file
# NOTE(review): df_dtypes is not defined anywhere in this notebook -- define it
# (e.g. from df_final.dtypes) before uncommenting the export below.
#df_dtypes.to_csv('ch_mf_col_data_types('+ str(pd.to_datetime('today').strftime('%d-%b-%Y %H:%M:%S')) + '.csv)')
mf_performance_analysis/mf data extraction/Hybrid Funds/Conservative Hybrid Fund/ch_mf_data_extraction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import time
import datetime
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from mtranslate import translate

#### Importing the file ####
Path = "src/"
Filename = 'projects_increment_translate.csv'
Data = pd.read_csv(Path + Filename)


def save_file(updated_df):
    """Persist the (partially) translated dataframe back to the source CSV."""
    Path = "src/"
    Filename = 'projects_increment_translate.csv'
    updated_df.to_csv(Path + Filename, sep=',', index=False)


# +
# Translate every project description to English, resuming from a previous
# partial run: rows whose 'Translates' cell is already filled are skipped.
Data = pd.read_csv(Path + Filename)
if 'Translates' not in Data.columns:
    Data['Translates'] = ''

with tqdm(total=len(Data.Description)) as bar:
    for i, desc in enumerate(Data.Description):
        if Data['Translates'].loc[i] == '' or pd.isnull(Data['Translates'].loc[i]):
            # Checkpoint every 1000 rows so an interrupted run loses little work.
            if i % 1000 == 0:
                save_file(Data)
            # .loc[row, col] writes into the frame itself; the original chained
            # Data['Translates'].loc[i] = ... may silently update a copy.
            Data.loc[i, 'Translates'] = translate(desc, 'en')
        # Advance the bar on every row.  The original only advanced it for rows
        # that were already translated, so the bar stalled on all new work.
        bar.update(1)

# Save the final batch too -- the periodic checkpoint above would otherwise
# drop up to the last 999 translations.
save_file(Data)
Archieve/Translator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### Removing Values # # You have seen: # # 1. sklearn break when introducing missing values # 2. reasons for dropping missing values # # It is time to make sure you are comfortable with the methods for dropping missing values in pandas. You can drop values by row or by column, and you can drop based on whether **any** value is missing in a particular row or column or **all** are values in a row or column are missing. # # A useful set of many resources in pandas is available [here](https://chrisalbon.com/). Specifically, Chris takes a close look at missing values [here](https://chri}salbon.com/python/data_wrangling/pandas_dropping_column_and_rows/). # + import numpy as np import pandas as pd import RemovingValues as t import matplotlib.pyplot as plt # %matplotlib inline small_dataset = pd.DataFrame({'col1': [1, 2, np.nan, np.nan, 5, 6], 'col2': [7, 8, np.nan, 10, 11, 12], 'col3': [np.nan, 14, np.nan, 16, 17, 18]}) small_dataset # - # #### Question 1 # # **1.** Drop any row with a missing value. # + all_drop = small_dataset.dropna() #print result all_drop # - t.all_drop_test(all_drop) #test # #### Question 2 # # **2.** Drop only the row with all missing values. # + all_row = small_dataset.dropna(axis=0, how='all') #axis 0 specifies you drop, how all specifies that you #print result all_row # - t.all_row_test(all_row) #test # #### Question 3 # # **3.** Drop only the rows with missing values in column 3. # + only3_drop = small_dataset.dropna(subset=['col3'], how='any') #print result only3_drop # - t.only3_drop_test(only3_drop) #test # #### Question 4 # # **4.** Drop only the rows with missing values in column 3 or column 1. 
# +
# Keep only the rows that have values in both col1 and col3; a row missing
# either one is removed (how='any' is the default behaviour of dropna).
only3or1_drop = small_dataset.dropna(axis=0, subset=['col3', 'col1'])

# display the result
only3or1_drop
# -

t.only3or1_drop_test(only3or1_drop)  # check the answer
lessons/CRISP_DM/Removing Values - Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %pylab inline from jax.scipy.ndimage import map_coordinates from constant import * import warnings from jax import jit, partial, random, vmap from tqdm import tqdm warnings.filterwarnings("ignore") np.printoptions(precision=2) nX = Xs.shape[0] nA = As.shape[0] Xs.shape, As.shape # + jupyter={"source_hidden": true} Vgrid = np.load("Value.npy") #Define the earning function, which applies for both employment, 27 states @partial(jit, static_argnums=(0,)) def y(t, x): ''' x = [w,n,m,s,e,o] x = [0,1,2,3,4,5] ''' if t <= T_R: return detEarning[t] * (1+gGDP[jnp.array(x[3], dtype = jnp.int8)]) * x[4] + (1-x[4]) * welfare else: return detEarning[-1] #Earning after tax and fixed by transaction in and out from 401k account @partial(jit, static_argnums=(0,)) def yAT(t,x): yt = y(t, x) if t <= T_R: # yi portion of the income will be put into the 401k if employed return (1-tau_L)*(yt * (1-yi))*x[4] + (1-x[4])*yt else: # t > T_R, n/discounting amount will be withdraw from the 401k return (1-tau_R)*yt + x[1]*Dn[t] #Define the evolution of the amount in 401k account @partial(jit, static_argnums=(0,)) def gn(t, x, r = r_bar): if t <= T_R: # if the person is employed, then yi portion of his income goes into 401k n_cur = x[1] + y(t, x) * yi * x[4] else: # t > T_R, n/discounting amount will be withdraw from the 401k n_cur = x[1] - x[1]*Dn[t] # the 401 grow as the same rate as the stock return (1+r)*n_cur @partial(jit, static_argnums=(0,)) def transition(t,a,x): ''' Input: x = [w,n,m,s,e,o] x = [0,1,2,3,4,5] a = [c,b,k,h,action] a = [0,1,2,3,4] Output: w_next n_next m_next s_next e_next o_next prob_next ''' nA = a.shape[0] s = jnp.array(x[3], dtype = jnp.int8) e = jnp.array(x[4], dtype = jnp.int8) # actions taken b = a[:,1] k = a[:,2] action = a[:,4] w_next = 
((1+r_b[s])*b + jnp.outer(k,(1+r_k)).T).T.flatten().repeat(2) n_next = gn(t, x)*jnp.ones(w_next.size) s_next = jnp.tile(jnp.arange(nS),nA).repeat(nE) e_next = jnp.column_stack((e.repeat(nA*nS),(1-e).repeat(nA*nS))).flatten() # job status changing probability and econ state transition probability pe = Pe[s, e] ps = jnp.tile(Ps[s], nA) prob_next = jnp.column_stack(((1-pe)*ps,pe*ps)).flatten() # owner w_next_own = w_next + (action*(H*pt - x[2])).repeat(nS*nE) m_next_own = ((1-action)*x[2]*Dm[t]).repeat(nS*nE) o_next_own = (x[5] - action).repeat(nS*nE) # renter if t <= t_high: w_next_rent = w_next m_next_rent = (action*H*pt*0.8).repeat(nS*nE) o_next_rent = action.repeat(nS*nE) else: w_next_rent = w_next m_next_rent = np.zeros(w_next.size) o_next_rent = np.zeros(w_next.size) w_next = x[5] * w_next_own + (1-x[5]) * w_next_rent m_next = x[5] * m_next_own + (1-x[5]) * m_next_rent o_next = x[5] * o_next_own + (1-x[5]) * o_next_rent return jnp.column_stack((w_next,n_next,m_next,s_next,e_next,o_next,prob_next)) # + jupyter={"source_hidden": true} #Define the utility function @jit def u(c): return (jnp.power(c, 1-gamma) - 1)/(1 - gamma) #Define the bequeath function, which is a function of wealth @jit def uB(tb): return B*u(tb) #Reward function for renting @jit def R(x,a): ''' Input: x = [w,n,m,s,e,o] x = [0,1,2,3,4,5] a = [c,b,k,h,action] a = [0,1,2,3,4] ''' c = a[:,0] h = a[:,3] C = jnp.power(c, alpha) * jnp.power(h, 1-alpha) return u(C) # used to calculate dot product @jit def dotProduct(p_next, uBTB): return (p_next*uBTB).reshape((p_next.shape[0]//(nS*nE), (nS*nE))).sum(axis = 1) # define approximation of fit @jit def fit(v, xp): return map_coordinates(v.reshape(dim),jnp.vstack((xp[:,0]/scaleW, xp[:,1]/scaleN, xp[:,2]/scaleM, xp[:,3], xp[:,4], xp[:,5])), order = 1, mode = 'nearest') @partial(jit, static_argnums=(0,)) def feasibleActions(t, x): # owner sell = As[:,2] budget1 = yAT(t,x) + x[0] - (x[2]-x[2]*Dm[t]) c = budget1*As[:,0] h = jnp.ones(nA)*H*(1+kappa) budget2 = 
budget1*(1-As[:,0]) k = budget2*As[:,1] b = budget2*(1-As[:,1]) owner_action = jnp.column_stack((c,b,k,h,sell)) # renter buy = As[:,2] budget1 = yAT(t,x) + x[0] - buy*(H*pt*0.2) h = budget1*As[:,0]*(1-alpha)/pr c = budget1*As[:,0]*alpha budget2 = budget1*(1-As[:,0]) k = budget2*As[:,1] b = budget2*(1-As[:,1]) renter_action = jnp.column_stack((c,b,k,h,buy)) actions = x[5]*owner_action + (1-x[5])*renter_action return actions @partial(jit, static_argnums=(0,)) def V(t,V_next,x): ''' x = [w,n,m,s,e,o] x = [0,1,2,3,4,5] xp: w_next 0 n_next 1 m_next 2 s_next 3 e_next 4 o_next 5 prob_next 6 ''' actions = feasibleActions(t,x) xp = transition(t,actions,x) # bequeath utility bequeathU = uB(xp[:,0]+x[1]*(1+r_bar)+xp[:,5]*(H*pt-x[2]*Dm[t])) if t == T_max-1: Q = R(x,actions) + beta * dotProduct(xp[:,6], bequeathU) else: Q = R(x,actions) + beta * dotProduct(xp[:,6], Pa[t]*fit(V_next, xp) + (1-Pa[t])*bequeathU) Q = jnp.nan_to_num(Q,nan = -100) v = Q.max() cbkha = actions[Q.argmax()] return v, cbkha.reshape((1,-1)) # + num = 100000 ''' x = [w,n,m,s,e,o] x = [5,0,0,0,0,0] ''' from jax import random def simulation(key): x = [5, 0, 0, 0, 0, 0] path = [] move = [] for t in range(T_min, T_max-1): _, key = random.split(key) _,a = V(t,Vgrid[:,:,:,:,:,:,t+1],x) xp = transition(t,a,x) p = xp[:,-1] x_next = xp[:,:-1] x = x_next[random.choice(a = nS*nE, p=p, key = key)] path.append(x) move.append(a[0]) return jnp.array(path), jnp.array(move) # - # %%time # simulation part keys = vmap(random.PRNGKey)(jnp.arange(num)) Paths, Moves = vmap(simulation)(keys) # x = [w,n,m,s,e,o] # x = [0,1,2,3,4,5] ws = Paths[:,:,0].T ns = Paths[:,:,1].T ms = Paths[:,:,2].T ss = Paths[:,:,3].T es = Paths[:,:,4].T os = Paths[:,:,5].T cs = Moves[:,:,0].T bs = Moves[:,:,1].T ks = Moves[:,:,2].T hs = Moves[:,:,3].T plt.figure(figsize = [16,8]) plt.title("The mean values of simulation") plt.plot(range(21, T_max-1 + 20),jnp.median(ws,axis = 1)[:-1], label = "wealth") plt.plot(range(21, T_max-1 + 20),jnp.median(cs,axis = 
1)[:-1], label = "consumption") plt.plot(range(21, T_max-1 + 20),jnp.median(bs,axis = 1)[:-1], label = "bond") plt.plot(range(21, T_max-1 + 20),jnp.median(ks,axis = 1)[:-1], label = "stock") # plt.plot((hs*pr).mean(axis = 1)[:-1], label = "housing") plt.legend() plt.title("housing consumption") plt.plot(range(21, T_max-1 + 20),(hs).mean(axis = 1)[:-1], label = "housing") plt.plot(range(21, T_max-1 + 20),(os).mean(axis = 1)[:-1], label = "owning") plt.title("401k") plt.plot(range(21, T_max-1 + 20),(ns).mean(axis = 1)[:-1], label = "housing")
20210401/simulationV_discount-3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Import-pandas,-matplotlib,-and-the-matplotlib-dates-and-date-formatting-utilities" data-toc-modified-id="Import-pandas,-matplotlib,-and-the-matplotlib-dates-and-date-formatting-utilities-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Import pandas, matplotlib, and the matplotlib dates and date formatting utilities</a></span></li><li><span><a href="#View-a-couple-of-rows-of-the-Covid-daily-data" data-toc-modified-id="View-a-couple-of-rows-of-the-Covid-daily-data-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>View a couple of rows of the Covid daily data</a></span></li><li><span><a href="#Calculate-new-cases-and-deaths-by-day" data-toc-modified-id="Calculate-new-cases-and-deaths-by-day-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Calculate new cases and deaths by day</a></span></li><li><span><a href="#Show-line-plots-for-new-cases-and-new-deaths-by-day" data-toc-modified-id="Show-line-plots-for-new-cases-and-new-deaths-by-day-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Show line plots for new cases and new deaths by day</a></span></li><li><span><a href="#Calculate-new-cases-and-deaths-by-day-and-region" data-toc-modified-id="Calculate-new-cases-and-deaths-by-day-and-region-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Calculate new cases and deaths by day and region</a></span></li><li><span><a href="#Show-line-plots-of-new-cases-by-selected-regions" data-toc-modified-id="Show-line-plots-of-new-cases-by-selected-regions-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Show line plots of new cases by selected regions</a></span></li><li><span><a href="#Use-a-stacked-plot-to-examine-the-uptick-in-Southern-Africa-more-closely" data-toc-modified-id="Use-a-stacked-plot-to-examine-the-uptick-in-Southern-Africa-more-closely-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Use a stacked plot to examine the uptick in Southern Africa more closely</a></span></li></ul></div>
# -

# # Import pandas, matplotlib, and the matplotlib dates and date formatting utilities

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter

# +
# pd.set_option('display.width', 80)
# pd.set_option('display.max_columns', 20)
# pd.set_option('display.max_rows', 200)
# pd.options.display.float_format = '{:,.0f}'.format

# +
import watermark
# %load_ext watermark

# %watermark -n -i -iv
# -

coviddaily = pd.read_csv('data/coviddaily720.csv', parse_dates=['casedate'])

# # View a couple of rows of the Covid daily data

coviddaily.sample(2, random_state=1).T

# # Calculate new cases and deaths by day

coviddailytotals = coviddaily.loc[coviddaily['casedate'].between(
    '2020-02-01', '2020-07-12')].groupby(['casedate'])[['new_cases', 'new_deaths']].sum().reset_index()

coviddailytotals.sample(7, random_state=1)

# # Show line plots for new cases and new deaths by day

# +
fig = plt.figure()
plt.suptitle('New Covid Cases and Deaths By Day WorldWide in 2020')
ax1 = plt.subplot(2, 1, 1)  # 2 rows, 1st subplot
ax1.plot(coviddailytotals['casedate'], coviddailytotals['new_cases'])
# # %b ---- Abbreviated month name (Jan, Feb, etc.).
ax1.xaxis.set_major_formatter(DateFormatter('%b'))
# FIX: the x axis shows dates; 'New Cases' describes the plotted quantity and
# belongs on the y axis (was set_xlabel).
ax1.set_ylabel('New Cases')
ax2 = plt.subplot(2, 1, 2)  # 2 rows, 2nd subplot
ax2.plot(coviddailytotals['casedate'], coviddailytotals['new_deaths'])
# # %b ---- Abbreviated month name (Jan, Feb, etc.).
ax2.xaxis.set_major_formatter(DateFormatter('%b'))
# FIX: same mislabeling as above (was set_xlabel).
ax2.set_ylabel('New Deaths')
plt.tight_layout()
plt.subplots_adjust(top=0.88)
plt.show()
# -

# # Calculate new cases and deaths by day and region

regiontotals = coviddaily.loc[coviddaily['casedate'].between(
    '2020-02-01', '2020-07-12')].groupby(['casedate', 'region'])[['new_cases', 'new_deaths']].sum().reset_index()

regiontotals.sample(7, random_state=1)

# # Show line plots of new cases by selected regions

# +
showregions = [
    'East Asia', 'Southern Africa', 'North America', 'Western Europe'
]

for j in range(len(showregions)):
    rt = regiontotals.loc[regiontotals['region'] == showregions[j],
                          ['casedate', 'new_cases']]
    plt.plot(rt['casedate'], rt['new_cases'], label=showregions[j])
plt.title('New Covid Cases By Day and Region in 2020')
# # %b ---- Abbreviated month name (Jan, Feb, etc.).
plt.gca().get_xaxis().set_major_formatter(DateFormatter('%b'))
plt.ylabel('New Cases')
plt.legend()
plt.show()

# +
# Not using range(len(showregions))
showregions = [
    'East Asia', 'Southern Africa', 'North America', 'Western Europe'
]

plt.figure(figsize=(10, 6))
for reg in showregions:
    rt = regiontotals.loc[regiontotals['region'] == reg,
                          ['casedate', 'new_cases']]
    plt.plot(rt['casedate'], rt['new_cases'], label=reg)
plt.title('New Covid Cases By Day and Region in 2020')
# # %b ---- Abbreviated month name (Jan, Feb, etc.).
plt.gca().get_xaxis().set_major_formatter(DateFormatter('%b'))
plt.ylabel('New Cases')
plt.legend()
plt.show()
# -

# # Use a stacked plot to examine the uptick in Southern Africa more closely

af = regiontotals.loc[regiontotals['region'] == 'Southern Africa',
                      ['casedate', 'new_cases']].rename(
                          columns={'new_cases': 'afcases'})
af.shape
af.tail(2)

sa = coviddaily.loc[coviddaily['location'] == 'South Africa',
                    ['casedate', 'new_cases']].rename(
                        columns={'new_cases': 'sacases'})
sa.shape
sa.tail(2)

af_all = pd.merge(af, sa, left_on=['casedate'], right_on=['casedate'],
                  how='left')
# FIX: Series.fillna(..., inplace=True) on a column selection is the chained
# assignment pattern deprecated in modern pandas; assign the result instead.
af_all['sacases'] = af_all['sacases'].fillna(0)
af_all['afcases_nosa'] = af_all['afcases'] - af_all['sacases']
af_all.shape
af_all.tail(2)

afabb = af_all.loc[af_all['casedate'].between('2020-04-01', '2020-07-12')]
afabb.shape
afabb.tail(2)

fig = plt.figure(figsize=(8, 8))
ax = plt.subplot()
ax.stackplot(afabb['casedate'], afabb['sacases'], afabb['afcases_nosa'],
             labels=['South Africa', 'Other Southern Africa'])
ax.xaxis.set_major_formatter(DateFormatter('%m-%d'))
plt.title('New Covid Cases in Southern Africa')
plt.tight_layout()
plt.legend(loc='upper left')
plt.show()

# **Not seeing same result as shown in book even with the exact code as in book / github**

# +
# This cell deliberately reproduces the book's code verbatim for comparison;
# do not "fix" it — the point is a byte-for-byte re-run.
af = regiontotals.loc[regiontotals.region == 'Southern Africa',
                      ['casedate', 'new_cases']].rename(
                          columns={'new_cases': 'afcases'})
sa = coviddaily.loc[coviddaily.location == 'South Africa',
                    ['casedate', 'new_cases']].rename(
                        columns={'new_cases': 'sacases'})
af = pd.merge(af, sa, left_on=['casedate'], right_on=['casedate'], how="left")
af.sacases.fillna(0, inplace=True)
af['afcasesnosa'] = af.afcases - af.sacases
afabb = af.loc[af.casedate.between('2020-04-01', '2020-07-12')]

fig = plt.figure()
ax = plt.subplot()
ax.stackplot(afabb.casedate, afabb.sacases, afabb.afcasesnosa,
             labels=['South Africa', 'Other Southern Africa'])
ax.xaxis.set_major_formatter(DateFormatter("%m-%d"))
plt.title("New Covid Cases in Southern Africa")
plt.tight_layout()
plt.legend(loc="upper left")
plt.show()
# -
src/datacleaning/Chapter 5/6_line_plots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn import metrics
from keras.models import Sequential
from keras.layers import Dense, Dropout, regularizers
from keras.layers import LSTM
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
import warnings
import numpy as np
from collections import OrderedDict
import os

from lob_data_utils import lob, db_result, gdf_pca, model
from lob_data_utils.svm_calculation import lob_svm

sns.set_style('whitegrid')

warnings.filterwarnings('ignore')
# -

# Experiment configuration: one stock, fixed sample length, and a grid of
# (r, s) GDF parameters to compare.
data_length = 24000
stock = '12255'
gdf_filename_pattern = 'gdf_{}_r{}_s{}_K50'

# Load one SvmGdfResults helper per (r, s) parameter pair.
gdf_parameters = [(0.1, 0.1), (0.01, 0.1), (0.1, 0.5), (0.01, 0.5), (0.25, 0.25)]
gdf_dfs = []
for r, s in gdf_parameters:
    gdf_dfs.append(gdf_pca.SvmGdfResults(
        stock, r=r, s=s, data_length=data_length, data_dir='../../data/data_gdf',
        reg_data_dir='../../data/prepared', gdf_filename_pattern=gdf_filename_pattern))

# Inspect the (r=0.1, s=0.5) variant: its train/test frames, the number of PCA
# components used for the pca_n_gdf_que_prev features, and class weights.
df = gdf_dfs[2].df
df_test = gdf_dfs[2].df_test
n_components = gdf_dfs[2].get_pca('pca_n_gdf_que_prev').n_components_
class_weights = gdf_dfs[2].get_classes_weights()
print(n_components, class_weights)

df[[c for c in df.columns if 'gdf' in c]].boxplot(figsize=(16, 4))

def as_keras_metric(method):
    # Adapt a tf.metrics-style (value, update_op) metric so Keras can call it
    # as a plain (y_true, y_pred) -> value function.
    import functools
    from keras import backend as K
    import tensorflow as tf
    @functools.wraps(method)
    def wrapper(self, args, **kwargs):
        """ Wrapper for turning tensorflow metrics into keras metrics """
        # NOTE(review): `self`/`args` here are really y_true/y_pred as passed
        # by Keras — the parameter names are misleading but the call works.
        value, update_op = method(self, args, **kwargs)
        K.get_session().run(tf.local_variables_initializer())
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
        return value
    return wrapper

import tensorflow as tf
auc_roc = as_keras_metric(tf.metrics.auc)

def MCC(y_true, y_pred):
    # Matthews correlation coefficient computed with Keras backend ops so it
    # can be used as a compile-time metric; K.epsilon() guards the division.
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / (denominator + K.epsilon())

# +
## With validation
# -

def get_model():
    # Two stacked LSTMs on 16-step univariate sequences, l2-regularized,
    # sigmoid output for binary classification.
    model = Sequential()
    model.add(LSTM(16, activation='relu', input_shape=(16, 1), return_sequences=True))
    # model.add(LSTM(8, activation='relu', input_shape=(16, 1),
    #               kernel_regularizer=regularizers.l2(0.01), return_sequences=True))
    model.add(LSTM(8, activation='relu', input_shape=(16, 1), kernel_regularizer=regularizers.l2(0.001)))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[MCC, auc_roc])
    return model

epochs = 50
batch_size = 512
score, m = gdf_dfs[1].train_lstm(
    get_model, feature_name='pca_n_gdf_que', plot_name='here.png', n_steps=16,
    fit_kwargs={'epochs': epochs, 'batch_size': batch_size, 'verbose': 0, 'shuffle': False},
    class_weight=class_weights, should_return_model=True, should_validate=True,
    compile_kwargs= {
        'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': [MCC, auc_roc]})

score['train_matthews'], score['matthews'], score['test_matthews'], score['roc_auc'], score['test_roc_auc']

score['train_matthews'], score['test_matthews'], score['test_roc_auc']

m.layers[0].trainable_weights[0]

# Keras packs the LSTM's i/f/c/o gate kernels side by side, so the unit count
# is a quarter of the kernel's second dimension.
units =
int(int(m.layers[0].trainable_weights[0].shape[1])/4)
print("No units: ", units)
print(m.layers)

# +
# Split the first LSTM layer's weights into the four gate blocks
# (input, forget, cell, output) and plot the raw weight profiles.
ll = 0
W = m.layers[ll].get_weights()[0]
U = m.layers[ll].get_weights()[1]
b = m.layers[ll].get_weights()[2]

W_i = W[:, :units]
W_f = W[:, units: units * 2]
W_c = W[:, units * 2: units * 3]
W_o = W[:, units * 3:]

U_i = U[:, :units]
U_f = U[:, units: units * 2]
U_c = U[:, units * 2: units * 3]
U_o = U[:, units * 3:]

b_i = b[:units]
b_f = b[units: units * 2]
b_c = b[units * 2: units * 3]
b_o = b[units * 3:]

_, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(16, 4))
ax1.plot(U.ravel())
ax2.plot(W.ravel())
ax3.plot(b.ravel())
# -

# Compare against previously logged logistic-regression results for this stock.
df_log = pd.read_csv('../../gdf_pca/res_log_que.csv')
df_log = df_log[df_log['stock'] == int(stock)]
columns = [c for c in df_log.columns if 'matthews' in c or 'roc_auc' in c]
df_log[columns]

# +
### Another

# +
def get_model():
    # Single wide LSTM on 16-step, 10-feature sequences with l1_l2 activity
    # regularization and dropout; sigmoid output for binary classification.
    model = Sequential()
    model.add(LSTM(32, activation='relu', input_shape=(16, 10), activity_regularizer=regularizers.l1_l2(0.001)))
    # model.add(LSTM(8, activation='relu', input_shape=(16, 1),
    #               kernel_regularizer=regularizers.l2(0.01), return_sequences=True))
    model.add(Dropout(0.5))
    # model.add(LSTM(8, activation='relu', input_shape=(16, 1), kernel_regularizer=regularizers.l2(0.001)))
    # model.add(Dropout(0.25))
    #model.add(Dense(4, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))
    # FIX: metrics referenced the undefined name `matthews_correlation`
    # (NameError at call time); the Matthews metric defined earlier in this
    # notebook is `MCC`.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[MCC, auc_roc])
    return model

def get_model1():
    # Three stacked, progressively narrower LSTMs; sigmoid output.
    model = Sequential()
    model.add(LSTM(8, activation='relu', input_shape=(16, 10), return_sequences=True))
    model.add(LSTM(4, activation='relu', input_shape=(16, 1), return_sequences=True))
    model.add(LSTM(2, activation='relu', input_shape=(16, 1), return_sequences=False))
    model.add(Dense(1, activation='sigmoid'))
    # FIX: same undefined `matthews_correlation` reference replaced with `MCC`.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[MCC, auc_roc])
    return model
# -

epochs = 50
batch_size = 512
score, m = gdf_dfs[0].train_lstm(
    get_model1, feature_name='pca_n_gdf_que', plot_name='here.png', n_steps=16,
    fit_kwargs={'epochs': epochs, 'batch_size': batch_size, 'verbose': 0, 'shuffle': False},
    class_weight=class_weights, should_return_model=True, should_validate=True,
    compile_kwargs= {
        # FIX: `matthews_correlation` -> `MCC` here as well.
        'loss': 'binary_crossentropy', 'optimizer': 'adam', 'metrics': [MCC, auc_roc]})

score['train_matthews'], score['matthews'], score['test_matthews'], score['roc_auc'], score['test_roc_auc']

m.layers[0].trainable_weights[0]

units = int(int(m.layers[0].trainable_weights[0].shape[1])/4)
print("No units: ", units)
print(m.layers)

# +
# Same gate-weight inspection as above, for the newly trained model.
ll = 0
W = m.layers[ll].get_weights()[0]
U = m.layers[ll].get_weights()[1]
b = m.layers[ll].get_weights()[2]

W_i = W[:, :units]
W_f = W[:, units: units * 2]
W_c = W[:, units * 2: units * 3]
W_o = W[:, units * 3:]

U_i = U[:, :units]
U_f = U[:, units: units * 2]
U_c = U[:, units * 2: units * 3]
U_o = U[:, units * 3:]

b_i = b[:units]
b_f = b[units: units * 2]
b_c = b[units * 2: units * 3]
b_o = b[units * 3:]

_, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(16, 4))
ax1.plot(U.ravel())
ax2.plot(W.ravel())
ax3.plot(b.ravel())
# -

# FIX: `model` at this point is the imported lob_data_utils.model module, not
# the trained network — serialize the returned Keras model `m` instead.
# FIX: record the metadata that was actually used for this run: n_steps was
# 16 (not 5) and the training used gdf_dfs[0], so its r/s are recorded.
score = {**score, 'arch': m.to_json(), 'batch_size': batch_size, 'n_steps': 16,
         'epochs': epochs, 'r': gdf_dfs[0].r, 's': gdf_dfs[0].s}
#pd.DataFrame([score]).to_csv(
#    '../../gdf_pca/res_lstm_weird/res_lstm_pca_que_{}_len24000_r{}_s{}.csv'.format(stock, gdf_dfs[1].r, gdf_dfs[1].s))
overview/playground/playground_12255_lstm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# <h2>Inspecting the Aligned Datasets</h2>
#
# We will be inspecting the corpora of aligned Wikipedia summaries and knowledge base triples (i.e. D1 and D2).

# NOTE: this notebook targets Python 2 (cPickle, dict.iteritems, print statements).
import os
import cPickle as pickle
import operator

# Choose which dataset file you would like to load by changing the value of the `selected_dataset` variable below.
# * `selected_dataset = 'D1'`: will load the dataset of DBpedia triples aligned with Wikipedia biographies
# * `selected_dataset = 'D2'`: will load the dataset of Wikidata triples aligned with Wikipedia biographies

selected_dataset = 'D1'

assert (selected_dataset == 'D1' or selected_dataset == 'D2'), "selected_dataset can be set to either 'D1' or 'D2'."

# We are loading all the dataset `pickle` files that reside in the selected dataset's directory.

# +
dataset_dir = './%s/data/' % selected_dataset

# The five lists are index-aligned: entry i of each list describes the same item.
dataset = {'item': [], 'original_summary': [], 'summary_with_URIs': [],
           'summary_with_surf_forms': [], 'triples': []}

for file in os.listdir(dataset_dir):
    if file.endswith(".p"):
        tempDatasetFileLocation = os.path.join(dataset_dir, file)
        with open(tempDatasetFileLocation, 'rb') as tempDatasetFile:
            tempDataset = pickle.load(tempDatasetFile)
        dataset['item'].extend(tempDataset['item'])
        dataset['original_summary'].extend(tempDataset['original_summary'])
        dataset['summary_with_URIs'].extend(tempDataset['summary_with_URIs'])
        dataset['summary_with_surf_forms'].extend(tempDataset['summary_with_surf_forms'])
        dataset['triples'].extend(tempDataset['triples'])
        print('Successfully loaded dataset file: %s' % (tempDatasetFileLocation))

# Sanity check: all parallel lists must stay the same length after loading.
assert(len(dataset['item']) == len(dataset['original_summary']))
assert(len(dataset['item']) == len(dataset['triples']))
assert(len(dataset['item']) == len(dataset['summary_with_URIs']))
assert(len(dataset['item']) == len(dataset['summary_with_surf_forms']))

print('Total items that have been loaded in the dataset: %d' % (len(dataset['item'])))
# -

# The selected dataset is loaded as a dictionary of lists. The lists are aligned with each other. For example, in order to print all entries about the $15$-th item, we simply do:

index = 14
for key in dataset:
    print(key)
    print(dataset[key][index])

# The keys of the dictionary are described below:
# * `item`: refers to the main entity of each Wikipedia summary.
# * `original_summary`: refers to the original Wikipedia summary, prior to any pre-processing.
# * `triples`: refers to the list of triples that associated with the Wikipedia summary.
# * `summary_with_URIs`: refers to the Wikipedia summary after pre-processing. The entities that have been annotated in the original summary are represented as URIs.
# * `summary_with_surf_forms`: refers to the Wikipedia summary after pre-processing. The entities that have been annotated in the original summary are represented as surface form tuples.
#
# Any reference to the main entity in the `triples`, `summary_with_URIs`, and `summary_with_surf_forms` is represented with the special `<item>` token.
#
# Tokens, such as `#surFormToken101` and `#surFormToken103` are used as temporal placeholders for the annotated entities' URIs in the case of `summary_with_URIs` or the entities' surface form tuples in the case of `summary_with_surf_forms`. These can be replaced by using the supporting dictionaries of the following `pickle` files.

# +
surf_forms_tokens_location = './%s/utils/Surface-Form-Tokens.p' % selected_dataset
with open('./%s/utils/Surface-Form-Tokens.p' % selected_dataset, 'rb') as f:
    surf_forms_tokens = pickle.load(f)
print('Successfully loaded surface form tokens file at: %s' % surf_forms_tokens_location)

uri_tokens_location = './%s/utils/URI-Tokens.p' % selected_dataset
with open('./%s/utils/URI-Tokens.p' % selected_dataset, 'rb') as f:
    uri_tokens = pickle.load(f)
print('Successfully loaded surface form tokens file at: %s' % uri_tokens_location)

surf_form_counts_location = './%s/utils/Surface-Forms-Counts.p' % selected_dataset
with open('./%s/utils/Surface-Forms-Counts.p' % selected_dataset, 'rb') as f:
    surf_form_counts = pickle.load(f)
print('Successfully loaded surface forms file at: %s' % surf_form_counts_location)

uri_counts_location = './%s/utils/URI-Counts.p' % selected_dataset
with open('./%s/utils/URI-Counts.p' % selected_dataset, 'rb') as f:
    uri_counts = pickle.load(f)
print('Successfully loaded surface forms file at: %s' % uri_counts_location)
# -

# We are inverting the dictionaries of interest.

# Map placeholder token -> URI / surface-form tuple (the loaded dicts map the
# other way around).
inv_uri_tokens = {v: k for k, v in uri_tokens.iteritems()}
inv_surf_forms_tokens = {v: k for k, v in surf_forms_tokens.iteritems()}

# An example of the structure of each one of the supporting dictionaries is presented below. Below are examples for the dictionaries that map the temporal placeholders (e.g. `#surFormToken103`) to their respecive URIs and surface form tuples respectively.

# ```python
# uri_tokens = {u'http://dbpedia.org/resource/Snyder_Rini': '#surFormToken77050',
#               u'http://dbpedia.org/resource/Mountain_West_Conference': '#surFormToken77051',
#               ...}
# surf_forms_tokens = {(u'http://dbpedia.org/resource/Album', u'studio album'): '#surFormToken352',
#                      (u'http://dbpedia.org/resource/Album', u'studio albums'): '#surFormToken697',
#                      (u'http://dbpedia.org/resource/Actor', u'actor'): '#surFormToken693',
#                      (u'http://dbpedia.org/resource/Actor', u'stage'): '#surFormToken622'}
# ```

# Examples of the inverses of the above dictionaries, which map the temporal placeholders (e.g. `#surFormToken103`) to their respecive URIs and surface form tuples, are shown below:

# ```python
# inv_uri_tokens = {'#surFormToken77050': u'http://dbpedia.org/resource/Snyder_Rini',
#                   '#surFormToken77051': u'http://dbpedia.org/resource/Mountain_West_Conference',
#                   ...}
# inv_surf_forms_tokens = {'#surFormToken77057': (u'http://dbpedia.org/resource/Snyder_Rini', u'<NAME>'),
#                          '#surFormToken77051': (u'http://dbpedia.org/resource/Richard_Webb_(actor)', u'<NAME>'),
#                          ...}
# ```

# Below are examples of the dictionaries' structure that track the frequency with which surface forms have been associated with entity URIs.

# ```python
# uri_counts = {'http://dbpedia.org/resource/Actor': {u'actor': 19014, u'actress': 14941, ...},
#               'http://dbpedia.org/resource/Europe': {u'Europe': 3169, u'European': 1446, ...},
#               ...}
# surf_form_counts = {'http://dbpedia.org/resource/Albert_Einstein': {'Albert Einstein': 1, 'Einstein': 2},
#                     'http://dbpedia.org/resource/Artist': {'artist': 1, 'artists': 1},
#                     ...}
# ```

def getAligned(index, use_surface_forms = False):
    # Print the index-th summary in both representations (URIs and surface
    # form tuples) together with its aligned triples. When use_surface_forms
    # is True, placeholders are resolved to the most frequent surface form.
    if index < len(dataset['item']):
        # Printing the summary by representing the annotated entities as URIs.
        print ('Wikipedia Summary w/ URIs:')
        tokens = dataset['summary_with_URIs'][index].split()
        for j in range (0, len(tokens)):
            if tokens[j] in inv_uri_tokens:
                tempURI = inv_uri_tokens[tokens[j]]
                if use_surface_forms:
                    tokens[j] = max(uri_counts[tempURI].iteritems(), key=operator.itemgetter(1))[0]
                else:
                    tokens[j] = tempURI
            elif tokens[j] == '<item>':
                tempURI = dataset['item'][index].decode('utf-8')
                # NOTE(review): this looks like a leftover debug print — confirm
                # it is intended in the output.
                print tempURI
                if use_surface_forms:
                    tokens[j] = max(uri_counts[tempURI].iteritems(), key=operator.itemgetter(1))[0]
                else:
                    tokens[j] = tempURI
        print(' '.join(tokens))

        # Printing the summary by representing the annotated entities as surface form tuples.
        print ('\nWikipedia Summary w/ Surf. Form Tuples:')
        tokens = dataset['summary_with_surf_forms'][index].split()
        for j in range (0, len(tokens)):
            if tokens[j] in inv_surf_forms_tokens:
                tempTuple = inv_surf_forms_tokens[tokens[j]]
                if use_surface_forms:
                    tokens[j] = tempTuple[1]
                else:
                    tokens[j] = str(tempTuple)
            elif tokens[j] == '<item>':
                tempURI = dataset['item'][index].decode('utf-8')
                if use_surface_forms:
                    tokens[j] = max(surf_form_counts[tempURI].iteritems(), key=operator.itemgetter(1))[0]
                else:
                    tokens[j] = tempURI
        print(' '.join(tokens))

        # Printing the knowledge base triples allocated to the summary.
        print ('\nTriples:')
        for j in range(0, len(dataset['triples'][index])):
            print(dataset['triples'][index][j].replace('<item>', dataset['item'][index]))
    else:
        print('Pick an index between 0 and %d.' % (len(dataset['item']) - 1))

# By running the `getAligned(i, use_surface_forms)` function, we are printing the $i$-th Wikipedia summary, both with URIs and surface form tuples, along with its corresponding triples.
#
# In case that the `use_surface_forms` variable is set to `True` then:
# * In the case of the Wikipedia summaries with URIs, the entity URIs that exist in the text will be replaced by their corresponding most-frequently met surface forms.
# * In the case of the Wikipedia summaries with surface form tuples, the tuples are removed and only their second element (surface form) is kept.

getAligned(4020, False)

getAligned(72017, True)
Inspect-Dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import os
import numpy as np

# +
# Scan every .npy file in the feature directory and report the longest and the
# shortest array lengths (the shortest is clamped below at 1); file names of
# empty arrays are printed as they are found.
arraydirectory = './FeatureSampleFoodClassification_300/con_array_0.5/'
max_arr = -1
min_arr = 100 * 100
for fn in os.listdir(arraydirectory):
    if not (os.path.isfile(arraydirectory + fn) and '.npy' in fn):
        continue
    arr = np.array(np.load(os.path.join(arraydirectory, fn)))
    length = len(arr)
    if length > max_arr:
        max_arr = length
    min_arr = max(1, min(min_arr, length))
    if length == 0:
        print(fn)
print(max_arr)
print(min_arr)
# -
array_length_check.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # LogRecordオブジェクトをERRORに設定してログを出力 import logging logging.basicConfig(level=logging.ERROR, format=' %(asctime)s - %(levelname)s - %(message)s') # - logging.debug('プログラム実行中') # DEBUGレベルのログを出力 logging.info('プログラム実行中') # INFOレベルのログを出力 logging.warning('プログラム実行中') # WARNINGレベルのログを出力 logging.error('プログラム実行中') # ERRORレベルのログを出力 logging.critical('プログラム実行中') # CRITICALレベルのログを出力
sample/Python_GOKUI/Python_GOKUI/chap05/sec01/ExceptionHandling_4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/JimKing100/nfl-test/blob/master/wrangling/Actuals_Offense1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="-TVaaLjSOX5A" colab_type="code" colab={} # Imports import pandas as pd # + id="nfCFDKKWOfBc" colab_type="code" colab={} # Load the raw data player_df = pd.read_csv('https://raw.githubusercontent.com/Lambda-School-Labs/nfl-fantasy-ds/master/data/raw/players.csv') offense_df = pd.read_csv('https://raw.githubusercontent.com/Lambda-School-Labs/nfl-fantasy-ds/master/data/raw/offense.csv') td_df = pd.read_csv('https://raw.githubusercontent.com/Lambda-School-Labs/nfl-fantasy-ds/master/data/raw/td.csv') game_df = pd.read_csv('https://raw.githubusercontent.com/Lambda-School-Labs/nfl-fantasy-ds/master/data/raw/game.csv') # + id="O_pDKtnOSYBT" colab_type="code" outputId="274bc321-6543-49fa-99d0-ff0ce7877dea" colab={"base_uri": "https://localhost:8080/", "height": 204} # The players dataframe - includes all the offensive players for the 2019 NFL season player_df.head() # + id="q0b3DP9LScDh" colab_type="code" outputId="b8fa3f20-1955-4825-c686-b2cfc31b4eca" colab={"base_uri": "https://localhost:8080/", "height": 309} # The offense dataframe - raw offensive data for all 2019 QBs, RBs, WRs, TEs for seasons 2000-2019 offense_df.head() # + id="uXGJDVr_V1gf" colab_type="code" outputId="38adebc4-49e9-44f8-b0a4-14d641958c79" colab={"base_uri": "https://localhost:8080/", "height": 204} # The touchdown dataframe - raw touchdown data for rushing, receiving and passing TD plays of 40 yards and greater. 
td_df.head() # + id="zl7QIoVSPO3x" colab_type="code" outputId="d335f5b7-306f-42e3-b472-5f19eb366e27" colab={"base_uri": "https://localhost:8080/", "height": 204} # The game dataframe - raw game data to calculate bye weeks game_df.head() # + id="NuuSgHO9Sjnz" colab_type="code" colab={} # Pull the stats to calculate the player points def pull_stats(df, df1, df2, col, p, y, g): # Set empty values to NaN if prior to start year, otherwise zero if (df[col].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].empty): start_year = df1['start'].loc[(df1['player']==p)].iloc[0] if y < start_year: points = float('NaN') else: points = 0 else: rush_tds = df['tdr'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] receive_tds = df['tdrec'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] pass_tds = df['tdp'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] return_tds = df['tdret'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] convs = df['conv'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] rush_yds = df['ry'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] receive_yds = df['recy'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] pass_yds = df['py'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] ints = df['ints'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] fumbles = df['fuml'].loc[(df['player']==p) & (df['year']==y) & (df['week']==g)].iloc[0] rush_rec_td40 = df2['name'].loc[(df2['player']==p) & (df2['year']==y) & (df2['week']==g)].count() pass_td40 = df2['psr'].loc[(df2['psr']==p) & (df2['year']==y) & (df2['week']==g)].count() points = (rush_tds * 6) + \ (receive_tds * 6) + \ (pass_tds * 4) + \ (return_tds * 6) + \ (convs * 2) + \ (rush_yds / 10) + \ (receive_yds /10) + \ (pass_yds / 25) - \ (ints * 2) - \ (fumbles * 2) + \ (rush_rec_td40 * 2) + \ (pass_td40) return points # + id="r0KrLO1lNOMc" 
colab_type="code" colab={} # Add a row to the final_df dataframe # Each row represents the actual offensive points for each game for each offensive player def add_row(df, p, f, l, n, pos, value_list): df = df.append({'player': p, 'first': f, 'last': l, 'name': n, 'position1': pos, '2000-game1': value_list[0], '2000-game2': value_list[1], '2000-game3': value_list[2], '2000-game4': value_list[3], '2000-game5': value_list[4], '2000-game6': value_list[5], '2000-game7': value_list[6], '2000-game8': value_list[7], '2000-game9': value_list[8], '2000-game10': value_list[9], '2000-game11': value_list[10], '2000-game12': value_list[11], '2000-game13': value_list[12], '2000-game14': value_list[13], '2000-game15': value_list[14], '2000-game16': value_list[15], '2001-game1': value_list[16], '2001-game2': value_list[17], '2001-game3': value_list[18], '2001-game4': value_list[19], '2001-game5': value_list[20], '2001-game6': value_list[21], '2001-game7': value_list[22], '2001-game8': value_list[23], '2001-game9': value_list[24], '2001-game10': value_list[25], '2001-game11': value_list[26], '2001-game12': value_list[27], '2001-game13': value_list[28], '2001-game14': value_list[29], '2001-game15': value_list[30], '2001-game16': value_list[31], '2002-game1': value_list[32], '2002-game2': value_list[33], '2002-game3': value_list[34], '2002-game4': value_list[35], '2002-game5': value_list[36], '2002-game6': value_list[37], '2002-game7': value_list[38], '2002-game8': value_list[39], '2002-game9': value_list[40], '2002-game10': value_list[41], '2002-game11': value_list[42], '2002-game12': value_list[43], '2002-game13': value_list[44], '2002-game14': value_list[45], '2002-game15': value_list[46], '2002-game16': value_list[47], '2003-game1': value_list[48], '2003-game2': value_list[49], '2003-game3': value_list[50], '2003-game4': value_list[51], '2003-game5': value_list[52], '2003-game6': value_list[53], '2003-game7': value_list[54], '2003-game8': value_list[55], '2003-game9': 
value_list[56], '2003-game10': value_list[57], '2003-game11': value_list[58], '2003-game12': value_list[59], '2003-game13': value_list[60], '2003-game14': value_list[61], '2003-game15': value_list[62], '2003-game16': value_list[63], '2004-game1': value_list[64], '2004-game2': value_list[65], '2004-game3': value_list[66], '2004-game4': value_list[67], '2004-game5': value_list[68], '2004-game6': value_list[69], '2004-game7': value_list[70], '2004-game8': value_list[71], '2004-game9': value_list[72], '2004-game10': value_list[73], '2004-game11': value_list[74], '2004-game12': value_list[75], '2004-game13': value_list[76], '2004-game14': value_list[77], '2004-game15': value_list[78], '2004-game16': value_list[79], '2005-game1': value_list[80], '2005-game2': value_list[81], '2005-game3': value_list[82], '2005-game4': value_list[83], '2005-game5': value_list[84], '2005-game6': value_list[85], '2005-game7': value_list[86], '2005-game8': value_list[87], '2005-game9': value_list[88], '2005-game10': value_list[89], '2005-game11': value_list[90], '2005-game12': value_list[91], '2005-game13': value_list[92], '2005-game14': value_list[93], '2005-game15': value_list[94], '2005-game16': value_list[95], '2006-game1': value_list[96], '2006-game2': value_list[97], '2006-game3': value_list[98], '2006-game4': value_list[99], '2006-game5': value_list[100], '2006-game6': value_list[101], '2006-game7': value_list[102], '2006-game8': value_list[103], '2006-game9': value_list[104], '2006-game10': value_list[105], '2006-game11': value_list[106], '2006-game12': value_list[107], '2006-game13': value_list[108], '2006-game14': value_list[109], '2006-game15': value_list[110], '2006-game16': value_list[111], '2007-game1': value_list[112], '2007-game2': value_list[113], '2007-game3': value_list[114], '2007-game4': value_list[115], '2007-game5': value_list[116], '2007-game6': value_list[117], '2007-game7': value_list[118], '2007-game8': value_list[119], '2007-game9': value_list[120], '2007-game10': 
value_list[121], '2007-game11': value_list[122], '2007-game12': value_list[123], '2007-game13': value_list[124], '2007-game14': value_list[125], '2007-game15': value_list[126], '2007-game16': value_list[127], '2008-game1': value_list[128], '2008-game2': value_list[129], '2008-game3': value_list[130], '2008-game4': value_list[131], '2008-game5': value_list[132], '2008-game6': value_list[133], '2008-game7': value_list[134], '2008-game8': value_list[135], '2008-game9': value_list[136], '2008-game10': value_list[137], '2008-game11': value_list[138], '2008-game12': value_list[139], '2008-game13': value_list[140], '2008-game14': value_list[141], '2008-game15': value_list[142], '2008-game16': value_list[143], '2009-game1': value_list[144], '2009-game2': value_list[145], '2009-game3': value_list[146], '2009-game4': value_list[147], '2009-game5': value_list[148], '2009-game6': value_list[149], '2009-game7': value_list[150], '2009-game8': value_list[151], '2009-game9': value_list[152], '2009-game10': value_list[153], '2009-game11': value_list[154], '2009-game12': value_list[155], '2009-game13': value_list[156], '2009-game14': value_list[157], '2009-game15': value_list[158], '2009-game16': value_list[159], '2010-game1': value_list[160], '2010-game2': value_list[161], '2010-game3': value_list[162], '2010-game4': value_list[163], '2010-game5': value_list[164], '2010-game6': value_list[165], '2010-game7': value_list[166], '2010-game8': value_list[167], '2010-game9': value_list[168], '2010-game10': value_list[169], '2010-game11': value_list[170], '2010-game12': value_list[171], '2010-game13': value_list[172], '2010-game14': value_list[173], '2010-game15': value_list[174], '2010-game16': value_list[175], '2011-game1': value_list[176], '2011-game2': value_list[177], '2011-game3': value_list[178], '2011-game4': value_list[179], '2011-game5': value_list[180], '2011-game6': value_list[181], '2011-game7': value_list[182], '2011-game8': value_list[183], '2011-game9': value_list[184], 
'2011-game10': value_list[185], '2011-game11': value_list[186], '2011-game12': value_list[187], '2011-game13': value_list[188], '2011-game14': value_list[189], '2011-game15': value_list[190], '2011-game16': value_list[191], '2012-game1': value_list[192], '2012-game2': value_list[193], '2012-game3': value_list[194], '2012-game4': value_list[195], '2012-game5': value_list[196], '2012-game6': value_list[197], '2012-game7': value_list[198], '2012-game8': value_list[199], '2012-game9': value_list[200], '2012-game10': value_list[201], '2012-game11': value_list[202], '2012-game12': value_list[203], '2012-game13': value_list[204], '2012-game14': value_list[205], '2012-game15': value_list[206], '2012-game16': value_list[207], '2013-game1': value_list[208], '2013-game2': value_list[209], '2013-game3': value_list[210], '2013-game4': value_list[211], '2013-game5': value_list[212], '2013-game6': value_list[213], '2013-game7': value_list[214], '2013-game8': value_list[215], '2013-game9': value_list[216], '2013-game10': value_list[217], '2013-game11': value_list[218], '2013-game12': value_list[219], '2013-game13': value_list[220], '2013-game14': value_list[221], '2013-game15': value_list[222], '2013-game16': value_list[223], '2014-game1': value_list[224], '2014-game2': value_list[225], '2014-game3': value_list[226], '2014-game4': value_list[227], '2014-game5': value_list[228], '2014-game6': value_list[229], '2014-game7': value_list[230], '2014-game8': value_list[231], '2014-game9': value_list[232], '2014-game10': value_list[233], '2014-game11': value_list[234], '2014-game12': value_list[235], '2014-game13': value_list[236], '2014-game14': value_list[237], '2014-game15': value_list[238], '2014-game16': value_list[239], '2015-game1': value_list[240], '2015-game2': value_list[241], '2015-game3': value_list[242], '2015-game4': value_list[243], '2015-game5': value_list[244], '2015-game6': value_list[245], '2015-game7': value_list[246], '2015-game8': value_list[247], '2015-game9': 
value_list[248], '2015-game10': value_list[249], '2015-game11': value_list[250], '2015-game12': value_list[251], '2015-game13': value_list[252], '2015-game14': value_list[253], '2015-game15': value_list[254], '2015-game16': value_list[255], '2016-game1': value_list[256], '2016-game2': value_list[257], '2016-game3': value_list[258], '2016-game4': value_list[259], '2016-game5': value_list[260], '2016-game6': value_list[261], '2016-game7': value_list[262], '2016-game8': value_list[263], '2016-game9': value_list[264], '2016-game10': value_list[265], '2016-game11': value_list[266], '2016-game12': value_list[267], '2016-game13': value_list[268], '2016-game14': value_list[269], '2016-game15': value_list[270], '2016-game16': value_list[271], '2017-game1': value_list[272], '2017-game2': value_list[273], '2017-game3': value_list[274], '2017-game4': value_list[275], '2017-game5': value_list[276], '2017-game6': value_list[277], '2017-game7': value_list[278], '2017-game8': value_list[279], '2017-game9': value_list[280], '2017-game10': value_list[281], '2017-game11': value_list[282], '2017-game12': value_list[283], '2017-game13': value_list[284], '2017-game14': value_list[285], '2017-game15': value_list[286], '2017-game16': value_list[287], '2018-game1': value_list[288], '2018-game2': value_list[289], '2018-game3': value_list[290], '2018-game4': value_list[291], '2018-game5': value_list[292], '2018-game6': value_list[293], '2018-game7': value_list[294], '2018-game8': value_list[295], '2018-game9': value_list[296], '2018-game10': value_list[297], '2018-game11': value_list[298], '2018-game12': value_list[299], '2018-game13': value_list[300], '2018-game14': value_list[301], '2018-game15': value_list[302], '2018-game16': value_list[303], '2019-game1': value_list[304], '2019-game2': value_list[305], '2019-game3': value_list[306], '2019-game4': value_list[307], '2019-game5': value_list[308], '2019-game6': value_list[309], '2019-game7': value_list[310], '2019-game8': value_list[311], 
'2019-game9': value_list[312], '2019-game10': value_list[313], '2019-game11': value_list[314], '2019-game12': value_list[315], '2019-game13': value_list[316], '2019-game14': value_list[317], '2019-game15': value_list[318], '2019-game16': value_list[319] }, ignore_index=True) return df # + id="fAzOtuI3N-pj" colab_type="code" outputId="67b55d52-2491-49e3-ee74-1ebf95825c1a" colab={"base_uri": "https://localhost:8080/", "height": 1000} # The main code for iterating through the kicker list, calculating the points and adding the rows # to the final_df dataframe. column_names = ['player', 'first', 'last', 'name', 'position1', '2000-game1', '2000-game2', '2000-game3', '2000-game4', '2000-game5', '2000-game6', '2000-game7', '2000-game8', '2000-game9', '2000-game10', '2000-game11', '2000-game12', '2000-game13', '2000-game14', '2000-game15', '2000-game16', '2001-game1', '2001-game2', '2001-game3', '2001-game4', '2001-game5', '2001-game6', '2001-game7', '2001-game8', '2001-game9', '2001-game10', '2001-game11', '2001-game12', '2001-game13', '2001-game14', '2001-game15', '2001-game16', '2002-game1', '2002-game2', '2002-game3', '2002-game4', '2002-game5', '2002-game6', '2002-game7', '2002-game8', '2002-game9', '2002-game10', '2002-game11', '2002-game12', '2002-game13', '2002-game14', '2002-game15', '2002-game16', '2003-game1', '2003-game2', '2003-game3', '2003-game4', '2003-game5', '2003-game6', '2003-game7', '2003-game8', '2003-game9', '2003-game10', '2003-game11', '2003-game12', '2003-game13', '2003-game14', '2003-game15', '2003-game16', '2004-game1', '2004-game2', '2004-game3', '2004-game4', '2004-game5', '2004-game6', '2004-game7', '2004-game8', '2004-game9', '2004-game10', '2004-game11', '2004-game12', '2004-game13', '2004-game14', '2004-game15', '2004-game16', '2005-game1', '2005-game2', '2005-game3', '2005-game4', '2005-game5', '2005-game6', '2005-game7', '2005-game8', '2005-game9', '2005-game10', '2005-game11', '2005-game12', '2005-game13', '2005-game14', '2005-game15', 
'2005-game16', '2006-game1', '2006-game2', '2006-game3', '2006-game4', '2006-game5', '2006-game6', '2006-game7', '2006-game8', '2006-game9', '2006-game10', '2006-game11', '2006-game12', '2006-game13', '2006-game14', '2006-game15', '2006-game16', '2007-game1', '2007-game2', '2007-game3', '2007-game4', '2007-game5', '2007-game6', '2007-game7', '2007-game8', '2007-game9', '2007-game10', '2007-game11', '2007-game12', '2007-game13', '2007-game14', '2007-game15', '2007-game16', '2008-game1', '2008-game2', '2008-game3', '2008-game4', '2008-game5', '2008-game6', '2008-game7', '2008-game8', '2008-game9', '2008-game10', '2008-game11', '2008-game12', '2008-game13', '2008-game14', '2008-game15', '2008-game16', '2009-game1', '2009-game2', '2009-game3', '2009-game4', '2009-game5', '2009-game6', '2009-game7', '2009-game8', '2009-game9', '2009-game10', '2009-game11', '2009-game12', '2009-game13', '2009-game14', '2009-game15', '2009-game16', '2010-game1', '2010-game2', '2010-game3', '2010-game4', '2010-game5', '2010-game6', '2010-game7', '2010-game8', '2010-game9', '2010-game10', '2010-game11', '2010-game12', '2010-game13', '2010-game14', '2010-game15', '2010-game16', '2011-game1', '2011-game2', '2011-game3', '2011-game4', '2011-game5', '2011-game6', '2011-game7', '2011-game8', '2011-game9', '2011-game10', '2011-game11', '2011-game12', '2011-game13', '2011-game14', '2011-game15', '2011-game16', '2012-game1', '2012-game2', '2012-game3', '2012-game4', '2012-game5', '2012-game6', '2012-game7', '2012-game8', '2012-game9', '2012-game10', '2012-game11', '2012-game12', '2012-game13', '2012-game14', '2012-game15', '2012-game16', '2013-game1', '2013-game2', '2013-game3', '2013-game4', '2013-game5', '2013-game6', '2013-game7', '2013-game8', '2013-game9', '2013-game10', '2013-game11', '2013-game12', '2013-game13', '2013-game14', '2013-game15', '2013-game16', '2014-game1', '2014-game2', '2014-game3', '2014-game4', '2014-game5', '2014-game6', '2014-game7', '2014-game8', '2014-game9', 
'2014-game10', '2014-game11', '2014-game12', '2014-game13', '2014-game14', '2014-game15', '2014-game16', '2015-game1', '2015-game2', '2015-game3', '2015-game4', '2015-game5', '2015-game6', '2015-game7', '2015-game8', '2015-game9', '2015-game10', '2015-game11', '2015-game12', '2015-game13', '2015-game14', '2015-game15', '2015-game16', '2016-game1', '2016-game2', '2016-game3', '2016-game4', '2016-game5', '2016-game6', '2016-game7', '2016-game8', '2016-game9', '2016-game10', '2016-game11', '2016-game12', '2016-game13', '2016-game14', '2016-game15', '2016-game16', '2017-game1', '2017-game2', '2017-game3', '2017-game4', '2017-game5', '2017-game6', '2017-game7', '2017-game8', '2017-game9', '2017-game10', '2017-game11', '2017-game12', '2017-game13', '2017-game14', '2017-game15', '2017-game16', '2018-game1', '2018-game2', '2018-game3', '2018-game4', '2018-game5', '2018-game6', '2018-game7', '2018-game8', '2018-game9', '2018-game10', '2018-game11', '2018-game12', '2018-game13', '2018-game14', '2018-game15', '2018-game16', '2019-game1', '2019-game2', '2019-game3', '2019-game4', '2019-game5', '2019-game6', '2019-game7', '2019-game8', '2019-game9', '2019-game10', '2019-game11', '2019-game12', '2019-game13', '2019-game14', '2019-game15', '2019-game16' ] oplayers_df = player_df.loc[(player_df['position1'] == 'QB') | (player_df['position1'] == 'RB') | (player_df['position1'] == 'WR') | (player_df['position1'] == 'TE')] player_list = oplayers_df['player'].tolist() final_df = pd.DataFrame(columns = column_names) for player in player_list: first = player_df['first'].loc[(player_df['player']==player)].iloc[0] last = player_df['last'].loc[(player_df['player']==player)].iloc[0] name = player_df['name'].loc[(player_df['player']==player)].iloc[0] position1 = player_df['position1'].loc[(player_df['player']==player)].iloc[0] print(player) # Determine the current team of the player for s in range(1, 17): if offense_df['team'].loc[(offense_df['player']==player) & 
(offense_df['seas']==s)].empty: x = 0 else: team = offense_df['team'].loc[(offense_df['player']==player) & (offense_df['seas']==s)].iloc[0] break new_team = team player_scores = [] for year in range(2000,2020): week = 0 for game in range(1,17): week = week + 1 # Check the current team of the player if (offense_df['team'].loc[(offense_df['player']==player) & (offense_df['year']==year) & (offense_df['week']==week)].empty): x = 0 else: new_team = offense_df['team'].loc[(offense_df['player']==player) & (offense_df['year']==year) & (offense_df['week']==week)].iloc[0] if team != new_team: team = new_team # Handle the bye week if ((game_df['h'].loc[(game_df['seas']==year) & (game_df['wk']==week) & ((game_df['v']==team) | (game_df['h']==team))].empty) & (game_df['v'].loc[(game_df['seas']==year) & (game_df['wk']==week) & ((game_df['v']==team) | (game_df['h']==team))].empty)): week = week + 1 score = pull_stats(offense_df, player_df, td_df, 'fp', player, year, week) else: score = pull_stats(offense_df, player_df, td_df, 'fp', player, year, week) player_scores.append(score) final_df = add_row(final_df, player, first, last, name, position1, player_scores) # + id="i-muvKifV5rA" colab_type="code" outputId="6eb6deaa-05ee-447f-bb87-5c4095c48a50" colab={"base_uri": "https://localhost:8080/", "height": 1000} final_df.head(50) # + id="SSBvtOR7Wf51" colab_type="code" colab={} # Save the results to .csv file final_df.to_csv('/content/actuals_offense.csv', index=False)
wrangling/Actuals_Offense1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Hourly weather observations for station 344 (2011-2020); the first 30
# lines of the file are header metadata, so skip them when parsing.
weather_path = 'data/uurgeg_344_2011-2020.txt'
df = pd.read_csv(weather_path, skiprows=30)
df[:26]

# Night-time hours: ' HH' appears to be the hour of day (note the leading
# space in the column name); keep hours before 09:00 or from 20:00 onwards.
night_mask = (df[' HH'] <= 8) | (df[' HH'] >= 20)
night_mask.head()
night_df = df[night_mask]

# Warm observations: ' T' >= 240 — KNMI files typically report temperature
# in 0.1 degC, so this is presumably 24.0 degC (verify against file header).
warm_mask = night_df[' T'] >= 240
warm_df = night_df[warm_mask]
warm_df.head(20)

# Restrict to 2020 onwards and compare the number of distinct warm-night
# days against the full 2011-2020 period.
recent_mask = warm_df['YYYYMMDD'] >= 20200101
recent_df = warm_df[recent_mask]
len(recent_df['YYYYMMDD'].unique())
len(warm_df['YYYYMMDD'].unique())

df.columns
len(df)
airco_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_mxnet_p36
#     language: python
#     name: conda_mxnet_p36
# ---

# # Image classification transfer learning demo
#
# 1. [Introduction](#Introduction)
# 2. [Prerequisites and Preprocessing](#Prerequisites-and-Preprocessing)
# 3. [Fine-tuning the Image classification model](#Fine-tuning-the-Image-classification-model)
# 4. [Set up hosting for the model](#Set-up-hosting-for-the-model)
#   1. [Import model into hosting](#Import-model-into-hosting)
#   2. [Create endpoint configuration](#Create-endpoint-configuration)
#   3. [Create endpoint](#Create-endpoint)
# 5. [Perform Inference](#Perform-Inference)
#
# ## Introduction
#
# Welcome to our end-to-end example of distributed image classification algorithm in transfer learning mode. In this demo, we will use the Amazon sagemaker image classification algorithm in transfer learning mode to fine-tune a pre-trained model (trained on imagenet data) to learn to classify a new dataset. In particular, the pre-trained model will be fine-tuned using [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/).
#
# To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.

# ## Prerequisites and Preprocessing
#
# ### Permissions and environment variables
#
# Here we set up the linkage and authentication to AWS services. There are three parts to this:
#
# * The roles used to give learning and hosting access to your data. This will automatically be obtained from the role used to start the notebook
# * The S3 bucket that you want to use for training and model data
# * The Amazon sagemaker image classification docker image which need not be changed

# +
# %%time
import boto3
import re
from sagemaker import get_execution_role

# IAM role with SageMaker permissions, inherited from the notebook instance.
role = get_execution_role()

bucket='<<bucket-name>>' # customize to your bucket

# Region-specific ECR URIs of the built-in image-classification algorithm image.
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',
              'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',
              'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',
              'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest'}
# Pick the image matching the region this notebook runs in (a region not
# listed above raises KeyError here).
training_image = containers[boto3.Session().region_name]
print(training_image)
# -

# ## Fine-tuning the Image classification model
#
# The caltech 256 dataset consists of images from 257 categories (the last one being a clutter category) and has 30k images with a minimum of 80 images and a maximum of about 800 images per category.
#
# The image classification algorithm can take two types of input formats. The first is a [recordio format](https://mxnet.incubator.apache.org/tutorials/basic/record_io.html) and the other is a [lst format](https://mxnet.incubator.apache.org/how_to/recordio.html?highlight=im2rec). Files for both these formats are available at http://data.dmlc.ml/mxnet/data/caltech-256/. In this example, we will use the recordio format for training and use the training/validation split [specified here](http://data.dmlc.ml/mxnet/data/caltech-256/).
# +
import os
import urllib.request
import boto3


def download(url):
    """Download *url* into the working directory, skipping files that already exist."""
    filename = url.split("/")[-1]
    if not os.path.exists(filename):
        urllib.request.urlretrieve(url, filename)


def upload_to_s3(channel, file):
    """Upload local *file* to s3://<bucket>/<channel>/<file>.

    `bucket` is the module-level bucket name defined in the setup cell above.
    """
    s3 = boto3.resource('s3')
    key = channel + '/' + file
    # Fix: open the file in a context manager so the handle is always closed,
    # even if the upload raises (the original leaked the open handle).
    with open(file, "rb") as data:
        s3.Bucket(bucket).put_object(Key=key, Body=data)


# # caltech-256
download('http://data.mxnet.io/data/caltech-256/caltech-256-60-train.rec')
download('http://data.mxnet.io/data/caltech-256/caltech-256-60-val.rec')
upload_to_s3('validation', 'caltech-256-60-val.rec')
upload_to_s3('train', 'caltech-256-60-train.rec')
# -

# Once we have the data available in the correct format for training, the next step is to actually train the model using the data. Before training the model, we need to setup the training parameters. The next section will explain the parameters in detail.

# ## Training parameters
# There are two kinds of parameters that need to be set for training. The first one are the parameters for the training job. These include:
#
# * **Input specification**: These are the training and validation channels that specify the path where training data is present. These are specified in the "InputDataConfig" section. The main parameters that need to be set is the "ContentType" which can be set to "application/x-recordio" or "application/x-image" based on the input data format and the S3Uri which specifies the bucket and the folder where the data is present.
# * **Output specification**: This is specified in the "OutputDataConfig" section. We just need to specify the path where the output can be stored after training
# * **Resource config**: This section specifies the type of instance on which to run the training and the number of hosts used for training. If "InstanceCount" is more than 1, then training can be run in a distributed manner.
#
# Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm.
# These are:
#
# * **num_layers**: The number of layers (depth) for the network. We use 18 in this samples but other values such as 50, 152 can be used.
# * **num_training_samples**: This is the total number of training samples. It is set to 15420 for caltech dataset with the current split
# * **num_classes**: This is the number of output classes for the new dataset. Imagenet was trained with 1000 output classes but the number of output classes can be changed for fine-tuning. For caltech, we use 257 because it has 256 object categories + 1 clutter class
# * **epochs**: Number of training epochs
# * **learning_rate**: Learning rate for training
# * **mini_batch_size**: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size where N is the number of hosts on which training is run

# After setting training parameters, we kick off training, and poll for status until training is completed, which in this example, takes between 10 to 12 minutes per epoch on a p2.xlarge machine. The network typically converges after 10 epochs.

# + isConfigCell=true
# The algorithm supports multiple network depths (number of layers):
# 18, 34, 50, 101, 152 and 200. For this training, we will use 18 layers.
num_layers = 18
# we need to specify the input image shape for the training data
image_shape = "3,224,224"
# we also need to specify the number of training samples in the training set
# for caltech it is 15420
num_training_samples = 15420
# specify the number of output classes
num_classes = 257
# batch size for training
mini_batch_size = 128
# number of epochs
epochs = 2
# learning rate
learning_rate = 0.01
top_k = 2
# Since we are using transfer learning, we set use_pretrained_model to 1 so that weights can be
# initialized with pre-trained weights
use_pretrained_model = 1
# -

# # Training
# Run the training using Amazon sagemaker CreateTrainingJob API

# +
# %%time
import time
import boto3
from time import gmtime, strftime

s3 = boto3.client('s3')
# create unique job name by suffixing a timestamp
job_name_prefix = 'sagemaker-imageclassification-notebook'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
job_name = job_name_prefix + timestamp
# Full request body for CreateTrainingJob; hyperparameter values must be strings.
training_params = \
{
    # specify the training docker image
    "AlgorithmSpecification": {
        "TrainingImage": training_image,
        "TrainingInputMode": "File"
    },
    "RoleArn": role,
    "OutputDataConfig": {
        "S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
    },
    "ResourceConfig": {
        "InstanceCount": 1,
        "InstanceType": "ml.p2.xlarge",
        "VolumeSizeInGB": 50
    },
    "TrainingJobName": job_name,
    "HyperParameters": {
        "image_shape": image_shape,
        "num_layers": str(num_layers),
        "num_training_samples": str(num_training_samples),
        "num_classes": str(num_classes),
        "mini_batch_size": str(mini_batch_size),
        "epochs": str(epochs),
        "learning_rate": str(learning_rate),
        "use_pretrained_model": str(use_pretrained_model)
    },
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 360000
    },
    # Training data should be inside a subdirectory called "train"
    # Validation data should be inside a subdirectory called "validation"
    # The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": 's3://{}/train/'.format(bucket),
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "application/x-recordio",
            "CompressionType": "None"
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": 's3://{}/validation/'.format(bucket),
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "application/x-recordio",
            "CompressionType": "None"
        }
    ]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# -

# +
# create the Amazon SageMaker training job
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)

# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))

try:
    # wait for the job to finish and report the ending status
    sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
    training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
    status = training_info['TrainingJobStatus']
    print("Training job ended with status: " + status)
except Exception:
    # Fix: the original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit, making the waiter impossible to interrupt cleanly.
    print('Training failed to start')
    # if exception is raised, that means it has failed
    message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
    print('Training failed with the following error: {}'.format(message))
# -

training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)

# If you see the message,
#
# > `Training job ended with status: Completed`
#
# then that means training successfully completed and the output model was stored in the output path specified by
# `training_params['OutputDataConfig']`.
#
# You can also view information about and the status of a training job using the AWS SageMaker console. Just click on the "Jobs" tab.

# # Inference
#
# ***
#
# A trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the class of a given image.
#
# This section involves several steps,
#
# 1. [Create Model](#CreateModel) - Create model for the training output
# 1. [Create Endpoint Configuration](#CreateEndpointConfiguration) - Create a configuration defining an endpoint.
# 1. [Create Endpoint](#CreateEndpoint) - Use the configuration to create an inference endpoint.
# 1. [Perform Inference](#Perform Inference) - Perform inference on some input data using the endpoint.

# ## Create Model
#
# We now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.

# +
# %%time
import boto3
from time import gmtime, strftime

sage = boto3.Session().client(service_name='sagemaker')

model_name="test-image-classification-model"
print(model_name)

# Look up the S3 location of the model artifacts produced by the training job.
info = sage.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)

# Same region-to-ECR-image table as in the training setup cell above.
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',
              'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',
              'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',
              'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest'}
hosting_image = containers[boto3.Session().region_name]
# Container definition pairing the serving image with the trained weights.
primary_container = {
    'Image': hosting_image,
    'ModelDataUrl': model_data,
}

create_model_response = sage.create_model(
    ModelName = model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = primary_container)

print(create_model_response['ModelArn'])
# -

# ### Create Endpoint Configuration
# At launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.
#
# In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.

# +
from time import gmtime, strftime

# One variant ("AllTraffic") serving the model created above on a single CPU instance.
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
    EndpointConfigName = endpoint_config_name,
    ProductionVariants=[{
        'InstanceType':'ml.m4.xlarge',
        'InitialInstanceCount':1,
        'ModelName':model_name,
        'VariantName':'AllTraffic'}])

print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
# -

# ### Create Endpoint
# Lastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.

# +
# %%time
import time

timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))

endpoint_params = {
    'EndpointName': endpoint_name,
    'EndpointConfigName': endpoint_config_name,
}
# CreateEndpoint is asynchronous; readiness is polled in the next cell.
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
# -

# Finally, now the endpoint can be created. It may take some time to create the endpoint...

# +
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))

# wait until the status has changed (blocks until InService or failure)
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)

# print the status of the endpoint
endpoint_response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = endpoint_response['EndpointStatus']
print('Endpoint creation ended with EndpointStatus = {}'.format(status))

if status != 'InService':
    raise Exception('Endpoint creation failed.')
# -

# If you see the message,
#
# > `Endpoint creation ended with EndpointStatus = InService`
#
# then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.
#
# We will finally create a runtime object from which we can invoke the endpoint.

# ## Perform Inference
# Finally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
#
# The runtime client is the data-plane API used to invoke a deployed endpoint.
import boto3
runtime = boto3.Session().client(service_name='runtime.sagemaker')

# ### Download test image
# !wget -O /tmp/test.jpg http://www.vision.caltech.edu/Image_Datasets/Caltech256/images/008.bathtub/008_0007.jpg
file_name = '/tmp/test.jpg'

# test image
from IPython.display import Image
Image(file_name)

import json
import numpy as np

# Send the raw JPEG bytes to the endpoint; the algorithm accepts
# 'application/x-image' payloads directly.
with open(file_name, 'rb') as f:
    payload = f.read()
    payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
                                   ContentType='application/x-image',
                                   Body=payload)
result = response['Body'].read()
# result will be in json format and convert it to ndarray
result = json.loads(result)
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
# Human-readable label for each class index (257 entries; presumably in the
# same order as the training labels of the caltech-256 recordio files — verify).
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat',
                     'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101',
                     'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101',
                     'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire',
                     'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks',
                     'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor',
                     'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe',
                     'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell',
                     'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern',
                     'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight',
                     'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan',
                     'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose',
                     'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock',
                     'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus',
                     'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass',
                     'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod',
                     'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife',
                     'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house',
                     'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101',
                     'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom',
                     'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip',
                     'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table',
                     'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope',
                     'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle',
                     'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower',
                     'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake',
                     'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider',
                     'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101',
                     'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot',
                     'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket',
                     'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa',
                     'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt',
                     'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine',
                     'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle',
                     'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101',
                     'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))

# ### Clean up
#
# When we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint.
sage.delete_endpoint(EndpointName=endpoint_name)
introduction_to_amazon_algorithms/imageclassification_caltech/Image-classification-transfer-learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PART 1: Exploratory Data Analysis # In this Jupyter Notebook, we analyze the given external datasets through a **preprocessing** lens: we manipulate, curate, and prepare data to better understand what we're dealing with and to prepare our input data for more advanced prediction-driven modifications. # # - **NOTE**: Before working through this notebook, please ensure that you have all necessary dependencies as denoted in [Section A: Imports and Initializations](#section-A) of this notebook. # # - **NOTE**: Before working through Sections A-D of this notebook, please run all code cells in [Appendix A: Supplementary Custom Objects](#appendix-A) to ensure that all relevant functions and objects are appropriately instantiated and ready for use. # # --- # ## 🔵 TABLE OF CONTENTS 🔵 <a name="TOC"></a> # # Use this **table of contents** to navigate the various sections of the preprocessing notebook. # # #### 1. [Section A: Imports and Initializations](#section-A) # # All necessary imports and object instantiations for data preprocessing. # # #### 2. [Section B: Manipulating Our Datasets](#section-B) # # Data manipulation operations, including null value removal/imputation, # data splitting/merging, and data frequency generation. # # #### 3. [Section C: Visualizing Trends Across Our Data](#section-C) # # Data visualizations to outline trends and patterns inherent across our data # that may mandate further analysis. # # #### 4. [Section D: Saving Our Interim Datasets](#section-D) # # Saving preprocessed data states for further access. # # #### 5. [Appendix A: Supplementary Custom Objects](#appendix-A) # # Custom Python object architectures used throughout the data preprocessing. 
# # --- # ## 🔹 Section A: Imports and Initializations <a name="section-A"></a> # General Importations for Data Manipulation and Visualization. import numpy as np import pandas as pd import matplotlib.pyplot as plt # Custom Algorithmic Structures for Data Preprocessing. import sys sys.path.append("../structures/") from custom_structures import corrplot_ from dataset_preprocessor import Dataset_Preprocessor # #### Instantiate our Preprocessor Engine # # Custom Preprocessor Class for Directed Data Manipulation. # # **NOTE**: Please refer to _Appendix A: Supplementary Custom Objects_ for instructions on how to view the fully implemented dataset preprocessor object. preproc = Dataset_Preprocessor() # ##### [(back to top)](#TOC) # # --- # ## 🔹 Section B: Manipulating Our Datasets <a name="section-B"></a> # #### Read Our Raw Data Into Conditional DataFrame(s) # # **Call** `.load_data()` **method to load in all conditionally separated external datasets.** # # _NOTE_: Currently loading in both datasets independently using defaulted condition `which="both"`. (df_train, df_test) = preproc.load_data() # #### Get Unique Values Across Each Feature in Training Dataset. # # **Call the** `get_uniques()` **custom function to identify unique values across all input features for dataset(s).** # # _NOTE_: Currently identifying unique data values across all features in dataset using defaulted conditions `features=None` and `how="value"`. uniques_train, uniques_test = get_uniques(df_train), get_uniques(df_test) # #### Check Which Features Across Training/Testing Data Contain `NaN` (Null) Values. # # _NOTE_: Null values are denoted as `np.nan` (float) datatypes and will appear as `<type 'float'>` data notations. floating_features_train = identify_typed_features(uniques_train) # > _RESULT_: No null values detected across any features in training dataset. 
floating_features_test = identify_typed_features(uniques_test) # > _RESULT_: Null values potentially detected across nine (9) features in testing dataset. # #### Confirm Null Value Presence Across Identified "Floating" Features. get_null_metrics(df_test, subset=floating_features_test, metric="binary") # #### Get Proportion of Null Values Across Each Identified "Floating" Feature. get_null_metrics(df_test, subset=floating_features_test, metric="percent") # #### Impute Null Values Across Floating Features # # **NOTE**: Since null values are highly sparse across our data (highest frequent occurrency is ~0.1%) and the size of our data is not small, we can safely drop null values rather than performing advanced imputation (e.g. _null value flagging_, _mean/mode replacement_). preproc.null_imputer(df_test, subset=floating_features_test, method="drop", na_filter="any") # #### Reencode Alphabetical Features for Numerical Encoding Consistency LOOKUP_TABLE_ALPHANUMERIC = { 1: "A", 2: "B", 3: "C", 4: "D" } # _NOTE_: Feature encoding occurs inplace; if condition `drop_og` is `True`, then rerunning method call will result in errors due to dropped target. # + preproc.feature_encoder(df_train, target="C", lookup_table=LOOKUP_TABLE_ALPHANUMERIC, drop_og=True) preproc.feature_encoder(df_test, target="A", lookup_table=LOOKUP_TABLE_ALPHANUMERIC, drop_og=True) # - # #### 🔸 CHECKPOINT 🔸 # # **Interim data ready to save.** # ##### [(back to top)](#TOC) # # --- # ## 🔹 Section C: Visualizing Trends Across Our Data <a name="section-C"></a> # #### ⭐️ _TODO_: Include visualizations towards end of pipeline architectural creation. ⭐️ # ##### [(back to top)](#TOC) # # --- # ## 🔹 Section D: Saving Our Interim Datasets <a name="section-D"></a> # Interim datasets are data states directly after preprocessing, where data is designated for curation and manipulation prior to target vs. non-target handling. 
# #### Save Current (Preprocessed) Data States to Interim Datasets
#
# **Call** `.save_dataset()` **method to save data state to interim folder for
# processing accessability.**

# Relative output directory and file stems for the interim (post-preprocessing) data.
REL_PATH_TO_ITM_DATA = "../data/interim/"
FILENAME_TRAINING, FILENAME_TESTING = "train_i", "test_i"

preproc.save_dataset(df_train, REL_PATH_TO_ITM_DATA + FILENAME_TRAINING)
preproc.save_dataset(df_test, REL_PATH_TO_ITM_DATA + FILENAME_TESTING)

# ##### [(back to top)](#TOC)
#
# ---
# ## 🔹 Appendix A: Supplementary Custom Objects <a name="appendix-A"></a>

# #### A[1]: 6Nomads Dataset Preprocessor.

# To view the **Data Preprocessor Engine**, please follow the following steps:
#
# 1. Navigate to the `structures` sibling directory.
# 2. Access the `dataset_preprocessor.py` file.
# 3. View the `Dataset_Preprocessor()` object architecture.

# _NOTE_: **Creating Preprocessor Engine in Notebook Until Further Separation of Concerns.**

# #### A[2]: Function to Obtain Relevant Unique Values or Data Types from Feature(s) Across Dataset.

def get_uniques(dataset, features=None, how="both"):
    """
    Custom function that analyzes a dataset's given feature(s) and returns all
    unique values and/or data types across each inputted feature.

    INPUTS:
        {dataset}:
            - pd.DataFrame: Single input dataset.
        {features}:
            - NoneType(None): Sets function to use all features across dataset. (DEFAULT)
            - str: Single referenced feature in dataset.
            - list: List of referenced features in dataset.
        {how}:
            - str(both): Identifies both unique data types and values. (DEFAULT)
            - str(dtype): Identifies unique data types.
            - str(value): Identifies unique data values.

    OUTPUTS:
        dict(uniques): Dictionary structure mapping each input feature to relevantly
        identified unique values/types. With `how="both"`, each entry is a dict with
        keys "dtypes" and "values"; otherwise each entry is the corresponding list.

    RAISES:
        TypeError: If `features` is neither None, a string, nor a list.
        ValueError: If `how` is not one of "both", "dtype", "value".
    """
    # Validate selected features argument.
    if features is not None and not isinstance(features, (str, list)):
        raise TypeError("ERROR: Inappropriate data type passed to argument `features`.\n\nExpected type in range:\n - NoneType\n - str()\n - list()\n\nActual:\n - {}".format(str(type(features))))
    # Validate unique identifier argument.
    if how not in ["both", "dtype", "value"]:
        raise ValueError("ERROR: Inappropriate value passed to argument `how`.\n\nExpected value in range:\n - both\n - dtype\n - value\n\nActual:\n - {}".format(how))

    # Normalize `features` into a list of column names.
    if features is None:
        features = dataset.columns.tolist()
    if isinstance(features, str):
        features = [features]

    # Single pass over the requested features (the previous implementation built
    # two intermediate dicts for `how="both"` and merged them in a second loop).
    uniques = dict()
    for feature in features:
        # Only compute what the caller asked for: `sorted()` on mixed-type columns
        # can raise, so values are not touched when only dtypes were requested.
        if how in ("both", "dtype"):
            unique_dtypes = list(set(map(type, dataset[feature])))
        if how in ("both", "value"):
            unique_values = sorted(dataset[feature].unique().tolist())
        if how == "both":
            uniques[feature] = {"dtypes": unique_dtypes, "values": unique_values}
        elif how == "dtype":
            uniques[feature] = unique_dtypes
        else:  # how == "value"
            uniques[feature] = unique_values
    return uniques

# #### A[3]: Function to Identify and Return Features Containing Unique Input Data Types.

def identify_typed_features(uniques, dtype=float):
    """
    Custom function that extracts features from previously generated unique feature
    data based on whether or not the feature includes a user-specified data type.

    INPUTS:
        {uniques}:
            - dict: Dictionary object of feature associations generated by
              `get_uniques()` with `how="both"` (entries must carry a "dtypes" key).
        {dtype}:
            - type(float): Float data type. (DEFAULT)
            - type(int): Integer data type.
            - type(str): String data type.

    OUTPUTS:
        list(typed_features): List of feature names corresponding to identified
        user-specified data types.
    """
    typed_features = list()
    for key in uniques.keys():
        # BUGFIX: the "dtypes" list is built from a set, so its ordering is
        # arbitrary; the previous check of only element [0] nondeterministically
        # missed features whose dtype list contained `dtype` at another position.
        # A membership test matches the documented contract ("includes ... type").
        if dtype in uniques[key]["dtypes"]:
            print("IDENTIFIED FEATURE OF TYPE '{}': {}".format(str(dtype), key))
            typed_features.append(key)
    return typed_features

# #### A[4]: Function to Calculate Null/Missing Metrics of Given Feature Data.

def get_null_metrics(dataset, subset=None, metric="percent"):
    """
    Custom function that produces a series of associated features and metrics
    related to presence and proportion of null/missing values.

    INPUTS:
        {dataset}:
            - pd.DataFrame: Single input dataset.
        {subset}:
            - NoneType: If None, all features are used for null metric evaluation. (DEFAULT)
            - list: Array of features across data to consider; others are ignored.
        {metric}:
            - str(percent): Relative proportion of null values per feature. (DEFAULT)
            - str(count): Absolute count of null values per feature.
            - str(binary): Whether or not any null values occur per feature.

    OUTPUTS:
        pd.Series: Series of associated feature names and relative null value
        prevalence metrics.

    RAISES:
        ValueError: If `metric` is not one of "percent", "count", "binary".
    """
    # Default to evaluating every feature in the dataset.
    if subset is None:
        subset = dataset.columns.tolist()

    # Calculate percentages for null values across each input feature.
    if metric == "percent":
        return dataset[subset].isna().sum() / len(dataset)
    # Calculate total counts of null values across each input feature.
    elif metric == "count":
        return dataset[subset].isna().sum()
    # Determine True/False based on null value presence across each input feature.
    elif metric == "binary":
        # Guard: `zip(*[])` on an empty subset would yield nothing and the
        # indexing below would raise IndexError; return an empty Series instead.
        if not subset:
            return pd.Series(dtype=bool)
        binarized_metrics = list()
        for feature in subset:
            nulls_in_feature = dataset[feature].isna().values.any()
            binarized_metrics.append((feature, nulls_in_feature))
        binarized_metrics_series = list(zip(*binarized_metrics))
        return pd.Series(binarized_metrics_series[1], index=binarized_metrics_series[0])

    # BUGFIX: an unrecognized metric previously fell through and silently
    # returned None; fail loudly instead, mirroring `get_uniques` validation.
    raise ValueError("ERROR: Inappropriate value passed to argument `metric`.\n\nExpected value in range:\n - percent\n - count\n - binary\n\nActual:\n - {}".format(metric))

# ##### [(back to top)](#TOC)
#
# ---
notebooks/01-exploratory-data-analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Plot the training/validation curves recorded during fine-tuning.
# `SCMTuned.json` holds per-step training metrics plus sparse validation
# metrics stored as (step, value) pairs.
import json
import numpy as np
import matplotlib.pyplot as plt

# BUGFIX: `json.load(open(...))` leaked the file handle (it was never closed);
# a `with` block guarantees the file is closed once parsing finishes.
with open('SCMTuned.json', 'r') as results_file:
    results = json.load(results_file)

# Per-step training metrics: plotted against their implicit step index.
plt.plot(results['train_loss'])

plt.plot(results['train_acc'])

# Validation metrics are (step, value) pairs; unzip so the x-axis carries the
# actual step at which each validation pass ran.
idx, loss = zip(*results['val_loss'])
plt.plot(idx, loss)

idx, acc = zip(*results['val_acc'])
plt.plot(idx, acc)
finetuned/ExploreFineTunings.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tortas/DS-Unit-2-Sprint-3-Classification-Validation/blob/master/module1-logistic-regression/LS_DS_231_Logistic_Regression-LECTURE-v3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] colab_type="text" id="N7SXF6jEBd5_"
# # Lambda School Data Science - Logistic Regression
#
# Logistic regression is the baseline for classification models, as well as a handy way
# to predict probabilities (since those too live in the unit interval). While relatively
# simple, it is also the foundation for more sophisticated classification techniques
# such as neural networks (many of which can effectively be thought of as networks of
# logistic models).

# + [markdown] colab_type="text" id="E7-AOngjadRN"
# ## Lecture - Where Linear goes Wrong
# ### Return of the Titanic 🚢
#
# You've likely already explored the rich dataset that is the Titanic - let's use
# regression and try to predict survival with it. The data is
# [available from Kaggle](https://www.kaggle.com/c/titanic/data), so we'll also play a
# bit with [the Kaggle API](https://github.com/Kaggle/kaggle-api).

# + [markdown] id="lRDtzPUwiWMe" colab_type="text"
# ### Get data, option 1: Kaggle API
#
# #### Sign up for Kaggle and get an API token
# 1. [Sign up for a Kaggle account](https://www.kaggle.com/), if you don't already have one.
# 2. [Follow these instructions](https://github.com/Kaggle/kaggle-api#api-credentials) to
#    create a Kaggle "API Token" and download your `kaggle.json` file. If you are using
#    Anaconda, put the file in the directory specified in the instructions.
#
# _This will enable you to download data directly from Kaggle. If you run into problems,
# don't worry — I'll give you an easy alternative way to download today's data, so you
# can still follow along with the lecture hands-on. And then we'll help you through the
# Kaggle process after the lecture._

# + [markdown] id="Nl6-gCB-iWMi" colab_type="text"
# #### Put `kaggle.json` in the correct location
#
# - ***If you're using Anaconda,*** put the file in the directory specified in the
#   [instructions](https://github.com/Kaggle/kaggle-api#api-credentials).
#
# - ***If you're using Google Colab,*** upload the file to your Google Drive, and run this cell:

# + id="292Yo7YeiWMk" colab_type="code" colab={}
from google.colab import drive
drive.mount('/content/drive')
# %env KAGGLE_CONFIG_DIR=/content/drive/My Drive/

# + [markdown] id="wznD9SHBiWMr" colab_type="text"
# #### Install the Kaggle API package and use it to get the data
#
# You also have to join the Titanic competition to have access to the data

# + colab_type="code" id="MnHLWPYDcyIe" colab={}
# !pip install kaggle

# + id="hjvHnKqOiWMz" colab_type="code" colab={}
# !kaggle competitions download -c titanic

# + [markdown] id="DrXeGUrviWM6" colab_type="text"
# ### Get data, option 2: Download from the competition page
# 1. [Sign up for a Kaggle account](https://www.kaggle.com/), if you don't already have one.
# 2. [Go to the Titanic competition page](https://www.kaggle.com/c/titanic) to download
#    the [data](https://www.kaggle.com/c/titanic/data).

# + [markdown] id="6Z4VlbuXiWM7" colab_type="text"
# ### Get data, option 3: Use Seaborn
#
# ```
# import seaborn as sns
# train = sns.load_dataset('titanic')
# ```
#
# But Seaborn's version of the Titanic dataset is not identical to Kaggle's version,
# as we'll see during this lesson!

# + [markdown] id="MFMrbaB1iWM9" colab_type="text"
# ### Read data

# + id="jJzw2K1MiWM_" colab_type="code" colab={} outputId="70838525-9c9d-4496-9c08-0acd53aef339"
import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train.shape, test.shape

# + [markdown] id="PQNMDEHLiWNJ" colab_type="text"
# Notice that `train.csv` has one more column than `test.csv` : The target, `Survived`.
#
# Kaggle provides test labels, but not test targets. Instead, you submit your test
# predictions to Kaggle to get your test scores. Why? This is model validaton best
# practice, makes competitons fair, and helps us learn about over- and under-fitting.

# + id="MsporzJCiWNL" colab_type="code" colab={} outputId="c1ea6634-8a52-4fa7-e975-f2d5e2fa81b7"
train.sample(n=5)

# + id="bqeHrNX6iWNU" colab_type="code" colab={} outputId="e633435f-769d-4237-940b-eb105164441d"
test.sample(n=5)

# + [markdown] id="zgg6Sm5RiWNg" colab_type="text"
# Do some data exploration.
#
# About 62% of passengers did not survive.

# + id="M0RyrLB1iWNi" colab_type="code" colab={} outputId="27b028c0-996c-42f9-a1e6-de17f7f4934e"
target = 'Survived'
train[target].value_counts(normalize=True)

# + [markdown] id="rw7T5nUmiWNp" colab_type="text"
# Describe the numeric columns

# + id="8D3IbBqLiWNr" colab_type="code" colab={} outputId="7a4ba7af-b79c-4b67-ab43-179fb1c96753"
train.describe(include='number')

# + [markdown] id="5Q32X8ZqiWN0" colab_type="text"
# Describe the non-numeric columns

# + id="vOy6KoYRiWN5" colab_type="code" colab={} outputId="59ea0f32-ecce-47e4-f35f-631033488b21"
train.describe(exclude='number')

# + [markdown] id="holRF-veiWOH" colab_type="text"
# ### How would we try to do this with linear regression?

# + [markdown] id="PI1qE8EkiWOJ" colab_type="text"
# We choose a few numeric features, split the data into X and y,
# [impute missing values](https://scikit-learn.org/stable/modules/impute.html), and fit
# a Linear Regression model on the train set.

# + colab_type="code" id="fcxfpsjdFJwM" outputId="590a3bba-67fe-48b4-bf6e-91e67bd29bd4" colab={"base_uri": "https://localhost:8080/", "height": 34}
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression

features = ['Pclass', 'Age', 'Fare']
target = 'Survived'
X_train = train[features]
y_train = train[target]
X_test = test[features]

# SimpleImputer defaults to mean imputation (Age has missing values).
imputer = SimpleImputer()
X_train_imputed = imputer.fit_transform(X_train)
X_test_imputed = imputer.transform(X_test)

lin_reg = LinearRegression()
lin_reg.fit(X_train_imputed, y_train)

# + [markdown] id="sYwzXYmniWOV" colab_type="text"
# Let's consider a test case. What does our Linear Regression predict for a 1st class,
# 5 year-old, with a fare of 500?
#
# 119% probability of survival.

# + id="MHrVdZEfiWOX" colab_type="code" colab={} outputId="e47b7dd1-2225-4723-9442-9c90812a4169"
import numpy as np
test_case = np.array([[1, 5, 500]])  # Rich 5-year old in first class
lin_reg.predict(test_case)

# + [markdown] id="mfvH0ExbiWOk" colab_type="text"
# Based on the Linear Regression's intercept and coefficients, it will predict
# probabilities greater than 100%, or less than 0%, given high enough / low enough
# values for the features.

# + id="JgQAr-GEiWOm" colab_type="code" colab={} outputId="563b2359-caec-4f91-860a-3077395f9c46"
print('Intercept', lin_reg.intercept_)
coefficients = pd.Series(lin_reg.coef_, X_train.columns)
print(coefficients.to_string())

# + [markdown] id="0JjIzui6iWOw" colab_type="text"
# ### How would we do this with Logistic Regression?

# + [markdown] id="A3lvle4NiWOz" colab_type="text"
# The scikit-learn API is consistent, so the code is similar.
#
# We instantiate our model (here with `LogisticRegression()` instead of `LinearRegression()`)
#
# We use the same method to fit the model on the training data: `.fit(X_train_imputed, y_train)`
#
# We use the same method to make a predict for our test case: `.predict(test_case)` —
# But this returns different results. Regressors return continuous values, but
# classifiers return discrete predictions of the class label. In this binary
# classification problem, our discrete class labels are `0` (did not survive) or
# `1` (did survive).
#
# Classifiers also have a `.predict_proba` method, which returns predicted
# probabilities for each class. The probabilities sum to 1.
#
# We predict ~3% probability that our test case did not surive, and 97% probability
# that our test case did survive. This result is what we want and expect for our test
# case: to predict survival, with high probability, but less than 100%.

# + colab_type="code" id="dpUm8Dl-u2aB" outputId="44bc9b92-52ac-4e13-ab03-e87cbfd5fea7" colab={"base_uri": "https://localhost:8080/", "height": 89}
from sklearn.linear_model import LogisticRegression

log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(X_train_imputed, y_train)
print('Prediction for rich 5 year old:', log_reg.predict(test_case))
print('Predicted probabilities for rich 5 year old:', log_reg.predict_proba(test_case))

# + [markdown] id="liWFdSx7iWPB" colab_type="text"
# Logistic Regression calculates predicted probablities between the range of 0 and 1.
# By default, scikit-learn makes a discrete prediction by returning whichever class had
# the highest predicted probability for that observation.
#
# In the case of binary classification, this is equivalent to using a threshold of 0.5.
# However, we could choose a different threshold, for different trade-offs between
# false positives versus false negatives.

# + id="wgf5nan9iWPD" colab_type="code" colab={} outputId="0607d0e4-df27-4180-bf19-d9058436f50a"
threshold = 0.5
# Column 1 of predict_proba is P(Survived == 1).
probabilities = log_reg.predict_proba(X_test_imputed)[:,1]
manual_predictions = (probabilities > threshold).astype(int)
direct_predictions = log_reg.predict(X_test_imputed)
all(manual_predictions == direct_predictions)

# + [markdown] id="YW2_xwJAiWPL" colab_type="text"
# ### How accurate is the Logistic Regression?

# + [markdown] id="f20J6igliWPM" colab_type="text"
# Scikit-learn estimators provide a convenient method, `.score`. It uses the X features
# to generate predictions. Then it compares the predictions to the y ground truth
# labels. Then it returns the score.
#
# For regressors, `.score` returns R^2.
#
# For classifiers, `.score` returns Accuracy.
#
# Our Logistic Regression model has 70% training accuracy. (This is higher than the 62%
# accuracy we would get with a baseline that predicts every passenger does not survive.)

# + id="2dQOc9ULiWPP" colab_type="code" colab={} outputId="e0e5608c-c1e5-4c8d-e4b4-b2096bc83d78"
score = log_reg.score(X_train_imputed, y_train)
print('Train Accuracy Score', score)

# + [markdown] id="1Bfw5W3MiWPe" colab_type="text"
# Accuracy is just the number of correct predictions divided by the total number of
# predictions.
#
# For example, we can look at our first five predictions:

# + id="WSu9Ivo4iWPf" colab_type="code" colab={}
y_pred = log_reg.predict(X_train_imputed)

# + id="9DX6oHs8iWPm" colab_type="code" colab={} outputId="35eb6a10-a6cf-4d82-843f-6c3e4c7b0cba"
y_pred[:5]

# + [markdown] id="Aw1_vllMiWPu" colab_type="text"
# And compare to the ground truth labels for these first five observations:

# + id="l9lRtcsOiWPv" colab_type="code" colab={} outputId="c9c1d693-86ef-4f7c-9e81-b0259a6b0392"
y_train[:5].values

# + [markdown] id="-6yDvK7wiWP6" colab_type="text"
# We have four correct predictions, divided by five total predictions, for 80% accuracy.

# + id="cr6NTVU9iWP9" colab_type="code" colab={} outputId="1387008e-d3c2-4d9d-bef7-acaf75d25a0e"
correct_predictions = 4
total_predictions = 5
accuracy = correct_predictions / total_predictions
print(accuracy)

# + [markdown] id="OwGnzd8aiWQE" colab_type="text"
# scikit-learn's `accuracy_score` function works the same way and returns the same result.

# + id="T4uDJZeIiWQI" colab_type="code" colab={} outputId="0f25e034-d373-4889-b939-77c393b44145"
from sklearn.metrics import accuracy_score
accuracy_score(y_train[:5], y_pred[:5])

# + [markdown] id="awv9uX7niWQY" colab_type="text"
# We don't want to just score our model on the training data.
#
# We cannot calculate a test accuracy score ourselves in this notebook, because Kaggle
# does not provide test labels.
#
# We could split the train data into train and validation sets. However, we don't have
# many observations. (Fewer than 1,000.)
#
# As another alternative, we can use cross-validation:

# + id="r4DIDrd9iWQa" colab_type="code" colab={} outputId="1920cc8b-5757-4218-8b42-df590605c918"
from sklearn.model_selection import cross_val_score
scores = cross_val_score(log_reg, X_train_imputed, y_train, cv=10)
print('Cross-Validation Accuracy Scores', scores)

# + [markdown] id="T4D3WYFQiWQn" colab_type="text"
# We can see a range of scores:

# + id="5_SitmMxiWQp" colab_type="code" colab={} outputId="e8dd5cf8-3f78-4def-ed6e-ef73af86bb49"
scores = pd.Series(scores)
scores.min(), scores.mean(), scores.max()

# + [markdown] id="QedJBSAqiWQz" colab_type="text"
# To learn more about Cross-Validation, see these links:
#
# - https://scikit-learn.org/stable/modules/cross_validation.html
# - https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html
# - https://github.com/LambdaSchool/DS-Unit-2-Sprint-3-Classification-Validation/blob/master/module2-baselines-validation/model-validation-preread.md#what-is-cross-validation

# + [markdown] id="CVmpRiEviWQ0" colab_type="text"
# ### What's the equation for Logistic Regression?
# # https://en.wikipedia.org/wiki/Logistic_function
#
# https://en.wikipedia.org/wiki/Logistic_regression#Probability_of_passing_an_exam_versus_hours_of_study

# + id="J38KkIWMiWQ2" colab_type="code" colab={} outputId="ed7dd9a1-dbb8-459a-a459-b78290ef6db9"
print('Intercept', log_reg.intercept_[0])
coefficients = pd.Series(log_reg.coef_[0], X_train.columns)
print(coefficients.to_string())

# + id="b7gZjwqdiWQ_" colab_type="code" colab={} outputId="a25d94e0-5d91-4e4a-a9f0-a611d9da6dd9"
# The logistic sigmoid "squishing" function,
# implemented to work with numpy arrays
def sigmoid(x):
    """Map real-valued input(s) element-wise into the open interval (0, 1).

    Accepts scalars or numpy arrays and returns the same shape.
    """
    # IDIOM: np.exp(-x) is the direct exponential ufunc; the previous
    # `np.e ** (-x)` computed the same quantity via a generic float power.
    return 1 / (1 + np.exp(-x))

# Manually reproduce predict_proba for the positive class:
# sigmoid(w . x + b) with the fitted coefficients and intercept.
sigmoid(np.dot(log_reg.coef_, test_case.T) + log_reg.intercept_)

# + [markdown] id="9GqFl82aiWRG" colab_type="text"
# Or we can write the code with the `@` operator instead of numpy's dot product function

# + id="pRYhA95XiWRI" colab_type="code" colab={} outputId="87bb3f60-ee67-40a5-c552-c05b14391f4d"
sigmoid(log_reg.coef_ @ test_case.T + log_reg.intercept_)

# + [markdown] id="fr97nyUeiWRR" colab_type="text"
# Either way, we get the same result as our scikit-learn Logistic Regression

# + id="dgquKO3ZiWRT" colab_type="code" colab={} outputId="3377205c-3347-4dfa-a779-773f1b77a8e7"
log_reg.predict_proba(test_case)

# + [markdown] id="_Pm9ZkqYiWRb" colab_type="text"
# ## Feature Engineering
#
# Get the [Category Encoder](http://contrib.scikit-learn.org/categorical-encoding/) library
#
# If you're running on Google Colab:
#
# ```
# # !pip install category_encoders
# ```
#
# If you're running locally with Anaconda:
#
# ```
# # !conda install -c conda-forge category_encoders
# ```

# + [markdown] id="h118qjxEiWRd" colab_type="text"
# #### Notice that Seaborn's version of the Titanic dataset has more features than Kaggle's version

# + id="DwQFr4dOiWRh" colab_type="code" colab={} outputId="defc01bb-1940-4690-973f-5103b7b61b18"
import seaborn as sns
sns_titanic = sns.load_dataset('titanic')
print(sns_titanic.shape)

# + id="6bJhYIkJiWRr" colab_type="code" colab={}
# outputId="9898f34b-b187-4f2d-a631-9f957d803381"
sns_titanic.head()

# + [markdown] id="oU4H80n9iWR5" colab_type="text"
# #### We can make the `adult_male` and `alone` features, and we can extract features from `Name`

# + id="MVZ0Oou6iWSE" colab_type="code" colab={}
def make_features(X):
    """Return a copy of *X* with engineered Titanic features added.

    Adds: `adult_male` (male and at least 16 years old), `alone` (no siblings/
    spouses and no parents/children aboard), and `last_name` / `title` parsed
    out of the "Last, Title. First" format of the `Name` column.
    """
    engineered = X.copy()
    engineered['adult_male'] = (engineered['Sex'] == 'male') & (engineered['Age'] >= 16)
    engineered['alone'] = (engineered['SibSp'] == 0) & (engineered['Parch'] == 0)
    # Split the name once on the comma and reuse the pieces for both columns.
    name_parts = engineered['Name'].str.split(',')
    engineered['last_name'] = name_parts.str[0]
    engineered['title'] = name_parts.str[1].str.split('.').str[0]
    return engineered

# + id="mxtw3gWaiWSK" colab_type="code" colab={}
train = make_features(train)
test = make_features(test)

# + id="oTN_4pFtiWST" colab_type="code" colab={} outputId="73f090ae-12d0-40dd-8b24-b52daddf1c21"
train.head()

# + id="QHiDuHgtiWSc" colab_type="code" colab={} outputId="c7a6bc94-09c5-4551-a75c-e0f223dda602"
train['adult_male'].value_counts()

# + id="haTzzoyciWSi" colab_type="code" colab={} outputId="3e0c27ab-8952-4eaa-9821-0645f2a53ad2"
train['alone'].value_counts()

# + id="jGawckYniWSu" colab_type="code" colab={} outputId="56003288-e87d-4a59-8e46-f14ed17804ca"
train['title'].value_counts()

# + id="t2gqHqamiWS0" colab_type="code" colab={} outputId="9393395e-1670-485f-ebb2-3b209a08a8ba"
train.describe(include='number')

# + id="ScAmhPqOiWTC" colab_type="code" colab={} outputId="b2ee0a2f-4920-40d5-b9e9-3c776f1d3adf"
train.describe(exclude='number')

# + [markdown] id="0slD59bUiWTK" colab_type="text"
# ### Category Encoders!
# # http://contrib.scikit-learn.org/categorical-encoding/onehot.html

# + [markdown] id="4SvWw3MZiWTL" colab_type="text"
# End-to-end example

# + id="o0GsI2DSiWTN" colab_type="code" colab={} outputId="bc21434e-0297-4711-e273-77bf66c15a62"
import category_encoders as ce
pd.set_option('display.max_columns', 1000)

features = ['Pclass', 'Age', 'Fare', 'Sex', 'Embarked', 'adult_male', 'alone', 'title']
target = 'Survived'
X_train = train[features]
X_test = test[features]
y_train = train[target]
# NOTE(review): y_test is assigned from *train* — Kaggle's test set has no labels.
# y_test is not used anywhere below; confirm whether this line can be removed.
y_test = train[target]

encoder = ce.OneHotEncoder(use_cat_names=True)
imputer = SimpleImputer()
log_reg = LogisticRegression(solver='lbfgs', max_iter=1000)

# Encode first, then impute (imputation happens on the one-hot-encoded matrix).
X_train_encoded = encoder.fit_transform(X_train)
X_test_encoded = encoder.transform(X_test)
X_train_imputed = imputer.fit_transform(X_train_encoded)
X_test_imputed = imputer.transform(X_test_encoded)

scores = cross_val_score(log_reg, X_train_imputed, y_train, cv=10)
print('Cross-Validation Accuracy Scores', scores)

# + [markdown] id="8eJj91xRiWTU" colab_type="text"
# Here's what the one-hot encoded data looks like

# + id="nHGbcYwWiWTV" colab_type="code" colab={} outputId="8c099a29-d02c-4970-c246-eaceac3c6c60"
X_train_encoded.sample(n=5)

# + [markdown] id="6knIos9TiWTf" colab_type="text"
# The cross-validation accuracy scores improve with the additional features

# + id="d22QIpkPiWTh" colab_type="code" colab={} outputId="d69f4435-0d79-4f01-f114-e1dfcdfff783"
# %matplotlib inline
import matplotlib.pyplot as plt

log_reg.fit(X_train_imputed, y_train)
coefficients = pd.Series(log_reg.coef_[0], X_train_encoded.columns)
plt.figure(figsize=(10,10))
coefficients.sort_values().plot.barh(color='grey');

# + [markdown] id="7LKMRvSSiWTq" colab_type="text"
# ### Scaler
#
# https://scikit-learn.org/stable/modules/preprocessing.html#scaling-features-to-a-range

# + [markdown] id="xqPfQNGGiWTs" colab_type="text"
# End-to-end example

# + id="gtzeHspmiWTt" colab_type="code" colab={} outputId="551801cc-c179-4972-a575-9e5b656e9088"
from sklearn.preprocessing import MinMaxScaler

encoder = ce.OneHotEncoder(use_cat_names=True)
imputer = SimpleImputer()
scaler = MinMaxScaler()
log_reg = LogisticRegression(solver='lbfgs', max_iter=1000)

# Same encode → impute steps as above, with min-max scaling appended.
X_train_encoded = encoder.fit_transform(X_train)
X_test_encoded = encoder.transform(X_test)
X_train_imputed = imputer.fit_transform(X_train_encoded)
X_test_imputed = imputer.transform(X_test_encoded)
X_train_scaled = scaler.fit_transform(X_train_imputed)
X_test_scaled = scaler.transform(X_test_imputed)

scores = cross_val_score(log_reg, X_train_scaled, y_train, cv=10)
print('Cross-Validation Accuracy Scores', scores)

# + [markdown] id="osWz9aTsiWT3" colab_type="text"
# Now all the features have a min of 0 and a max of 1

# + id="v7Go0HM-iWT4" colab_type="code" colab={} outputId="68ad09e0-16ae-4c20-d918-73aeb821b9af"
pd.DataFrame(X_train_scaled).describe()

# + [markdown] id="MPlqqvEtiWT9" colab_type="text"
# The model coefficients change with scaling

# + id="MsWeK1pMiWT-" colab_type="code" colab={} outputId="80b03079-fa05-43d1-bd06-01c1372b2ba7"
log_reg.fit(X_train_scaled, y_train)
coefficients = pd.Series(log_reg.coef_[0], X_train_encoded.columns)
plt.figure(figsize=(10,10))
coefficients.sort_values().plot.barh(color='grey');

# + [markdown] id="WAHh8SuoiWUE" colab_type="text"
# ### Pipeline

# + [markdown] id="RAmPJOyyiWUF" colab_type="text"
# https://scikit-learn.org/stable/modules/compose.html#pipeline

# + id="CUExiwLRiWUH" colab_type="code" colab={} outputId="071f6b73-4e94-4459-8f52-5260eba1bb29"
from sklearn.pipeline import make_pipeline

# Bundle encode → impute → scale → fit into one estimator so cross-validation
# re-fits the whole preprocessing chain on each fold.
pipe = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(),
    MinMaxScaler(),
    LogisticRegression(solver='lbfgs', max_iter=1000)
)

scores = cross_val_score(pipe, X_train, y_train, cv=10)
print('Cross-Validation Accuracy Scores', scores)

# + id="BX6NWsW4iWUM" colab_type="code" colab={}
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
submission = test[['PassengerId']].copy()
submission['Survived'] = y_pred
submission.to_csv('kaggle-submission-001.csv', index=False)

# + [markdown] colab_type="text" id="iblW74C8afuR"
# ## Assignment: real-world classification
#
# We're going to check out a larger dataset - the
# [FMA Free Music Archive data](https://github.com/mdeff/fma). It has a selection of
# CSVs with metadata and calculated audio features that you can load and try to use to
# classify genre of tracks. To get you started:

# + [markdown] id="10JiSszWiWUV" colab_type="text"
# ### Get and unzip the data

# + [markdown] id="D_EX58niiWUW" colab_type="text"
# #### Google Colab

# + colab_type="code" id="SsySnuKaKtQf" outputId="2f8d801c-2455-4beb-f6ee-9eaa53117a48" colab={"base_uri": "https://localhost:8080/", "height": 425}
# !wget https://os.unil.cloud.switch.ch/fma/fma_metadata.zip
# !unzip fma_metadata.zip

# + [markdown] id="TLEnRLWkiWUc" colab_type="text"
# #### Windows
# - Download the [zip file](https://os.unil.cloud.switch.ch/fma/fma_metadata.zip)
# - You may need to use [7zip](https://www.7-zip.org/download.html) to unzip it
#
#
# #### Mac
# - Download the [zip file](https://os.unil.cloud.switch.ch/fma/fma_metadata.zip)
# - You may need to use [p7zip](https://superuser.com/a/626731) to unzip it

# + [markdown] id="6HabCJH6iWUe" colab_type="text"
# ### Look at first 4 lines of raw `tracks.csv` file

# + id="OA2X6Y17iWUf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="4bdc470b-0073-452b-e1ff-53a77eebeed8"
# !head -n 4 fma_metadata/tracks.csv

# + [markdown] id="cc2-1KmiiWUl" colab_type="text"
# ### Read with pandas
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html

# + id="k9Csmtuxj-17" colab_type="code" colab={}
import pandas as pd

# + id="hPsO33fgiWUm" colab_type="code" colab={}
# tracks.csv has a two-level column header, hence header=[0,1].
tracks = pd.read_csv('fma_metadata/tracks.csv', header=[0,1], index_col=0)

# + id="NwgKxE5AiWUs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 570} outputId="858325de-3a6f-41bd-ce51-a8cbd521a2a5"
tracks.head()

# + [markdown] id="xB7r6MnpiWUx" colab_type="text"
# ### More data prep

# + [markdown] id="cMf5YwJ4iWUy" colab_type="text"
# Get value counts of the target. (The syntax is different because the header has two
# levels, it's a "MultiIndex.")
#
# The target has multiple classes, and many missing values.

# + id="stVmsKKjiWUz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 323} outputId="2527dd28-b459-4cdc-92bd-9fbf52e83b56"
tracks['track']['genre_top'].value_counts(normalize=True, dropna=False)

# + [markdown] id="yJzFWjvHiWU4" colab_type="text"
# We can't do supervised learning where targets are missing. (In other words, we can't
# do supervised learning without supervision.)
#
# So, only keep observations where the target is not null.

# + id="osEvLLFviWU5" colab_type="code" colab={}
target_not_null = tracks['track']['genre_top'].notnull()
tracks = tracks[target_not_null]

# + [markdown] id="_eep11_siWU8" colab_type="text"
# Load `features.csv`: "common features extracted from the audio with
# [librosa](https://librosa.github.io/librosa/)"
#
# It has 3 levels of columns!

# + id="zcQVVn_NiWU9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 346} outputId="9e4ae57a-c6b2-4f29-afba-edb4d30f29a2"
# NOTE(review): this rebinds `features` (previously the Titanic feature-name list)
# to the FMA audio-features DataFrame — intentional in this assignment section.
features = pd.read_csv('fma_metadata/features.csv', header=[0,1,2], index_col=0)
features.head()

# + [markdown] id="mdeFKD2xiWVG" colab_type="text"
# I want to [drop a level](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.MultiIndex.droplevel.html)
# here from the audio features dataframe, so it has the same number of levels (2) as
# the tracks metadata dataframe, so that I can better merge the two together.
# + id="GsTR0pH4iWVH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 315} outputId="204306c4-09ca-452b-d3f0-a64d45a677bd" features.columns = features.columns.droplevel(level=2) features.head() # + [markdown] id="Uwv4R4t4iWVP" colab_type="text" # Merge the metadata with the audio features, on track id (the index for both dataframes). # + id="Tivko9fwiWVR" colab_type="code" colab={} df = pd.merge(tracks, features, left_index=True, right_index=True) # + [markdown] id="l25l-RBNiWVV" colab_type="text" # And drop a level of columns again, because dealing with MultiIndex is hard # + id="wHFZgK45iWVW" colab_type="code" colab={} df.columns = df.columns.droplevel() # + [markdown] id="XEZ3SRlYiWVh" colab_type="text" # This is now a pretty big dataset. Almost 500,000 rows, over 500 columns, and over 200 megabytes in RAM. # + id="7OGYAnKmiWVk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="36774439-aa6d-44b0-81fd-9a0021280de1" print(df.shape) df.info() # + [markdown] id="zO1msBJtiWVp" colab_type="text" # ### Fit Logistic Regression! 
# + id="phhBrGIfiWVq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="29fb9976-23d7-482a-e757-d4792b1ee2d8" from sklearn.model_selection import train_test_split y = df['genre_top'] X = df.select_dtypes('number').drop(columns=['longitude', 'latitude']) X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.50, test_size=0.50, random_state=42, stratify=y) X_train.shape, X_test.shape, y_train.shape, y_test.shape # + id="rbgrmvFtlBVJ" colab_type="code" colab={} from sklearn.linear_model import LogisticRegression # + id="udKQkzIviWVy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="b346b223-6a98-45df-f221-2ece8ce08e97" model = LogisticRegression(solver='lbfgs', multi_class='auto') model.fit(X_train, y_train) # + [markdown] id="J4Wx4nW3iWV8" colab_type="text" # Accuracy is 37%, which sounds bad, BUT ... # + id="8Gz1rb2YiWV9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f9f2b6f8-cb56-460a-b960-59927b25f412" model.score(X_test, y_test) # + [markdown] id="BH-PTY2ciWWE" colab_type="text" # ... remember we have 16 classes, and the majority class (Rock) occurs 29% of the time, so the model isn't worse than random guessing for this problem # + id="c13m4i92iWWH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="c3c30c03-c448-42bc-adf5-94123e0f22f2" y.value_counts(normalize=True) # + [markdown] colab_type="text" id="kQUVlUKQMPPW" # This dataset is bigger than many you've worked with so far, and while it should fit in Colab, it can take awhile to run. That's part of the challenge! # # Your tasks: # - Clean up the variable names in the dataframe # - Use logistic regression to fit a model predicting (primary/top) genre # - Inspect, iterate, and improve your model # - Answer the following questions (written, ~paragraph each): # - What are the best predictors of genre? 
# - What information isn't very useful for predicting genre? # - What surprised you the most about your results? # # *Important caveats*: # - This is going to be difficult data to work with - don't let the perfect be the enemy of the good! # - Be creative in cleaning it up - if the best way you know how to do it is download it locally and edit as a spreadsheet, that's OK! # - If the data size becomes problematic, consider sampling/subsetting, or [downcasting numeric datatypes](https://www.dataquest.io/blog/pandas-big-data/). # - You do not need perfect or complete results - just something plausible that runs, and that supports the reasoning in your written answers # # If you find that fitting a model to classify *all* genres isn't very good, it's totally OK to limit to the most frequent genres, or perhaps trying to combine or cluster genres as a preprocessing step. Even then, there will be limits to how good a model can be with just this metadata - if you really want to train an effective genre classifier, you'll have to involve the other data (see stretch goals). # # This is real data - there is no "one correct answer", so you can take this in a variety of directions. Just make sure to support your findings, and feel free to share them as well! This is meant to be practice for dealing with other "messy" data, a common task in data science. 
# + [markdown] id="yilZo2PLmoZf" colab_type="text" # ###Assignment Code # + id="i8Xxv8O0mr_R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 369} outputId="2fb5ad15-c995-47d4-da39-5609ab5a1d59" df.head() # + id="v5yX87RAnRXW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 442} outputId="742f032e-f1a3-43f3-bfa1-63c08b59f4d1" df.dropna(axis='columns').columns.value_counts() # + [markdown] colab_type="text" id="wlI5OXfSag9C" # ## Resources and stretch goals # # - Check out the other .csv files from the FMA dataset, and see if you can join them or otherwise fit interesting models with them # - [Logistic regression from scratch in numpy](https://blog.goodaudience.com/logistic-regression-from-scratch-in-numpy-5841c09e425f) - if you want to dig in a bit more to both the code and math (also takes a gradient descent approach, introducing the logistic loss function) # - Create a visualization to show predictions of your model - ideally show a confidence interval based on error! # - Check out and compare classification models from scikit-learn, such as [SVM](https://scikit-learn.org/stable/modules/svm.html#classification), [decision trees](https://scikit-learn.org/stable/modules/tree.html#classification), and [naive Bayes](https://scikit-learn.org/stable/modules/naive_bayes.html). The underlying math will vary significantly, but the API (how you write the code) and interpretation will actually be fairly similar. # - Sign up for [Kaggle](https://kaggle.com), and find a competition to try logistic regression with # - (Not logistic regression related) If you enjoyed the assignment, you may want to read up on [music informatics](https://en.wikipedia.org/wiki/Music_informatics), which is how those audio features were actually calculated. 
The FMA includes the actual raw audio, so (while this is more of a longterm project than a stretch goal, and won't fit in Colab) if you'd like you can check those out and see what sort of deeper analysis you can do.
module1-logistic-regression/LS_DS_231_Logistic_Regression-LECTURE-v3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# HR-attrition classifier: random-forest feature selection followed by a
# small Keras MLP, with K-fold cross-validation and a ROC-derived cutoff.

# sklearn.cross_validation was removed in scikit-learn 0.20; KFold now lives
# in model_selection (already imported here for train_test_split).
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve
from sklearn.utils import resample
from keras.utils import to_categorical
from keras.models import Sequential
# keras.layers.core no longer exists in current Keras; keras.layers works in
# both old and new versions.
from keras.layers import Dense, Dropout
from keras.callbacks import EarlyStopping
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt


# Functions
def find_optimal_cutoff(target, predicted):
    """Return the ROC threshold where sensitivity and specificity balance.

    Picks the threshold whose tpr - (1 - fpr) is closest to zero, i.e. the
    point where the true-positive rate equals the true-negative rate.

    Parameters
    ----------
    target : array-like of true binary labels.
    predicted : array-like of predicted scores/probabilities.

    Returns
    -------
    list with a single element: the optimal threshold.
    """
    fpr, tpr, threshold = roc_curve(target, predicted)
    i = np.arange(len(tpr))
    roc = pd.DataFrame({'tf': pd.Series(tpr - (1 - fpr), index=i),
                        'threshold': pd.Series(threshold, index=i)})
    # DataFrame.ix was removed from pandas; argsort yields positional
    # indices, so .iloc is the correct replacement.
    roc_t = roc.iloc[(roc.tf - 0).abs().argsort()[:1]]
    return list(roc_t['threshold'])


# Load dataset
print('===> loading dataset')
data = pd.read_csv('~/bitbucket_repos/machine_learning/dataset/HR.csv')
dataset = data.rename(columns={'left': 'class'})

# Unbalanced dataset
# Upsampling
# NOTE(review): minority_upsampled is computed but never used below — the
# balanced set is built from the ORIGINAL minority plus the downsampled
# majority. Kept for parity with the original; confirm intent.
minority = dataset[dataset['class'] == 1]
minority_upsampled = resample(minority, replace=True, n_samples=11428, random_state=123)
# Downsampling
majority = dataset[dataset['class'] == 0]
majority_downsampled = resample(majority, replace=False, n_samples=3571, random_state=123)
dataset = pd.concat([minority, majority_downsampled])

# Transform features: one-hot encode the categorical columns
dataset = pd.get_dummies(dataset, columns=['sales', 'salary'])

# Selection features: rank columns by random-forest importance
features = dataset.drop('class', axis=1)
labels = dataset[['class']]
rf = RandomForestClassifier(n_estimators=100, criterion='entropy',
                            max_depth=15, min_samples_leaf=50,
                            min_samples_split=100, random_state=10)
# Train the selector
rf.fit(features, labels.values.ravel())
features_imp = pd.Series(rf.feature_importances_, index=features.columns).sort_values(ascending=False)
print('features importance:\n', features_imp)
# keep only columns above a hard-coded importance cutoff
criteria = rf.feature_importances_ > 0.155
features = features.iloc[:, criteria]
print(pd.DataFrame({'Main Features': features.columns}))

# +
# Split dataset - Train and Test dataset
trainX, testX, trainY, testY = train_test_split(features, labels, test_size=0.2)
# DataFrame.as_matrix() was removed from pandas; to_numpy() is the
# documented replacement.
trainX = trainX.to_numpy()
trainY = to_categorical(trainY, num_classes=2)
testX = testX.to_numpy()
testY = to_categorical(testY, num_classes=2)
# -

# Cross-Validation
results = []
i = 0
n_folds = 10
# Modern KFold API: construct with n_splits and iterate over .split(X).
cv = KFold(n_splits=n_folds)
callbacks = [EarlyStopping(monitor='val_loss', patience=2)]
for traincv, testcv in cv.split(trainX):
    print('===> running fold', i + 1, '/', n_folds)
    # Train a fresh MLP per fold
    mlp = Sequential()
    mlp.add(Dense(100, input_dim=trainX.shape[1],
                  kernel_initializer='random_uniform',
                  bias_initializer='random_uniform', activation='relu'))
    mlp.add(Dense(2, kernel_initializer='random_uniform',
                  bias_initializer='random_uniform', activation='sigmoid'))
    mlp.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    mlp.fit(trainX[traincv], trainY[traincv], batch_size=64, epochs=50,
            verbose=0, validation_split=0.3, callbacks=callbacks)
    # Test on the held-out fold. Sequential.predict_proba was removed in
    # tf.keras; predict() already returns the sigmoid probabilities.
    predicted = mlp.predict(trainX[testcv])
    predicted = np.where(predicted[:, 0] > 0.5, 1, 0)
    acc = accuracy_score(trainY[testcv, 0], predicted)
    print('\naccuracy:', acc)
    results.append(acc)
    i += 1
print('Results:', str(100 * np.array(results).mean()), '+/-', str(np.array(results).std()))

# +
# Training model - MLP (final fit on the full training split)
mlp = Sequential()
mlp.add(Dense(100, input_dim=trainX.shape[1],
              kernel_initializer='random_uniform',
              bias_initializer='random_uniform', activation='relu'))
mlp.add(Dense(2, kernel_initializer='random_uniform',
              bias_initializer='random_uniform', activation='sigmoid'))  # Output Layer
mlp.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Training
callbacks = [EarlyStopping(monitor='val_loss', patience=2)]
mlp_info = mlp.fit(trainX, trainY, batch_size=64, epochs=50, verbose=2,
                   validation_split=0.3, callbacks=callbacks)
# -

# Training Error
fig, axs = plt.subplots(figsize=(15, 5))
axs.plot(range(1, len(mlp_info.history['loss']) + 1), mlp_info.history['loss'])
axs.plot(range(1, len(mlp_info.history['val_loss']) + 1), mlp_info.history['val_loss'])
# Bug fix: the original called set_title three times in a row; the last two
# calls were clearly meant to label the y and x axes.
axs.set_title('Model Loss')
axs.set_ylabel('Loss')
axs.set_xlabel('Epoch')
# Bug fix: set_xticks' second positional argument is a list of tick labels;
# the original passed a float (len/10), which raises in matplotlib.
axs.set_xticks(np.arange(1, len(mlp_info.history['loss']) + 1))
axs.legend(['train', 'val'], loc='best')
plt.show()

# Performance model over Test Dataset
predicted = mlp.predict(testX)
threshold = find_optimal_cutoff(testY[:, 0], predicted[:, 0])
print('\nthreshold:', threshold[0])
# find_optimal_cutoff returns a one-element list; compare against the scalar.
predicted = np.where(predicted[:, 0] > threshold[0], 1, 0)

# +
# Metrics
# Accuracy
acc = accuracy_score(testY[:, 0], predicted)
print('accuracy:', acc)
# Confusion Matrix
cm = confusion_matrix(testY[:, 0], predicted)
print(cm)
# F1-score
f1 = f1_score(testY[:, 0], predicted)
print('f1-score:', f1)
# -

print('===> well done')
MLP-Keras-HR.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Training a SVM with gradient descent
#
# In this notebook I show how to derive an algorithm to train a SVM using projected gradient descent. In practice SVMs are trained using more nuanced algorithms like [SMO](https://en.wikipedia.org/wiki/Sequential_minimal_optimization). However, as suggested in [CIML](http://ciml.info/), SVMs can also be trained using projected gradient descent.

# +
import sys
import numpy as np
import matplotlib.pyplot as plt

sys.path.append("..")

from models.svm import SVM
from utils.datasets import blobs_classification_dataset, radial_classification_dataset
from utils.visualization import plot_decision_boundary

# +
# %matplotlib inline

# Turn interactive plotting off
plt.ioff()

# Reproducibility
np.random.seed(1)
# -

# ## Optimal separating hyperplane
#
# The goal is to find the hyperplane that best separates some data according to their binary label. Let's take the following data.

# +
(x_train, y_train), (x_test, y_test) = blobs_classification_dataset(features=2, classes=2)

# Turn labels from {0,1} to {-1,1}: y + (y - 1) maps 0 -> -1 and 1 -> 1
y_train += y_train-1
y_test += y_test-1

plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap='jet')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.show()
# -

# We want to find the line that best separates both clusters

# ### The perceptron:
# To solve this problem the perceptron algorithm randomly initializes a separating hyperplane $x^T\beta + \beta_0 = 0$ and iterates over the training points to adjust the parameters $\beta$. For each point it computes the predicted label, and if the prediction is incorrect we take a step in the negative direction of the gradient. We are minimizing the distance of the misclassified points to the margin.
# The perceptron predicts the class of a point $x_i$ as:
# $$
# \hat{y} = \text{sign}(x_i^T\beta + \beta_0)
# $$
#
# To minimize the margin we want to minimize the expression:
# $$
# L(\beta, \beta_0) = -\sum_{i \in \mathit{M}}y_i(x_i^T\beta + \beta_0)
# $$
# where $\mathit{M}$ is the set of misclassified data examples.
#
# Taking gradients:
# $$
# \frac{\partial{L}}{\partial{\beta}} = -\sum_{i \in \mathit{M}}y_ix_i \\
# \frac{\partial{L}}{\partial{\beta_0}} = -\sum_{i \in \mathit{M}}y_i
# $$
#
# We train using stochastic gradient descent, that is, using one example at a time.

# +
# Initialize separating line
beta = np.random.rand(2)
beta0 = 0.0


def predict(x):
    """Perceptron decision rule: sign of the signed distance to the current
    hyperplane (reads the module-level beta / beta0)."""
    return np.sign(np.dot(x, beta) + beta0)


# Plot initial separating line (clip sign output {-1,1} into {0,1} for the
# boundary-plot helper)
ax = plot_decision_boundary(lambda x: np.clip(predict(x), 0, 1),
                            x_range=[x_test[:, 0].min()-1, x_test[:, 0].max()+1],
                            y_range=[x_test[:, 1].min()-1, x_test[:, 1].max()+1])
# Plot test data
ax.scatter(x_test[:, 0], x_test[:, 1], c=y_test, cmap='jet')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.show()

# +
# Train the perceptron until convergence.
# NOTE(review): this loop only terminates when the training data is linearly
# separable; the blobs dataset used here is constructed to be separable.
converged = False
while(not converged):
    converged = True
    for i in range(x_train.shape[0]):
        predicted = predict(x_train[i, :])
        if predicted != y_train[i]:
            converged = False
            # step size 0.1 along the negative gradient of the perceptron loss
            beta += 0.1*x_train[i, :]*y_train[i]
            beta0 += 0.1*y_train[i]

# Plot separating line after training
ax = plot_decision_boundary(lambda x: np.clip(predict(x), 0, 1),
                            x_range=[x_test[:, 0].min()-1, x_test[:, 0].max()+1],
                            y_range=[x_test[:, 1].min()-1, x_test[:, 1].max()+1])
# Plot test data
ax.scatter(x_test[:, 0], x_test[:, 1], c=y_test, cmap='jet')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.show()
# -

# The problem with the perceptron is that it is not guaranteed to converge to any good solution when data is not linearly separable; moreover it does not find the optimal separating hyperplane — it converges whenever the plane is good enough to separate the training data.
# If we want to generalize as much as possible we need another approach.

# ## Maximizing the margin
#
# The most simple version of the SVM is the problem of finding the optimal separating hyperplane between 2 classes which are linearly separable. The optimal plane is the one that maximizes the margin M, defined as the distance from the plane to its closest point in the training set. Let $f(x) = x^T\beta + \beta_0 = 0$ be the separating hyperplane. Then we can solve the optimization problem:
# $$
# \max_{\beta, \beta_0, \|\beta\|=1} M \\
# \text{subject to: } y_i(x_i^T\beta + \beta_0) \geq M, i=1,2,..,N
# $$
# where $y_i \in {-1, 1}$ is the label of the $i$-th training example $x_i$.
#
# Noting that the distance from a point $x_i^\prime$ to a hyperplane $x^T\beta^\prime + \beta_0^\prime = 0$ is $\frac{|x_i^{\prime T}\beta^\prime + \beta_0^\prime|}{\|\beta^\prime\|}$, the original problem without noise can be interpreted as maximizing the minimum distance ($M$) from each point in the data to the separating plane. Setting $M$ as $\frac{1}{\|\beta\|}$ the problem can be rewritten as:
# $$
# \min_{\beta, \beta_0} \frac{1}{2}\|\beta\|^2 \\
# \text{subject to: } y_i(x_i^T\beta + \beta_0) \geq 1, i=1,2,..,N
# $$
# The problem is reduced to optimizing a quadratic function, with just one single minimum, subject to some constraints. This constrained optimization problem can be solved using a method called the [Lagrangian multiplier](https://en.wikipedia.org/wiki/Lagrange_multiplier); a good explanation is given in [Khan Academy](https://www.khanacademy.org/math/multivariable-calculus/applications-of-multivariable-derivatives/lagrange-multipliers-and-constrained-optimization). This method incorporates the constraint along with the objective function into the same expression.
# Moreover, the Lagrangian multiplier method allows solving optimization problems with equality constraints; in order to incorporate the inequality constraint the [KKT conditions](https://en.wikipedia.org/wiki/Karush%E2%80%93Kuhn%E2%80%93Tucker_conditions) must also be met. The Lagrange primal function of the problem is:
# $$
# L_P = \frac{1}{2}\|\beta\|^2 - \sum_{i=1}^N\alpha_i[y_i(x_i^T\beta + \beta_0) - 1]
# $$
#
# Setting derivatives to $0$:
# $$
# \beta = \sum_{i=1}^{N}\alpha_iy_ix_i \\
# 0 = \sum_{i=1}^{N}\alpha_iy_i
# $$
#
# and substituting:
# $$
# L_D = \sum_{i=1}^{N}\alpha_i - \frac{1}{2}\sum_{i=1}^{N}\sum_{k=1}^{N}\alpha_i\alpha_ky_iy_kx_i^Tx_k \\
# \text{subject to: } \alpha_i \geq 0, i=1,2,..,N
# $$
#
# The solution is obtained by maximizing $L_D$, which is the dual form. To satisfy the KKT conditions the following constraint must also hold:
# $$
# \alpha_i[y_i(x_i^T\beta + \beta_0) - 1] = 0, \forall i
# $$

# ## The kernel trick
# Having defined the objective function in terms of the dot product of the data points, we can substitute it by a kernel function:
# $$
# L_D = \sum_{i=1}^{N}\alpha_i - \frac{1}{2}\sum_{i=1}^{N}\sum_{k=1}^{N}\alpha_i\alpha_ky_iy_kK(x_i, x_k) \\
# $$
#
# The predicted category of a new point $x^\prime$ is computed as:
# $$
# \hat{y} = \text{sign}(\sum_{i=1}^{N}\alpha_iy_iK(x_i, x^\prime) + b)
# $$

# ## Optimizing the dual form
#
# The objective dual form can be optimized using a projected version of gradient descent. The idea is to perform gradient descent, but after performing each training step the variables should be tuned in order to meet the conditions, in this case setting $\alpha_i := 0, \forall i : \alpha_i < 0$.
#
# Taking the dual form in matrix form we have:
# $$
# L_D = \alpha^T\mathbf{1} - \frac{1}{2}\alpha^TG\alpha \\
# G_{ij} = y_iy_jK(x_i, x_j)
# $$
#
# Taking the derivative:
# $$
# \frac{\partial{L_D}}{\partial{\alpha}} = \mathbf{1} - \frac{1}{2}\alpha^T(G+G^T) = \mathbf{1} - \alpha^TG
# $$
# Notice that we can perform the last step because G is symmetric.
#
# One might be tempted to set the derivative to $0$ and compute the analytic solution. But this doesn't take into account the constraints; one can enforce them after computing the multipliers, but then the solution is no longer guaranteed to be optimal.
#
# Enforcing the conditions at each gradient update, we ensure that the conditions are met and that we converge to an optimum.
#
# We may now solve the above problem, even with more complex data distributions:

(x_train, y_train), (x_test, y_test) = radial_classification_dataset()
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap='jet')
plt.show()

# +
# Train SVM
quadratic_model = SVM(kernel='quadratic')
loss = quadratic_model.fit(x_train, y_train, iterations=10000, learning_rate=0.01)

# Plot decision boundary
ax = plot_decision_boundary(quadratic_model.predict,
                            x_range=[x_train[:, 0].min()-1, x_train[:, 0].max()+1],
                            y_range=[x_train[:, 1].min()-1, x_train[:, 1].max()+1])
ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap='jet')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.show()
# -

# ## Dealing with noise: Hinge Loss
# To deal with non-separable data, or simply with noisy points that fall on the other side of the real boundary, we can include a cost parameter $C$:
# $$
# \min_{\beta, \beta_0} \frac{1}{2}\|\beta\|^2 + C\sum_{i=1}^{N}\xi_i\\
# \text{subject to: } \xi_i \geq 0, y_i(x_i^T\beta + \beta_0) \geq 1-\xi_i, \forall i
# $$
# where $\xi_i$ are slack variables proportional to the amount by which each point $x_i$ falls on the wrong side of its margin.
# The total amount of slack cannot surpass $\frac{1}{C}$.
#
# Following the same procedure of Lagrangian multipliers and taking the primal and dual forms, we arrive at the same objective function, but with a harder constraint:
# $$
# L_D = \sum_{i=1}^{N}\alpha_i - \frac{1}{2}\sum_{i=1}^{N}\sum_{k=1}^{N}\alpha_i\alpha_ky_iy_kK(x_i, x_k) \\
# \text{subject to: } 0 \leq \alpha_i \leq C, \forall i
# $$
#
# With this cost penalty $C$ we can train a classifier dealing with noisy training data:

(x_train, y_train), (x_test, y_test) = radial_classification_dataset(noise=0.5)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap='jet')
plt.show()

# +
# Train SVM
quadratic_model = SVM(kernel='quadratic', C=5)
loss = quadratic_model.fit(x_train, y_train, iterations=10000, learning_rate=0.01)

# Plot decision boundary
ax = plot_decision_boundary(quadratic_model.predict,
                            x_range=[x_train[:, 0].min()-1, x_train[:, 0].max()+1],
                            y_range=[x_train[:, 1].min()-1, x_train[:, 1].max()+1])
ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap='jet')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.show()
# -

# ## Bibliography
#
# * [ESL](http://web.stanford.edu/~hastie/ElemStatLearn/)
# * [An Idiot's guide to Support vector machines](http://web.mit.edu/6.034/wwwbob/svm-notes-long-08.pdf)
# * [A Course in Machine Learning](http://ciml.info/)
# * [Khan Academy](https://www.khanacademy.org/)
notebooks/svm-derivation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploration and preprocessing of ARIA radiotherapy tables (course, plan,
# radiation, radiationhstry, patient) for the waiting-time project: filter to
# actual treatment deliveries, join the tables, and export the result.

import pymysql
import pandas as pd
import numpy as np
import datetime
import time
import sys

sys.path.append(r'D:\jupyter files\waiting_time_project\my_tools')
import tools_for_os.for_df as ml_df

# Pull the raw tables into DataFrames via the project SQL helper.
sql_course = """SELECT * FROM course"""
data_course = ml_df.get_df_from_sql(sql_course)
print(f'The shape of data_course {data_course.shape}')

sql_plan = """SELECT * FROM plan"""
data_plan = ml_df.get_df_from_sql(sql_plan)
print(f'The shape of data_plan {data_plan.shape}')

sql_radiation = """SELECT * FROM radiation"""
data_radiation = ml_df.get_df_from_sql(sql_radiation)
print(f'The shape of data_radiation {data_radiation.shape}')

sql_radiationhstry = """SELECT * FROM radiationhstry"""
data_radiationhstry = ml_df.get_df_from_sql(sql_radiationhstry)
print(f'The shape of data_radiationhstry {data_radiationhstry.shape}')

sql_patient = """SELECT * FROM patient"""
data_patient = ml_df.get_df_from_sql(sql_patient)
print(f'The shape of data_patient {data_patient.shape}')

# Drop the audit column from every table. On a repeated notebook run the
# column is already gone and drop() raises KeyError, which is the only error
# we intend to swallow here (the original used a bare except, hiding
# unrelated failures too).
try:
    data_course.drop(columns=['LastUpdated'], inplace=True)
    data_plan.drop('LastUpdated', axis=1, inplace=True)
    data_radiation.drop('LastUpdated', axis=1, inplace=True)
    data_radiationhstry.drop('LastUpdated', axis=1, inplace=True)
    data_patient.drop('LastUpdated', axis=1, inplace=True)
    print('Drop columns')
except KeyError:
    print('Finish droppping columns')

# ##### Radiationhstry holds the treatment information, including treatment
# ##### start/end times, the treatment ordinal and the monitor units.
# ##### Appointment holds the appointment information, including scheduled
# ##### start/end times and the appointment status.
# ##### Radiationhstry and Appointment share no direct join key; they are
# ##### linked through the time each patient was treated — i.e.
# ##### Appointment.ScheduledStartTime and Radiationhstry.TreatmentStartTime
# ##### connect a patient's scheduled appointments to the treatment records.

# ==========================================================================
# ## Radiation table
# ##### keep only rows with a radiation dose greater than 0

data_radiation.AliasSerNum.value_counts()
# ##### every data_radiation record has AliasSerNum = 37 (Core Tables from ARIA)
data_radiation.DeliveryType.value_counts()

print(f'The shape of data_radiation {data_radiation.shape}')
data_radiation_TM = data_radiation[(data_radiation.DeliveryType == 'TREATMENT')]
print(f'The shape of data_radiation_TM {data_radiation_TM.shape}')
data_radiation_IM = data_radiation[(data_radiation.DeliveryType == 'IMAGE')]
print(f'The shape of data_radiation_IM {data_radiation_IM.shape}')

# ##### DeliveryType takes two values, TREATMENT and IMAGE; after filtering
# ##### on dose only the TREATMENT rows remain
data_radiation_IM.MU.value_counts()
# ##### for IMAGE rows, MU and MUCoeff are almost always 0
# ##### for now we only use DeliveryType == 'TREATMENT' with dose > 0

print(f'The shape of data_radiation {data_radiation.shape}')
data_radiation_ = data_radiation[data_radiation.DeliveryType == 'TREATMENT']
print(f'The shape of data_radiation_ {data_radiation_.shape}')
data_radiation_ = data_radiation_[(data_radiation_.MUCoeff > 0) & (data_radiation_.MU > 0)]
print(f'The shape of data_radiation_ {data_radiation_.shape}')

data_radiation_grouped = data_radiation_.groupby('PlanSerNum')
data_radiation_grouped.get_group(105405)

# ## Radiationhstry table
data_radiationhstry.AliasSerNum.value_counts()
# ##### like data_radiation, every record has AliasSerNum = 37 (Core Tables
# ##### from ARIA)
data_radiationhstry.RadiationHstrySerNum.unique()
# ##### RadiationHstrySerNum and RadiationHstryAriaSer are both unique
data_radiationhstry_grouped = data_radiationhstry.groupby('RadiationSerNum')  # 574554
data_radiationhstry_grouped.count()
data_radiationhstry.RadiationSerNum.value_counts()

# ## Plan table
data_plan.AliasSerNum.value_counts()
# ##### every Plan record also has AliasSerNum = 37, matching the tables above
data_plan.Status.value_counts()
data_plan[data_plan.Status == 'Rejected']
data_plan_ = data_plan[data_plan.Status != 'Unapproved']
data_plan_grouped = data_plan_.groupby('CourseSerNum')
data_plan_grouped.groups.keys()
data_plan_grouped.get_group(268)
data_plan.Status.iloc[0]

# ## Course table
data_course.AliasSerNum.value_counts()
# ##### every Course record also has AliasSerNum = 37

# ==========================================================================
# ## Data preprocessing
data_path = 'D:\\jupyter files\\data_waiting_time_project\\preprocess_data\\'


def data_compare(df1, df2, name):
    """Compare the distinct values of column `name` between two DataFrames.

    Prints the number of values shared by both frames, the number present
    only in df1, and the number present only in df2. Also returns the three
    lists (shared, only-df1, only-df2) for programmatic use.
    """
    df_count1 = df1[name].value_counts().index.tolist()
    df_count2 = df2[name].value_counts().index.tolist()
    # Bug fix: the original intersected df_count1 with itself, so "same"
    # always equaled the full df1 value set regardless of df2.
    same = list(set(df_count1).intersection(set(df_count2)))
    print(f'same: {len(same)}')
    a = list(set(df_count1).difference(set(df_count2)))
    print(f'df1 has, df2 does not has: {len(a)}')
    b = list(set(df_count2).difference(set(df_count1)))
    print(f'df2 has, df1 does not has: {len(b)}')
    return same, a, b


# +
# Radiation preprocessing: treatment rows with positive dose only
print('=' * 20)
print('Process data_radiation')
print(f'\nThe shape of data_radiation {data_radiation.shape}')
data_radiation_ = data_radiation[(data_radiation.DeliveryType == 'TREATMENT')
                                 & (data_radiation.MU > 0)
                                 & (data_radiation.MUCoeff > 0)]
print(f'The shape of data_radiation_ {data_radiation_.shape}')

# Radiationhstry preprocessing: keep treatments from 2015 onwards
print('\nProcess data_radiationhstry')
print(f'\nThe shape of data_radiationhstry {data_radiationhstry.shape}')
data_radiationhstry_ = data_radiationhstry[data_radiationhstry.TreatmentStartTime > pd.Timestamp('2015-01-01 00:00:00')]
print(f'The shape of data_radiationhstry_ {data_radiationhstry_.shape}')

print(f'\nMerge data_radiation_ and data_radiationhstry_')
ra_rh = pd.merge(data_radiation_, data_radiationhstry_,
                 on=['RadiationSerNum', 'AliasSerNum'], how='inner')
print(f'\nThe shape of ra_rh {ra_rh.shape}')

print('=' * 20)
print(f'Merge data_plan and ra_rh')
print(f'\nThe shape of data_plan {data_plan.shape}')
ra_rh_pl = pd.merge(data_plan, ra_rh, on=['PlanSerNum', 'AliasSerNum'], how='inner')
print(f'The shape of ra_rh_pl {ra_rh_pl.shape}')

print('=' * 20)
print(f'Merge data_course and data_patient')
print(f'\nThe shape of data_course {data_course.shape}')
print(f'The shape of data_patient {data_patient.shape}')
co_pa = pd.merge(data_course, data_patient, on='PatientSerNum', how='inner')
print(f'The shape of co_pa {co_pa.shape}')

print('=' * 20)
print(f'Merge co_pa and ra_rh_pl')
print(f'\nThe shape of co_pa {co_pa.shape}')
print(f'The shape of ra_rh_pl {ra_rh_pl.shape}')
co_pa_ra_rh_pl = pd.merge(co_pa, ra_rh_pl, on=['CourseSerNum', 'AliasSerNum'], how='inner')
print(f'The shape of co_pa_ra_rh_pl {co_pa_ra_rh_pl.shape}')

# Drop columns that hold a single constant value
print('=' * 20)
print(f'Drop columns with same values')
for col in co_pa_ra_rh_pl.columns:
    if len(co_pa_ra_rh_pl[col].unique()) == 1:
        co_pa_ra_rh_pl.drop(col, axis=1, inplace=True)
print(f'\nco_pa_ra_rh_pl shape {co_pa_ra_rh_pl.shape}')
# -

co_pa_ra_rh_pl.columns

# +
feature_columns = [
    'RadiationHstryAriaSer', 'TreatmentStartTime', 'TreatmentEndTime',
    'FractionNumber', 'ImagesTaken', 'UserName', 'RadiationSerNum',
    'RadiationId', 'ResourceSerNum', 'MU', 'MUCoeff', 'TreatmentTime',
    'PatientSerNum', 'CourseId'
]
# .copy() so the column assignments below operate on an independent frame
# instead of a view of co_pa_ra_rh_pl (avoids SettingWithCopyWarning).
data_part2 = co_pa_ra_rh_pl[feature_columns].copy()
# Vectorised datetime accessors replace the original row-wise apply; the
# timestamp columns are datetime64 (they were compared to pd.Timestamp above).
data_part2['date'] = data_part2['TreatmentStartTime'].dt.strftime("%Y--%m--%d")
# NOTE(review): .dt.seconds, like the original per-row (end - start).seconds,
# ignores whole days; switch to .dt.total_seconds() if a session can cross
# into the next day — confirm with the data owners.
data_part2['Treatment_duration'] = (data_part2['TreatmentEndTime'] - data_part2['TreatmentStartTime']).dt.seconds
data_part2.sort_values(by=['PatientSerNum', 'RadiationHstryAriaSer'], inplace=True)
# -

data_part2_grouped = data_part2.groupby('PatientSerNum')
data_part2_grouped.get_group(83)
data_part2.columns
data_part2.to_csv(data_path + 'data_part2.csv')
jupyter_files/eda/治疗信息_treatment_information.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# Import Required Libraries
import pandas as pd
import numpy as np
from pulp import *
from itertools import product
import time

# +
# Read the distance matrix from the excel
karayollari = pd.read_excel('TR81_KGM.xls', index_col=1)
df = karayollari.copy()
city_df = df.iloc[:,1:-2]
city_df.head()


# +
# Check if the distance matrix is symmetric
def check_symmetric(a, rtol=1e-05, atol=1e-08):
    """Return True if matrix `a` equals its transpose within tolerance."""
    # Uses the module-level numpy import; the original re-imported numpy here.
    return np.allclose(a, a.T, rtol=rtol, atol=atol)

check_symmetric(city_df.values)  # False -> road distances are not symmetric

# +
# Replace city names with city codes (Turkish plate codes 1..81).
city_names = df.index
city_df.index = np.arange(1,82)
city_df.columns = np.arange(1,82)

# +
# Keep record of city_code - city_name pairs
city_code_dict = dict(zip(np.arange(1,len(city_names)+1),city_names))

# +
# Select the 81 cities (codes 1..81; the original comment said 82)
cities = np.arange(1,82)
# -

cost = city_df.copy()
# Missing distances get a prohibitively large cost so the solver never
# chooses those arcs.
max_cost = cost.max().max()
cost = cost.fillna(100*max_cost)
n = len(cities)
cost.head()

# +
# Define model objective, variables and constraints
prob = LpProblem(name="TSP_TR_Problem", sense=LpMinimize)  # minimization problem
roads = [(i,j) for i in cities for j in cities]  # arcs (roads)
variables = LpVariable.dicts("Road", (cities, cities), 0, 1, LpInteger)  # binary arc variables
nodes = {}
for i in cities:
    # Order variables used for the MTZ (Miller-Tucker-Zemlin) subtour constraints
    nodes[i] = LpVariable(f'u{i}', 1, len(cities), LpInteger)
# -

# The objective function is added to 'prob' first
prob += (
    lpSum([variables[i][j] * cost.loc[i,j] for (i, j) in roads]),
    "Sum_of_Transporting_Costs",
)

# Degree constraints: each city is exited exactly once and entered exactly
# once.  BUG FIX: the original used `>= 1`, contradicting these comments;
# with strictly positive arc costs the optimum is unchanged, but `== 1` is
# the standard TSP formulation and keeps the relaxation tighter.
for i in cities:
    prob += (lpSum([variables[i][j] for j in cities]) == 1, "Exit_From_City_%s" % i)

for i in cities:
    prob += (
        lpSum([variables[j][i] for j in cities]) == 1,
        "Enter_To_City_%s" % i,
    )

# No paired subtours (i -> j -> i) allowed
for i in cities:
    for j in cities:
        prob += (variables[i][j]+variables[j][i]<=1)

# +
# # Full MTZ subtour constraints, disabled: adding all O(n^2) of them makes
# # the model much slower; instead they are added lazily below only for the
# # subtours actually encountered in earlier solve iterations.
# for (i,j) in product(cities, cities):
#     if (i!=cities[0]) and (j!=cities[0]):
#         prob += (
#             nodes[i] - nodes[j] + (n-1)*variables[i][j] + (n-3)*variables[j][i] <= n-2, f"{i}_{j}_subtour_constraint")

# +
# Subtours encountered in previous solve iterations
subtour1=[10,17,22,39,59,34,41,54,77,16,11,26,43,3,64,20,48,9,35,45]
subtour2=[10,17,22,39,59,34,41,14,81,54,77,16,11,26,43,3,64,20,48,9,35,45]
subtour3=[10,45,35,9,48,20,7,15,32,64,3,43,26,11,16,77,41,54,34,59,39,22,17]
subtour4=[10, 17, 22, 39, 59, 34, 41, 67, 74, 78, 14, 81, 54, 77, 16, 11, 26, 43, 3, 64, 32, 15, 7, 20, 48, 9, 35, 45]

subtour_log = []  # Keep the codes of the cities that were previously in a subtour

subtours = [subtour1, subtour2, subtour3, subtour4]
for subtour in subtours:
    for i in subtour:
        for j in subtour:
            # Skip pairs whose MTZ constraint was already added for an
            # earlier subtour (pulp rejects duplicate constraint names).
            if i!=j and not ((i in subtour_log) and (j in subtour_log)):
                prob += (nodes[i] - nodes[j] + (n-1)*variables[i][j] + (n-3)*variables[j][i] <= n-2, f"{i}_{j}_subtour_constraint")
    subtour_log.extend(subtour)

# +
start = time.time()
prob.solve()
print(time.time()-start, "seconds passed")
# -

LpStatus[prob.status]

# Collect the names of all arc variables set to 1 in the solution.
final_result = []
for v in prob.variables():
    if v.varValue==1:
        final_result.append(v.name)

final_result

# +
# Variable names look like "Road_<source>_<dest>"; the length filter skips
# the short MTZ order variables (u1..u81).  The original had two identical
# loops; one pass fills all three lists.
source = []
destination = []
final_pairs = []
for name in final_result:
    if len(name)>5:
        _, b, c = name.split('_')
        source.append(b)
        destination.append(c)
        final_pairs.append([b,c])

# +
# Check if there exists a self-loop arc (i -> i)
for i in range(0,len(source)):
    if source[i]==destination[i]:
        print('Loop arc exists')
# -

print("# of source cities: ", len(set(source)))
print("# of destination cities: ", len(set(destination)))

final_pairs

# +
# Reconstruct the tour, starting arbitrarily from city 10.
route = ['10']
for i in range(0,80):
    for j in final_pairs:
        if j[0]==route[-1]:
            route.append(j[1])
            break
# -

len(np.unique(np.array(route)))  # Check if len==81. If not, a subtour exists.

np.array(route)

# # Visualization on the map

# +
# api_key
api_key = '<KEY>'
html = 'https://roads.googleapis.com/v1/snapToRoads?path=-35.27801,149.12958|-35.28032,149.12907|-35.28099,149.12929|-35.28144,149.12984|-35.28194,149.13003|-35.28282,149.12956|-35.28302,149.12881|-35.28473,149.12836 &interpolate=true &key=<KEY>'
# -

import urllib.request
script_81_cities.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Demo 5: Frequent Location Set Mining

# +
import warnings
import sys

# Keep the demo output clean of library warnings.
warnings.filterwarnings('ignore')

# Make the loci package importable from the repository root.
sys.path.append('..')

# %load_ext autoreload
# %autoreload 2

import loci as lc
from loci import io
from loci import clustering
from loci import analytics
from loci import plots
# -

# ## Create a GeoDataFrame from a CSV file containing geolocated posts by users

posts_gdf = io.read_poi_csv(
    input_file='../datasets/flickr-berlin.csv',
    col_name='user_id',
    source_crs='EPSG:4326',
    target_crs='EPSG:3068',
)
posts_gdf.head()

# ## Cluster posts together to identify main locations

clustered_posts, cluster_eps = lc.clustering.compute_clusters(
    posts_gdf, alg='hdbscan', min_pts=200
)
border_shapes = lc.clustering.cluster_shapes(clustered_posts, 1, cluster_eps)
plots.map_choropleth(border_shapes, id_field='cluster_id', value_field='size')

# ## Find frequent location sets

frequent_sets = lc.analytics.freq_locationsets(
    location_visits=clustered_posts,
    locations=border_shapes,
    location_id_col='cluster_id',
    locationset_id_col='user_id',
    min_sup=0.01,
    min_length=3,
)
print('Frequent location sets found: ' + str(len(frequent_sets.index)))

# ## Sort results by support

frequent_sets.sort_values(by='support', ascending=False).head()

# ## Sort results by length

frequent_sets.sort_values(by='length', ascending=False).head()

# ## Show selected result on map

# selected_id = 417
selected_id = 336
lc.plots.map_geometry(frequent_sets.to_crs(crs={'init': 'EPSG:4326'}).loc[selected_id].geometry)
notebooks/Demo_05_Frequent_Location_Sets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Calculate CanOE nitrate derivative
#
# Step 2 for uptake and remineralization parameterization. First interpolated
# CanOE nitrate fields (bio---NO3_interpolation.ipynb), now calculate the
# derivative.

import numpy as np
import netCDF4 as nc
import datetime
from calendar import monthrange
from joblib import Parallel

# Load ANHA12 coordinate file:

# Mesh:
mesh = nc.Dataset('/ocean/brogalla/GEOTRACES/data/ANHA12/ANHA12_mesh1.nc')
tmask = np.array(mesh.variables['tmask'])[0,:,:,:]
Z_masked = np.ma.masked_where((tmask > 0.1), tmask)
mdepth = np.array(mesh.variables['nav_lev'])
mlons = np.array(mesh.variables['nav_lon'])
mlats = np.array(mesh.variables['nav_lat'])


# ### Functions:

def load_data(year, month):
    """Load the interpolated CanOE nitrate field for one month.

    Files were created in bio---NO3_interpolation.ipynb.  Returns the NO3
    field converted from mmol/m3 to mol/L.
    """
    folder = '/ocean/brogalla/GEOTRACES/data/bio/Mn_202110/'
    filename = f'NO3_y{year}m{month:02}.nc'
    data = nc.Dataset(folder+filename)
    NO3 = np.array(data.variables['NO3'])
    NO3 = NO3*1e-3*1e-3  # mmol/m3 --> mol/L
    return NO3


def deriv(year, month):
    """Calculate and save the month-to-month change in nitrate (mol/L/s)."""
    if (month < 4) or (month > 7):  # i.e. outside roughly May-August
        # Avoid replenishment of NO3 from mixing by zeroing non-summer months
        diff = np.zeros((50,2400,1632))
    else:
        NO3_1 = load_data(year, month)     # Current month
        NO3_2 = load_data(year, month+1)   # Next month
        nday = monthrange(year, month)[1]  # Number of days in current month
        # Difference NO3_2 - NO3_1, converted mol/L/month --> mol/L/s
        diff = np.subtract(NO3_2, NO3_1)/(3600*24*nday)
    # Save every month (zeros included) so the read-back loop below finds
    # all 12 files.
    save_NO3(f'delta_prod_y{year}m{month:02}.nc', diff[:,:,:])
    return diff


def save_NO3(filename, field):
    """Save one dNO3 forcing field to `filename` in the Mn_202110 folder."""
    # BUG FIX: the original interpolated a literal placeholder instead of the
    # `filename` argument (which was never used), so every month overwrote
    # the same output file.
    ncd = nc.Dataset(f'/ocean/brogalla/GEOTRACES/data/bio/Mn_202110/{filename}', 'w', zlib=True)
    ncd.createDimension('x',1632)
    ncd.createDimension('y',2400)
    ncd.createDimension('deptht', 50)

    # variables
    NO3_var = ncd.createVariable('dNO3', 'float64', ('deptht','y','x'))
    NO3_var.units = 'mol/L/s'
    NO3_var.long_name = 'Month-to-month delta Nitrate'
    NO3_var.coordinates = 'nav_lon nav_lat deptht'
    NO3_var[:] = field[:,:,:]
    ncd.close()
    return


# ## Run:

# ##### Choose year:

# + tags=[]
year = 2005
# -

# Calculate derivative:
# Final result has to be in units of [NO_3]/s
#
# \begin{equation*}
# \frac{\partial{NO_{3}(i)}}{\partial{t}} \approxeq \frac{NO_{3}(i+1) - NO_{3}(i) }{\Delta t} =
# \frac{NO_{3}(2002m2)-NO_{3}(2002m1)}{(24*3600*ndays)}
# \end{equation*}

# + tags=[]
def joblib_solver(deriv, year, month):
    """Thin wrapper so joblib can dispatch deriv(year, month)."""
    dNO3 = deriv(year, month)
    return dNO3


# + tags=[]
months=np.arange(1,13)
joblist=[]
for month in months:
    positional_args=[deriv,year,month]
    keyword_args={}
    joblist.append((joblib_solver,positional_args,keyword_args))
# -

ncores=1
with Parallel(n_jobs=ncores,backend='threading') as parallel:
    results=parallel(joblist)

# Read the 12 monthly files back into one (12, 50, 2400, 1632) array.
i_NO3 = np.zeros((12,50,2400,1632))
for month in range(1,13):
    # BUG FIX: the original hard-coded year 2002 here although `year` is
    # 2005, so it read files from a different run than the one just computed.
    m = nc.Dataset(f'/ocean/brogalla/GEOTRACES/data/bio/Mn_202110/delta_prod_y{year}m{month:02}.nc')
    m1 = np.array(m.variables['dNO3'])
    i_NO3[month-1,:,:,:] = m1
forcing/bio---NO3_derivative.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
import os
import re
import sys
import config
import kmeans
import constants
from sklearn import cluster
import numpy as np
import pandas as pd
import plotly.plotly as py
import extract
import transform
import load
from scipy.stats import zscore

# Pull the main data frame
d_ci = extract.extract_all_lazy()['d_ci']

# Register transform functions here, create them in the transform.py
column_operations = {'ANSWER_num_rooms':transform.answer_num_rooms,
                     'ANSWER_ann_op_rev':transform.answer_ann_op_rev,
                     'ANSWER_ann_revenue':transform.answer_ann_revenue,
                     'ANSWER_num_employees_pos':transform.answer_num_employees_pos,
                     'create_Executive':transform.create_Executive,
                     'ANSWER_cur_base_pay':transform.answer_cur_base_pay
                     #'create_RevPAR':transform.create_RevPAR
                     }

# +
# Create the Modeling dataset
clean_data = transform.clean_model(d_ci, column_operations)
d_ci_v2 = clean_data['data']
metadata = clean_data['metadata']

# +
# Preprocessing step before modeling. Normalize numeric scores via Z-score.
features = ['CREATED_cur_base_pay_hrs','ANSWER_num_rooms','ANSWER_num_employees_pos']
# features = features + ['Gaming/Casino',  # Service Types
#                        'Golf/Country Club',
#                        'Lodging - Full Service',
#                        'Lodging - Select Service',
#                        'Vacation Ownership']
features_norm = [f + "_" for f in features]
d_ci_v2[features_norm] = d_ci_v2[features].apply(lambda x: pd.to_numeric(x), axis=1).apply(zscore)

# Markets/cities go in 0 position
dimensions = ['CITYMARKET','POSITION']
modeling = d_ci_v2[features_norm + dimensions]

# The top X number of markets by the number of properties
number_of_markets = 75
number_of_clusters = 3
markets = transform.top_markets_by_property(modeling, number_of_markets, dimensions[0])
positions = modeling['POSITION'].unique().tolist()
# -

marketdata = pd.DataFrame(markets)
marketdata.to_csv("top_75_markets.csv")

positions

# +
# Cluster each market independently and stack the labelled rows.
total_frame = pd.DataFrame()
distance_frame = pd.DataFrame()

for market in markets:
    temp_frame = modeling[modeling['CITYMARKET'] == market]
    temp_frame = transform.run_model(temp_frame, features_norm, number_of_clusters)
    total_frame = total_frame.append(temp_frame)
    # Per-position variant, kept for reference:
    # for job in positions:
    #     temp_frame_v1 = temp_frame[temp_frame['POSITION'] == job]
    #     if temp_frame_v1.shape[0] > number_of_clusters:
    #         print market, " ", job, " ", temp_frame_v1.shape
    #         temp_frame_v1 = transform.run_model(temp_frame_v1, features_norm, number_of_clusters)
    #         total_frame = total_frame.append(temp_frame_v1)

t_frame = total_frame.merge(d_ci_v2)
# -

t_frame.to_csv("ww_top_30_markets_out.csv", index=False, encoding='utf8')

kwik_frame = load.kwik_analytics(t_frame, False, markets)

# ## Below will have to be moved to a more permanent home. Script to create Dash_v1 data

# +
hotels = kwik_frame['hotels']
jobs = kwik_frame['jobs']

hotels.to_csv(os.path.join(os.getcwd(),'..','..','data','processed','ww_hotels_30.csv'))
jobs.to_csv(os.path.join(os.getcwd(),'..','..','data','processed','ww_jobs_30.csv'))

# +
test = jobs[jobs['CITYMARKET'] == 'Atlanta']
test_v2 = test[test['POSITION'] == 'Bus Person']
test_v2 = test_v2[test_v2['clusters'] == 0]

# BUG FIX: `values` was commented out in the original but still used on the
# next line, raising a NameError.
values = ['clusters','min','25','50','75','max']
test_v3 = test_v2[values]
test_v3
# -

# Build one plotly-style trace dict per cluster tier.
data_butt=[]
for tier in test_v3['clusters']:  # renamed from `cluster`, which shadowed the sklearn import
    temp_dict = {}
    test_v4 = test_v3[test_v3['clusters'] == tier]
    values = ['min','25','50','75','max']
    temp_dict['x'] = values
    temp_dict['y'] = list(test_v4[values].values[0])
    temp_dict['name'] = "Tier: {}".format(tier)
    temp_dict['type'] = ['scatter']
    data_butt.append(temp_dict)


def generate_mselect_data(df, column):
    """Build Dash multi-select options ([{'label': v, 'value': v}, ...])
    for the unique values of `column`.

    BUG FIX: the original called this helper without defining it anywhere
    in this notebook; this mirrors the `payload` construction above.
    """
    return [{'label': v, 'value': v} for v in df[column].unique()]


d = pd.Series(jobs['CITYMARKET'].unique()).to_dict()
payload = []
for key in d.keys():
    newdict = {}
    newdict['label'] = d[key]
    newdict['value'] = d[key]
    payload.append(newdict)

generate_mselect_data(hotels,'PROPERTY_NAME')
generate_mselect_data(jobs, 'POSITION')

# ## Visualization and Cluster Analysis ##
src/data/analytics_kmeans.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib notebook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# +
# Pick the run to visualise (exactly one pair uncommented).
# log_file = [line.strip() for line in open("E:/run/hm_se_ee/logger.txt", 'r')];site = "H&M se"
log_file = [line.strip() for line in open("E:/run/hm_us_ee/logger.txt", 'r')];site = "H&M us"
# log_file = [line.strip() for line in open("E:/run/express_gifts/logger.txt", 'r')];site = "Studio"

# +
# Parse the logger lines into three parallel series.
thresholds = []
for entry in log_file:
    if "Start running with threshold " in entry:
        thresholds.append(float(entry[62:]) / 100)

payments_improvement = []
for entry in log_file:
    if "Improvement totally: " in entry:
        payments_improvement.append(float(entry[entry.index("totally: ") + 9:]) * 100)

time = []
for entry in log_file:
    if "Ended with threshold " in entry:
        start_pos = entry.index("took ") + 5
        time.append(int(entry[start_pos:entry.index(" ", start_pos)]) / 60)

# Keep only the thresholds whose runs produced an improvement entry.
thresholds = thresholds[:len(payments_improvement)]
# -

print(thresholds)
print(payments_improvement)
print(time)

# +
# plt.plot(thresholds, payments_improvement, 'ro')
# plt.axis([0.30, 0.85, 0, 5])
# plt.show()

# Improvement and runtime share the x-axis but use separate y-axes.
fig, ax1 = plt.subplots()

ax1.plot(thresholds, payments_improvement, '-', color='b')
ax1.set_xlabel('Threshold')
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel('Improvement (percent)', color='b')
ax1.tick_params('y', colors='b')

ax2 = ax1.twinx()
ax2.plot(thresholds, time, '-', color='r')
ax2.set_ylabel('Time (hours)', color='r')
ax2.tick_params('y', colors='r')

ax1.set_title(site)
fig.tight_layout()
plt.show()
# -
graph.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Week 6 of Introduction to Biological System Design
# ## Dynamical System Analysis Tools
# ### <NAME>
#
# Pre-requisite: To get the best out of this notebook, make sure that you have a basic understanding of ordinary differential equations. For more information on ODEs you may refer to any standard book on engineering math. To learn more about how to numerically simulate ODEs, refer to [week3_intro_ode.ipynb](https://pages.hmc.edu/pandey/reading/week3_intro_ode.ipynb). Further, it is assumed that you have a working knowledge of the use of Hill functions to model gene regulation. Computational examples with Hill functions are discussed in [week4_hill_functions.ipynb](https://pages.hmc.edu/pandey/reading/week4_hill_functions.pdf).
#
# This notebook presents biological design choices by use of numerical simulations, mathematical models, and response times of biological systems.
#
# Disclaimer: Content in this notebook is inspired by the fabulous [computational notebook](https://colab.research.google.com/github/justinbois/biological-circuit-design-colab/blob/master/chapters/03_small_circuits.ipynb) by <NAME> and <NAME> on Biological Circuit Design.

# # Design Choice - Activator or Repressor

# Problem setting: How can we use computational tools to explore the biological design choice between a repressor and an activator for regulating a gene? As an example, we consider the case of regulating a gene by transcription factors in response to environmental signals of different kinds. The first environmental signal that we consider is a toxin signal. A toxin in the cellular environment will trigger an anti-toxin gene expression in the cell.
The anti-toxin expression may be activated by introducing a chemical inducer that binds to the repressor that keeps the anti-toxin gene repressed. An alternative design could be one where the presence of toxin in the environment triggers a chemical inducer that activates a transcription factor that recruits RNA polymerase to activate the transcription of the anti-toxin gene.
#
# On the other hand, a different kind of environmental signal could be a signal such as a response to glucose in the environment. This would be a pathway that is active most of the time as the cell grows, since the cell requires glucose for various metabolic activities. One of the first transcriptional activators discovered in bacteria was the AraC transcription factor. A primary function of the AraC family transcription factors is to regulate sugar catabolism and the utilization of sugar in cells for various metabolic functions. So, on detecting sugar in the environment, a transcription factor (such as AraC) is triggered. Similar to the toxin signal, the mechanism to activate sugar catabolism genes could involve negative induction of a repressor or positive induction of an activator.
#
# We will use the computational tools at our disposal to create a very simple simulation to explore these design choices.
# ## Environmental Signals - Toxin and Glucose

# +
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np

timepoints = np.linspace(0, 1000, 1000, endpoint = True)

max_toxin_value = 20  # arbitrary units
# Inverted square wave (duty = 0.75): the toxin pulse occupies a quarter of
# each period.
toxin_signal = max_toxin_value*np.ones_like(timepoints) *\
               -1*signal.square(2*np.pi*2*timepoints, duty = 0.75)
# Clamp negative values to zero so the signal is either 0 or max_toxin_value.
toxin_signal[toxin_signal < 0] = 0

fig, ax = plt.subplots(1,2, figsize = (15,5), sharey = True)
ax[0].plot(timepoints, toxin_signal)
ax[0].set_xlabel('Time', fontsize = 18)
ax[0].set_ylabel('Toxin levels', fontsize = 18)
ax[0].tick_params(labelsize = 14)

max_glucose_value = 20  # arbitrary units
# Glucose is present most of the time (duty = 0.25 on the inverted wave).
glucose_signal = max_glucose_value*np.ones_like(timepoints) *\
                 -1*signal.square(2*np.pi*2*timepoints, duty = 0.25)
glucose_signal[glucose_signal < 0] = 0

ax[1].plot(timepoints, glucose_signal)
ax[1].set_xlabel('Time', fontsize = 18)
ax[1].set_ylabel('Glucose levels', fontsize = 18)
ax[1].tick_params(labelsize = 14)
fig.suptitle('Environmental Signal Levels Over Many Cell Generations', fontsize = 18);
# -

# ## Gene Regulation Response (Ideal)

# +
from scipy.integrate import odeint


def regulated_gene(x,t,*args):
    """Hill-activated production minus first-order dilution/degradation."""
    k_tx, u, K, n, d_x = args
    return k_tx*(u**n/(K**n + u**n)) - d_x*x


k_tx = 2
K = 15
d_x = 0.06
n = 2

fig, all_axes = plt.subplots(1,2, figsize = (15,5), sharey = True)
fig.suptitle('Response to Environmental Signals Over' + 'Many Cell Generations', fontsize = 18);


def _plot_ideal_response(ax, input_signal, max_input, response_label):
    """Integrate the regulated gene piecewise over the four off/on/off/on
    input segments and draw each piece in black on `ax`.

    Mirrors the original cell exactly: the first two segments start from
    y0 = 0, the last two continue from the previous segment's endpoint.
    """
    # Segment 1: input off, start from zero.
    prev = 0
    nxt = np.where(input_signal != 0)[0][0]
    t_seg = np.linspace(prev, nxt, nxt - prev)
    sol = odeint(regulated_gene, y0 = 0, t = t_seg, args = (k_tx, 0, K, n, d_x))
    ax.plot(t_seg, sol, 'k', lw = 3)
    # Segment 2: input on; (re)started from zero as in the original script.
    prev = nxt
    zero_idx = np.where(input_signal == 0)[0]
    nxt = zero_idx[np.where(zero_idx > prev)][0]
    t_seg = np.linspace(prev, nxt, nxt - prev)
    sol = odeint(regulated_gene, y0 = 0, t = t_seg, args = (k_tx, max_input, K, n, d_x))
    ax.plot(t_seg, sol, 'k', lw = 3)
    # Segment 3: input off again, continue from the previous state.
    prev = nxt
    on_idx = np.where(input_signal != 0)[0]
    nxt = on_idx[np.where(on_idx > prev)][0]
    t_seg = np.linspace(prev, nxt, nxt - prev)
    sol = odeint(regulated_gene, y0 = sol[-1], t = t_seg, args = (k_tx, 0, K, n, d_x))
    ax.plot(t_seg, sol, 'k', lw = 3)
    # Segment 4: input on until the final time point.
    prev = nxt
    nxt = int(timepoints[-1])
    t_seg = np.linspace(prev, nxt, nxt - prev)
    sol = odeint(regulated_gene, y0 = sol[-1], t = t_seg, args = (k_tx, max_input, K, n, d_x))
    ax.plot(t_seg, sol, 'k', lw = 3, label = response_label)


ax = all_axes[0]
_plot_ideal_response(ax, toxin_signal, max_toxin_value, 'Anti-Toxin')
ax.plot(timepoints, toxin_signal, 'b', lw = 3, alpha = 0.6, label = 'Toxin')
ax.set_xlabel('Time', fontsize = 18)
ax.set_ylabel('Toxin/Anti-Toxin levels', fontsize = 18)
ax.tick_params(labelsize = 14)
ax.legend(fontsize = 14);

ax = all_axes[1]
_plot_ideal_response(ax, glucose_signal, max_glucose_value, 'Sugar Catabolism')
ax.plot(timepoints, glucose_signal, 'b', lw = 3, alpha = 0.6, label = 'Glucose')
ax.set_xlabel('Time', fontsize = 18)
ax.set_ylabel('Glucose/Metabolic TF levels', fontsize = 18)
ax.tick_params(labelsize = 14)
ax.legend(fontsize = 14);
# -

# ## Non-specific binding (leaky) expression with activator
#
# When there is no input => activator is not bound and there can be leaky expression. When there is an input, activator is bound so that there is no leaky expression.
# +
from scipy.integrate import odeint


def regulated_gene(x,t,*args):
    """Hill-activated production minus first-order dilution/degradation."""
    k_tx, u, K, n, d_x = args
    return k_tx*(u**n/(K**n + u**n)) - d_x*x


def leaky_expression(x, t, *args):
    """Constitutive (leaky) production at rate alpha*k_tx minus dilution."""
    alpha, k_tx, d_x = args
    return k_tx*alpha - d_x*x


k_tx = 2
K = 15
d_x = 0.06
n = 2
alpha = 0.09

fig, all_axes = plt.subplots(1,2, figsize = (15,5), sharey = True)
fig.suptitle('Activator Response (with leak) to Environmental Signals' + 'Over Many Cell Generations', fontsize = 18);


def _plot_leaky_panel(ax, input_signal, max_input, leak_alphas, response_label):
    """Draw one panel: regulated response (black) plus leak (red) over the
    four off/on/off/on input segments.

    `leak_alphas` gives the leak strength used in each of the four segments
    (for an activator the leak is present only when the input is off).
    Mirrors the original cell: the regulated solution starts from y0 = 0 in
    the first two segments and continues from the previous endpoint in the
    last two; the leak is integrated from y0 = 0 in every segment.
    """
    seg_inputs = (0, max_input, 0, max_input)
    prev = 0
    sol = None
    for k in range(4):
        if k == 0:
            nxt = np.where(input_signal != 0)[0][0]
            y0 = 0
        elif k == 1:
            zero_idx = np.where(input_signal == 0)[0]
            nxt = zero_idx[np.where(zero_idx > prev)][0]
            y0 = 0
        elif k == 2:
            on_idx = np.where(input_signal != 0)[0]
            nxt = on_idx[np.where(on_idx > prev)][0]
            y0 = sol[-1]
        else:
            nxt = int(timepoints[-1])
            y0 = sol[-1]
        t_seg = np.linspace(prev, nxt, nxt - prev)
        sol = odeint(regulated_gene, y0 = y0, t = t_seg,
                     args = (k_tx, seg_inputs[k], K, n, d_x))
        leak = odeint(leaky_expression, y0 = 0, t = t_seg,
                      args = (leak_alphas[k], k_tx, d_x))
        if k == 3:
            ax.plot(t_seg, leak, 'r', lw = 3, label = 'Leak')
            ax.plot(t_seg, sol, 'k', lw = 3, label = response_label)
        else:
            ax.plot(t_seg, leak, 'r', lw = 3)
            ax.plot(t_seg, sol, 'k', lw = 3)
        prev = nxt


# Activator design: leak is present while the input is absent.
ax = all_axes[0]
_plot_leaky_panel(ax, toxin_signal, max_toxin_value, (alpha, 0, alpha, 0), 'Anti-Toxin')
ax.plot(timepoints, toxin_signal, 'b', lw = 3, alpha = 0.6, label = 'Toxin')
ax.set_xlabel('Time', fontsize = 18)
ax.set_ylabel('Toxin/Anti-Toxin levels', fontsize = 18)
ax.tick_params(labelsize = 14)
ax.legend(fontsize = 14);

ax = all_axes[1]
_plot_leaky_panel(ax, glucose_signal, max_glucose_value, (alpha, 0, alpha, 0), 'Sugar Catabolism')
ax.plot(timepoints, glucose_signal, 'b', lw = 3, alpha = 0.6, label = 'Glucose')
ax.set_xlabel('Time', fontsize = 18)
ax.set_ylabel('Glucose/Metabolic TF levels', fontsize = 18)
ax.tick_params(labelsize = 14)
ax.legend(fontsize = 14);
# -

# ## Non-specific binding (leaky) expression with repressor
#
# When there is no input => repressor is bound and there is no leak. But when there is an input signal, repressor is unbound and there can be leaky expression as well.
# +
def regulated_gene(x, t, *args):
    """ODE right-hand side for a gene whose transcription is an activating
    Hill function of the input level u.

    args = (k_tx, u, K, n, d_x): maximal transcription rate, input level,
    Hill activation constant, Hill coefficient, degradation/dilution rate.
    """
    k_tx, u, K, n, d_x = args
    return k_tx*(u**n/(K**n + u**n)) - d_x*x


def leaky_expression(x, t, *args):
    """ODE right-hand side for non-specific (leaky) expression at a
    fraction `alpha` of the full transcription rate.

    args = (alpha, k_tx, d_x): leak fraction, maximal transcription rate,
    degradation/dilution rate.
    """
    alpha, k_tx, d_x = args
    return k_tx*alpha - d_x*x


from scipy.integrate import odeint

k_tx = 2
K = 15
d_x = 0.06
n = 2
alpha = 0.09  # leak fraction while the repressor is unbound (input present)


def _plot_piecewise_response(ax, signal, max_value, output_label, signal_label, y_label):
    """Integrate and plot the regulated gene and its leak over the four
    alternating OFF/ON segments of the square-wave input `signal`.

    This replaces eight copy-pasted integrate-and-plot stanzas in the
    original cell with one loop; the segment boundaries, initial
    conditions and plotting calls reproduce the unrolled code exactly.
    NOTE(review): relies on `timepoints`, the signal arrays and `plt`
    being defined by earlier cells — confirm when moving this cell.
    """
    on_idx = np.where(signal != 0)[0]   # indices where the input is present
    off_idx = np.where(signal == 0)[0]  # indices where the input is absent
    t1 = on_idx[0]                      # input switches on
    t2 = off_idx[off_idx > t1][0]       # input switches off again
    t3 = on_idx[on_idx > t2][0]         # input switches on a second time
    t_end = int(timepoints[-1])         # last simulated time point
    # (start, stop, input level, leak fraction) per segment: for this
    # repressor the leak is only active (alpha > 0) while the input is
    # present, i.e. while the repressor is unbound.
    segments = [(0, t1, 0, 0),
                (t1, t2, max_value, alpha),
                (t2, t3, 0, 0),
                (t3, t_end, max_value, alpha)]
    solution = [[0]]
    for seg_num, (start, stop, u, leak) in enumerate(segments):
        t_solve = np.linspace(start, stop, stop - start)
        # The first two segments start from zero expression (as in the
        # original unrolled code); later segments carry over the last
        # regulated level. The leak is restarted at zero every segment.
        y0 = 0 if seg_num < 2 else solution[-1]
        solution = odeint(regulated_gene, y0=y0, t=t_solve,
                          args=(k_tx, u, K, n, d_x))
        leaky_solution = odeint(leaky_expression, y0=0, t=t_solve,
                                args=(leak, k_tx, d_x))
        # Only the last segment carries legend labels (one entry per curve).
        is_last = (seg_num == len(segments) - 1)
        ax.plot(t_solve, leaky_solution, 'r', lw=3,
                label='Leak' if is_last else None)
        ax.plot(t_solve, solution, 'k', lw=3,
                label=output_label if is_last else None)
    ax.plot(timepoints, signal, 'b', lw=3, alpha=0.6, label=signal_label)
    ax.set_xlabel('Time', fontsize=18)
    ax.set_ylabel(y_label, fontsize=18)
    ax.tick_params(labelsize=14)
    ax.legend(fontsize=14)


fig, all_axes = plt.subplots(1, 2, figsize=(15, 5), sharey=True)
# Bug fix: the original concatenation was missing the space between
# 'Environmental' and 'Signals'.
fig.suptitle('Repressor Response (with leak) to Environmental ' +
             'Signals Over Many Cell Generations', fontsize=18)
_plot_piecewise_response(all_axes[0], toxin_signal, max_toxin_value,
                         'Anti-Toxin', 'Toxin', 'Toxin/Anti-Toxin levels')
_plot_piecewise_response(all_axes[1], glucose_signal, max_glucose_value,
                         'Sugar Catabolism', 'Glucose', 'Glucose/Metabolic TF levels')
# -

# Two papers that discuss the mechanisms and detailed models behind this demand theory are:
#
# 1. Shinar et al. 2005 "Rules for biological regulation based on error minimization". [URL](https://www.pnas.org/content/pnas/103/11/3999.full.pdf) - Uses the non-specific binding theory to prove the evolutionary selections against repressors in high demand genes and selection against activators in low demand genes.
#
# 2. Gerland et al. 2008 "Evolutionary selection between alternative mode of gene regulation". [URL](https://www.pnas.org/content/pnas/106/22/8841.full.pdf) - Uses mutation models to show that the population size and time-scales of environmental variations guide the evolutionary selection for repressors and activators in different situations.
#
#
# # Design Choice - Response Time
#
# Consider the unregulated gene expression model (from [week3_intro_ode.ipynb](https://pages.hmc.edu/pandey/reading/week3_intro_ode.pdf)):
#
# $\frac{dX}{dt} = k - dX$
#
# We derived the analytical solution for this model in Week 3.
# It is given by:
#
# $X(t) = \frac{k}{d}\left(1 - e^{-d t}\right)$
#
# The steady-state concentration of X is given by $\frac{k}{d}$. Clearly, the response time is only dependent on the degradation parameter $d$. We define the response time as the time that the system takes to reach $1 - \frac{1}{e}$, or approximately 63% of its maximum value. This response time is equal to $t_r = \frac{1}{d}$. The time $t_{1/2}$ is the time the system takes to reach half of the maximum value. We compute these metrics of speed of response using the following code:

# +
# Parameters: production rate k and degradation/dilution rate d.
k = 100
d = 1

# Analytical dynamics X(t) = (k/d)(1 - exp(-d t)).
timepoints = np.linspace(0, 6, 400)
X = k / d * (1 - np.exp(-d * timepoints))

# Plot the response.
ax = plt.axes()
ax.plot(timepoints, X, lw=4)

# Mark the response time (when X reaches the 1 - 1/e level) and the
# half-rise time t_1/2 = ln(2)/d.
# Cleanup: the original also computed the unused level
# x0 = k/d*(1 - exp(-1)); removed.
t0 = 1 / d
t_half = np.log(2)/d
ax.axvline(t0, color = 'k', ls = '--', lw = 4, label = 'Response Time')
ax.axvline(t_half, color = 'r', ls = '--', lw = 4, label = 't-half')
ax.set_xlabel('Time', fontsize = 18)
ax.set_ylabel('$X(t)$', fontsize = 18)
ax.tick_params(labelsize = 14)
ax.legend(fontsize = 14);
# -

# ### Note on 2nd order system response
#
# For 2nd order underdamped systems that overshoot the steady-state value, the response time is usually defined using a rise-time metric. Rise time is defined as the time taken to reach 90% of the steady-state value. To measure the error in response, a settling time metric is defined. Settling time is defined as the time the system takes to reach within 2% (or 5%) of the steady-state value.
# ## Tuning the speed of response

# +
# Parameters: one promoter strength, three degradation rates to compare.
k = 100
d = np.array([1, 2, 3])
colors = ['r','k','b']

# Analytical solution X(t) = (k/d)(1 - exp(-d t)) for each rate.
timepoints = np.linspace(0, 6, 400)
X = [k / rate * (1 - np.exp(-rate * timepoints)) for rate in d]

fig, ax = plt.subplots(1,2, figsize = (15,8))
# Left panel shows raw dynamics, right panel the curves rescaled to their
# steady states so the response times can be compared directly.
ax[0].set_title('k = '+ str(k) + ', d = ' + str(d), fontsize = 18)
ax[1].set_title('Normalized steady-states', fontsize = 18)
for panel in ax:
    panel.set_xlim([0,6])
    panel.set_xlabel('Time', fontsize = 18)
    panel.set_ylabel('$X(t)$', fontsize = 18)
    panel.tick_params(labelsize = 14)

for trace, rate, color in zip(X, d, colors):
    response_time = 1 / rate
    # Raw response with the 63%-of-steady-state point marked.
    ax[0].plot(timepoints, trace, color=color, lw=4, label = 'd = '+str(rate))
    ax[0].scatter(response_time, k / rate * (1 - np.exp(-1)), color=color)
    ax[0].axvline(response_time, color = 'k', ls = '--', lw = 4, alpha = 0.2)
    ax[0].legend(fontsize = 14)
    # Normalized response: larger d reaches steady state sooner.
    ax[1].plot(timepoints, trace / np.max(trace), color=color, lw=4, label = 'd = ' + str(rate))
    ax[1].scatter(response_time, 1 - np.exp(-1), color=color)
    ax[1].axvline(response_time, color = 'k', ls = '--', lw = 4, alpha = 0.2)
    ax[1].legend(fontsize = 14)
# -

# ## Negative autoregulation accelerates response times
#
# Consider the negative autoregulation model from [week4_hill_functions.ipynb](https://pages.hmc.edu/pandey/reading/week4_hill_functions.pdf):
#
# $\frac{dX}{dt} = k \frac{K_d}{K_d + X} - dX$
#
# Let us compare the time response of the negative autoregulation to the unregulated gene expression discussed above:

# +
def negative_autoregulation(x, t, *args):
    """RHS of the negative-autoregulation ODE: X represses its own promoter.

    args = (k, Kd, d): maximal rate, repression constant, degradation rate.
    """
    k, Kd, d = args
    return k * (Kd / (Kd + x)) - d * x


from scipy.integrate import odeint

# Parameters
timepoints = np.linspace(0, 6, 400)
Kd = 1
d = 1
k = 100

# Numerical solution of the autoregulated gene, started from zero.
X_nar = odeint(negative_autoregulation, y0 = 0, t = timepoints, args=(k, Kd, d))
# Closed-form solution of the unregulated gene with the same k and d.
unregulated_X = (k/d)*(1 - np.exp(-d * timepoints))

fig, ax = plt.subplots(1,2, figsize = (15,8))
ax[0].set_title('Negative Autoregulation and Unregulated expression', fontsize = 18)
ax[1].set_title('Normalized steady-states', fontsize = 18)
for panel in ax:
    panel.set_xlim([0,6])
    panel.set_xlabel('Time', fontsize = 18)
    panel.set_ylabel('$X(t)$', fontsize = 18)
    panel.tick_params(labelsize = 14)

ax[0].plot(timepoints, X_nar[:,0], color='b', lw=4, label = 'Negative Autoregulation')
ax[0].plot(timepoints, unregulated_X, color='k', lw=4, label = 'Unregulated')
ax[0].legend(fontsize = 14)
ax[1].plot(timepoints, X_nar[:,0] / np.max(X_nar[:,0]), color='b', lw=4, label = 'Negative Autoregulation')
ax[1].plot(timepoints, unregulated_X / np.max(unregulated_X), color='k', lw=4, label = 'Unregulated')
ax[1].legend(fontsize = 14);
reading/week6_system_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Applied Process Mining Module # # This notebook is part of an Applied Process Mining module. The collection of notebooks is a *living document* and subject to change. # # # Assignment 1 - 'Event Logs and Process Visualization' (Python / PM4Py) # ## Setup # # <img src="https://pm4py.fit.fraunhofer.de/static/assets/images/pm4py-site-logo-padded.png" alt="PM4Py" style="width: 200px;"/> # # In this notebook, we are using the [PM4Py library](https://pm4py.fit.fraunhofer.de/) in combination with several standard Python data science libraries: # # * [pandas](https://pandas.pydata.org/) # * [plotnine](https://plotnine.readthedocs.io/en/stable/) # + ## Perform the commented out commands to install the dependencies # # %pip install pandas # # %pip install matplotlib # # %pip install pm4py # - import pandas as pd import pm4py import plotnine from plotnine import ggplot, geom_point, aes, theme_bw, coord_flip, scale_y_discrete, theme, element_text, geom_bin2d # ## Assignment # # In the first hands-on session, you are going to explore a real-life dataset and apply what was presented in the lecture about event logs and basic process mining visualizations. The objective is to explore your dataset and as an event log and with the learned process mining visualizations in mind. # # * Analyse basic properties of the the process (business process or other process) that has generated it. # * What are possible case notions / what is the or what are the case identifiers? # * What are the activities? Are all activities on the same abstraction level? Can activities be derived from other data? # * Can activities or actions be derived from other (non-activity) data? # * Discovery a map of the process (or a sub-process) behind it. 
# * Are there multiple processes that can be discovered?
# * What is the effect of taking a subset of the data?

# You may use this notebook to conduct the analysis.

# ## Dataset

# The proposed real-life dataset to investigate is the *BPI Challenge 2020* dataset. The dataset is captured from the travel reimbursement process of Eindhoven University of Technology and has been collected for usage in the BPI challenge. The BPI challenge is a yearly event in the Process Mining research community in which an event log is released along with some business questions that shall be addressed with process analytics techniques.
#
# Here is more information on the dataset and download links to the data files:
#
# * [Overview of the Case](https://icpmconference.org/2020/bpi-challenge/)
# * [Dataset](https://doi.org/10.4121/uuid:52fb97d4-4588-43c9-9d04-3604d4613b51)
#
# On the BPI Challenge 2020 website above, there are several reports (including the winners of the challenge) that describe and analyze the dataset in detail. However, we suggest that you first try to explore the dataset without reading the reports. The business questions and a description of the process flow can also be found at the BPI Challenge 2020 website. We repeat it here for convenience:
#
# ### Process Flow
#
# The various declaration documents (domestic and international declarations, pre-paid travel costs and requests for payment) all follow a similar process flow. After submission by the employee, the request is sent for approval to the travel administration. If approved, the request is then forwarded to the budget owner and after that to the supervisor. If the budget owner and supervisor are the same person, then only one of these steps is taken. In some cases, the director also needs to approve the request.
#
# In all cases, a rejection leads to one of two outcomes. Either the employee resubmits the request, or the employee also rejects the request.
# # If the approval flow has a positive result, the payment is requested and made. # # The travel permits follow a slightly different flow as there is no payment involved. Instead, after all approval steps a trip can take place, indicated with an estimated start and end date. These dates are not exact travel dates, but rather estimated by the employee when the permit request is submitted. The actual travel dates are not recorded in the data, but should be close to the given dates in most cases. # # After the end of a trip, an employee receives several reminders to submit a travel declaration. # # After a travel permit is approved, but before the trip starts, employees can ask for a reimbursement of pre-paid travel costs. Several requests can be submitted independently of each other. After the trip ends, an international declaration can be submitted, although sometimes multiple declarations are seen for specific cases. # # It’s important to realize that the process described above is the process for 2018. For 2017, there are some differences as this was a pilot year and the process changed slightly on several occasions. # # ### Business Questions # # The following questions are of interest: # # * What is the throughput of a travel declaration from submission (or closing) to paying? # * Is there are difference in throughput between national and international trips? # * Are there differences between clusters of declarations, for example between cost centers/departments/projects etc.? # * What is the throughput in each of the process steps, i.e. the submission, judgement by various responsible roles and payment? # * Where are the bottlenecks in the process of a travel declaration? # * Where are the bottlenecks in the process of a travel permit (note that there can be mulitple requests for payment and declarations per permit)? # * How many travel declarations get rejected in the various processing steps and how many are never approved? 
# # Then there are more detailed questions # # * How many travel declarations are booked on projects? # * How many corrections have been made for declarations? # * Are there any double payments? # * Are there declarations that were not preceded properly by an approved travel permit? Or are there even declarations for which no permit exists? # * How many travel declarations are submitted by the traveler and how many by a mandated person? # * How many travel declarations are first rejected because they are submitted more than 2 months after the end of a trip and are then re-submitted? # * Is this different between departments? # * How many travel declarations are not approved by budget holders in time (7 days) and are then automatically rerouted to supervisors? # * Next to travel declarations, there are also requests for payments. These are specific for non-TU/e employees. Are there any TU/e employees that submitted a request for payment instead of a travel declaration? # # Similar to the task at the BPI challenge, we are aware that not all questions can be answered on this dataset and we encourage you to come up with new and interesting insights. # ## Data Loading # To simplify the data loading task, here are the initial steps: # + # to be done # - # ## Event Log # # Have a look at the excellent `PM4Py` documentation: https://pm4py.fit.fraunhofer.de/documentation#importing
python/handson1-eventlogs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Imports (cleanup: the original imported `requests` twice)
import os
import requests
import pandas as pd
from dotenv import load_dotenv
import alpaca_trade_api as tradeapi
from MCForecastTools import MCSimulation
import json
from pathlib import Path
# %matplotlib inline
# -

# Load .env environment variables
load_dotenv('alpaca.env')

# +
# Set Alpaca API key and secret from the environment
alpaca_api_key = os.getenv("ALPACA_API_KEY")
alpaca_secret_key = os.getenv("ALPACA_SECRET_KEY")

# Create the Alpaca API object
alpaca = tradeapi.REST(
    alpaca_api_key,
    alpaca_secret_key,
    api_version="v2")


# +
def _load_prices(ticker):
    """Read RESOURCES/<ticker>.csv into a Date-indexed, date-sorted DataFrame.

    European decimal commas are handled via `decimal=','`. This helper
    replaces six copy-pasted read_csv stanzas from the original notebook.
    """
    df = pd.read_csv(f'RESOURCES/{ticker}.csv', index_col="Date",
                     infer_datetime_format=True, parse_dates=True,
                     decimal=',')
    df.sort_index(inplace=True)
    return df


# Per-ticker frames (names kept for any downstream cells that use them)
AMZN_df = _load_prices('AMZN')
FLGT_df = _load_prices('FLGT')
DAC_df = _load_prices('DAC')
BABA_df = _load_prices('BABA')
BA_df = _load_prices('BA')
PYPL_df = _load_prices('PYPL')
AMZN_df.head()
# -

# Combine the tickers into one portfolio frame (inner join keeps only the
# dates shared by every ticker)
portfolio = pd.concat([AMZN_df, BA_df, BABA_df, DAC_df, PYPL_df, FLGT_df],
                      axis='columns', join='inner')
portfolio
columns_custom = ['AMZN','BA','BABA','DAC','PYPL','FLGT']
portfolio.columns = columns_custom
portfolio

# + active=""
# mkvirtualenv pyfolio
# -
notebooks/jsirab/notebook-jsirabNEW.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="_xnMOsbqHz61" # # Auto Colorizador # # La finalidad del proyecto es poder **colorear fotografía antiguas**. El programa coge como input una imagen en blanco y negro y es capaz de outputear la misma imagen con color. La arquitectura se basa en este paper: https://arxiv.org/pdf/1611.07004.pdf # # ![](https://blog.floydhub.com/content/images/2018/06/function_equal_to_three_grids.png) # # # + [markdown] colab_type="text" id="e1_Y75QXJS6h" # ## Importar librerías # # * Tensorflow 2.0 # * Numpy, matplotlib, pillow, etc # # # + id="WK9cv647Ngnz" colab_type="code" colab={} from google.colab import drive, files drive.mount('/content/drive') # + colab_type="code" id="YfIk2es3hJEd" colab={} import os import time import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from PIL import Image from tqdm import tqdm from tensorflow import keras from google.colab import files from IPython.display import clear_output, display # + id="5CCLFG9-yhJr" colab_type="code" colab={} # !pip install -q h5py pyyaml # + [markdown] colab_type="text" id="iYn4MdZnKCey" # ## Cargar el dataset # # Se ha usado un subset de *Places2* como dataset. Más información [en el link](http://places2.csail.mit.edu/download.html). Posteriormente se ha preprocesado para que cada imagen tenga la imagen con color y en blanco y negro y se ha subido al google drive. 
# #

# + colab_type="code" id="Kn-k8kTXuAlv" colab={}
# Directories holding the paired (input|target) training and test images.
PATH = './combined/'
TEST_PATH = './test/'

# + id="soXuxQPNZuzc" colab_type="code" colab={}
# !tar -xvf './drive/My Drive/combined.tar'

# + id="6a0qcQkVPBmY" colab_type="code" colab={}
# Hold out the last 10% of the images as the test split.
os.mkdir('test/')
images = os.listdir('combined')
split = int(0.9*len(images))
for i, image in enumerate(images):
    # Idiom fix: the original `if i < split: pass / else:` empty branch is
    # expressed as a single guard; behavior is unchanged.
    if i >= split:
        os.rename(f'combined/{image}', f'test/{image}')
# NOTE(review): os.listdir order is arbitrary, so the "last 10%" split is
# not deterministic across runs — confirm this is acceptable.

# + id="XVO97bOAffFs" colab_type="code" colab={}
# !ls test | wc -l

# + colab_type="code" id="2CbTEt448b4R" colab={}
BUFFER_SIZE = 500
BATCH_SIZE = 64
IMG_WIDTH = 256
IMG_HEIGHT = 256

# + colab_type="code" id="aO9ZAGH5K3SY" colab={}
def load(image_file):
    """Read one combined JPEG and split it into its two halves.

    Each file stores the network input image on the left half and the
    target (color) image on the right half. Returns both halves as
    float32 tensors, still in the [0, 255] range.
    """
    image = tf.io.read_file(image_file)
    image = tf.image.decode_jpeg(image)
    w = tf.shape(image)[1]
    w = w // 2
    input_image = image[:, :w, :]
    real_image = image[:, w:, :]
    input_image = tf.cast(input_image, tf.float32)
    real_image = tf.cast(real_image, tf.float32)
    return input_image, real_image

# + [markdown] id="q9A4wIaRQXb-" colab_type="text"
# We check that the dataset was loaded correctly.

# + colab_type="code" id="4OLHMpsQ5aOv" colab={}
# NOTE(review): `re` shadows the stdlib regex module name; kept unchanged
# because later cells reference these exact variable names.
inp, re = load(PATH+'Places365_val_00002585.jpg')
plt.figure()
plt.imshow(inp/255.0)
plt.figure()
plt.imshow(re/255.0)

# + [markdown] id="gjKPx5wjSpe3" colab_type="text"
# ## Auxiliary functions
#
# Here we define the code's various helper functions:
#
# - normalize to [-1, 1]
# - resize to IMG_HEIGHT x IMG_WIDTH
# - random crop
# - load images from the dataset
# - load an image from a URL
# - random jitter:
#   - in this step, as mentioned in the paper, the image is resized to a larger size, then a random crop of the original size is taken, and finally a horizontal flip is also applied at random. Four examples are shown below.
# + colab_type="code" id="rwwYQpu9FzDu" colab={}
def resize(input_image, real_image, height, width):
    """Resize both images to (height, width) with nearest-neighbor sampling."""
    input_image = tf.image.resize(input_image, [height, width],
                                  method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    real_image = tf.image.resize(real_image, [height, width],
                                 method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return input_image, real_image

# + colab_type="code" id="Yn3IwqhiIszt" colab={}
def random_crop(input_image, real_image):
    """Take the same random IMG_HEIGHT x IMG_WIDTH crop from both images.

    Stacking first guarantees the identical crop window is applied to the
    input/target pair.
    """
    stacked_image = tf.stack([input_image, real_image], axis=0)
    cropped_image = tf.image.random_crop(
        stacked_image, size=[2, IMG_HEIGHT, IMG_WIDTH, 3])
    return cropped_image[0], cropped_image[1]

# + colab_type="code" id="muhR2cgbLKWW" colab={}
def normalize(input_image, real_image):
    """Rescale pixel values from [0, 255] to [-1, 1]."""
    input_image = (input_image / 127.5) - 1
    real_image = (real_image / 127.5) - 1
    return input_image, real_image

# + colab_type="code" id="fVQOjcPVLrUc" colab={}
@tf.function()
def random_jitter(input_image, real_image):
    """Data augmentation as in the pix2pix paper."""
    # resizing to 286 x 286 x 3
    input_image, real_image = resize(input_image, real_image, 286, 286)
    # randomly cropping back to 256 x 256 x 3
    input_image, real_image = random_crop(input_image, real_image)
    if tf.random.uniform(()) > 0.5:
        # random horizontal mirroring
        input_image = tf.image.flip_left_right(input_image)
        real_image = tf.image.flip_left_right(real_image)
    return input_image, real_image

# + colab_type="code" id="n0OGdi6D92kM" colab={}
# Show four random-jitter examples on the sample pair loaded above.
plt.figure(figsize=(6, 6))
for i in range(4):
    rj_inp, rj_re = random_jitter(inp, re)
    plt.subplot(2, 2, i+1)
    plt.imshow(rj_inp/255.0)
    plt.axis('off')
plt.show()

# + colab_type="code" id="tyaP4hLJ8b4W" colab={}
def load_image_train(image_file):
    """Training pipeline: load, augment with random jitter, normalize."""
    input_image, real_image = load(image_file)
    input_image, real_image = random_jitter(input_image, real_image)
    input_image, real_image = normalize(input_image, real_image)
    return input_image, real_image

# + colab_type="code" id="VB3Z6D_zKSru" colab={}
def load_image_test(image_file):
    """Test pipeline: load, deterministic resize, normalize (no jitter)."""
    input_image, real_image = resize(load(image_file)[0], load(image_file)[1],
                                     IMG_HEIGHT, IMG_WIDTH) if False else (None, None)
    input_image, real_image = load(image_file)
    input_image, real_image = resize(input_image, real_image,
                                     IMG_HEIGHT, IMG_WIDTH)
    input_image, real_image = normalize(input_image, real_image)
    return input_image, real_image

# + id="-zmWf3f7REBs" colab_type="code" colab={}
def download_image(url):
    """Download an image from `url` and return it as a PIL Image.

    Bug fix: `requests` and `BytesIO` were used here without being
    imported anywhere in this file, so every call raised NameError;
    they are now imported locally.
    """
    import requests
    from io import BytesIO
    try:
        response = requests.get(url)
        img = Image.open(BytesIO(response.content))
        return img
    except Exception as e:
        print('Error {}'.format(e))
        # Same behavior as the original `exit(1)`, spelled explicitly so it
        # does not depend on the `site` module's interactive helper.
        raise SystemExit(1)

# + [markdown] colab_type="text" id="PIGN6ouoQxt3"
# ## Define the dataset

# + colab_type="code" id="SQHmYSmk8b4b" colab={}
train_dataset = tf.data.Dataset.list_files(PATH+'*.jpg')
train_dataset = train_dataset.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.map(load_image_train,
                                  num_parallel_calls=tf.data.experimental.AUTOTUNE)
train_dataset = train_dataset.batch(BATCH_SIZE)

# + colab_type="code" id="MS9J0yA58b4g" colab={}
test_dataset = tf.data.Dataset.list_files(TEST_PATH+'*.jpg')
test_dataset = test_dataset.shuffle(BUFFER_SIZE)
test_dataset = test_dataset.map(load_image_test)
test_dataset = test_dataset.batch(1)

# + [markdown] id="bQzRYi4WWCsK" colab_type="text"
# ## Arquitectura pix2pix
#
# Este código está basado en la arquitectura **pix2pix** (por *Isola et al*). Pese a disponer de un generador y un discriminador con objetivos opuestos en busca de un equilibrio de Nash, esta arquitectura es diferente a las GANs normales pues está no coge como input ruido aleatorio (espacio latente), en esta en cambio cogemos como input toda una imagen *x*. Nuestro objetivo es **traducirla** en otra imagen con una estructura similar. Es decir, nuestro generador *G* tiene que producir *G(X)* el cual tendrá que ser indistinguible de *y* (la otra imagen con una estructura similar) para nuestro discriminador *D*.
#
#
# Sus partes principales son:
#
# - **Generador U-NET**: el generador de la pix2pix se parece mucho a un **autoencoder**.
Coge la imagen que tiene que ser traducida, la comprime a un espacio de menos dimensiones llamado **Cuello de Botella** y luego aprende a hacer upsampling a partir de los features extraídos para conseguir la imagen deseada como output. # # Además también tiene ciertos parecidos con una ResNet en la manera en como la información de capas previas es introducida a las siguientes usando las llamadas **skip connections**. En esta arquitectura disponemos de skip connections que salen de la mitat encoder de la red y van a la otra mitad decoder. Esto nos sirve para prevenir que perdamos información en el cuello de botella. # # # - **Discriminador Patch-GAN**: en este discriminador en vez de coger las imágenes y clasificarlas en verdaderas o falsas, se clasifican individualmente diferentes trozos de la imagen así se refuerza el objetivo de conseguir detalles mucho más nítidos. Además es más rápido de clasificar toda una imágen ya que solo tiene que clasificar pequeños trozos y eso significa menos parámetros. # + [markdown] id="MxcXc48QU4Ns" colab_type="text" # ### Bloques auxiliares # # Aquí definiremos los bloques downsample y upsample. Nos será útil pues el generador (U-NET) dispone de los dos y el discriminador (Patch-GAN) también downsamplea. Además hace el código más leíble ya que los bloques se repiten varias veces. 
# + colab_type="code" id="tqqvWxlw8b4l" colab={}
# RGB output.
OUTPUT_CHANNELS = 3

# + colab_type="code" id="3R09ATE_SH9P" colab={}
def downsample(filters, size, apply_batchnorm=True):
    """Conv2D (stride 2) -> optional BatchNorm -> LeakyReLU block.

    Halves the spatial resolution; `apply_batchnorm=False` is used for the
    very first encoder layer, as in the pix2pix paper.
    """
    initializer = tf.random_normal_initializer(0., 0.02)
    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
                               kernel_initializer=initializer, use_bias=False))
    if apply_batchnorm:
        result.add(tf.keras.layers.BatchNormalization())
    result.add(tf.keras.layers.LeakyReLU())
    return result

# + colab_type="code" id="a6_uCZCppTh7" colab={}
# downsampling smoke test on the sample image
down_model = downsample(3, 4)
down_result = down_model(tf.expand_dims(inp, 0))
print (down_result.shape)

# + colab_type="code" id="nhgDsHClSQzP" colab={}
def upsample(filters, size, apply_dropout=False):
    """Conv2DTranspose (stride 2) -> BatchNorm -> optional Dropout -> ReLU.

    Doubles the spatial resolution; dropout acts as the generator's noise
    source in pix2pix.
    """
    initializer = tf.random_normal_initializer(0., 0.02)
    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2DTranspose(filters, size, strides=2, padding='same',
                                        kernel_initializer=initializer, use_bias=False))
    result.add(tf.keras.layers.BatchNormalization())
    if apply_dropout:
        result.add(tf.keras.layers.Dropout(0.5))
    result.add(tf.keras.layers.ReLU())
    return result

# + colab_type="code" id="mz-ahSdsq0Oc" colab={}
# upsampling smoke test
up_model = upsample(3, 4)
up_result = up_model(down_result)
print (up_result.shape)

# + [markdown] id="TrsBfSV2Vd5K" colab_type="text"
# ### Generator

# + colab_type="code" id="lFPI4Nu-8b4q" colab={}
def Generator():
    """Build the U-NET generator: an 8-level encoder/decoder with skip
    connections from each encoder level to the matching decoder level.
    """
    down_stack = [
        downsample(64, 4, apply_batchnorm=False),  # (batch_size, 128, 128, 64)
        downsample(128, 4),  # (batch_size, 64, 64, 128)
        downsample(256, 4),  # (batch_size, 32, 32, 256)
        downsample(512, 4),  # (batch_size, 16, 16, 512)
        downsample(512, 4),  # (batch_size, 8, 8, 512)
        downsample(512, 4),  # (batch_size, 4, 4, 512)
        downsample(512, 4),  # (batch_size, 2, 2, 512)
        downsample(512, 4),  # (batch_size, 1, 1, 512)
    ]
    up_stack = [
        upsample(512, 4, apply_dropout=True),  # (batch_size, 2, 2, 1024)
        upsample(512, 4, apply_dropout=True),  # (batch_size, 4, 4, 1024)
        upsample(512, 4, apply_dropout=True),  # (batch_size, 8, 8, 1024)
        upsample(512, 4),  # (batch_size, 16, 16, 1024)
        upsample(256, 4),  # (batch_size, 32, 32, 512)
        upsample(128, 4),  # (batch_size, 64, 64, 256)
        upsample(64, 4),  # (batch_size, 128, 128, 128)
    ]
    initializer = tf.random_normal_initializer(0., 0.02)
    # tanh keeps the output in [-1, 1], matching the normalized targets.
    last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,
                                           strides=2,
                                           padding='same',
                                           kernel_initializer=initializer,
                                           activation='tanh')  # (bs, 256, 256, 3)
    concat = tf.keras.layers.Concatenate()
    inputs = tf.keras.layers.Input(shape=[None,None,3])
    x = inputs
    # Downsampling through the model, remembering each activation for the
    # skip connections.
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)
    # Deepest activation is the decoder input, not a skip.
    skips = reversed(skips[:-1])
    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = concat([x, skip])
    x = last(x)
    return tf.keras.Model(inputs=inputs, outputs=x)

# + colab_type="code" id="U1N1_obwtdQH" colab={}
generator = Generator()
PATH_generator = 'drive/My Drive/generator_250.h5'
# NOTE(review): `new_model` is loaded but never used in this cell — the
# untrained `generator` is what produces `gen_output`. Confirm intent.
new_model = keras.models.load_model(PATH_generator)
gen_output = generator(inp[tf.newaxis,...], training=False)
plt.imshow(gen_output[0,...])

# + colab_type="code" id="0HXiSCiAjkU-" colab={}
# !ls 'drive/My Drive'

# + [markdown] id="eJQwZrtGVg85" colab_type="text"
# ### Discriminator

# + colab_type="code" id="ll6aNeQx8b4v" colab={}
def Discriminator():
    """Build the PatchGAN discriminator: classifies overlapping patches of
    the (input, target) pair as real or fake via a 30x30 logit map.
    """
    initializer = tf.random_normal_initializer(0., 0.02)
    inp = tf.keras.layers.Input(shape=[None, None, 3], name='input_image')
    tar = tf.keras.layers.Input(shape=[None, None, 3], name='target_image')
    # The discriminator sees the conditioning input and the candidate
    # output concatenated along channels.
    x = tf.keras.layers.concatenate([inp, tar])  # (batch_size, 256, 256, channels*2)
    down1 = downsample(64, 4, False)(x)  # (batch_size, 128, 128, 64)
    down2 = downsample(128, 4)(down1)  # (batch_size, 64, 64, 128)
    down3 = downsample(256, 4)(down2)  # (batch_size, 32, 32, 256)
    zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3)  # (batch_size, 34, 34, 256)
    conv = tf.keras.layers.Conv2D(512, 4, strides=1,
                                  kernel_initializer=initializer,
                                  use_bias=False)(zero_pad1)  # (batch_size, 31, 31, 512)
    batchnorm1 = tf.keras.layers.BatchNormalization()(conv)
    leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)
    zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu)  # (batch_size, 33, 33, 512)
    # No activation: raw logits, consumed by BinaryCrossentropy(from_logits=True).
    last = tf.keras.layers.Conv2D(1, 4, strides=1,
                                  kernel_initializer=initializer)(zero_pad2)  # (batch_size, 30, 30, 1)
    return tf.keras.Model(inputs=[inp, tar], outputs=last)

# + colab_type="code" id="gDkA05NE6QMs" colab={}
discriminator = Discriminator()
disc_out = discriminator([inp[tf.newaxis,...], gen_output], training=False)
plt.imshow(disc_out[0,...,-1], vmin=-20, vmax=20, cmap='RdBu_r')
plt.colorbar()

# + [markdown] id="PHaf6U4ZVpUx" colab_type="text"
# ### Losses

# + colab_type="code" id="cyhxTuvJyIHV" colab={}
# Weight of the L1 reconstruction term relative to the GAN term.
LAMBDA = 100

# + colab_type="code" id="Q1Xbz5OaLj5C" colab={}
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)

# + colab_type="code" id="wkMNfBWlT-PV" colab={}
def discriminator_loss(disc_real_output, disc_generated_output):
    """BCE loss pushing real patches toward 1 and generated ones toward 0."""
    real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)
    generated_loss = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)
    total_disc_loss = real_loss + generated_loss
    return total_disc_loss

# + colab_type="code" id="90BIcCKcDMxz" colab={}
def generator_loss(disc_generated_output, gen_output, target):
    """GAN loss (fool the discriminator) plus LAMBDA-weighted L1 to target."""
    gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)
    # mean absolute error
    l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
    total_gen_loss = gan_loss + (LAMBDA * l1_loss)
    return total_gen_loss

# + [markdown] id="kvqWltpUV-uF" colab_type="text"
# ### Optimizers

# + colab_type="code" id="iWCn_PVdEJZ7" colab={}
generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

# + [markdown] id="HEoTT_WxV8D8" colab_type="text"
# ### Checkpoints

# + colab_type="code" id="WJnftd5sQsv6" colab={}
# Checkpointing is currently disabled (kept as a string literal, unchanged).
"""
if not os.path.exists('./checkpoints'):
  os.makedirs('./checkpoints')
checkpoint_dir = './checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)
"""

# + colab_type="code" id="NS2GWywBbAWo" colab={}
EPOCHS = 6

# + colab_type="code" id="RmdVsmvhPxyy" colab={}
def generate_images(model, test_input, tar):
    """Plot input, ground truth and prediction side by side for one batch."""
    # training=True keeps dropout active, as recommended for pix2pix inference.
    prediction = model(test_input, training=True)
    plt.figure(figsize=(15,15))
    display_list = [test_input[0], tar[0], prediction[0]]
    title = ['Input Image', 'Ground Truth', 'Predicted Image']
    for i in range(3):
        plt.subplot(1, 3, i+1)
        plt.title(title[i])
        # getting the pixel values between [0, 1] to plot it.
        plt.imshow(display_list[i] * 0.5 + 0.5)
        plt.axis('off')
    plt.show()

# + id="cGVwLMjfFWe-" colab_type="code" colab={}
# TensorBoard logging, one run directory per launch time.
import datetime
log_dir="logs/"
summary_writer = tf.summary.create_file_writer(
    log_dir + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

# + [markdown] id="VqdJkpPpVvCS" colab_type="text"
# ## Training

# + colab_type="code" id="KBKUV2sKXDbY" colab={}
@tf.function
def train_step(input_image, target, epoch):
    """One adversarial step: forward both nets, compute losses inside the
    tapes, then apply gradients to generator and discriminator.
    """
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = generator(input_image, training=True)
        disc_real_output = discriminator([input_image, target], training=True)
        disc_generated_output = discriminator([input_image, gen_output], training=True)
        gen_loss = generator_loss(disc_generated_output, gen_output, target)
        disc_loss = discriminator_loss(disc_real_output, disc_generated_output)
    generator_gradients = gen_tape.gradient(gen_loss,
                                            generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(disc_loss,
                                                 discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(generator_gradients,
                                            generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients,
                                                discriminator.trainable_variables))
    with summary_writer.as_default():
        tf.summary.scalar('gen_total_loss', gen_loss, step=epoch)
        tf.summary.scalar('disc_loss', disc_loss, step=epoch)

# + id="4jru8C7GTj7z" colab_type="code" colab={}
if not os.path.exists('./generator'):
    os.makedirs('./generator')

# + colab_type="code" id="2M7LmLtGEMQJ" colab={}
def train(dataset, epochs):
    """Train for `epochs` passes over `dataset`, logging via train_step.

    NOTE(review): `start` is set but the elapsed-time print below is
    commented out, as is per-epoch checkpointing/preview.
    """
    for epoch in range(epochs):
        start = time.time()
        for input_image, target in tqdm(dataset):
            train_step(input_image, target, epoch)
        #checkpoint.save(file_prefix = checkpoint_prefix)
        #for inp, tar in test_dataset.take(10):
        #  generate_images(generator, inp, tar)
        #  clear_output(wait=True)
        #print ('Time taken for epoch {} is {} sec\n'.format(epoch + 1, time.time()-start))

# + colab_type="code" id="a1zZmKmvOH85" colab={}
train(train_dataset, 5)

# + colab_type="code" id="fNA-xMKrjj_4" colab={}
# %load_ext tensorboard
# %tensorboard --logdir {log_dir}

# + [markdown] id="9ZacugPtV1T4" colab_type="text"
# ## Save the models

# + colab_type="code" id="IVrfuIV5jjqz" colab={}
if not os.path.exists('./generator'):
    os.makedirs('./generator')
#tf.saved_model.save(generator, "./generator/")
generator.save('./generator/generator_final.h5')

# + [markdown] colab_type="text" id="O3zO6UIGjjW4"
# ## Test the latest checkpoint

# + colab_type="code" id="KUgSnmy2nqSP" colab={}
for inp, tar in test_dataset.take(50):
    generate_images(generator, inp, tar)

# + [markdown] id="Z36k1eMHYzaU" colab_type="text"
# ## Save the generator

# + id="fufpt6svPlnG" colab_type="code" colab={}
# !zip -r generator3.zip generator

# + [markdown] id="aYkBJrZko1lC" colab_type="text"
# # Test saved model

# + id="h4y_qWXNo449" colab_type="code" colab={}
PATH_generator = 'drive/My Drive/generator_3'
new_model = keras.models.load_model(PATH_generator)
for inp, tar in test_dataset.take(10):
    generate_images(new_model, inp, tar)
# ==== end of AutoColorizador.ipynb / start of next notebook ====
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import os
import sys
from scipy.optimize import curve_fit
import mdtraj as md
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import ticker as mticker
# %matplotlib inline
import glob
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable


def split_NHVecs(nhvecs, dt, tau):
    """
    Split the N-H vector trajectories into chunks based off tau_m, the
    "memory" time, so each chunk is treated as an independent sample of
    the correlation function.

    Parameters
    ----------
    nhvecs : sequence of ndarray
        One (n_frames, n_residues, 3) array per trajectory file
        -- assumed shape, TODO confirm against the caller.
    dt : float
        Time between saved frames (same units as tau).
    tau : float
        Memory time; each chunk holds tau/dt frames.

    Returns
    -------
    ndarray of shape (n_chunks, frames_per_chunk, n_residues, 3).
    """
    nFiles = len(nhvecs)
    nFramesPerChunk = int(tau / dt)  # frames per chunk = tau / timestep
    used_frames = np.zeros(nFiles, dtype=int)
    remainingFrames = np.zeros(nFiles, dtype=int)
    for i in range(nFiles):
        nFrames = nhvecs[i].shape[0]
        # keep only whole chunks; the remainder of each file is discarded
        used_frames[i] = int(nFrames / nFramesPerChunk) * nFramesPerChunk
        remainingFrames[i] = nFrames % nFramesPerChunk

    nFramesTot = int(used_frames.sum())
    # FIX: the original indexed the *global* NHVecs here instead of the
    # `nhvecs` argument, breaking the function for any other input.
    out = np.zeros((nFramesTot, nhvecs[0].shape[1], nhvecs[0].shape[2]),
                   dtype=nhvecs[0].dtype)
    start = 0
    for i in range(nFiles):
        end = int(start + used_frames[i])
        endv = int(used_frames[i])
        out[start:end, ...] = nhvecs[i][0:endv, ...]
        start = end

    sh = out.shape
    vecs = out.reshape((int(nFramesTot / nFramesPerChunk), nFramesPerChunk,
                        sh[-2], sh[-1]))
    return vecs


def _bound_check(func, params):
    """
    Checks if the fit returns a sum of the amplitudes greater than 1.

    Odd-length parameter vectors come from the "constrained" fit forms
    where the last amplitude is implicit (1 - sum of the others).
    """
    if len(params) == 1:
        return False
    elif len(params) % 2 == 0:
        s = sum(params[0::2])
        return (s > 1)
    else:
        s = params[0] + sum(params[1::2])
        return (s > 1)


def calc_chi(y1, y2, dy=[]):
    """
    Calculates the chi^2 difference between the predicted model (y2) and
    the actual data (y1), optionally weighted by the data errors dy.
    """
    # FIX: `dy != []` is an elementwise (and ambiguous) comparison when dy
    # is an ndarray; test for a non-empty array explicitly instead.
    if np.size(dy) > 0:
        return np.sum((y1 - y2)**2.0 / dy) / len(y1)
    else:
        return np.sum((y1 - y2)**2.0) / len(y1)


# Multi-exponential model functions.  Odd-numbered forms constrain the
# amplitudes to sum to 1 (the last amplitude is implicit); even-numbered
# forms leave all amplitudes free.
def func_exp_decay1(t, tau_a):
    return np.exp(-t / tau_a)

def func_exp_decay2(t, A, tau_a):
    return A * np.exp(-t / tau_a)

def func_exp_decay3(t, A, tau_a, tau_b):
    return A * np.exp(-t / tau_a) + (1 - A) * np.exp(-t / tau_b)

def func_exp_decay4(t, A, tau_a, B, tau_b):
    return A * np.exp(-t / tau_a) + B * np.exp(-t / tau_b)

def func_exp_decay5(t, A, tau_a, B, tau_b, tau_g):
    return (A * np.exp(-t / tau_a) + B * np.exp(-t / tau_b)
            + (1 - A - B) * np.exp(-t / tau_g))

def func_exp_decay6(t, A, tau_a, B, tau_b, G, tau_g):
    return (A * np.exp(-t / tau_a) + B * np.exp(-t / tau_b)
            + G * np.exp(-t / tau_g))

def func_exp_decay7(t, A, tau_a, B, tau_b, G, tau_g, tau_d):
    return (A * np.exp(-t / tau_a) + B * np.exp(-t / tau_b)
            + G * np.exp(-t / tau_g) + (1 - A - B - G) * np.exp(-t / tau_d))

def func_exp_decay8(t, A, tau_a, B, tau_b, G, tau_g, D, tau_d):
    return (A * np.exp(-t / tau_a) + B * np.exp(-t / tau_b)
            + G * np.exp(-t / tau_g) + D * np.exp(-t / tau_d))

def func_exp_decay9(t, A, tau_a, B, tau_b, G, tau_g, D, tau_d, tau_e):
    return (A * np.exp(-t / tau_a) + B * np.exp(-t / tau_b)
            + G * np.exp(-t / tau_g) + D * np.exp(-t / tau_d)
            + (1 - A - B - G - D) * np.exp(-t / tau_e))

def func_exp_decay10(t, A, tau_a, B, tau_b, G, tau_g, D, tau_d, E, tau_e):
    return (A * np.exp(-t / tau_a) + B * np.exp(-t / tau_b)
            + G * np.exp(-t / tau_g) + D * np.exp(-t / tau_d)
            + E * np.exp(-t / tau_e))


def _return_parameter_names(num_pars):
    """
    Function that returns the names of the parameters for writing to the
    dataframe after the fit.  num_pars is the number of parameters in the
    fit; 1,3,5,7,9 are the constrained forms (last amplitude implicit),
    the even numbers are the unconstrained forms.
    """
    if num_pars == 1:
        # FIX: the 1-parameter fit (func_exp_decay1) has only tau_a;
        # the original returned two names for one parameter.
        return ['tau_a']
    elif num_pars == 2:
        return ['C_a', 'tau_a']
    elif num_pars == 3:
        return ['C_a', 'tau_a', 'tau_b']
    elif num_pars == 4:
        return ['C_a', 'tau_a', 'C_b', 'tau_b']
    elif num_pars == 5:
        return ['C_a', 'tau_a', 'C_b', 'tau_b', 'tau_g']
    elif num_pars == 6:
        return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g']
    elif num_pars == 7:
        return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g', 'tau_d']
    elif num_pars == 8:
        return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g', 'C_d', 'tau_d']
    elif num_pars == 9:
        return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g', 'C_d', 'tau_d',
                'tau_e']
    elif num_pars == 10:
        return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g', 'C_d', 'tau_d',
                'C_e', 'tau_e']


def do_Expstyle_fit2(num_pars, x, y, dy=np.empty([]), tau_mem=50.):
    """
    Performs the exponential fit on the function defined by num_pars using
    scipy.optimize.curve_fit, with initial guesses for the amplitudes and
    correlation times.

    tau_mem is the maximum tau that the correlation times can take, which
    bounds the fit.  Can also be set to np.inf if you want no bounds.

    Returns the chi-squared value of the fit to the model along with the
    parameter values (popt), the parameter covariance/error (popv) and the
    model itself (ymodel).
    """
    b1_guess = y[0] / num_pars / 2
    t1_guess = [tau_mem / 1280.0, tau_mem / 640.0, tau_mem / 64.0,
                tau_mem / 8.0]

    if num_pars == 1:
        func = func_exp_decay1
        guess = (t1_guess[2])
        bound = (0., np.inf)
    elif num_pars == 2:
        func = func_exp_decay2
        guess = (b1_guess, t1_guess[2])
        bound = ([0.0, x[0]], [1., tau_mem])
    elif num_pars == 3:
        func = func_exp_decay3
        guess = (b1_guess, t1_guess[3], t1_guess[2])
        bound = ([0.0, x[0], x[0]], [1., tau_mem, tau_mem])
    elif num_pars == 4:
        func = func_exp_decay4
        guess = (b1_guess, t1_guess[3], b1_guess, t1_guess[2])
        bound = ([0.0, x[0], 0.0, x[0]], [1., tau_mem, 1., tau_mem])
    elif num_pars == 5:
        func = func_exp_decay5
        guess = (b1_guess, t1_guess[3], b1_guess, t1_guess[2], t1_guess[1])
        bound = ([0.0, x[0], 0.0, x[0], x[0]],
                 [1., tau_mem, 1., tau_mem, tau_mem])
    elif num_pars == 6:
        func = func_exp_decay6
        guess = (b1_guess, t1_guess[3], b1_guess, t1_guess[2], b1_guess,
                 t1_guess[1])
        #bound=([0.0, x[0], 0.0, x[0], 0.0, x[0]],[1., tau_mem, 1., tau_mem, 1., tau_mem])
        bound = ([0.0, x[0], 0.0, x[0], 0.0, x[0]],
                 [1., np.inf, 1., np.inf, 1., np.inf])
    elif num_pars == 7:
        func = func_exp_decay7
        guess = (b1_guess, t1_guess[2], b1_guess, t1_guess[1], b1_guess,
                 t1_guess[0], t1_guess[3])
        bound = ([0.0, x[0], 0.0, x[0], 0.0, x[0], x[0]],
                 [1., tau_mem, 1., tau_mem, 1., tau_mem, tau_mem])
    elif num_pars == 8:
        func = func_exp_decay8
        guess = (b1_guess, t1_guess[3], b1_guess, t1_guess[2], b1_guess,
                 t1_guess[1], b1_guess, t1_guess[0])
        # FIX: the original never assigned `bound` for the 8-parameter fit,
        # which raised NameError at the curve_fit call below.
        bound = ([0.0, x[0], 0.0, x[0], 0.0, x[0], 0.0, x[0]],
                 [1., tau_mem, 1., tau_mem, 1., tau_mem, tau_mem])

    #if dy != []:
    #    popt, popv = curve_fit(func, x, y, p0=guess, sigma=dy, bounds=bound, method='trf', loss='soft_l1')
    #else:
    popt, popv = curve_fit(func, x, y, p0=guess, bounds=bound, loss='soft_l1')

    ymodel = [func(x[i], *popt) for i in range(len(x))]

    bExceed = _bound_check(func, popt)
    if bExceed:
        # FIX: Python-2 `print >> sys.stderr` was a SyntaxError under Python 3.
        print("= = = WARNING, curve fitting in do_LSstyle_fit returns a sum>1.//",
              file=sys.stderr)
        return 9999.99, popt, np.sqrt(np.diag(popv)), ymodel
    else:
        return calc_chi(y, ymodel, dy), popt, popv, ymodel


# +
def fitCorrF(CorrDF, dCorrDF, tau_mem, pars_l, fixfit):
    """
    Main function to fit the correlation function.

    Loops over all residues with N-H vectors, fits each correlation curve,
    and passes the collected results to fitstoDF to build a dataframe of
    the best fits.

    CorrDF / dCorrDF hold the correlation functions and their errors
    (one column per residue); tau_mem is where the correlation function is
    cut off; pars_l is the list of parameter counts to try.  If you want a
    fixed parameter set, set fixfit=True and pass a list of length 1.
    """
    NH_Res = CorrDF.columns
    chi_list = []
    names_list = []
    pars_list = []
    errs_list = []
    ymodel_list = []
    covarMat_list = []

    for i in CorrDF.columns:
        tstop = np.where(CorrDF.index.values == tau_mem)[0][0]
        x = CorrDF.index.values[:tstop]
        y = CorrDF[i].values[:tstop]
        dy = dCorrDF[i].values[:tstop]

        ## if not fixfit then find the best expstyle fit.
        ## Otherwise force the fit to nparams.
        if (not fixfit) & (len(pars_l) > 1):
            print("Finding the best fit for residue {}".format(i))
            # NOTE(review): findbest_Expstyle_fits2 and thresh are defined
            # elsewhere in this notebook -- confirm before using this path.
            chi, names, pars, errs, ymodel, covarMat = findbest_Expstyle_fits2(
                x, y, tau_mem, dy, par_list=pars_l, threshold=thresh)
        elif (fixfit) & (len(pars_l) == 1):
            print("Performing a fixed fit for {} exponentials".format(
                int(pars_l[0] / 2)))
            chi, pars, covarMat, ymodel = do_Expstyle_fit2(
                pars_l[0], x, y, dy, tau_mem)
            names = _return_parameter_names(len(pars))
            errs = np.sqrt(np.diag(covarMat))
        else:
            print("The list of parameters is empty. Breaking out.")
            break

        chi_list.append(chi)
        names_list.append(names)
        pars_list.append(pars)
        errs_list.append(errs)
        ymodel_list.append(ymodel)
        covarMat_list.append(covarMat)

    FitDF = fitstoDF(NH_Res, chi_list, pars_list, errs_list, names_list)

    return FitDF, covarMat_list
# -


def fitstoDF(resnames, chi_list, pars_list, errs_list, names_list):
    """
    Function that takes the residue names, chi^2, parameters, errors and
    names of the fits and returns a dataframe of the parameters, sorted by
    descending correlation time, plus AUC (C*tau) columns.
    """
    ## Set up column indices and names for the dataframe
    mparnames = _return_parameter_names(8)
    mtau_names = np.array(mparnames)[1::2]
    mc_names = np.array(mparnames)[::2]
    colnames = np.array(['Resname', 'NumExp'])
    tau_errnames = np.array([[c, "{}_err".format(c)]
                             for c in mtau_names]).flatten()
    mc_errnames = np.array([[c, "{}_err".format(c)]
                            for c in mc_names]).flatten()
    colnames = np.hstack([colnames, mc_errnames])
    colnames = np.hstack([colnames, tau_errnames])
    colnames = np.hstack([colnames, np.array(['Chi_Fit'])])

    FitDF = pd.DataFrame(index=np.arange(len(pars_list)),
                         columns=colnames).fillna(0.0)
    FitDF['Resname'] = resnames
    FitDF['Chi_Fit'] = chi_list

    for i in range(len(pars_list)):
        npar = len(pars_list[i])
        if (npar % 2) == 1:
            # constrained fit: the last amplitude is implicit (1 - sum)
            ccut = npar - 2
            tau_f, terr = pars_list[i][1:ccut + 1:2], errs_list[i][1:ccut + 1:2]
            tau_f = np.hstack([tau_f, pars_list[i][-1]])
            terr = np.hstack([terr, errs_list[i][-1]])
            sort_tau = np.argsort(tau_f)
            coeff, cerr = pars_list[i][0:ccut:2], errs_list[i][0:ccut:2]
            Clast = 1
            Clasterr = 0.0
            for n, m in zip(coeff, cerr):
                Clast -= n        # subtract the other amplitudes
                Clasterr += m     # add the errors
            coeff = np.hstack([coeff, np.array(Clast)])
            cerr = np.hstack([cerr, np.array(Clasterr)])
            tne = np.array([[c, "{}_err".format(c)]
                            for c in mparnames[1:npar + 1:2]]).flatten()
            cne = np.array([[c, "{}_err".format(c)]
                            for c in mparnames[0:npar:2]]).flatten()
        else:
            tau_f, terr = pars_list[i][1::2], errs_list[i][1::2]
            coeff, cerr = pars_list[i][0::2], errs_list[i][0::2]
            sort_tau = np.argsort(tau_f)[::-1]
            tne = np.array([[c, "{}_err".format(c)]
                            for c in names_list[i][1::2]]).flatten()
            cne = np.array([[c, "{}_err".format(c)]
                            for c in names_list[i][0::2]]).flatten()

        NumExp = np.array(len(tau_f))
        tau_err = np.array([[t, e] for t, e in
                            zip(tau_f[sort_tau], terr[sort_tau])]).flatten()
        c_err = np.array([[c, e] for c, e in
                          zip(coeff[sort_tau], cerr[sort_tau])]).flatten()
        namesarr = np.hstack([np.array('NumExp'), cne, tne])
        valarr = np.hstack([NumExp, c_err, tau_err])

        FitDF.loc[i, namesarr] = valarr

    # areas under the curve: amplitude * correlation time
    FitDF['AUC_a'] = FitDF.C_a * FitDF.tau_a
    FitDF['AUC_b'] = FitDF.C_b * FitDF.tau_b
    FitDF['AUC_g'] = FitDF.C_g * FitDF.tau_g
    FitDF['AUC_d'] = FitDF.C_d * FitDF.tau_d
    FitDF['AUC_Total'] = FitDF[['AUC_a', 'AUC_b', 'AUC_g', 'AUC_d']].sum(axis=1)
    FitDF['A_Total'] = FitDF[['C_a', 'C_b', 'C_g', 'C_d']].sum(axis=1)

    return FitDF


def J_direct_transform(om, consts, taus):
    """
    Calculation of the spectral density J(omega) from the parameters of the
    fit by direct Fourier transform.  taus are in ns (hence the 1e-9).
    """
    ## Calculation for the direct spectral density
    ndecay = len(consts)
    noms = 1  ###lnden(om)
    Jmat = np.zeros((ndecay, noms))
    for i in range(ndecay):
        Jmat[i] = (consts[i] * (taus[i] * 1e-9)
                   / (1 + np.power((taus[i] * 1e-9) * (om), 2.)))
    return Jmat.sum(axis=0)


def calc_NMR_Relax(J, fdd, fcsa, gammaH, gammaN):
    """
    Function to calculate the R1, R2 and NOE from the spectral densities
    and the physical parameters for the dipole-dipole and CSA
    contributions, fdd and fcsa.
    """
    R1 = fdd * (J['Diff'] + 3 * J['15N'] + 6 * J['Sum']) + fcsa * J['15N']

    R2 = (0.5 * fdd * (4 * J['0'] + J['Diff'] + 3 * J['15N'] + 6 * J['1H']
                       + 6 * J['Sum'])
          + (1. / 6.) * fcsa * (4 * J['0'] + 3 * J['15N']))

    NOE = 1 + ((fdd * gammaH) / (gammaN * R1)) * (6 * J['Sum'] - J['Diff'])

    return R1, R2, NOE


# +
# Physical constants for the 15N relaxation calculation.
H_gyro = 2 * np.pi * 42.57748 * 1e6    ## Gyromagnetic Ratio: Hydrogen ([rad]/[s][T])
N_gyro = -2 * np.pi * 4.317267 * 1e6   ## Gyromagnetic Ratio: Nitrogen ([rad]/[s][T])
B0 = 14.1  ## Field Strength = 18.8 Teslas (800), 16.4 (700), 14.1 (600)

## Need 5 Frequencies: J[0], J[wH], J[wN], J[wH-wN], J[wH+wN]
Larmor1H = H_gyro * B0    ## Larmor Frequency: Hydrogen ([rad]/[s])
Larmor15N = N_gyro * B0   ## Larmor Frequency: Nitrogen ([rad]/[s])
omDiff = Larmor1H - Larmor15N  ## Diff in Larmor Frequencies of Spin IS
omSum = Larmor1H + Larmor15N   ## Sum of Larmor Frequencies of Spin IS

#vB = 800 ## 800 MHz B-field
#mu_0 = 8.85418782e-12 ; # m^-3 kg^-1 s^4 A^2
mu_0 = 4 * np.pi * 1e-7  ## H/m
hbar = 1.0545718e-34  # [J] * [s] = [kg] * [m^2] * [s^-1]
####omegaB = 2.0*np.pi*vB / 267.513e6 ##(800 MHz) ?????
# Dipole-dipole and CSA prefactors for the 15N-1H spin pair.
R_NH = 1.02e-10      ## distance between N-H atoms in Angstroms
dSigmaN = -170e-6    # 15N CSA

#mu_0=1
###f_DD = 7.958699205571828e-67 * R_NH**-6.0 * N_gyro**2
FDD = (1. / 10.) * np.power(
    (mu_0 * hbar * H_gyro * N_gyro) / (4 * np.pi * np.power(R_NH, 3)), 2)
#FCSA = 498637299.69233465
FCSA = (2.0 / 15.0) * (Larmor15N**2) * (dSigmaN**2)  ## CSA factor
# -

# Correlation functions and their errors, one column per residue.
CtDF14 = pd.read_csv('Ct_ab40_f_278_2500_final.csv', index_col=0)
dCtDF14 = pd.read_csv('dCt_ab40_f_278_2500_final.csv', index_col=0)

# Plot the correlation functions in a 3x3 grid; NCt gives how many residue
# columns go into each panel.
figCt, axesCt = plt.subplots(3, 3, sharey=True, sharex=True, figsize=(24, 16))
figCt.subplots_adjust(hspace=0.05, wspace=0.05)
NCt = [5, 5, 5, 5, 4, 4, 4, 4, 4]
start = 0
for ncols, ax in zip(NCt, axesCt.flatten()):
    end = start + ncols
    CtDF14.iloc[:, start:end].plot(grid=True, ax=ax, logx=True,
                                   xlim=(0.02, 200), ylim=(0, 1.0),
                                   cmap='gnuplot_r')
    start = end
    ax.legend(frameon=False, loc=1)

for row in range(3):
    axesCt[row, 0].set_ylabel(r'$\mathbf{C(\tau)}$', fontsize=14)
#axesCt[3,0].set_ylabel(r'$\mathbf{C(\tau)}$',fontsize=14)
#axesCt[4,0].set_ylabel(r'$\mathbf{C(\tau)}$',fontsize=14)
#axesCt[5,0].set_ylabel(r'$\mathbf{C(\tau)}$',fontsize=14)
for xl in axesCt[2, :]:
    xl.set_xlabel(r'$\mathbf{\tau \ \ (ns)}$', fontsize=18)

# Fixed 3-exponential fit (6 parameters) with a 70 ns memory cutoff.
tau_mem = 70.0
FitDF, covarMat_list = fitCorrF(CtDF14, dCtDF14, tau_mem, [6], fixfit=True)

FitDF.tau_a_err.iloc[19]

# +
# Scan tau_mem from 15 to 100 ns and record, for residue index n, how the
# fitted amplitudes/time constants depend on the cutoff.
t_a = []
t_b = []
t_g = []
t_a_er = []
t_b_er = []
t_g_er = []
c_a = []
c_a_er = []
c_b = []
c_b_er = []
c_g = []
c_g_er = []
n = 11  # residue row tracked through the scan
for tau_mem in range(15, 105, 5):
    FitDF, covarMat_list = fitCorrF(CtDF14, dCtDF14, tau_mem, [6], fixfit=True)
    t_a.append(FitDF.tau_a.iloc[n])
    t_a_er.append(FitDF.tau_a_err.iloc[n])
    c_a.append(FitDF.C_a.iloc[n])
    c_a_er.append(FitDF.C_a_err.iloc[n])
    t_b.append(FitDF.tau_b.iloc[n])
    t_b_er.append(FitDF.tau_b_err.iloc[n])
    c_b.append(FitDF.C_b.iloc[n])
    c_b_er.append(FitDF.C_b_err.iloc[n])
    t_g.append(FitDF.tau_g.iloc[n])
    t_g_er.append(FitDF.tau_g_err.iloc[n])
    c_g.append(FitDF.C_g.iloc[n])
    c_g_er.append(FitDF.C_g_err.iloc[n])
# Collect the tau_mem scan results into one dataframe indexed by tau_mem.
df = pd.DataFrame(
    list(zip(t_a, t_a_er, c_a, c_a_er, t_b, t_b_er, c_b, c_b_er,
             t_g, t_g_er, c_g, c_g_er,)),
    columns=['tau_a', 'tau_a_err', 'C_a', 'C_a_err',
             'tau_b', 'tau_b_err', 'C_b', 'C_b_err',
             'tau_g', 'tau_g_err', 'C_g', 'C_g_err',])
df.index = np.arange(15, 105, 5)
# -

# Spectral densities at the five required frequencies for every residue.
Jarr = []
for i, fit in FitDF.iterrows():
    c = fit[['C_a', 'C_b', 'C_g', 'C_d']].values
    t = fit[['tau_a', 'tau_b', 'tau_g', 'tau_d']].values
    Jdict = {'0': 0, '1H': 0, '15N': 0, 'Sum': 0, 'Diff': 0}
    J0 = J_direct_transform(0, c, t)
    #print(J0)
    JH = J_direct_transform(Larmor1H, c, t)
    JN = J_direct_transform(Larmor15N, c, t)
    JSum = J_direct_transform(omSum, c, t)
    JDiff = J_direct_transform(omDiff, c, t)
    Jdict['1H'] = JH
    Jdict['15N'] = JN
    Jdict['0'] = J0
    Jdict['Sum'] = JSum
    Jdict['Diff'] = JDiff
    Jarr.append(Jdict)

# +
# FIX: the original created columns ['T1','T2','NOE'] but then filled
# 'R1'/'R2', leaving stale zero-filled T1/T2 columns; use R1/R2 directly
# (consistent with the identical cell further below).
NMRRelaxDF = pd.DataFrame(np.zeros((len(Jarr), 3)),
                          index=range(1, len(Jarr) + 1),
                          columns=['R1', 'R2', 'NOE'])
for index in range(1, len(Jarr) + 1):
    r1, r2, noe = calc_NMR_Relax(Jarr[index - 1], FDD, FCSA, H_gyro, N_gyro)
    NMRRelaxDF.loc[index, 'R1'] = r1
    NMRRelaxDF.loc[index, 'R2'] = r2
    NMRRelaxDF.loc[index, 'NOE'] = noe

NMRRelaxDF['Resname'] = FitDF['Resname'].values
NMRRelaxDF['RESNUM'] = NMRRelaxDF['Resname'].str.extract(
    '([0-9]+)', expand=False).astype('int') + 1
# -

FitRelaxDF = FitDF.merge(NMRRelaxDF, how='left', left_on='Resname',
                         right_on='Resname').set_index(NMRRelaxDF.index)

FitRelaxName = "NMRFitRelax_{}_ab40_f_278K_600MHz_inf".format(int(tau_mem))
FitRelaxDF.to_csv('{}.csv'.format(FitRelaxName))

FTOPN = "PROD.noH20.ab40_f1_amber.parm7"

NHVecs = []


def ScaleNMRParams(FitDF, ScaleType=1, tau_0=12.0, tau_d=0.010):
    """
    Rescale the fitted correlation times and recompute R1/R2/NOE.

    Parameters
    ----------
    FitDF : pd.DataFrame
        Fit results with C_a..C_d, tau_a..tau_d columns (as from fitstoDF).
    ScaleType : int
        1: scale all taus by tau/(1+tau/tau_0).
        2: add a very fast component tau_d with the leftover amplitude.
        3: scale only the longest time, tau_a.
        4: scale all taus AND add the fast component.
        5: scale tau_a AND add the fast component.
        6: scale all taus with predefined parameters (1.52603 + 0.086184*tau).
    tau_0, tau_d : float
        Scaling time constant and fast time constant (ns).

    Returns a copy of FitDF with scaled taus, recomputed AUCs, R1/R2/NOE,
    and squared errors versus experiment.

    NOTE(review): relies on module-level EXPNOEdf (with 'R1-Exp'/'R2-Exp'/
    'NOE-Exp' columns), FDD, FCSA, H_gyro, N_gyro and the Larmor
    frequencies being defined before this is called -- confirm.
    """
    NMRRelax_Scl = FitDF.copy()
    if ScaleType == 1:
        print('Scaling all correlation times by : {}'.format(tau_0))
        NMRRelax_Scl['tau_a'] = NMRRelax_Scl['tau_a'] / (1 + NMRRelax_Scl['tau_a'] / tau_0)
        NMRRelax_Scl['tau_b'] = NMRRelax_Scl['tau_b'] / (1 + NMRRelax_Scl['tau_b'] / tau_0)
        NMRRelax_Scl['tau_g'] = NMRRelax_Scl['tau_g'] / (1 + NMRRelax_Scl['tau_g'] / tau_0)
        NMRRelax_Scl['AUC_a'] = NMRRelax_Scl['tau_a'] * NMRRelax_Scl['C_a']
        NMRRelax_Scl['AUC_b'] = NMRRelax_Scl['tau_b'] * NMRRelax_Scl['C_b']
        NMRRelax_Scl['AUC_g'] = NMRRelax_Scl['tau_g'] * NMRRelax_Scl['C_g']
        NMRRelax_Scl['AUC_Total'] = NMRRelax_Scl[['AUC_a', 'AUC_b', 'AUC_g']].sum(axis=1)

    elif ScaleType == 2:
        print('Scaling NMR by adding very fast time constant, tau_d = {}'.format(tau_d))
        NMRRelax_Scl['C_d'] = 1 - NMRRelax_Scl[['C_a', 'C_b', 'C_g']].sum(axis=1)
        NMRRelax_Scl['tau_d'] = tau_d
        NMRRelax_Scl['AUC_d'] = NMRRelax_Scl['tau_d'] * NMRRelax_Scl['C_d']
        NMRRelax_Scl['AUC_Total'] = NMRRelax_Scl[['AUC_a', 'AUC_b', 'AUC_g', 'AUC_d']].sum(axis=1)

    elif ScaleType == 3:
        print('Scaling only the longest correlation time, tau_a by : {}'.format(tau_0))
        NMRRelax_Scl['tau_a'] = NMRRelax_Scl['tau_a'] / (1 + NMRRelax_Scl['tau_a'] / tau_0)
        NMRRelax_Scl['AUC_a'] = NMRRelax_Scl['tau_a'] * NMRRelax_Scl['C_a']
        NMRRelax_Scl['AUC_Total'] = NMRRelax_Scl[['AUC_a', 'AUC_b', 'AUC_g', 'AUC_d']].sum(axis=1)

    elif ScaleType == 4:
        print('Scaling all correlation times by {} and adding very fast time constant at : {}'.format(tau_0, tau_d))
        NMRRelax_Scl['C_d'] = 1 - NMRRelax_Scl[['C_a', 'C_b', 'C_g']].sum(axis=1)
        NMRRelax_Scl['tau_d'] = tau_d
        NMRRelax_Scl['AUC_d'] = NMRRelax_Scl['tau_d'] * NMRRelax_Scl['C_d']
        NMRRelax_Scl['tau_a'] = NMRRelax_Scl['tau_a'] / (1 + NMRRelax_Scl['tau_a'] / tau_0)
        NMRRelax_Scl['tau_b'] = NMRRelax_Scl['tau_b'] / (1 + NMRRelax_Scl['tau_b'] / tau_0)
        NMRRelax_Scl['tau_g'] = NMRRelax_Scl['tau_g'] / (1 + NMRRelax_Scl['tau_g'] / tau_0)
        NMRRelax_Scl['AUC_a'] = NMRRelax_Scl['tau_a'] * NMRRelax_Scl['C_a']
        NMRRelax_Scl['AUC_b'] = NMRRelax_Scl['tau_b'] * NMRRelax_Scl['C_b']
        NMRRelax_Scl['AUC_g'] = NMRRelax_Scl['tau_g'] * NMRRelax_Scl['C_g']
        NMRRelax_Scl['AUC_Total'] = NMRRelax_Scl[['AUC_a', 'AUC_b', 'AUC_g', 'AUC_d']].sum(axis=1)

    elif ScaleType == 5:
        # scales tau_a AND adds the very fast component
        print('Scaling only the longest correlation time, tau_a by : {}'.format(tau_0))
        NMRRelax_Scl['tau_a'] = NMRRelax_Scl['tau_a'] / (1 + NMRRelax_Scl['tau_a'] / tau_0)
        NMRRelax_Scl['AUC_a'] = NMRRelax_Scl['tau_a'] * NMRRelax_Scl['C_a']
        NMRRelax_Scl['C_d'] = 1 - NMRRelax_Scl[['C_a', 'C_b', 'C_g']].sum(axis=1)
        NMRRelax_Scl['tau_d'] = tau_d
        NMRRelax_Scl['AUC_d'] = NMRRelax_Scl['tau_d'] * NMRRelax_Scl['C_d']
        NMRRelax_Scl['AUC_Total'] = NMRRelax_Scl[['AUC_a', 'AUC_b', 'AUC_g', 'AUC_d']].sum(axis=1)

    elif ScaleType == 6:
        print('Scaling all correlation times by predefined scaling parameters')
        NMRRelax_Scl['tau_a'] = NMRRelax_Scl['tau_a'] / (1.52603 + NMRRelax_Scl['tau_a'] * 0.086184)
        NMRRelax_Scl['tau_b'] = NMRRelax_Scl['tau_b'] / (1.52603 + NMRRelax_Scl['tau_b'] * 0.086184)
        NMRRelax_Scl['tau_g'] = NMRRelax_Scl['tau_g'] / (1.52603 + NMRRelax_Scl['tau_g'] * 0.086184)
        NMRRelax_Scl['AUC_a'] = NMRRelax_Scl['tau_a'] * NMRRelax_Scl['C_a']
        NMRRelax_Scl['AUC_b'] = NMRRelax_Scl['tau_b'] * NMRRelax_Scl['C_b']
        NMRRelax_Scl['AUC_g'] = NMRRelax_Scl['tau_g'] * NMRRelax_Scl['C_g']
        NMRRelax_Scl['AUC_Total'] = NMRRelax_Scl[['AUC_a', 'AUC_b', 'AUC_g']].sum(axis=1)

    # Recompute the spectral densities and relaxation rates with the
    # rescaled parameters, plus squared errors versus experiment.
    for i, fit in NMRRelax_Scl.iterrows():
        c = fit[['C_a', 'C_b', 'C_g', 'C_d']].values
        t = fit[['tau_a', 'tau_b', 'tau_g', 'tau_d']].values
        Jdict = {'0': 0, '1H': 0, '15N': 0, 'Sum': 0, 'Diff': 0}
        J0 = J_direct_transform(0, c, t)
        JH = J_direct_transform(Larmor1H, c, t)
        JN = J_direct_transform(Larmor15N, c, t)
        JSum = J_direct_transform(omSum, c, t)
        JDiff = J_direct_transform(omDiff, c, t)
        Jdict['1H'] = JH
        Jdict['15N'] = JN
        Jdict['0'] = J0
        Jdict['Sum'] = JSum
        Jdict['Diff'] = JDiff

        r1, r2, noe = calc_NMR_Relax(Jdict, FDD, FCSA, H_gyro, N_gyro)
        NMRRelax_Scl.loc[i, 'R1'] = r1
        NMRRelax_Scl.loc[i, 'R2'] = r2
        NMRRelax_Scl.loc[i, 'NOE'] = noe
        NMRRelax_Scl.loc[i, 'R1_SE'] = np.square(
            NMRRelax_Scl.loc[i, 'R1'] - EXPNOEdf.loc[i, 'R1-Exp'])
        NMRRelax_Scl.loc[i, 'R2_SE'] = np.square(
            NMRRelax_Scl.loc[i, 'R2'] - EXPNOEdf.loc[i, 'R2-Exp'])
        NMRRelax_Scl.loc[i, 'NOE_SE'] = np.square(
            NMRRelax_Scl.loc[i, 'NOE'] - EXPNOEdf.loc[i, 'NOE-Exp'])

    return NMRRelax_Scl


FitDF1 = pd.read_csv('NMRFitRelax_15_ab40_f_278K_600MHz_15_inf_corrected.csv',
                     index_col=0)
EXPNOEdf = pd.read_csv('NMRFitRelax_final_ab40_f_278K_600MHz.csv', index_col=0)

NMRRelaxDF14_Scl1 = ScaleNMRParams(FitDF1, ScaleType=6)

# Spectral densities for the rescaled parameters.
Jarr = []
for i, fit in NMRRelaxDF14_Scl1.iterrows():
    c = fit[['C_a', 'C_b', 'C_g', 'C_d']].values
    t = fit[['tau_a', 'tau_b', 'tau_g', 'tau_d']].values
    Jdict = {'0': 0, '1H': 0, '15N': 0, 'Sum': 0, 'Diff': 0}
    J0 = J_direct_transform(0, c, t)
    JH = J_direct_transform(Larmor1H, c, t)
    JN = J_direct_transform(Larmor15N, c, t)
    JSum = J_direct_transform(omSum, c, t)
    JDiff = J_direct_transform(omDiff, c, t)
    Jdict['1H'] = JH
    Jdict['15N'] = JN
    Jdict['0'] = J0
    Jdict['Sum'] = JSum
    Jdict['Diff'] = JDiff
    Jarr.append(Jdict)

# +
NMRRelaxDF = pd.DataFrame(np.zeros((len(Jarr), 3)),
                          index=range(1, len(Jarr) + 1),
                          columns=['R1', 'R2', 'NOE'])
for index in range(1, len(Jarr) + 1):
    r1, r2, noe = calc_NMR_Relax(Jarr[index - 1], FDD, FCSA, H_gyro, N_gyro)
    NMRRelaxDF.loc[index, 'R1'] = r1
    NMRRelaxDF.loc[index, 'R2'] = r2
    NMRRelaxDF.loc[index, 'NOE'] = noe

NMRRelaxDF['Resname'] = FitDF1['Resname'].values
NMRRelaxDF['RESNUM'] = NMRRelaxDF['Resname'].str.extract(
    '([0-9]+)', expand=False).astype('int') + 1
# -

# +
# An "interface" to matplotlib.axes.Axes.hist() method
# Histogram of the three rescaled time-constant populations.
fig_f3 = plt.figure(131512, figsize=(6.5, 4))
axf3 = plt.subplot(111)
a = NMRRelaxDF14_Scl1["tau_a"]
b = NMRRelaxDF14_Scl1["tau_b"]
g = NMRRelaxDF14_Scl1["tau_g"]
plt.hist(x=a, bins='auto', color='orange', alpha=0.7, rwidth=0.85)
plt.hist(x=b, bins='auto', color='green', alpha=0.7, rwidth=0.85)
plt.hist(x=g, bins='auto', color='red', alpha=0.7, rwidth=0.85)
plt.xscale('log')
plt.xlim(0.01, 11)
plt.ylim(0, 18)
plt.tick_params(labelsize=24)
#plt.legend(frameon=False,loc=0,prop={'size':14,'weight':'bold'}, ncol=4)
#plt.grid(axis='y', alpha=0.75)
#plt.xlabel('Time Constant (ns)', weight='bold', fontsize=15 )
plt.ylabel('Count', weight='bold', fontsize=24)
#plt.title('Time Constants (ns)')
plt.text(0.0025, 18, 'A', weight='bold', fontsize=24, ha='left', va='top')
#maxfreq = n.max()
#Set a clean upper y-axis limit.
# NOTE(review): this chunk's original line structure was flattened in storage;
# the statements below are re-wrapped one per line without changing any code
# tokens — confirm against the original notebook.
#plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
# Export the time-constant histogram built in the preceding cell.
plt.savefig('ab40_timeConstant_Hist_15_inf.png', bbox_inches='tight',dpi=600)

# +
# Single-residue view (row 15 of the fit tables): fitted (dashed) vs. scaled
# (solid) correlation times drawn as vertical lines.  A log-scale sub-axis is
# attached on the left for the small tau_g component (0.1-0.5), while the main
# linear axis covers the slower components (0.5-12).
fig_f3 = plt.figure(131512, figsize=(8,3))
axf3 = plt.subplot(111)
# Unscaled fit parameters for row 15 ...
a = FitDF1["tau_a"][15]
b = FitDF1["tau_b"][15]
g = FitDF1["tau_g"][15]
# ... and the corresponding scaled values.
a1 = NMRRelaxDF14_Scl1["tau_a"][15]
b1 = NMRRelaxDF14_Scl1["tau_b"][15]
g1 = NMRRelaxDF14_Scl1["tau_g"][15]
axf3.vlines(x=a, ymin = 0, ymax = 0.5, linewidth=3, color ="orange", linestyle ="--")
axf3.vlines(x=b, ymin = 0, ymax = 0.5, linewidth=3, color ="green", linestyle ="--")
axf3.vlines(x=a1, ymin = 0, ymax = 0.5, linewidth=3, color ="orange", linestyle ="-")
axf3.vlines(x=b1, ymin = 0, ymax = 0.5, linewidth=3, color ="green", linestyle ="-")
axf3.set_xlim(0.5,12)
axf3.set_ylim(0,0.5)
axf3.set_xscale('linear')
axf3.axes.yaxis.set_visible(False)
plt.tick_params(labelsize=14)
# Left-hand log-scale panel for the tau_g component.
divider = make_axes_locatable(axf3)
axLin = divider.append_axes("left", size=2.0, pad=0, sharey=axf3)
axLin.set_xscale('log')
axLin.set_xlim((0.1, 0.5))
axLin.vlines(x=g, ymin = 0, ymax = 0.5, linewidth=3, color ="red", linestyle ="--")
axLin.vlines(x=g1, ymin = 0, ymax = 0.5, linewidth=3, color ="red", linestyle ="-")
#axLin.bar(x=g1, height = 1, color='red', alpha=0.7, width=0.005)
axLin.set_xticks([0.1, 0.3, 0.5])
plt.tick_params(labelsize=14)
axLin.axes.yaxis.set_visible(False)
axLin.minorticks_off()
# Hide the touching spines so the two panels read as one broken axis.
list(axLin.spines.values())[1].set_visible(False)
list(axLin.spines.values())[3].set_visible(False)
list(axLin.spines.values())[0].set_visible(False)
list(axf3.spines.values())[1].set_visible(False)
list(axf3.spines.values())[3].set_visible(False)
list(axf3.spines.values())[0].set_visible(False)
plt.text(0.1,0.5,'D', weight='bold',fontsize=18, ha='left',va='top')
plt.text(2.55,0.48,r'$\mathbf{A\beta 40 \ GLN15}$',fontsize=12, ha='left',va='top')
#plt.xlabel('Time Constant (ns)', weight='bold', fontsize=15 )
plt.savefig('ab40_timeConstant_Hist_GLN15.png', bbox_inches='tight',dpi=600)
# -

# Output basename for the table of back-calculated relaxation rates.
FitRelaxName= "ab40_f_278_2500ns_scaled_langevin"
NMRRelaxDF.to_csv('{}.csv'.format(FitRelaxName))

# # PLOT THE RELAXATION DATA

def _plot_NMRRelax(fitdf, nmr_expdf, nres, r1r2_lim, fsize=(8,10)):
    """Plot computed vs. experimental relaxation data on a shared x-axis.

    Top panel: R1 and R2 (computed and experimental); bottom panel: the
    heteronuclear NOE.  Computed-curve legend labels carry the root mean
    squared error derived from the ``*_SE`` (squared-error) columns.

    Parameters
    ----------
    fitdf : DataFrame with 'RESNUM', 'R1', 'R2', 'NOE' and 'R1_SE',
        'R2_SE', 'NOE_SE' columns (computed values and squared errors).
    nmr_expdf : DataFrame with 'RESID' and experimental 'R1', 'R2', 'NOE'.
    nres : int — number of residues; sets the x-range and tick spacing.
    r1r2_lim : 2-sequence — y-limits for the combined R1/R2 panel.
    fsize : tuple — figure size in inches.

    Returns
    -------
    The created matplotlib Figure.
    """
    fignmr_err, axnmr_err = plt.subplots(2, 1, sharex=True,figsize=fsize)
    fignmr_err.subplots_adjust(hspace=0.025)
    # RMSE per observable; the first and last rows are excluded from the mean.
    RMSE_R1R2 = np.sqrt(fitdf.iloc[1:-1][['R1_SE','R2_SE','NOE_SE']].mean())
    ##R1 Plots
    axR1 = axnmr_err[0]
    nmr_expdf[['RESID','R1']].plot(x='RESID', y='R1',ax=axR1, color='k', label=r'$\mathit{R}_\mathbf{1}$_Exp', linestyle='-', linewidth=2, marker='d')
    fitdf[['RESNUM','R1']].plot(x='RESNUM',y='R1', ax=axR1, color='teal', label=r'$\mathit{{R}}_\mathbf{{1}}$-RMSE:{:.2}'.format(RMSE_R1R2['R1_SE']), linewidth=2, linestyle='-', marker='o')
    ## R2 Plots
    nmr_expdf[['RESID','R2']].plot(x='RESID', y='R2',ax=axR1, color='k', label=r'$\mathit{{R}}_\mathbf{{2}}$_Exp', linestyle='--', linewidth=2, marker='d')
    fitdf[['RESNUM','R2']].plot(x='RESNUM',y='R2', ax=axR1, color='#fb7d07', label=r'$\mathit{{R}}_\mathbf{{2}}$-RMSE:{:.2}'.format(RMSE_R1R2['R2_SE']), linewidth=2, linestyle='--', marker='o')
    #axR2.text(1, 6.40, 'b', weight='bold',fontsize=18, ha='left',va='top')
    #axR1.set_ylabel(r'$\mathit{R}_\mathbf{2} \ \ (s^{-1})$', weight='bold', fontsize=15)
    #axR1.legend(frameon=False, loc='3', prop={'size':14,'weight':'bold'})
    axR1.set_ylim(r1r2_lim[0], r1r2_lim[1])
    axR1.set_ylabel(r'$\mathit{R}_\mathbf{1,2} \ \ (s^{-1})$', weight='bold', fontsize=15)
    axR1.legend(frameon=False, ncol=2, mode='expand', loc='upper left', prop={'size':14,'weight':'bold'})
    axR1.tick_params(labelsize=14)
    ## NOE Plots
    axNOE = axnmr_err[1]
    nmr_expdf[['RESID','NOE']].plot(x='RESID', y='NOE', ax=axNOE, color='k', label='NOE_Exp', linestyle='-',ylim=(0.0,1.0), linewidth=2, marker='d')
    fitdf[['RESNUM','NOE']].plot(x='RESNUM',y='NOE', ax=axNOE, color='green', label='NOE-RMSE:{:.1}'.format(RMSE_R1R2['NOE_SE']), linewidth=2, linestyle='-' , marker='o')
    #axNOE.text(1,0.95,'c', weight='bold',fontsize=18, ha='left',va='top')
    axNOE.set_ylabel(r'$\mathbf{^{1}H-^{15}N}$ Het. NOE', weight='bold', fontsize=15)
    axNOE.set_xlabel('Residue Number', weight='bold', fontsize=15)
    axNOE.set_xlim(0, nres+1)
    axNOE.legend(frameon=False, prop={'size':14,'weight':'bold'})
    axNOE.set_xticks(np.arange(0, nres, 10))
    axNOE.set_xticks(np.arange(0, nres, 5), minor=True)
    axNOE.set_xticklabels(np.arange(0, nres, 10))
    axNOE.tick_params(labelsize=14)
    return fignmr_err

# Per-residue secondary-structure populations from DSSP.
DSSP = pd.read_csv('../rg_dssp/ab40_f_278_dssp.csv', index_col=0)

# +
# Three-panel figure: R1 (a), R2 (b) and het-NOE (c) vs. residue number.
# NOTE(review): five subplot rows are allocated but only the first three are
# populated in this cell — confirm whether the extra rows are intentional.
figNMR_Err, axNMR_Err = plt.subplots(5, 1, sharex=True, figsize=(8,12))
figNMR_Err.subplots_adjust(hspace=0.025)
RMSE_R1R2 = np.sqrt(NMRRelaxDF14_Scl1.iloc[1:-1][['R1_SE','R2_SE','NOE_SE']].mean())
##R1 Plots
axR1 = axNMR_Err[0]
NMRRelaxDF14_Scl1[['RESNUM','R1-Exp']].plot(x='RESNUM', y='R1-Exp',ax=axR1, color='k', label=r'$\mathbf{R}_\mathbf{1}$_Exp', linestyle='-', linewidth=2, marker='d')
NMRRelaxDF[['RESNUM','R1']].plot(x='RESNUM',y='R1', ax=axR1, color='teal', label=r'$\mathbf{{R}}_\mathbf{{1}}$-RMSE:{:.2}'.format(RMSE_R1R2['R1_SE']), linewidth=2, linestyle='-', marker='o')
axR1.set_ylim(0, 2.7)
axR1.text(1,2.60,'a', weight='bold',fontsize=18, ha='left',va='top')
axR1.set_ylabel(r'$\mathbf{R}_\mathbf{1} \ \ (\mathbf{s}^\mathbf{-1})$', weight='bold', fontsize=15)
axR1.legend(frameon=False, loc='best', prop={'size':14,'weight':'bold'})
axR1.tick_params(labelsize=14)
## R2 Plots
axR2 = axNMR_Err[1]
NMRRelaxDF14_Scl1[['RESNUM','R2-Exp']].plot(x='RESNUM', y='R2-Exp',ax=axR2, color='k', label=r'$\mathbf{{R}}_\mathbf{{2}}$_Exp', linestyle='-', linewidth=2, marker='d')
NMRRelaxDF[['RESNUM','R2']].plot(x='RESNUM',y='R2', ax=axR2, color='#fb7d07', label=r'$\mathbf{{R}}_\mathbf{{2}}$-RMSE:{:.2}'.format(RMSE_R1R2['R2_SE']), linewidth=2, linestyle='-', marker='o')
axR2.text(1,6.40,'b', weight='bold',fontsize=18, ha='left',va='top')
axR2.set_ylabel(r'$\mathbf{R}_\mathbf{2} \ \ (\mathbf{s}^\mathbf{-1})$', weight='bold', fontsize=15)
axR2.legend(frameon=False, loc='best', prop={'size':14,'weight':'bold'})
axR2.tick_params(labelsize=14)
axR2.set_ylim(0.1,6.5)
# Dashed guides: mean R2 over residues 2-20 and 21-40, for both computed
# (orange) and experimental (black) data.
axR2.hlines(y=NMRRelaxDF['R2'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="#fb7d07", linestyle ="--")
axR2.hlines(y=NMRRelaxDF['R2'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="#fb7d07", linestyle ="--")
axR2.hlines(y=NMRRelaxDF14_Scl1['R2-Exp'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="k", linestyle ="--")
axR2.hlines(y=NMRRelaxDF14_Scl1['R2-Exp'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="k", linestyle ="--")
## NOE Plots
axNOE = axNMR_Err[2]
NMRRelaxDF14_Scl1[['RESNUM','NOE-Exp']].plot(x='RESNUM', y='NOE-Exp', ax=axNOE, color='k', label='NOE_Exp', linestyle='-',ylim=(0.0,1.0), linewidth=2, marker='d')
NMRRelaxDF[['RESNUM','NOE']].plot(x='RESNUM',y='NOE', ax=axNOE, color='limegreen', label='NOE-RMSE:{:.2}'.format(RMSE_R1R2['NOE_SE']), linewidth=2, linestyle='-' , marker='o')
axNOE.text(1,0.95,'c', weight='bold',fontsize=18, ha='left',va='top')
axNOE.set_ylabel(r'$\mathbf{^{1}H-^{15}N}$ Het. NOE', weight='bold', fontsize=15)
axNOE.set_xlabel('Residue Number', weight='bold', fontsize=15)
axNOE.set_ylim(-1.75,1.1)
axNOE.set_xlim(0,41)
axNOE.hlines(y=NMRRelaxDF['NOE'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="limegreen", linestyle ="--")
axNOE.hlines(y=NMRRelaxDF['NOE'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="limegreen", linestyle ="--")
axNOE.hlines(y=NMRRelaxDF14_Scl1['NOE-Exp'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="k", linestyle ="--")
axNOE.hlines(y=NMRRelaxDF14_Scl1['NOE-Exp'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="k", linestyle ="--")
axNOE.legend(frameon=False,loc=1,prop={'size':14,'weight':'bold'})
axNOE.set_xticks(np.arange(5,41,5))
axNOE.set_xticklabels(np.arange(5,41,5))
axNOE.tick_params(labelsize=14)
figNMR_Err.savefig('NMRRelax_ab40_f_278.png', bbox_inches='tight',dpi=600)

# +
# Fit amplitudes A1-A3 (and their sum) vs. residue number, with
# half-sequence means drawn as dashed guides.
fig_f3 = plt.figure(131512, figsize=(8,4))
axf3 = plt.subplot(111)
#fig_f3, axf3 = plt.subplots(111, figsize=(8,8))
#fig_f3.subplots_adjust(hspace=0.05)
#ylim_t1=(1,8)
## Plot the Amplitudes to S1
NMRRelaxDF14_Scl1[['RESNUM','C_a','C_a_err']].plot(x='RESNUM', y='C_a', yerr='C_a_err', c='orange', ax=axf3, label=r'$\mathbf{A_1}$', linewidth=2, marker='o', linestyle='-')
NMRRelaxDF14_Scl1[['RESNUM','C_b','C_b_err']].plot(x='RESNUM', y='C_b', yerr='C_b_err', c='green', ax=axf3, label=r'$\mathbf{A_2}$', linewidth=2, marker='o', linestyle='-')
NMRRelaxDF14_Scl1[['RESNUM','C_g','C_g_err']].plot(x='RESNUM', y='C_g', yerr='C_g_err', c='red', ax=axf3, label=r'$\mathbf{A_3}$', linewidth=2, marker='o', linestyle='-')
NMRRelaxDF14_Scl1[['RESNUM','A_Total']].plot(x='RESNUM', y='A_Total', c='black', ax=axf3, label=r'$\mathbf{A_{sum}}$', linewidth=2, marker='o', linestyle='-')
axf3.hlines(y=NMRRelaxDF14_Scl1['C_a'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="orange", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['C_a'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="orange", linestyle ="--")
# Continuation of the amplitude figure: mean guides for the remaining
# components, axis cosmetics and export.
axf3.hlines(y=NMRRelaxDF14_Scl1['C_b'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="g", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['C_b'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="g", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['C_g'][1:40].mean(), xmin = 1, xmax = 40, linewidth=2, color ="r", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['C_g'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="r", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['A_Total'][1:40].mean(), xmin = 1, xmax = 40, linewidth=2, color ="k", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['A_Total'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="k", linestyle ="--")
axf3.set_ylim(0, 1.05)
axf3.set_ylabel(r'Amplitudes', fontsize=15, weight='bold')
axf3.set_xlabel('Residue Number', weight='bold', fontsize=15)
axf3.set_xlim(0,41)
axf3.legend(frameon=False,loc=0,prop={'size':14,'weight':'bold'}, ncol=4)
axf3.set_xticks(np.arange(5,41,5))
axf3.set_xticklabels(np.arange(5,41,5))
axf3.tick_params(labelsize=14)
fig_f3.savefig('amp_ab40_f_278.png', bbox_inches='tight',dpi=300)
# NOTE(review): the triple-quoted block below is disabled plotting code that
# was also damaged in storage (e.g. "fig_f3.canfig = ... vas.draw()" is two
# interleaved statements); it is never executed and is kept as dead text.
'''
NMRRelaxDF14_Scl1[['RESNUM','tau_a','tau_a_err']].plot(x='RESNUM', y='tau_a', yerr='tau_a_err', c='blue', ax=axf3[1], label=r'$\mathbf{\tau_{1}}$', linewidth=2, marker='o',linestyle='-')
NMRRelaxDF14_Scl1[['RESNUM','tau_b','tau_b_err']].plot(x='RESNUM', y='tau_b', yerr='tau_b_err', c='green', ax=axf3[1], logy=True, label=r'$\mathbf{\tau_{2}}$', linewidth=2, marker='o',linestyle='-')
NMRRelaxDF14_Scl1[['RESNUM','tau_g','tau_g_err']].plot(x='RESNUM', y='tau_g', yerr='tau_g_err', c='red', ax=axf3[1], logy=True, label=r'$\mathbf{\tau_{3}}$', linewidth=2, marker='o',linestyle='-')
axf3[1].tick_params(labelsize=15, which='both')
axf3[1].set_yticks(np.linspace(ylim_t1[0], ylim_t1[1], ylim_t1[1] - ylim_t1[0]+1)[1:-1], minor=True)
axf3[1].set_yticklabels(np.linspace(ylim_t1[0], ylim_t1[1], ylim_t1[1] - ylim_t1[0]+1)[1:-1], minor=True)
axf3[1].yaxis.set_minor_formatter(mticker.ScalarFormatter())
axf3[1].yaxis.set_major_formatter(mticker.ScalarFormatter())
axf3[1].set_ylim(ylim_t1[0], ylim_t1[1])
axf3[1].set_ylabel(r'$\mathbf{\tau \ \ (ns)}$', fontsize=15, weight='bold')
axf3[1].legend(loc=1, frameon=False)
for axfit in axf3:
    axfit.set_xlim(0, 41)
    axfit.set_xticks(np.arange(0, 41, 5))
    axfit.set_xticks(np.arange(0, 41, 5), minor=True)
    axfit.tick_params(labelsize=15)
axf3[0].set_xticklabels([])
axf3[1].set_xticklabels(np.arange(0,41,5))
axf3[1].set_xlim(-1, 41)
fig_f3.canfig = plt.figure(131512, figsize=(9,7)) ax = plt.subplot(111)vas.draw()
f3s1_ylbl_pos = axf3[0].yaxis.label.get_position()
f3s2_ylbl_pos = axf3[1].yaxis.label.get_position()
print(f3s1_ylbl_pos, f3s2_ylbl_pos)
'''
#axf3[1].set_ylabel(r'$\mathbf{\tau \ \ (ns)}$', labelpad = (f3s2_ylbl_pos[0] - f3s2_ylbl_pos[0] + 4.0), # fontsize=15, weight='bold')
#axf3.set_ylabel(r'$\mathbf{Amplitudes}$', labelpad = (f3s1_ylbl_pos[0] - f3s1_ylbl_pos[0] + 4.0), # fontsize=15, weight='bold')
# -

# +
# Example correlation-function decay for SER26: simulated curve ('Sim'),
# the direct fit ('Fit'), and the fit rescaled with a fast component.
figProbDecays = plt.figure(5,figsize=(8,6))
axPD = figProbDecays.add_subplot(111)
RESExamples = ['SER26-NH']#, 'ARG46-NH','ALA61-NH', 'GLN78-NH']
AUCTExamples = NMRRelaxDF14_SclB[['Resname','AUC_Total']].set_index('Resname').loc[RESExamples].sort_values('AUC_Total')
ResESorted = AUCTExamples.index.values
ind_re = [np.where(NMRRelaxDF14_SclB['Resname']==RE)[0][0] for RE in RESExamples]
x = CtDF14.index.values
Example_Model_DF = pd.DataFrame(index=x, columns=RESExamples)
for RE in RESExamples:
    ind_re = np.where(NMRRelaxDF14_SclB['Resname']==RE)[0][0]
    Example_Model_DF.loc[:,RE] = func_exp_decay8(x, *NMRRelaxDF14_SclB.iloc[ind_re][['C_a','tau_a','C_b','tau_b','C_g','tau_g','C_d','tau_d']].values)
AUCTExamples2 = FitDF[['Resname','AUC_Total']].set_index('Resname').loc[RESExamples].sort_values('AUC_Total')
ResESorted2 = AUCTExamples2.index.values
ind_re2 = [np.where(FitDF['Resname']==RE)[0][0] for RE in RESExamples]
x2 = CtDF14.index.values
Example_Model_DF2 = pd.DataFrame(index=x2, columns=RESExamples)
for RE in RESExamples:
    ind_re = np.where(FitDF['Resname']==RE)[0][0]
    Example_Model_DF2.loc[:,RE] = func_exp_decay6(x, *FitDF.iloc[ind_re][['C_a','tau_a','C_b','tau_b','C_g','tau_g']].values)
Example_Model_DF[ResESorted].plot.line(grid=True,xlim=(0.02, 1000), color=['orange'],logx=True, ylim=(0.0, 1.0), ax=axPD, linewidth=2,label='Scaling w tf')
#Example_Model_DF1[ResESorted1].plot.line(grid=True, xlim=(0.02, 1000), color=['red'],logx=True, ylim=(0.0, 1.0), ax=axPD, linewidth=2,label='only tf')
Example_Model_DF2[ResESorted2].plot.line(grid=True, xlim=(0.02, 1000), color=['blue'],logx=True, ylim=(0.0, 1.0), ax=axPD, linewidth=2,label='Fit')
CtDF14[ResESorted].plot(grid=True, xlim=(0.02,1000), color=['green'],logx=True, ylim=(0.0,1.0), ax=axPD, linewidth=2,label='Sim')
axPD.set_ylabel(r'$\mathbf{C(\tau)}$',fontsize=20)
axPD.set_xlabel(r'$\mathbf{\tau \ \ (ns)}$', fontsize=20)
RgHandles, RgLabels = axPD.get_legend_handles_labels()
axPD.legend(RgHandles, RgLabels, frameon=False,prop={'size':15})
legend_labels1 = ['{}-Sim'.format(RD.split('-')[0]) for RD in ResESorted]
#legend_labels2 = ['{}'.format(RD.split('-')[0]) for RD in ResESorted]
#axPD.legend(np.hstack([ExResNew]), frameon=False, prop={'size':20,'weight':'bold'})
#axPD.set_title('Example Shortest and Longest Effective Correlations',weight='bold',size=14)
axPD.tick_params(labelsize=18)

# +
# 2x2 grid of example correlation functions (ARG5, LEU17, ASN27, LEU34):
# simulation vs. fit after rescaling with a fixed fast time constant tau_d.
tau_d = 0.006
NMRRelax_Scl2 = ScaleNMRParams(FitDF, ScaleType=2, tau_d = tau_d)
figExDecays, axExDecays = plt.subplots(2,2,figsize=(20,20), sharex=False, sharey=False, num=30691)
figExDecays.subplots_adjust(hspace=0.275, wspace=0.275)
#fign_long = 'ExampleCorrelations_tauM{}ns_SimvsFit_threshNone_36us_FixExp3_NBInf_TestingTfRed_noSplines'.format(int(tau_mem), 'None')
RESExamples = ['ARG5-NH', 'LEU17-NH','ASN27-NH', 'LEU34-NH']# , 'ARG55-NH']
ind_re = [np.where(NMRRelax_Scl2['Resname']==RE)[0][0] for RE in RESExamples]
x = CtDF14.index.values
# Prepend a fine time grid (0.001-0.019) so the fast component is resolved.
x_wfast = np.hstack([np.arange(0.001,0.020,0.001),x])
Example_Model_DF = pd.DataFrame(index=x_wfast, columns=RESExamples)
Example_Model_DF_Opt = pd.DataFrame(index=x_wfast, columns=RESExamples)
Example_Model_DF_Add = pd.DataFrame(index=x_wfast, columns=RESExamples)
for RE in RESExamples:
    ind_re = np.where(FitDF['Resname']==RE)[0][0]
    Example_Model_DF.loc[:,RE] = func_exp_decay6(x_wfast, *FitDF.iloc[ind_re][['C_a','tau_a','C_b','tau_b','C_g','tau_g']].values)
    Example_Model_DF_Opt.loc[:,RE] = func_exp_decay8(x_wfast, *NMRRelax_Scl2.iloc[ind_re][['C_a','tau_a','C_b','tau_b','C_g','tau_g','C_d','tau_d']].values)
    ExpFit = func_exp_decay2(x_wfast, *NMRRelax_Scl2.iloc[ind_re][['C_d','tau_d']].values)
    Example_Model_DF_Add.loc[:,RE] = Example_Model_DF.loc[:,RE] + ExpFit
clist = ['#06c2ac','#5a9e58', '#797419', '#85440a', '#7b0323']
clist2 = ['#06c2ac', '#84ca80', '#d3c968', '#b56b31', '#7b0323']
for RES, AX, cl in zip(RESExamples, axExDecays.flatten(), clist2[:-1]):
    CtDF14[RES].plot.line(grid=False, xlim=(0.02, 1000), logx=True, color=cl, ylim=(0.0, 1.0), ax=AX, linestyle='-', linewidth=4)
    Example_Model_DF[RES].plot.line(grid=False, xlim=(0.02, 1000), color=cl, logx=True, ylim=(0.0, 1.2), ax=AX, linestyle='--', linewidth=4)
    if RES == 'GLY41-NH':
        AX.set_ylim((0,0.8))
        AX.set_yticks(np.arange(0.0,0.9,0.1))
        #AX.set_yticklabels([0.5,0.6,0.7,0.8], weight='bold')
    else:
        AX.set_yticks(np.arange(0.0,1.0,0.1))
        #AX.set_yticklabels([0.7,0.8,0.9,1.0], weight='bold')
    AX.set_ylabel(r'$\mathbf{C(\tau)}$',fontsize=22)
    AX.set_xlabel(r'$\mathbf{\tau \ \ (ns)}$', fontsize=22)
    # Legend labels show residue numbers shifted by +1 relative to the index.
    update_resname = pd.Series(RES.split('-')[0]).str.extract('([A-Z]+)([0-9]+)')
    AX.legend(['{}-Sim'.format(update_resname.loc[0,0]+str((int(update_resname.loc[0,1])+1))), '{}-Fit'.format(update_resname.loc[0,0]+str((int(update_resname.loc[0,1])+1))), r'$\mathbf{{{}-w/\mathit{{\tau}}_f}}$'.format(update_resname.loc[0,0]+str((int(update_resname.loc[0,1])+1)))], frameon=False, loc=2, prop={'size':20,'weight':'bold'})
    #axPD.set_title('Example Shortest and Longest Effective Correlations',weight='bold',size=14)
    AX.set_xticks([],minor=True)
    #AX.set_xticks([4e-2, 1e-1, 2e-1])
    #AX.set_xticklabels([4e-2, 1e-1, 2e-1], fontsize=19, weight='bold')
    AX.xaxis.set_major_formatter(mpl.ticker.LogFormatterSciNotation())
    AX.tick_params(labelsize=17)
    #for spine in list(AX.spines.values())[1:]:
    #    spine.set_visible(False)
    list(AX.spines.values())[1].set_visible(False)
    list(AX.spines.values())[3].set_visible(False)
#for panel, AX in zip(['a','b','c','d'], axExDecays.flatten()):
#    AX.text(2.25e-2, 0.94, panel, weight='bold', fontsize=22)
figExDecays.savefig('ab40_example_ct.png', bbox_inches='tight', dpi=600)

# +
# Average the residue-contact time series over all per-trajectory CSV files.
filenames = sorted(glob.glob('../contact_map/ResNContact_tseries_*.csv'))
store_data = np.zeros((125000,40), dtype='float32')
# FIX: this running file count was previously stored in a variable named
# `len`, shadowing the `len` builtin for the rest of the notebook; renamed
# to `nfiles` (identical values and arithmetic).
nfiles = 0
for filename in filenames:
    #print(filename)
    con_prob = pd.read_csv(filename, header=0, index_col=0, delimiter=',')
    store_data += con_prob.values
    nfiles += 1
avg = store_data/nfiles
data = pd.DataFrame(data=avg, index=con_prob.index)
# Baseline correction: 2 is subtracted everywhere and one more from interior
# residues 2-39 — presumably removes trivial self/neighbour contacts
# (TODO confirm against the contact-map definition).
con_map = data.mean(axis=0)-2
con_map[1:39] = con_map[1:39]-1
# -

# Secondary-structure populations; combine the helix and beta-sheet classes.
DSSP = pd.read_csv('../rg_dssp/ab40_f_278_dssp.csv', index_col=0)
DSSP['Helix'] = DSSP['3-10 Helix'] + DSSP[r'$\alpha$-helix']
DSSP[r'$\beta$-strand'] = DSSP[r'Parallel $\beta$-sheet'] + DSSP[r'Anti-Parallel $\beta$-sheet']

# +
from matplotlib import cm as cm
## Defining the color map for the dssp plots and secondary structure assignments
spectral_SS = cm.get_cmap('Spectral',3)
spectralW_SS = np.append(spectral_SS(range(3)),[[1., 1. ,1. , 1.]], axis=0)
SS_ColorPalette = {r'$\beta$-sheet':spectralW_SS[0], r'Helix':spectralW_SS[1], 'PPII':spectralW_SS[2]}
SatCMap_Dark2 = cm.get_cmap('Dark2', 8)
SatCMap_SS = np.append(SatCMap_Dark2(range(8)),[[1., 1. ,1. , 1.]], axis=0)
SatCMap_SS_F3 = [SatCMap_SS[0], SatCMap_SS[3], SatCMap_SS[5]]
SatCMapSS_ColorPalette = {r'$\beta$-sheet':SatCMap_SS[0], r'Helix':SatCMap_SS[3], 'PPII':SatCMap_SS[5]}
#SatCMap_SS = np.append(SatCMap_Dark2(range(6)),[[1., 1. ,1. , 1.]], axis=0)
# NOTE: the assignment below overwrites SatCMap_SS_F3 defined above.
SatCMap_SS_F3 = [SatCMap_SS[3], SatCMap_SS[0], sns.color_palette('Pastel1')[-1]]
SatYelSS_ColorPalette = {r'$\beta$-sheet':SatCMap_SS[-1], r'Helix':SatCMap_SS[3], 'PPII':sns.color_palette('Paired')[-2]}
# -

# Notebook display cells: contact-map summary statistics.
con_map.min()

con_map.max()

con_map.mean()

# +
# Composite summary figure: secondary structure (row 1), contact-count heat
# strip (row 2) and R2 / R1 / het-NOE vs. residue number (rows 3-5).
figNMR_Err, axNMR_Err = plt.subplots(5, 1, sharex=True, figsize=(8,14), gridspec_kw={'height_ratios': [1, 0.5, 3,3,3]})
figNMR_Err.subplots_adjust(hspace=0.025)
RMSE_R1R2 = np.sqrt(NMRRelaxDF14_Scl1.iloc[1:-1][['R1_SE','R2_SE','NOE_SE']].mean())
#DSSP PLots
s3_bounds = axNMR_Err[0].get_position().bounds
axNMR_Err[0].axis('off')
axSS = figNMR_Err.add_axes([0.125, s3_bounds[1] + 0, s3_bounds[2], 0.08])
DSSP[[r'$\beta$-strand','Helix', 'PPII']].plot.bar(ax=axSS, color=SatCMap_SS_F3, linewidth=1, width=1.0, stacked=True, edgecolor='k', legend=False)
axSS.tick_params(labelsize=14)
axSS.set_xticks(np.arange(-1, DSSP.shape[0], 5), minor=False)
axSS.set_ylabel('SS', fontsize=15, weight='bold')
axSS.set_xticklabels([])
axSS.set_xlim(-1, DSSP.shape[0])
axSS.set_ylim(0, 0.55)
axSS.set_yticks(np.arange(0,0.55,0.25))
#get handles and labels
handles, labels = axSS.get_legend_handles_labels()
#specify order of items in legend
order = [1,0,2]
#add legend to plot
axSS.legend([handles[idx] for idx in order],[labels[idx] for idx in order], frameon=False, loc=1, prop={'size':14,'weight':'bold'}, ncol=3)
#axSS.legend(frameon=False, loc=1, prop={'size':14,'weight':'bold'}, ncol=3)
axSS.text(-8,0.55,'A', weight='bold',fontsize=18, ha='left',va='top')
#Heatmap plot
ax_scsc = axNMR_Err[1]
#cbax_scsc = ax_scsc.add_axes([0.91, 0.125, 0.025, 0.755])
cmp_scsc = (con_map.values).reshape(40,1).T
# Pad with a leading zero so the 40-residue strip spans the 41-unit x-axis.
cmp_scsc = np.insert(cmp_scsc, 0, 0).reshape(41,1).T
#hm_scsc = sns.heatmap(cmp_scsc, cmap='Blues', ax=ax_scsc, yticklabels=False, cbar=False, robust=False)
cmap = ax_scsc.imshow(cmp_scsc, cmap='Greys', aspect='auto', vmin=0, vmax=5)
ax_scsc.set_yticks([])
ax_scsc.set_xticklabels([])
ax_scsc.tick_params(labelsize=14)
ax_scsc.set_ylabel('C#', weight='bold', fontsize=15)
#ax_scsc.text(-7,-0.5,'B', weight='bold',fontsize=18, ha='left',va='top')
## R2 Plots
axR2 = axNMR_Err[2]
EXPNOEdf[['RESNUM','R2-Exp']].plot(x='RESNUM', y='R2-Exp',ax=axR2, color='k', label='Exp', linestyle='-', linewidth=2, marker='d', markersize=5)
NMRRelaxDF[['RESNUM','R2']].plot(x='RESNUM',y='R2', ax=axR2, color='#fb7d07', label=r'MD (RMSE = {:.2})'.format(RMSE_R1R2['R2_SE']), linewidth=2, linestyle='-', marker='o', markersize=5)
#axR2.text(-7,6.40,'C', weight='bold',fontsize=18, ha='left',va='top')
axR2.set_ylabel(r'$\mathbf{R}_\mathbf{2} \ \ (\mathbf{s}^\mathbf{-1})$', weight='bold', fontsize=15)
axR2.legend(frameon=False, loc='best', prop={'size':14,'weight':'bold'})
axR2.tick_params(labelsize=14)
axR2.set_ylim(0,6.5)
# Dashed guides: regional means over rows 3:20 and 20:38 for computed
# (orange) and experimental (black) R2.
axR2.hlines(y=NMRRelaxDF['R2'][3:20].mean(), xmin = 3, xmax = 20, linewidth=2, color ="#fb7d07", linestyle ="--")
axR2.hlines(y=NMRRelaxDF['R2'][20:38].mean(), xmin = 21, xmax = 38, linewidth=2, color ="#fb7d07", linestyle ="--")
axR2.hlines(y=EXPNOEdf['R2-Exp'][3:20].mean(), xmin = 3, xmax = 20, linewidth=2, color ="k", linestyle ="--")
axR2.hlines(y=EXPNOEdf['R2-Exp'][20:38].mean(), xmin = 21, xmax = 38, linewidth=2, color ="k", linestyle ="--")
##R1 Plots
axR1 = axNMR_Err[3]
EXPNOEdf[['RESNUM','R1-Exp']].plot(x='RESNUM', y='R1-Exp',ax=axR1, color='k', label='Exp', linestyle='-', linewidth=2, marker='d', markersize=5)
NMRRelaxDF[['RESNUM','R1']].plot(x='RESNUM',y='R1', ax=axR1, color='teal', label='MD (RMSE = {:.2})'.format(RMSE_R1R2['R1_SE']), linewidth=2, linestyle='-', marker='o', markersize=5)
axR1.set_ylim(0, 2.7)
#axR1.text(-7,2.60,'D', weight='bold',fontsize=18, ha='left',va='top')
axR1.set_ylabel(r'$\mathbf{R}_\mathbf{1} \ \ (\mathbf{s}^\mathbf{-1})$', weight='bold', fontsize=15)
axR1.legend(frameon=False, loc='best', prop={'size':14,'weight':'bold'})
axR1.tick_params(labelsize=14)
## NOE Plots
axNOE = axNMR_Err[4]
EXPNOEdf[['RESNUM','NOE-Exp']].plot(x='RESNUM', y='NOE-Exp', ax=axNOE, color='k', label='Exp', linestyle='-',ylim=(0.0,1.0), linewidth=2, marker='d', markersize=5)
NMRRelaxDF[['RESNUM','NOE']].plot(x='RESNUM',y='NOE', ax=axNOE, color='limegreen', label='MD (RMSE = {:.2})'.format(RMSE_R1R2['NOE_SE']), linewidth=2, linestyle='-' , marker='o', markersize=5)
#axNOE.text(-7,1.05,'E', weight='bold',fontsize=18, ha='left',va='top')
axNOE.set_ylabel(r'$\mathbf{^{1}H-^{15}N}$ Het. NOE', weight='bold', fontsize=15)
axNOE.set_xlabel('Residue Number', weight='bold', fontsize=15)
axNOE.set_ylim(-1.75,1.15)
axNOE.set_xlim(0,41)
axNOE.hlines(y=NMRRelaxDF['NOE'][3:20].mean(), xmin = 3, xmax = 20, linewidth=2, color ="limegreen", linestyle ="--")
axNOE.hlines(y=NMRRelaxDF['NOE'][20:38].mean(), xmin = 21, xmax = 38, linewidth=2, color ="limegreen", linestyle ="--")
axNOE.hlines(y=EXPNOEdf['NOE-Exp'][3:20].mean(), xmin = 3, xmax = 20, linewidth=2, color ="k", linestyle ="--")
axNOE.hlines(y=EXPNOEdf['NOE-Exp'][20:38].mean(), xmin = 21, xmax = 38, linewidth=2, color ="k", linestyle ="--")
axNOE.legend(frameon=False,loc=1,prop={'size':14,'weight':'bold'})
axNOE.set_xticks(np.arange(5,41,5))
axNOE.set_xticklabels(np.arange(5,41,5))
axNOE.tick_params(labelsize=14)
figNMR_Err.savefig('seq_dep_ab40_f_278.png', bbox_inches='tight',dpi=600)

# +
# Amplitude figure, markersize-5 variant (panel C of the composite figure).
fig_f3 = plt.figure(131512, figsize=(8,4))
axf3 = plt.subplot(111)
#fig_f3, axf3 = plt.subplots(111, figsize=(8,8))
#fig_f3.subplots_adjust(hspace=0.05)
#ylim_t1=(1,8)
## Plot the Amplitudes to S1
NMRRelaxDF14_Scl1[['RESNUM','C_a','C_a_err']].plot(x='RESNUM', y='C_a', yerr='C_a_err', c='orange', ax=axf3, label=r'$\mathbf{A_1}$', linewidth=2, marker='o', linestyle='-', markersize=5)
NMRRelaxDF14_Scl1[['RESNUM','C_b','C_b_err']].plot(x='RESNUM', y='C_b', yerr='C_b_err', c='green', ax=axf3, label=r'$\mathbf{A_2}$', linewidth=2, marker='o', linestyle='-', markersize=5)
NMRRelaxDF14_Scl1[['RESNUM','C_g','C_g_err']].plot(x='RESNUM', y='C_g', yerr='C_g_err', c='red', ax=axf3, label=r'$\mathbf{A_3}$', linewidth=2, marker='o', linestyle='-', markersize=5)
NMRRelaxDF14_Scl1[['RESNUM','A_Total']].plot(x='RESNUM', y='A_Total', c='black', ax=axf3, label=r'$\mathbf{A_{sum}}$', linewidth=2, marker='o', linestyle='-', markersize=5)
axf3.hlines(y=NMRRelaxDF14_Scl1['C_a'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="orange", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['C_a'][20:40].mean(), xmin = 20, xmax = 40, linewidth=2, color ="orange", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['C_a'][96:140].mean(), xmin = 96, xmax = 140, linewidth=2, color ="orange", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['C_a'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="orange", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['C_b'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="g", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['C_b'][20:40].mean(), xmin = 20, xmax = 40, linewidth=2, color ="g", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['C_b'][96:140].mean(), xmin = 96, xmax = 140, linewidth=2, color ="g", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['C_b'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="g", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['C_g'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="r", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['C_g'][20:40].mean(), xmin = 20, xmax = 40, linewidth=2, color ="r", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['C_g'][96:140].mean(), xmin = 96, xmax = 140, linewidth=2, color ="r", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['C_g'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="r", linestyle ="--")
axf3.hlines(y=NMRRelaxDF14_Scl1['A_Total'][1:20].mean(), xmin = 1, xmax = 20, linewidth=2, color ="k", linestyle ="--")
# Continuation of the amplitude figure: C-terminal guide, cosmetics, export.
axf3.hlines(y=NMRRelaxDF14_Scl1['A_Total'][20:40].mean(), xmin = 20, xmax = 40, linewidth=2, color ="k", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['A_Total'][96:140].mean(), xmin = 96, xmax = 140, linewidth=2, color ="k", linestyle ="--")
#axf3.hlines(y=NMRRelaxDF14_Scl1['A_Total'][20:40].mean(), xmin = 21, xmax = 40, linewidth=2, color ="k", linestyle ="--")
axf3.set_ylim(0, 1.05)
axf3.set_ylabel(r'Amplitudes', fontsize=15, weight='bold')
#axf3.set_xlabel('Residue Number', weight='bold', fontsize=15)
axf3.set_xlim(0,41)
##axf3.set_xlabel().remove()
axf3.legend(frameon=False,loc=0,prop={'size':14,'weight':'bold'}, ncol=4)
axf3.set_xticks(np.arange(5,41,5))
axf3.set_xticklabels(np.arange(5,41,5))
axf3.text(-5.5,1.05,'C', weight='bold',fontsize=18, ha='left',va='top')
# Blank the x tick labels of the current axes (shared x with other panels).
frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
axf3.tick_params(labelsize=14)
fig_f3.savefig('amp_ab40.png', bbox_inches='tight',dpi=300)

# +
# Mean and standard deviation of the fitted parameters over rows 1:39.
print(NMRRelaxDF14_Scl1['tau_a'][1:39].mean())
print(NMRRelaxDF14_Scl1['tau_a'][1:39].std())
print(NMRRelaxDF14_Scl1['tau_b'][1:39].mean())
print(NMRRelaxDF14_Scl1['tau_b'][1:39].std())
print(NMRRelaxDF14_Scl1['tau_g'][1:39].mean())
print(NMRRelaxDF14_Scl1['tau_g'][1:39].std())
print(NMRRelaxDF14_Scl1['C_a'][1:39].mean())
print(NMRRelaxDF14_Scl1['C_a'][1:39].std())
print(NMRRelaxDF14_Scl1['C_b'][1:39].mean())
print(NMRRelaxDF14_Scl1['C_b'][1:39].std())
print(NMRRelaxDF14_Scl1['C_g'][1:39].mean())
print(NMRRelaxDF14_Scl1['C_g'][1:39].std())
print(NMRRelaxDF14_Scl1['A_Total'][1:39].mean())
print(NMRRelaxDF14_Scl1['A_Total'][1:39].std())
# -

# Notebook display cells: spot-check computed vs. experimental R2 slices.
NMRRelaxDF['R2'][12:17]

EXPNOEdf['R2-Exp'][12:17]

NMRRelaxDF['R2'][28:35]

EXPNOEdf['R2-Exp'][28:35]

# +
# Example correlation functions for GLN14 and ILE30: simulated curve ('Sim')
# vs. the direct multi-exponential fit ('Fit') on a log time axis.
figExDecays, axExDecays = plt.subplots(1,1,figsize=(8,7))
figExDecays.subplots_adjust(hspace=0.275, wspace=0.275)
RESExamples = ['GLN14-NH', 'ILE30-NH']
ind_re = [np.where(NMRRelaxDF14_Scl1['Resname']==RE)[0][0] for RE in RESExamples]
x = CtDF14.index.values
Example_Model_DF = pd.DataFrame(index=x, columns=RESExamples)
for RE in RESExamples:
    ind_re = np.where(FitDF1['Resname']==RE)[0][0]
    Example_Model_DF.loc[:,RE] = func_exp_decay6(x, *FitDF1.iloc[ind_re][['C_a','tau_a','C_b','tau_b','C_g','tau_g']].values)
clist = ['#06c2ac', '#85440a', '#7b0323']
clist2 = ['#06c2ac', '#b56b31', '#7b0323']
for RES, cl in zip(RESExamples, clist2[:-1]):
    # Legend labels show residue numbers shifted by +1 relative to the index.
    update_resname = pd.Series(RES.split('-')[0]).str.extract('([A-Z]+)([0-9]+)')
    CtDF14[RES].plot.line(grid=False, xlim=(0.02, 1000), logx=True, color=cl, ylim=(0.0, 1.0), ax=axExDecays, label = '{}-Sim'.format(update_resname.loc[0,0]+str((int(update_resname.loc[0,1])+1))) , linestyle='-', linewidth=4)
    Example_Model_DF[RES].plot.line(grid=False, xlim=(0.02, 1000), color=cl, logx=True, ylim=(0.0, 1.2), ax=axExDecays, label = '{}-Fit'.format(update_resname.loc[0,0]+str((int(update_resname.loc[0,1])+1))), linestyle='--', linewidth=4)
axExDecays.set_ylim((0,0.8))
axExDecays.set_yticks(np.arange(0.0,1.1,0.2))
axExDecays.set_ylabel(r'$\mathbf{C(t)}$',fontsize=15)
axExDecays.set_xlabel(r'$\mathbf{t \ \ (ns)}$', fontsize=15)
axExDecays.legend(frameon=False, loc=1, prop={'size':14,'weight':'bold'}, ncol=2)
axExDecays.set_xticks([],minor=True)
axExDecays.xaxis.set_major_formatter(mpl.ticker.LogFormatterSciNotation())
axExDecays.tick_params(labelsize=14)
# list(axExDecays.spines.values())[1].set_visible(False)
# list(axExDecays.spines.values())[3].set_visible(False)
plt.text(0.0045,1.02,'A', weight='bold',fontsize=18, ha='left',va='top')
plt.savefig('ab40_Ct_fit.png', bbox_inches='tight',dpi=600)
# -
ab40_NMR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # <img src="../images/aeropython_logo.png" alt="AeroPython" style="width: 300px;"/> # # Problema de tiro parabólico # ## Introducción # Este ejemplo no es más que una ayuda para introducir el ejemplo del salto de la rana, pues es un poco menos complejo. # # El ejemplo consiste simplemente en averiguar la velocidad necesaria para lanzar un proyectil con un determinado ángulo a una determinada distancia. Aunque este problema admite solución analítica, nosotros vamos a usar un método numérico, que es muy similar al que se emplea a menudo en problemas de contorno en ecuaciones diferenciales (donde la solución puede no ser tan sencilla de encontrar). # ## Implementación # Como siempre, comenzamos creando la función principal, `main()`, luego recolectamos aquellas funciones que nos hagan falta, y vamos implementando de la más sencilla a la más compleja, repitiendo el proceso para cada una de ellas. # # Recuerda no comenzar a implementar una función hasta no haber acabado con la anterior. # ### main() # Por tanto tenemos dos funciones que debemos implementar: # # * `init()` # * `bissection` # # No cabe la menor duda de que `init()` va a ser más sencilla. # ### init() # Por tanto, nuestra nueva función requiere de una función, `shot()` que haga de solver para un determinado tiro parabólico. Puede ser discutible si es más compleja que `bissection`, pero en un principio parece que será la más sencilla. # ### shot() # ### bissection() # ## ¡A probarlo!
# --- # _En esta clase hemos visto cómo crear funciones que encapsulen tareas de nuestro programa y las hemos aplicado para respondernos ciertas preguntas sencillas._ # # **Referencias** # # * Libro "Learn Python the Hard Way" http://learnpythonthehardway.org/book/ # * Python Tutor, para visualizar código Python paso a paso http://pythontutor.com/ # * Libro "How To Think Like a Computer Scientist" http://interactivepython.org/runestone/static/thinkcspy/toc.html # * Project Euler: ejercicios para aprender Python https://projecteuler.net/problems # * Python Challenge (!) http://www.pythonchallenge.com/ # --- # <br/> # #### <h4 align="right">¡Síguenos en Twitter! # <br/> # ###### <a href="https://twitter.com/AeroPython" class="twitter-follow-button" data-show-count="false">Follow @AeroPython</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> # <br/> # ###### Este notebook ha sido realizado por: <NAME> # <br/> # ##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"><img alt="Licencia Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Curso AeroPython</span> por <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName"><NAME> y <NAME></span> se distribuye bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Licencia Creative Commons Atribución 4.0 Internacional</a>. 
# ---
# _The following cells contain configuration for the Notebook._
#
# _To display and use the Twitter links the notebook must run as
# [trusted](http://ipython.org/ipython-doc/dev/notebook/security.html)_
#
# File > Trusted Notebook

# This cell applies the course stylesheet to the notebook.
from IPython.core.display import HTML
css_file = '../styles/aeropython.css'
css_source = open(css_file, "r").read()
HTML(css_source)
notebooks_vacios/005-PythonBasico-EjercicioTiroParabolico.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # # Azure ML and IoT Edge MS Learn Module # - For more details regarding this module and learning path see https://docs.microsoft.com/en-gb/learn/paths/ai-edge-engineer/ # - For more information go to https://aka.ms/aml-tutorial/ai-iot-power-couple # # # Specify parameters # Fill in the parameters below. If you already have IoT Hub or Azure ML workspace, then enter their information here. Otherwise, the parameter names will be used in provisioning new services. # + # Enter the resource group in Azure where you want to provision the resources resource_group_name="IoTEdgeResources" # Enter Azure region where your services will be provisioned, for example "eastus2" azure_region="WestUS2" # Enter your Azure IoT Hub name # If you don't have an IoT Hub, pick a name to make a new one iot_hub_name="pt-iothub" # Enter your IoT Edge device ID # If you don't have an IoT Edge device registered, pick a name to create a new one # This is NOT the name of your VM, but it's just an entry in your IoT Hub, so you can pick any name iot_device_id="myEdgeDevice" # Provide your Azure subscription ID to provision your services subscription_id="7061071d-4f8f-4627-bf7d-189b681e7017" # Provide your Azure ML service workspace name # If you don't have a workspace, pick a name to create a new one aml_workspace_name="ptAzureML" # - # DO NOT CHANGE THIS VALUE for this tutorial # This is the name of the AML module you deploy to the device module_name="machinelearningmodule" # # Loading IOT Extension # + tags=[] # Load the IoT extension for Azure CLI # !az extension add --name azure-iot # - # # Login to Azure # + tags=[] # !az login # - # # Setting the Azure Subscription # + tags=[] # 
!az account set --subscription $subscription_id # - # # Install the Azure Machine Learning library # + tags=[] # First downgrade pip - https://techdirectarchive.com/2020/03/14/how-to-upgrade-and-downgrade-pip/ # azureml-sdk fails to load 'ruamel' if pip > 20.1.1 (https://github.com/MicrosoftDocs/mslearn-train-package-module-iot-edge/issues/1) # !python -m pip install pip==20.1.1 # !pip install --upgrade azureml-sdk # - # # Check core SDK version number # + tags=[] # Check core SDK version number import azureml.core from azureml.core import Workspace print("SDK version:", azureml.core.VERSION) # - # # Create the Machine learning Workspace # + tags=[] # Create ML WorkSpace ws = Workspace.create(subscription_id = subscription_id, resource_group = resource_group_name, name = aml_workspace_name, location = azure_region) ws.write_config() # - # # Install the Libraries # + tags=[] # !pip install pandas # !pip install sklearn # - # # Load and read data set # + # Load and read data set import pandas import numpy import pickle from sklearn import tree from sklearn.model_selection import train_test_split # - temp_data = pandas.read_csv('temperature_data.csv') temp_data # # Load features and labels # Load features and labels X, Y = temp_data[['machine_temperature', 'machine_pressure', 'ambient_temperature', 'ambient_humidity']].values, temp_data['anomaly'].values # # Train The Model # + tags=[] # Split data 65%-35% into training set and test set X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.35, random_state=0) # Change regularization rate and you will likely get a different accuracy. 
# NOTE(review): `reg` only matters for the commented-out LogisticRegression
# alternative below; the decision tree does not use it.
reg = 0.01

# Train a decision tree on the training set
#clf1 = LogisticRegression(C=1/reg).fit(X_train, Y_train)
clf1 = tree.DecisionTreeClassifier()
clf1 = clf1.fit(X_train, Y_train)
print(clf1)
# -

# # Evaluate the accuracy

# + tags=[]
# Mean accuracy on the held-out test set.
accuracy = clf1.score(X_test, Y_test)
print("Accuracy is {}".format(accuracy))
# -

# # Serializing and testing the model

# + tags=[]
# Serialize the model and write it to disk; the context manager guarantees
# the handle is closed even if pickling fails (the original leaked `f2`).
with open('model.pkl', 'wb') as model_file:
    pickle.dump(clf1, model_file)
print("Exported the model to model.pkl")

# + tags=[]
# Smoke-test the serialized model: reload it and score one sample data point.
print("Import the model from model.pkl")
with open('model.pkl', 'rb') as model_file:
    clf2 = pickle.load(model_file)

# Feature order: machine_temperature, machine_pressure,
#                ambient_temperature, ambient_humidity
# Normal (not an anomaly)
#X_new = [[24.90294136, 1.44463889, 20.89537849, 24]]
#X_new = [[33.40859853, 2.413637808, 20.89162813, 26]]
#X_new = [[34.42109181, 2.528985143, 21.23903786, 25]]
# Anomaly
X_new = [[33.66995566, 2.44341267, 21.39450979, 26]]
#X_new = [[105.5457931, 10.63179922, 20.62029994, 26]]

print('New sample: {}'.format(X_new))
pred = clf2.predict(X_new)
print('Predicted class is {}'.format(pred))
# -

# # Register Model to cloud

# + tags=[]
# Register the serialized model in the Azure ML model registry so it can be
# packaged into the container image built below.
from azureml.core.model import Model

model = Model.register(model_path = "model.pkl",
                       model_name = "model.pkl",
                       tags = {'area': "anomaly", 'type': "classification"},
                       description = "Sample anomaly detection model for IOT tutorial",
                       workspace = ws)

# + tags=[]
print(model.name, model.description, model.version, sep = '\t')
# -

# # Write out python file iot_score.py

# + tags=[]
# %%writefile iot_score.py
# Scoring script shipped with the model: Azure ML calls init() once per
# container start, then run() once per scoring request.
import json
import pickle

import joblib
import pandas
from azureml.core.model import Model


def init():
    """Load the registered model into the global `model` used by run()."""
    global model
    # Model.get_model_path resolves the registered model's location inside the
    # service container -- a different behavior than when the code is run
    # locally, even though the code is the same.
    model_path = Model.get_model_path('model.pkl')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)


# note you can pass in multiple rows for scoring
def run(input_str):
    """Score one JSON request; returns the payload annotated with 'anomaly'.

    Bug fix: the original referenced `pred`/`input_json` after the except
    handler, so any parse/predict failure raised NameError and hid the real
    error. Failures now return the error text so the service stays up.
    """
    try:
        input_json = json.loads(input_str)
        input_df = pandas.DataFrame([[input_json['machine']['temperature'],
                                      input_json['machine']['pressure'],
                                      input_json['ambient']['temperature'],
                                      input_json['ambient']['humidity']]])
        pred = model.predict(input_df)
        print("Prediction is ", pred[0])
    except Exception as e:
        return [json.dumps({'error': str(e)})]
    input_json['anomaly'] = bool(pred[0] == 1)
    return [json.dumps(input_json)]
# -

# # Loading Azure ML workspace

# + tags=[]
# Initialize a workspace object from the configuration persisted earlier by
# ws.write_config().
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
# -

# # Create docker Image

# +
# Declare the conda/pip dependencies the scoring container needs.
from azureml.core.conda_dependencies import CondaDependencies

myenv = CondaDependencies.create(conda_packages=['pandas', 'scikit-learn', 'numpy'],
                                 pip_packages=['azureml-sdk'])

with open("myenv.yml", "w") as env_file:
    env_file.write(myenv.serialize_to_string())

# + tags=[]
from azureml.core.image import Image, ContainerImage

# Bundle the model, scoring script and conda environment into one image.
image_config = ContainerImage.image_configuration(runtime = "python",
                                                  execution_script = "iot_score.py",
                                                  conda_file = "myenv.yml",
                                                  tags = {'area': "iot", 'type': "classification"},
                                                  description = "IOT Edge anomaly detection demo")

image = Image.create(name = "tempanomalydetection",
                     # this is the model object
                     models = [model],
                     image_config = image_config,
                     workspace = ws)

# + tags=[]
# Image builds run remotely; block until the build finishes.
image.wait_for_creation(show_output = True)

# + tags=[]
for i in Image.list(workspace = ws, tags = ["area"]):
    print('{}(v.{} [{}]) stored at {} with build log {}'.format(
        i.name, i.version, i.creation_state, i.image_location, i.image_build_log_uri))
# -

# # Testing the model on Azure container
Instance # + tags=["deploy service", "aci"] #test Model on Azure Container from azureml.core.webservice import AciWebservice aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1, tags = {'area': "iot", 'type': "classification"}, description = 'IOT Edge anomaly detection demo') # + tags=[] from azureml.core.webservice import AciWebservice aci_service_name = 'tempsensor-iotedge-ml-1' print(aci_service_name) aci_service = AciWebservice.deploy_from_image(deployment_config = aciconfig, image = image, name = aci_service_name, workspace = ws) aci_service.wait_for_deployment(True) print(aci_service.state) # - # # Testing web service # + tags=["deploy service", "aci"] #testing Web Service import json # Anomaly #test_sample = json.dumps({ "machine": { "temperature": 33.66995566, "pressure": 2.44341267 }, \ # "ambient": { "temperature": 21.39450979, "humidity": 26 },\ # "timeCreated": "2017-10-27T18:14:02.4911177Z" }) # Normal test_sample = json.dumps({ "machine": { "temperature": 31.16469009, "pressure": 2.158002669 }, \ "ambient": { "temperature": 21.17794693, "humidity": 25 },\ "timeCreated": "2017-10-27T18:14:02.4911177Z" }) test_sample = bytes(test_sample,encoding = 'utf8') prediction = aci_service.run(input_data = test_sample) print(prediction) # - # # Getting container details # + tags=[] # Getting your container details container_reg = ws.get_details()["containerRegistry"] reg_name=container_reg.split("/")[-1] container_url = "\"" + image.image_location + "\"," subscription_id = ws.subscription_id print('{}'.format(image.image_location)) print('{}'.format(reg_name)) print('{}'.format(subscription_id)) from azure.mgmt.containerregistry import ContainerRegistryManagementClient from azure.mgmt import containerregistry client = ContainerRegistryManagementClient(ws._auth,subscription_id) result= client.registries.list_credentials(resource_group_name, reg_name, custom_headers=None, raw=False) username = result.username password = 
result.passwords[0].value # - # # Deploying container to the Edge device # + #Deploying Container to Edge file = open('iot-workshop-deployment-template.json') contents = file.read() contents = contents.replace('__MODULE_NAME', module_name) contents = contents.replace('__REGISTRY_NAME', reg_name) contents = contents.replace('__REGISTRY_USER_NAME', username) contents = contents.replace('__REGISTRY_PASSWORD', password) contents = contents.replace('__REGISTRY_IMAGE_LOCATION', image.image_location) with open('./deployment.json', 'wt', encoding='utf-8') as output_file: output_file.write(contents) # - # Push the deployment JSON to the IOT Hub # !az iot edge set-modules --device-id $iot_device_id --hub-name $iot_hub_name --content deployment.json
IoT Edge anomaly detection tutorial/Creating-and-deploying-Azure-machine-learning-module.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/moh2236945/pytorch_classification/blob/master/models/AlexNet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="2NN4PmLQhvif" colab_type="code" colab={}
# Basic libraries.
# Fixed: was `import torch.functional as F`, which has no `cross_entropy`;
# the loss used below lives in torch.nn.functional.
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data

# + id="9nXmTX7JjXVj" colab_type="code" colab={}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper-parameters following Krizhevsky et al. (2012). Names normalized: the
# original defined `num_EPOCHS`/`Batch_size` but the training loop referenced
# the undefined `NUM_EPOCHES`/`BATCH_SIZE`.
NUM_EPOCHS = 90
BATCH_SIZE = 128
MOMENTUM = 0.9
LR_DECAY = 0.005
LR_INIT = 0.01
IMAGE_DIM = 227            # network expects 227x227 RGB crops
NUM_CLASSES = 1000         # ImageNet classes
DEVICE_IDS = [0, 1, 2, 3]  # GPUs used by DataParallel

# Data / output locations. TRAIN_IMG_DIR was referenced but never defined.
INPUT_ROOT_DIR = 'data_in'
TRAIN_IMG_DIR = os.path.join(INPUT_ROOT_DIR, 'train')
OUTPUT_DIR = 'alexnet_data_out'
LOG_DIR = OUTPUT_DIR + '/logs'
CHECKPOINT_DIR = OUTPUT_DIR + '/models'

# Checkpoint path directory.
os.makedirs(CHECKPOINT_DIR, exist_ok=True)


# + id="DpYe4GYBwuAX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 128} outputId="97515b9b-d235-4222-dfec-853cd474aa75"
class AlexNet(nn.Module):
    """AlexNet (Krizhevsky et al., 2012) for 227x227 RGB inputs.

    Fixes vs. the original notebook: `__init__` was misspelled `__init`
    (so the body never ran), several layer names were miscased
    (`nn.Relu`, `nn.LocalResponsenorm`, `nn.Maxpool2d`), `kernel_size`
    was misspelled, a comma was missing inside nn.Sequential, and the
    fourth conv layer (384 -> 384) was absent even though init_bias()
    indexes the five-conv layout (net[12] was a MaxPool, which has no
    bias and would have raised).
    """

    def __init__(self, num_classes=1000):
        super().__init__()
        # Feature extractor: five conv layers with local response norm and
        # max-pooling after conv1, conv2 and conv5, as in the paper.
        self.net = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=96, kernel_size=11, stride=4),  # -> 96 x 55 x 55
            nn.ReLU(),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
            nn.MaxPool2d(kernel_size=3, stride=2),                                # -> 96 x 27 x 27
            nn.Conv2d(96, 256, 5, padding=2),
            nn.ReLU(),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
            nn.MaxPool2d(kernel_size=3, stride=2),                                # -> 256 x 13 x 13
            nn.Conv2d(256, 384, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(384, 384, 3, padding=1),                                    # restored conv4
            nn.ReLU(),
            nn.Conv2d(384, 256, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),                                # -> 256 x 6 x 6
        )
        # Classifier: two dropout-regularized hidden layers + output logits.
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5, inplace=True),
            nn.Linear(in_features=(256 * 6 * 6), out_features=4096),
            nn.ReLU(),
            nn.Dropout(p=0.5, inplace=True),
            nn.Linear(in_features=4096, out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096, out_features=num_classes),
        )
        self.init_bias()  # initialize the weights and biases

    def init_bias(self):
        """Paper init: N(0, 0.01) conv weights; bias 0, except 1 for conv2/4/5."""
        for layer in self.net:  # was `for layers in ...` / `isinstance(layer, ...)`
            if isinstance(layer, nn.Conv2d):
                nn.init.normal_(layer.weight, mean=0, std=0.01)
                nn.init.constant_(layer.bias, 0)
        # original paper = 1 for the 2nd, 4th, and 5th conv layers
        nn.init.constant_(self.net[4].bias, 1)
        nn.init.constant_(self.net[10].bias, 1)
        nn.init.constant_(self.net[12].bias, 1)

    def forward(self, x):
        """Map a (N, 3, 227, 227) batch to (N, num_classes) logits."""
        x = self.net(x)
        x = x.view(-1, 256 * 6 * 6)
        return self.classifier(x)


if __name__ == '__main__':
    # torchvision and tensorboard are only needed for training, so they are
    # imported here rather than at module scope.
    import torchvision.datasets as datasets
    import torchvision.transforms as transforms
    from torch.utils.tensorboard import SummaryWriter

    # Record the RNG seed so checkpoints are reproducible (`seed` was
    # referenced in the checkpoint dict but never defined).
    seed = torch.initial_seed()

    alexnet = AlexNet(num_classes=NUM_CLASSES).to(device)
    # Replicate the model across the configured GPUs
    # (was `DataParallel(alex_net, device_ids==DEVICE_IDS)`).
    alexnet = torch.nn.parallel.DataParallel(alexnet, device_ids=DEVICE_IDS)
    print(alexnet)

    # ImageNet-style folder dataset with center-crop + normalization.
    dataset = datasets.ImageFolder(TRAIN_IMG_DIR, transforms.Compose([
        transforms.CenterCrop(IMAGE_DIM),
        transforms.ToTensor(),  # was passed un-called
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]))
    dataloader = data.DataLoader(dataset, shuffle=True, num_workers=8,
                                 drop_last=True, batch_size=BATCH_SIZE)

    # Create optimizer and step-wise LR schedule (was `Ir=` / `Ir_scheduler`).
    optimizer = optim.Adam(params=alexnet.parameters(), lr=0.0001)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

    # TensorBoard writer for the logging below (`tbwriter` was undefined).
    tbwriter = SummaryWriter(log_dir=LOG_DIR)

    total_steps = 1  # was undefined
    for epoch in range(NUM_EPOCHS):
        for imgs, classes in dataloader:
            imgs, classes = imgs.to(device), classes.to(device)  # was `img.to(device)`

            output = alexnet(imgs)
            loss = F.cross_entropy(output, classes)

            # Update the parameters (a stray `]` followed step() originally).
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Log loss/accuracy every 10 steps.
            if total_steps % 10 == 0:
                with torch.no_grad():
                    _, preds = torch.max(output, 1)
                    accuracy = torch.sum(preds == classes)
                    print('Epoch: {} \tStep: {} \tLoss: {:.4f} \tAcc: {}'
                          .format(epoch + 1, total_steps, loss.item(), accuracy.item()))
                    tbwriter.add_scalar('loss', loss.item(), total_steps)
                    tbwriter.add_scalar('accuracy', accuracy.item(), total_steps)

            # Every 100 steps, print and save gradient / parameter averages.
            if total_steps % 100 == 0:
                with torch.no_grad():
                    print('*' * 10)
                    for name, parameter in alexnet.named_parameters():
                        if parameter.grad is not None:
                            avg_grad = torch.mean(parameter.grad)
                            print('\t{} - grad_avg: {}'.format(name, avg_grad))
                            tbwriter.add_scalar('grad_avg/{}'.format(name),
                                                avg_grad.item(), total_steps)
                            tbwriter.add_histogram('grad/{}'.format(name),
                                                   parameter.grad.cpu().numpy(), total_steps)
                        if parameter.data is not None:
                            avg_weight = torch.mean(parameter.data)
                            print('\t{} - param_avg: {}'.format(name, avg_weight))
                            tbwriter.add_histogram('weight/{}'.format(name),
                                                   parameter.data.cpu().numpy(), total_steps)
                            tbwriter.add_scalar('weight_avg/{}'.format(name),
                                                avg_weight.item(), total_steps)

            total_steps += 1

        # Step the LR schedule after each epoch's updates (the original
        # stepped before training, skipping the initial learning rate).
        lr_scheduler.step()

        # Save a checkpoint at the end of every epoch.
        checkpoint_path = os.path.join(CHECKPOINT_DIR,
                                       'alexnet_states_e{}.pkl'.format(epoch + 1))
        state = {
            'epoch': epoch,
            'total_steps': total_steps,
            'optimizer': optimizer.state_dict(),
            'model': alexnet.state_dict(),
            'seed': seed,
        }
        torch.save(state, checkpoint_path)
models/AlexNet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### This is a seq2seq machine translation code on the Amazon Food Reviews Dataset, but you could change for any other translation dataset. It's recommended to train in the cloud on at least 1 GPU. This has a pretrained embedding layer using GloVe. from __future__ import absolute_import, division, print_function, unicode_literals import pandas as pd from sklearn.model_selection import train_test_split import tensorflow as tf import numpy as np import io import json from keras_preprocessing.text import tokenizer_from_json import datetime import os #PRINT VERSION!!! tf.__version__ print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU'))) # ## import dataset # ### I'm using the amazon food reviews dataset here train = pd.read_csv('~/your_long_path/reviews.csv') train = train[['Summary','Text']] train.head() # ## getting word counts train['text_length'] = train['Text'].str.count(' ') train['text_length'].describe() train['summary_length'] = train['Summary'].str.count(' ') train['summary_length'].describe() train.head() # ## bounding data lengths # + #get rid of weirdness in test/train set train = train[train['summary_length']>=2].reset_index(drop=True) train = train[train['summary_length']<=20].reset_index(drop=True) train = train[train['text_length']<=100].reset_index(drop=True) # - print(train.shape) print(train.head()) # ## cleaning data and making and saving test set train['text_lower'] = train['Text'].str.lower() train['text_no_punctuation'] = train['text_lower'].str.replace('[^\w\s]','') # + ### adding "_start_" and "_end_" delimeters to summary this tells the model where to start train['summary_lower'] = train["Summary"].str.lower() train['summary_no_punctuation'] = '_start_' + ' ' 
+train['summary_lower'].str.replace('[^\w\s]','')+ ' ' +'_end_' # + #shuffle dataset and reset index train = train.sample(frac=1).reset_index(drop=True) test = train[0:100] train = train[100:] test.to_csv('test_set.csv') # - # ## playing with max features # + #setting max features and max len for text and summarty for model max_features1 = 100000 maxlen1 = 100 max_features2 = 100000 maxlen2 = 20 # - # ## making tokenizers and saving them tok1 = tf.keras.preprocessing.text.Tokenizer(num_words=max_features1) tok1.fit_on_texts(list(train['text_no_punctuation'].astype(str))) #fit to cleaned text tf_train_text =tok1.texts_to_sequences(list(train['text_no_punctuation'].astype(str))) tf_train_text =tf.keras.preprocessing.sequence.pad_sequences(tf_train_text, maxlen=maxlen1) #let's execute pad step # + #save tokenizer for scoring later on tokenizer1_json = tok1.to_json() with io.open('tok1.json', 'w', encoding='utf-8') as f: f.write(json.dumps(tokenizer1_json, ensure_ascii=False)) # + #the processing has to be done for both #two different tokenizers # - tok2 = tf.keras.preprocessing.text.Tokenizer(num_words=max_features2, filters = '*') tok2.fit_on_texts(list(train['summary_no_punctuation'].astype(str))) #fit to cleaned text tf_train_summary = tok2.texts_to_sequences(list(train['summary_no_punctuation'].astype(str))) tf_train_summary = tf.keras.preprocessing.sequence.pad_sequences(tf_train_summary, maxlen=maxlen2, padding ='post') tokenizer2_json = tok2.to_json() with io.open('tok2.json', 'w', encoding='utf-8') as f: f.write(json.dumps(tokenizer2_json, ensure_ascii=False)) # ## setting dimensions and getting the shapes # + vectorized_summary = tf_train_summary # For Decoder Input, you don't need the last word as that is only for prediction # when we are training using Teacher Forcing. 
decoder_input_data = vectorized_summary[:, :-1] # Decoder Target Data Is Ahead By 1 Time Step From Decoder Input Data (Teacher Forcing) decoder_target_data = vectorized_summary[:, 1:] print(f'Shape of decoder input: {decoder_input_data.shape}') print(f'Shape of decoder target: {decoder_target_data.shape}') vectorized_text = tf_train_text # Encoder input is simply the body of the text encoder_input_data = vectorized_text doc_length = encoder_input_data.shape[1] print(f'Shape of encoder input: {encoder_input_data.shape}') # + #setting size of vocabulary encoder and decoder vocab_size_encoder = len(tok1.word_index) + 1 vocab_size_decoder = len(tok2.word_index) + 1 # + #set latent dimension for embedding and hidden units latent_dim = 100 # - # ## GloVe embedding layer # + # Preparing GloVe GLOVE_DIR = "/your_long_path/GloVe" embeddings_index = {} f = open(os.path.join(GLOVE_DIR, 'glove.6B.{}d.txt'.format(latent_dim))) for line in f: values = line.split() word = values[0] coefs = np.asarray(values[1:], dtype='float32') embeddings_index[word] = coefs f.close() print('Found %s word vectors.' % len(embeddings_index)) # + #build embedding weights matrix for text embedding_matrix = np.zeros((len(tok1.word_index) + 1, latent_dim)) for word, i in tok1.word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: # words not found in embedding index will be all-zeros. 
embedding_matrix[i] = embedding_vector # - # # model # + ######################## #### Encoder Model #### #setting Encoder Input encoder_inputs = tf.keras.Input(shape=(doc_length,), name='Encoder-Input') # GloVe Embeding for encoder x = tf.keras.layers.Embedding(vocab_size_encoder, latent_dim, name='Body-Word-Embedding', weights=[embedding_matrix], mask_zero=False, trainable=False)(encoder_inputs) #Batch normalization is used so that the distribution of the inputs #to a specific layer doesn't change over time x = tf.keras.layers.BatchNormalization(name='Encoder-Batchnorm-1')(x) # We do not need the `encoder_output` just the hidden state _, state_h = tf.keras.layers.GRU(latent_dim, return_state=True, name='Encoder-Last-GRU')(x) # Set the encoder as a separate entity so we can encode without decoding if desired encoder_model = tf.keras.Model(inputs=encoder_inputs, outputs=state_h, name='Encoder-Model') seq2seq_encoder_out = encoder_model(encoder_inputs) ######################## #### Decoder Model #### decoder_inputs = tf.keras.Input(shape=(None,), name='Decoder-Input') # for teacher forcing # Embedding For Decoder, not GloVe dec_emb = tf.keras.layers.Embedding(vocab_size_decoder, latent_dim, name='Decoder-Word-Embedding', mask_zero=False, )(decoder_inputs) #batch normalization dec_bn = tf.keras.layers.BatchNormalization(name='Decoder-Batchnorm-1')(dec_emb) # Set up the decoder, using `decoder_state_input` as initial state. 
decoder_gru = tf.keras.layers.GRU(latent_dim, return_state=True, return_sequences=True, name='Decoder-GRU') #the decoder "decodes" the encoder out decoder_gru_output, _ = decoder_gru(dec_bn, initial_state=seq2seq_encoder_out) x = tf.keras.layers.BatchNormalization(name='Decoder-Batchnorm-2')(decoder_gru_output) # Dense layer for prediction decoder_dense = tf.keras.layers.Dense(vocab_size_decoder, activation='softmax', name='Final-Output-Dense') decoder_outputs = decoder_dense(x) ######################## #### Seq2Seq Model #### seq2seq_Model = tf.keras.Model([encoder_inputs, decoder_inputs], decoder_outputs) #parallelize data on N GPUs if desired #seq2seq_Model = tf.keras.utils.multi_gpu_model(seq2seq_Model, gpus=N) seq2seq_Model.compile(optimizer=tf.keras.optimizers.Nadam(lr=0.001), loss='sparse_categorical_crossentropy') # - # ** Examine Model Architecture Summary ** #from seq2seq_utils import viz_model_architecture seq2seq_Model.summary() # ## Generator # + #find values for train/val split data_len = len(encoder_input_data) val_split = int(np.floor(data_len*.15)) train_split = int(np.floor(data_len*.85)) # + # separating into train and validation data X_enc_train = encoder_input_data[0:train_split] X_dec_train = decoder_input_data[0:train_split] y_t_train = np.expand_dims(decoder_target_data, -1)[0:train_split] X_enc_val = encoder_input_data[-val_split:-1] X_dec_val = decoder_input_data[-val_split:-1] y_t_val = np.expand_dims(decoder_target_data, -1)[-val_split:-1] # - class generatorClass(Sequence): def __init__(self, X_enc, X_dec, y_t, batch_size): self.X_enc = X_enc self.X_dec = X_dec self.y_t = y_t self.batch_size = batch_size self.lock = threading.Lock() def __len__(self): return int(np.ceil(len(self.X_enc) / float(self.batch_size))) def __getitem__(self, idx): with self.lock: batch_index1 = idx * self.batch_size batch_index2 = (idx + 1) * self.batch_size batch_Xe = self.X_enc[batch_index1:batch_index2] batch_Xd = self.X_dec[batch_index1:batch_index2] 
batch_y = self.y_t[batch_index1:batch_index2] batch_X = [batch_Xe, batch_Xd] return batch_X, batch_y def on_epoch_end(self): """Method called at the end of every epoch. """ pass # # train model # + #tensorboard log_dir="logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1) #checkpoints checkpointer = tf.keras.callbacks.ModelCheckpoint( filepath='/tmp/weights.{epoch:02d}-{val_loss:.2f}.hdf5', verbose=1, save_best_only=True) #early_stopping early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto', restore_best_weights=True) #model epochs = 10 batch_size = 700 idx = 0 gen_instance = generatorClass(X_enc_train, X_dec_train, y_t_train, batch_size) val_instance = generatorClass(X_enc_val, X_dec_val, y_t_val, batch_size) #n_batches = (int(encoder_input_data.shape[0]) // batch_size) * epochs history = seq2seq_Model.fit_generator(generator = gen_instance, epochs=epochs , max_queue_size=50, validation_data = val_instance, validation_freq=1, steps_per_epoch = int(encoder_input_data.shape[0]) // batch_size, callbacks=[tensorboard_callback, checkpointer], #early_stop], use_multiprocessing=True, workers=7) #save final model seq2seq_Model.save('your_model.h5') # - # # scoring # + #load the model seq2seq_Model = tf.keras.models.load_model('your_model.h5') # Show the model architecture seq2seq_Model.summary() # + #open the tokenizers with open('tok1.json') as f: data = json.load(f) tok1 = tokenizer_from_json(data) with open('tok2.json') as f: data = json.load(f) tok2 = tokenizer_from_json(data) # - #look at test set test.head() #pick a cell from the clean data to test and look at it test_text = [test['text_no_punctuation'][6]] test_text # + # get the encoder's features for the decoder tok1.fit_on_texts(test_text) # + #tokenize test text raw_tokenized = tok1.texts_to_sequences(test_text) raw_tokenized = 
tf.keras.preprocessing.sequence.pad_sequences(raw_tokenized, maxlen=maxlen1) # - #predict the encoder state of the new sentence body_encoding = encoder_model.predict(raw_tokenized) #get output shapes of decoder word embedding latent_dim = seq2seq_Model.get_layer('Decoder-Word-Embedding').output_shape[-1] # + #get layer method for getting the embedding (word clusters) decoder_inputs = seq2seq_Model.get_layer('Decoder-Input').input dec_emb = seq2seq_Model.get_layer('Decoder-Word-Embedding')(decoder_inputs) dec_bn = seq2seq_Model.get_layer('Decoder-Batchnorm-1')(dec_emb) gru_inference_state_input = tf.keras.Input(shape=(latent_dim,), name='hidden_state_input') gru_out, gru_state_out = seq2seq_Model.get_layer('Decoder-GRU')([dec_bn, gru_inference_state_input]) # Reconstruct dense layers dec_bn2 = seq2seq_Model.get_layer('Decoder-Batchnorm-2')(gru_out) dense_out = seq2seq_Model.get_layer('Final-Output-Dense')(dec_bn2) # - decoder_model = tf.keras.Model([decoder_inputs, gru_inference_state_input], [dense_out, gru_state_out]) # save the encoder's embedding before its updated by decoder for later # optional original_body_encoding = body_encoding state_value = np.array(tok2.word_index['_start_']).reshape(1, 1) state_value decoded_sentence = [] stop_condition = False vocabulary_inv = dict((v, k) for k, v in tok2.word_index.items()) vocabulary_inv while not stop_condition: #print(1) preds, st = decoder_model.predict([state_value, body_encoding]) pred_idx = np.argmax(preds[:, :, 2:]) + 2 pred_word_str = vocabulary_inv[pred_idx] print(pred_word_str) if pred_word_str == '_end_' or len(decoded_sentence) >= maxlen2: stop_condition = True break decoded_sentence.append(pred_word_str) # update the decoder for the next word body_encoding = st state_value = np.array(pred_idx).reshape(1, 1) #print(state_value) # + #compare to original summary print([test['summary_no_punctuation'][6]]) # -
clean_code_machine_summarization-w_Generator_final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ZvXUBQbpdADe" # # Installation # # Install all the dependencies to make the most out of TorchCAM # + colab={"base_uri": "https://localhost:8080/"} id="6-isjCf8eouw" outputId="296bd40f-e369-4003-9d20-19cab042a2a6" # !pip install torchvision matplotlib # + [markdown] id="CkH4BlzmdJ4c" # ## Latest stable release # + id="i_H5vMoMc6Rw" # !pip install torchcam # + [markdown] id="VVEOrDRpdNd0" # ## From source # + colab={"base_uri": "https://localhost:8080/", "height": 530} id="IC2H2zSwdRz1" outputId="22ced310-94b4-4c05-c4c9-014d40524f6d" # Install the most up-to-date version from GitHub # !pip install -e git+https://github.com/frgfm/torch-cam.git#egg=torchcam # + [markdown] id="QnqnIbRegkeE" # Now go to `Runtime/Restart runtime` for your changes to take effect! # + [markdown] id="SZp62o6qdlTc" # # Basic usage # + id="P0Cv_smlMpMf" # %matplotlib inline # All imports import matplotlib.pyplot as plt import torch from torch.nn.functional import softmax, interpolate from torchvision.io.image import read_image from torchvision.models import resnet18 from torchvision.transforms.functional import normalize, resize, to_pil_image from torchcam.methods import SmoothGradCAMpp, LayerCAM from torchcam.utils import overlay_mask # + colab={"base_uri": "https://localhost:8080/"} id="RJ5L0BrUfgLm" outputId="f3c20158-5aa0-4ec3-ebfd-a4b9a946d0b4" # Download an image # !wget https://www.woopets.fr/assets/races/000/066/big-portrait/border-collie.jpg # Set this to your image path if you wish to run it on your own data img_path = "border-collie.jpg" # + id="Zymmsgcbf_6m" # Instantiate your model here model = resnet18(pretrained=True).eval() # + [markdown] id="H1A7RpqEdukb" # ## Illustrate your classifier capabilities # + colab={"base_uri": "https://localhost:8080/"} 
id="jF1zwBBrd2Nz" outputId="428dcb4f-523c-4e9e-bca6-8f818f454a29" cam_extractor = SmoothGradCAMpp(model) # Get your input img = read_image(img_path) # Preprocess it for your chosen model input_tensor = normalize(resize(img, (224, 224)) / 255., [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # Preprocess your data and feed it to the model out = model(input_tensor.unsqueeze(0)) # Retrieve the CAM by passing the class index and the model output cams = cam_extractor(out.squeeze(0).argmax().item(), out) # + colab={"base_uri": "https://localhost:8080/"} id="quB21NjxgNUW" outputId="e11ff8b7-6d38-4e9e-f201-17d02d8f5772" # Notice that there is one CAM per target layer (here only 1) for cam in cams: print(cam.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 264} id="7fA09lIYg_Mb" outputId="d945d0f9-982d-4f84-b92d-0404359cccc1" # The raw CAM for name, cam in zip(cam_extractor.target_names, cams): plt.imshow(cam.numpy()); plt.axis('off'); plt.title(name); plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 264} id="_FIofDSdhC6K" outputId="cdf433c6-0f55-4bc2-daa9-69b8a0b8106c" # Overlayed on the image for name, cam in zip(cam_extractor.target_names, cams): result = overlay_mask(to_pil_image(img), to_pil_image(cam, mode='F'), alpha=0.5) plt.imshow(result); plt.axis('off'); plt.title(name); plt.show() # + id="VsKWEYueiZqJ" # Once you're finished, clear the hooks on your model cam_extractor.remove_hooks() # + [markdown] id="8wvuEmcGd287" # # Advanced tricks # + [markdown] id="MUr7Em4rd5aK" # ## Extract localization cues # + id="BKIiNXKWd8f7" colab={"base_uri": "https://localhost:8080/"} outputId="0a0aac2e-5a9e-464d-fb9d-12a1180a294f" # Retrieve the CAM from several layers at the same time cam_extractor = LayerCAM(model) # Preprocess your data and feed it to the model out = model(input_tensor.unsqueeze(0)) print(softmax(out, dim=1).max()) # + id="wjcWMbbtbVxv" cams = cam_extractor(out.squeeze(0).argmax().item(), out) # + colab={"base_uri": 
"https://localhost:8080/", "height": 252} id="0Rc2TtqNb1VX" outputId="fb7eb234-3c9b-4c27-a027-4ba1cbd82538" # Resize it resized_cams = [resize(to_pil_image(cam), img.shape[-2:]) for cam in cams] segmaps = [to_pil_image((resize(cam.unsqueeze(0), img.shape[-2:]).squeeze(0) >= 0.5).to(dtype=torch.float32)) for cam in cams] # Plot it for name, cam, seg in zip(cam_extractor.target_names, resized_cams, segmaps): _, axes = plt.subplots(1, 2) axes[0].imshow(cam); axes[0].axis('off'); axes[0].set_title(name) axes[1].imshow(seg); axes[1].axis('off'); axes[1].set_title(name) plt.show() # + id="WbmvA4hTNKdH" # Once you're finished, clear the hooks on your model cam_extractor.remove_hooks() # + [markdown] id="DNdwZO7Fd8mC" # ## Fuse CAMs from multiple layers # + id="G6TDPWKJeCIq" # Retrieve the CAM from several layers at the same time cam_extractor = LayerCAM(model, ["layer2", "layer3", "layer4"]) # Preprocess your data and feed it to the model out = model(input_tensor.unsqueeze(0)) # Retrieve the CAM by passing the class index and the model output cams = cam_extractor(out.squeeze(0).argmax().item(), out) # + colab={"base_uri": "https://localhost:8080/"} id="4vdTWfmjjE4w" outputId="9a0c5ab7-c624-4a9c-d85a-611b99cf82f6" # This time, there are several CAMs for cam in cams: print(cam.shape) # + colab={"base_uri": "https://localhost:8080/", "height": 145} id="6q4O5zrvjKXv" outputId="ce024ac3-b605-43c8-af6f-f7b28e907d06" # The raw CAM _, axes = plt.subplots(1, len(cam_extractor.target_names)) for idx, name, cam in zip(range(len(cam_extractor.target_names)), cam_extractor.target_names, cams): axes[idx].imshow(cam.numpy()); axes[idx].axis('off'); axes[idx].set_title(name); plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 511} id="E1WTyofWjNw9" outputId="acac1711-a1b3-4130-ea58-84a21c7be07b" # Let's fuse them fused_cam = cam_extractor.fuse_cams(cams) # Plot the raw version plt.imshow(fused_cam.numpy()); plt.axis('off'); plt.title(" + 
".join(cam_extractor.target_names)); plt.show() # Plot the overlayed version result = overlay_mask(to_pil_image(img), to_pil_image(fused_cam, mode='F'), alpha=0.5) plt.imshow(result); plt.axis('off'); plt.title(" + ".join(cam_extractor.target_names)); plt.show() # + id="E06Ld0-lNQGT" # Once you're finished, clear the hooks on your model cam_extractor.remove_hooks()
torch-cam/quicktour.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 import molsysmt as msm # # Hydrophobicity file_path = msm.demo_systems.files['1tcd.pdb'] molecular_system_A = msm.convert(file_path) molecular_system_A = msm.remove_solvent(molecular_system_A) molecular_system_A = msm.add_missing_hydrogens(molecular_system_A) msm.info(molecular_system_A) molecular_system_B = msm.demo_systems.metenkephalin() molecular_system_B = msm.add_terminal_capping(molecular_system_B) hydrophobicity = msm.physchem.hydrophobicity(molecular_system_B, type='eisenberg') hydrophobicity
docs/contents/physchem/hydrophobicity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') # %matplotlib inline # - from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score from sklearn.metrics import roc_auc_score from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler card_df = pd.read_csv('../creditcard.csv') # V1may be result of a PCA Dimensionality reduction to protect user identities # and sensitive features(v1-v28) card_df.head(3) card_df.shape # 인자로 입력받은 DataFrame을 복사한뒤 # Time 컬럼만 삭제하고 복사된 DataFrame 반환 def get_preprocessed_df(df=None): df_copy = df.copy() df_copy.drop('Time', axis=1, inplace=True) return df_copy # 사전 데이터 가공 후 학습과 테스트 데이터 세트를 반환하는 함수 def get_train_test_dataset(df=None): df_copy = get_preprocessed_df(df) X_features = df_copy.iloc[:, :-1] y_target = df_copy.iloc[:, -1] # stratify taget에 분포도에 따라 # 학습, 테스트 데이터를 분할 해준다. 
X_train, X_test, y_train, y_test = train_test_split( X_features, y_target, test_size=0.3, random_state=0, stratify=y_target) return X_train, X_test, y_train, y_test X_train, X_test, y_train, y_test = get_train_test_dataset(card_df) print('학습 데이터 레이블 값 비율') print(y_train.value_counts() / y_train.shape[0] * 100) print('테스트 데이터 레이블 값 비율') print(y_test.value_counts() / y_test.shape[0] * 100) lr_clf = LogisticRegression() lr_clf.fit(X_train, y_train) lr_pred = lr_clf.predict(X_test) lr_pred_proba = lr_clf.predict_proba(X_test)[:, 1] # 수정된 get_clf_eval() 함수 def get_clf_eval(y_test, pred=None, pred_proba=None): confusion = confusion_matrix( y_test, pred) accuracy = accuracy_score(y_test , pred) precision = precision_score(y_test , pred) recall = recall_score(y_test , pred) f1 = f1_score(y_test,pred) # ROC-AUC 추가 roc_auc = roc_auc_score(y_test, pred_proba) print('오차 행렬') print(confusion) # ROC-AUC print 추가 print('정확도: {0:.4f}, 정밀도: {1:.4f}, 재현율: {2:.4f},\ F1: {3:.4f}, AUC:{4:.4f}'.format(accuracy, precision, recall, f1, roc_auc)) get_clf_eval(y_test, lr_pred, lr_pred_proba) def get_model_train_eval(model, ftr_train=None, ftr_test=None, tgt_train=None, tgt_test=None): model.fit(ftr_train, tgt_train) pred = model.predict(ftr_test) pred_proba = model.predict_proba(ftr_test)[:, 1] get_clf_eval(tgt_test, pred, pred_proba) # > (boost_from_average가 True일 경우 레이블 값이 극도로 불균형 분포를 이루는 경우 재현률 및 ROC-AUC 성능이 매우 저하됨.) # LightGBM 2.1.0 이상 버전에서 이와 같은 현상 발생 # + from lightgbm import LGBMClassifier lgbm_clf = LGBMClassifier(n_estimators=1000, num_leaves=64, n_jobs=-1, boost_from_average=False) get_model_train_eval(lgbm_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test) # - # ## 중요 데이터 분포도 변환 후 모델 학습/예측/평가 # ### 중요 feature의 분포도 확인 plt.figure(figsize=(8, 4)) plt.xticks(range(0, 30000, 1000), rotation=60) sns.distplot(card_df['Amount']) # ### 데이터 사전 가공을 위한 별도의 함수에 StandardScaler를 이용하여 Amount 피처 변환 # 사이킷런의 StandardScaler를 이용하여 정규분포 형태로 Amount 피처값 변환하는 로직으로 수정. 
def get_preprocessed_df(df=None): df_copy = df.copy() scaler = StandardScaler() amount_n = scaler.fit_transform(df_copy['Amount'].values.reshape(-1, 1)) # 변환된 Amount를 Amount_Scaled로 피처명 변경후 DataFrame맨 앞 컬럼으로 입력 df_copy.insert(0, 'Amount_Scaled', amount_n) # 기존 Time, Amount 피처 삭제 df_copy.drop(['Time','Amount'], axis=1, inplace=True) return df_copy # ### StandardScaler 변환 후 로지스틱 회귀 및 LightGBM 학습/예측/평가 # + # Amount를 정규분포 형태로 변환 후 로지스틱 회귀 및 LightGBM 수행. X_train, X_test, y_train, y_test = get_train_test_dataset(card_df) print('### 로지스틱 회귀 예측 성능 ###') lr_clf = LogisticRegression() get_model_train_eval(lr_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test) print('### LightGBM 예측 성능 ###') lgbm_clf = LGBMClassifier(n_estimators=1000, num_leaves=64, n_jobs=-1, boost_from_average=False) get_model_train_eval(lgbm_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test) # - # ### Amount를 로그 변환 def get_preprocessed_df(df=None): df_copy = df.copy() # 넘파이의 log1p( )를 이용하여 Amount를 로그 변환 amount_n = np.log1p(df_copy['Amount']) df_copy.insert(0, 'Amount_Scaled', amount_n) df_copy.drop(['Time','Amount'], axis=1, inplace=True) return df_copy # + # log1p 와 expm1 설명 import numpy as np print(1e-1000 == 0.0) print(np.log(1e-1000)) print(np.log(1e-1000 + 1)) print(np.log1p(1e-1000)) # - var_1 = np.log1p(100) var_2 = np.expm1(var_1) print(var_1, var_2) # + X_train, X_test, y_train, y_test = get_train_test_dataset(card_df) print('### 로지스틱 회귀 예측 성능 ###') get_model_train_eval(lr_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test) print('### LightGBM 예측 성능 ###') get_model_train_eval(lgbm_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test) # - # ## 이상치 데이터 제거 후 모델 학습/예측/평가 # ### 각 피처들의 상관 관계를 시각화. 
결정 레이블인 class 값과 가장 상관도가 높은 피처 추출 plt.figure(figsize=(9, 9)) corr = card_df.corr() sns.heatmap(corr, cmap='RdBu') def get_outlier(df=None, column=None, weight=1.5): # fraud에 해당하는 column 데이터만 추출, 1/4 분위와 3/4 분위 지점을 np.percentile로 구함. fraud = df[df['Class']==1][column] quantile_25 = np.percentile(fraud.values, 25) quantile_75 = np.percentile(fraud.values, 75) # IQR을 구하고, IQR에 1.5를 곱하여 최대값과 최소값 지점 구함. iqr = quantile_75 - quantile_25 iqr_weight = iqr * weight lowest_val = quantile_25 - iqr_weight highest_val = quantile_75 + iqr_weight # 최대값 보다 크거나, 최소값 보다 작은 값을 아웃라이어로 설정하고 DataFrame index 반환. outlier_index = fraud[(fraud < lowest_val) | (fraud > highest_val)].index return outlier_index #np.percentile(card_df['V14'].values, 100) np.max(card_df['V14']) outlier_index = get_outlier(df=card_df, column='V14', weight=1.5) print('이상치 데이터 인덱스:', outlier_index) # + # get_processed_df( )를 로그 변환 후 V14 피처의 이상치 데이터를 삭제하는 로직으로 변경. def get_preprocessed_df(df=None): df_copy = df.copy() amount_n = np.log1p(df_copy['Amount']) df_copy.insert(0, 'Amount_Scaled', amount_n) df_copy.drop(['Time','Amount'], axis=1, inplace=True) # 이상치 데이터 삭제하는 로직 추가 outlier_index = get_outlier(df=df_copy, column='V14', weight=1.5) df_copy.drop(outlier_index, axis=0, inplace=True) return df_copy X_train, X_test, y_train, y_test = get_train_test_dataset(card_df) print('### 로지스틱 회귀 예측 성능 ###') get_model_train_eval(lr_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test) print('### LightGBM 예측 성능 ###') get_model_train_eval(lgbm_clf, ftr_train=X_train, ftr_test=X_test, tgt_train=y_train, tgt_test=y_test) # - # ## SMOTE 오버 샘플링 적용 후 모델 학습/예측/평가 # + from imblearn.over_sampling import SMOTE smote = SMOTE(random_state=0) X_train_over, y_train_over = smote.fit_sample(X_train, y_train) print('SMOTE 적용 전 학습용 피처/레이블 데이터 세트: ', X_train.shape, y_train.shape) print('SMOTE 적용 후 학습용 피처/레이블 데이터 세트: ', X_train_over.shape, y_train_over.shape) print('SMOTE 적용 후 레이블 값 분포: \n', 
pd.Series(y_train_over).value_counts()) # - y_train.value_counts() lr_clf = LogisticRegression() # ftr_train과 tgt_train 인자값이 SMOTE 증식된 X_train_over와 y_train_over로 변경됨에 유의 get_model_train_eval(lr_clf, ftr_train=X_train_over, ftr_test=X_test, tgt_train=y_train_over, tgt_test=y_test) # + import matplotlib.pyplot as plt import matplotlib.ticker as ticker from sklearn.metrics import precision_recall_curve # %matplotlib inline def precision_recall_curve_plot(y_test , pred_proba_c1): # threshold ndarray와 이 threshold에 따른 정밀도, 재현율 ndarray 추출. precisions, recalls, thresholds = precision_recall_curve( y_test, pred_proba_c1) # X축을 threshold값으로, Y축은 정밀도, 재현율 값으로 각각 Plot 수행. 정밀도는 점선으로 표시 plt.figure(figsize=(8,6)) threshold_boundary = thresholds.shape[0] plt.plot(thresholds, precisions[0:threshold_boundary], linestyle='--', label='precision') plt.plot(thresholds, recalls[0:threshold_boundary],label='recall') # threshold 값 X 축의 Scale을 0.1 단위로 변경 start, end = plt.xlim() plt.xticks(np.round(np.arange(start, end, 0.1),2)) # x축, y축 label과 legend, 그리고 grid 설정 plt.xlabel('Threshold value'); plt.ylabel('Precision and Recall value') plt.legend(); plt.grid() plt.show() # - precision_recall_curve_plot( y_test, lr_clf.predict_proba(X_test)[:, 1] ) lgbm_clf = LGBMClassifier(n_estimators=1000, num_leaves=64, n_jobs=-1, boost_from_average=False) get_model_train_eval(lgbm_clf, ftr_train=X_train_over, ftr_test=X_test, tgt_train=y_train_over, tgt_test=y_test)
self/chap4/credit_card_fraud_detection/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Leitura e visualização da imagem # + # %matplotlib inline import matplotlib.pyplot as plt import numpy as np img = plt.imread('cameraman.tiff') # Mostra a imagem. # Por padrão, o matplotlib faz um mapeamento dos valores 0 a 255 na imagem para # as cores do mapa de cores viridis (https://matplotlib.org/examples/color/colormaps_reference.html) plt.imshow(img) # - # Mostra a imagem utilizando mapa de cores nível de cinza plt.imshow(img, cmap='gray') # Imprime a primeira linha da imagem print(img[0]) # Plota os valores de três linhas diferentes da imagem plt.figure(figsize=[15, 8]) plt.subplot(3, 1, 1) plt.plot(img[0]) plt.title('Linha 0') plt.subplot(3, 1, 2) plt.plot(img[100]) plt.title('Linha 100') plt.subplot(3, 1, 3) plt.plot(img[250]) plt.title('Linha 250') plt.tight_layout() # # Transformações de imagens # Negativo, utilizando loops Python num_linhas, num_cols = img.shape img_neg = np.zeros((num_linhas, num_cols), dtype=np.uint8) for row in range(num_linhas): for col in range(num_cols): img_neg[row, col] = 255 - img[row, col] plt.imshow(img_neg, 'gray') # Negativo, utilizando o maravilhoso numpy img_neg_np = 255 - img plt.imshow(img_neg_np, 'gray') # Logaritmo img_log = np.zeros((num_linhas, num_cols), dtype=np.float) for row in range(num_linhas): for col in range(num_cols): img_log[row, col] = np.log(1+img[row, col]) plt.imshow(img_log, 'gray') # Visualização da função logaritmo x = range(1, 255) y = np.log(x) plt.plot(x, y) # Simulando uma situação na qual o logaritmo seria util img_sim = img.astype(float) img_sim[20:25, 30:35] = 1000 plt.imshow(img_sim, 'gray') img_sim_log = np.log(img_sim) plt.figure() plt.imshow(img_sim_log, 'gray') # Implementação de uma lookup table logarítmica lookupTable = np.zeros(256) for value in range(0, 256): 
lookupTable[value] = np.log(1+value) print(lookupTable) # Transformação de imagem utilizando lookup table img_log = np.zeros((num_linhas, num_cols), dtype=np.float) for row in range(num_linhas): for col in range(num_cols): img_log[row, col] = lookupTable[img[row, col]] plt.imshow(img_log, 'gray')
Lecture 02 - Intensity transforms and spatial filtering/Transformacoes pontuais em imagens.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + active="" # Relevant Links: # # 1. Visit https://developers.google.com/gmail/api/quickstart/python # # 2. Click "Enable the GMail API", follow the steps and finallly click "DOWNLOAD CLIENT CONFIGURATION". Save the "credentials.json" file to the current working directory # # 3. Visit https://console.cloud.google.com/ # # 4. Create a new project # # 5. Click "Go to APIs Overview" # # 6. Click the "Credentials" tab on the left. Click "+ CREATE CREDENTIALS" at the top and choose "Service Account". Give the service account a name and click "Create" # # 7. Click on the newly created service account, ensure it is enabled, and click "ADD KEY" -> "Create new key". Pick "JSON" and download the json file and store it in the current working directory. # # 8. Run the cells in this notebook. # - pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib # + from __future__ import print_function from random import random from datetime import datetime import sys import os import pickle import google.oauth2.credentials from email.mime.text import MIMEText import base64 from googleapiclient.discovery import build from googleapiclient.errors import HttpError from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request # - #set this to the name of the JSON file downloaded in step 7 above os.environ["GOOGLE_APPLICATION_CREDENTIALS"]= "" # # Setup # + # If modifying these scopes, delete the file token_gmail.pickle. SCOPES = ['https://www.googleapis.com/auth/gmail.compose'] def main(): """Shows basic usage of the Gmail API. Lists the user's Gmail labels. 
""" creds = None # The file token_gmail.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists('token_gmail.pickle'): with open('token_gmail.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'credentials.json', SCOPES) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open('token_gmail.pickle', 'wb') as token: pickle.dump(creds, token) if __name__ == '__main__': main() # - def get_authenticated_gmail_service(): """ This function uses the token_gmail.pickle file to get an authenticated object used to talk to Gmail """ credentials = None API_SERVICE_NAME = 'gmail' API_VERSION = 'v1' if os.path.exists('token_gmail.pickle'): with open('token_gmail.pickle', 'rb') as token: credentials = pickle.load(token) return build(API_SERVICE_NAME, API_VERSION, credentials = credentials) service = get_authenticated_gmail_service() # # Core Functions def create_message(sender, to, subject, message_text): """ Create a message for an email. Args: sender: Email address of the sender. to: Email address of the receiver. subject: The subject of the email message. message_text: The text of the email message. Returns: An object containing a base64url encoded email object. """ message = MIMEText(message_text) message['to'] = to message['from'] = sender message['subject'] = subject return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()} def send_message(service, user_id, message): """ Send an email message. Args: service: Authorized Gmail API service instance. user_id: User's email address. The special value "me" can be used to indicate the authenticated user. message: Message to be sent. 
Returns: Sent Message. """ try: message = (service.users().messages().send(userId=user_id, body=message) .execute()) print('Message Id: %s' % message['id']) return message except Exception as e: print('An error occurred: %s' % e) # # Run Code #create a message my_message = create_message('your_email_address', 'your_email_address', 'Subject', 'message text') #send the message send_message(service, 'me', my_message)
time-series-analysis-anomalies/Gmail API Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Agrupando Datos # - # __Group By__ se refiere al proceso que involucra uno o más de los siguientes pasos: # # * Dividir los datos en grupos basados en algún criterio. # * Aplicar una función a cada uno de los grupos independientemente. # * Combinar los resultados en una estructura de datos. # # La división es el paso principal. Usualmente, el usuario busca dividir la data en grupos y luego _hacer algo_ con estos grupos. En el paso de aplicar se puede: # # * Agregación: Calcular alguna estadística(s) para cada grupo. Por ejemplo, la suma, promedio y/o conteo de cada grupo. # # * Transformación: Algún cálculo específico a cada grupo pero devolviendo un índice similar al original. Por ejemplo, estandarizar o rellenar valores nulos respecto a cada grupo. # # * Filtrado: Descartar grupos de acuerdo a un cálculo grupal que se evalua verdadero o falso. Por ejemplo, descartar los regristros que la cantidad de miembros del grupo es menor a cierto umbral. # La clase de hoy será motivada con el dataset de monstruos de bolsillo favorito de los milleniasl: __Pokemon__. import os import pandas as pd pkm = ( pd.read_csv(os.path.join("..", "data", "pokemon.csv"), index_col="#") .rename(columns=lambda x: x.replace(" ", "").replace(".", "_").lower()) ) pkm.head() # Ejemplo: ¿Sabes cuántos pokemones legendarios hay por generación? ¿No? Agrupemos por generación. pkm.groupby("generation") type(pkm.groupby("generation")) # Hacer un groupby nos entrega un objeto groupby, usualmente no nos ayuda mucho, pero ya tiene los grupos separados internamente. # # Idea: Iteremos por grupo y contemos! 
for name, group in pkm.groupby("generation"): print(f"type(name): {type(name)}") print(f"type(group): {type(group)}\n") print(f"name: {name}") print(f"group:") display(group) break for name, group in pkm.groupby("generation"): print(f"La generación {name} tiene {group['legendary'].sum()} pokemones legendarios.") # Lo anterior no es lo mejor, porque es secuencial, es decir, debemos esperar a que la i-ésima iteración termine para ejecutar la (i+1)-ésima iteración. # + [markdown] slideshow={"slide_type": "slide"} # ## Group By - Aggregation # - # Una vez el objeto GroupBy ha sido creado, es posible aplicar diferentes métodos para realizar los cálculos requeridos. El más común, es `aggregate()`, o equivalentemente, `agg()`. pkm.groupby("generation").agg({"legendary": "sum"}) # Nota que después de aplicado el `agg` el nombre de la columna se mantiene, esto se puede cambiar de la siguiente forma: # Nombre de la columna como argumento de agg, tupla con nombre de la columna y operación. pkm.groupby("generation").agg(legendaries_sum=("legendary", "sum")) # Comparemos tiempos # %%timeit aux = pd.DataFrame().rename_axis(index="generation") for name, group in pkm.groupby("generation"): aux.loc[name, "legendary"] = group['legendary'].sum() # %%timeit pkm.groupby("generation").agg({"legendary": "sum"}) # Si es algo pequeño de una sola columna, puedes acceder directamente a ella. pkm.groupby("generation")["legendary"].sum() # Ojo! Devuelve una serie # Puedes agrupar por más de una columna. pkm.groupby(["type1", "type2"]).agg(hp_max=("hp", "max")) # También puedes agregar más de una columna. ( pkm.groupby(["type1", "type2"]) .agg( hp_max=("hp", "max"), attack_max=("attack", "max") ) ) # Incluso hacer más de una agregación a una misma columna ( pkm.groupby(["type1", "type2"]) .agg({"hp": ["min", "mean", "max"]} ) ) # Si quieres cambiar los nombres es un poco más verboso. 
( pkm.groupby(["type1", "type2"]) .agg( hp_min=("hp", "min"), hp_mean=("hp", "mean"), hp_max=("hp", "max"), ) ) # También puedes aplicar tus propias funciones ( pkm.groupby(["type1", "type2"]) .agg( hp_range=("hp", lambda x: x.max() - x.min()), ) ) # Finalmente, si quieres interactuar con más de una columna, necesitas el método `apply`. ( pkm.groupby(["type1", "type2"]) .apply(lambda df: df["attack"].mean() - df["defense"].mean()) ) # Eres libre de definir tu propia función y entregarla a un groupby, por ejemplo, usando el mismo ejemplo anterior: def attack_minus_defense(df): return df["attack"].mean() - df["defense"].mean() pkm.groupby(["type1", "type2"]).apply(attack_minus_defense) # Nota que no es necesario usar la función lambda, pero si es importante que tu función definida tenga como argumento un dataframe (puedes pensar que en cada grupo se le entregará el dataframe filtrado). # + [markdown] slideshow={"slide_type": "slide"} # ## Group By - Transform # - # Ejemplo: Normalizar cada columna agrupados por generación ( pkm.groupby("generation") .transform(lambda s: (s - s.mean()) / s.std()) ) # También se lo puedes aplicar a una sola columna. ( pkm.groupby("generation")["attack"] .transform(lambda s: (s - s.mean()) / s.std()) ) # Personalmente, no suelo utilizar mucho este método, porque prefiero guardar la data original. Suelo agregar nuevas columnas, por ejemplo: pkm.assign( attack_nrm=lambda df: df.groupby("generation")["attack"].transform(lambda s: (s - s.mean()) / s.std()) ) # + [markdown] slideshow={"slide_type": "slide"} # ## Group By - Filter # - # Ejemplo: Filtrar el dataframe de pokemons pero manteniendo solo las generaciones que tengan más de 10 pokemones legendarios. # # Como te estás dando cuenta, no se puede hacer con una máscara, porque el criterio no depende de cada registro, depende del grupo al cual pertenece el regirstro. 
pkm_filtered = pkm.groupby("generation").filter(lambda df: df["legendary"].sum() > 10) pkm_filtered.head() # Veamos cuales son las generaciones que permanecieron luego del filtrado. pkm_filtered["generation"].unique() # Verifiquemos que filtramos correctamente pkm.groupby("generation")["legendary"].sum() # Lo importante del argumento del método `filter` es que retorne un booleano! Es distinto a cuando uno hace una máscara, donde se obtiene una serie de elementos booleanos. # # Por ejemplo, filtrar los pokemones que son de la primera generación generación. pkm.loc[lambda df: df["generation"] != 1] # La máscara dentro de `loc` es una serie de elementos booleanos. pkm["generation"] != 1 # ## Resumen # * Agrupar datos por condiciones es una tarea usual. # * Dependiendo de tu objetivo es posible operar, transformar o filtrar los grupos.
06_pandas/M2L05_data_aggregation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Copyright 2021 NVIDIA Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # - # # Multi-GPU Scaling in NVTabular with Dask # # # ## NVTabular + Dask Integration # # NVTabular enables the use of [Dask](https://dask.org/) for multi-GPU parallelism, which integrates the following classes with the [RAPIDS](https://rapids.ai/) Dask-CuDF library: # # - **nvtabular.Dataset**: Most NVTabular functionality requires the raw data to be converted to a Dataset object. The conversion is very inexpensive, as it requires minimal IO (if any at all). A Dataset can be initialized using file/directory paths ("csv" or "parquet"), a PyArrow Table, a Pandas/CuDF DataFrame, or a Pandas/CuDF-based *Dask* DataFrame. The purpose of this "wrapper" class is to provide other NVTabular components with reliable mechanisms to (1) translate the target data into a Dask collection, and to (2) iterate over the target data in small-enough chunks to fit comfortably in GPU memory. # - **nvtabular.Workflow**: This is the central class used in NVTabular to compose a GPU-accelerated preprocessing pipeline. 
The Workflow class now tracks the state of the underlying data by applying all operations to an internal Dask-CuDF DataFrame object (`ddf`). # - **nvtabular.ops.StatOperator**: All "statistics-gathering" operations must be designed to operate directly on the Workflow object's internal `ddf`. This requirement facilitates the ability of NVTabular to handle the calculation of global statistics in a scalable way. # # **Big Picture**: NVTabular is tightly integrated with Dask-CuDF. By representing the underlying dataset as a (lazily-evaluated) collection of CuDF DataFrame objects (i.e. a single `dask_cudf.DataFrame`), we can seamlessly scale our preprocessing workflow to multiple GPUs. # # ## Simple Multi-GPU Toy Example # In order to illustrate the Dask-CuDF-based functionality of NVTabular, we will walk through a simple preprocessing example using *toy* data. # # #### Resolving Memory Errors # This notebook was developed on a DGX-1 system (8 V100 GPUs with 1TB host memory). Users with limited device and/or host memory (less than 16GB on device, and less than 32GB on host) may need to modify one or more of the default options. Here are the best places to start: # # - `device_memory_limit`: Reduce the memory limit for workers in your cluster. This setting may need to be much lower than the actual memory capacity of your device. # - `part_mem_fraction`: Reduce the partition size of your Dataset. Smaller partition sizes enable better control over memory spilling on the workers (but reduces compute efficiency). # - `out_files_per_proc`: Increase the number of output files per worker. The worker must be able to shuffle each output file in device memory for the per-worker shuffling algorithm. # - `shuffle`: Change the shuffling option to `Shuffle.PER_PARTITION` in `workflow.apply`. The default (per-worker) option currently requires the entire output dataset to fit in host memory. 
# # ### Step 1: Import Libraries and Cleanup Working Directories # + # Standard Libraries import os import glob import shutil # External Dependencies import cupy as cp import cudf import dask_cudf from dask_cuda import LocalCUDACluster from dask.distributed import Client from dask.utils import parse_bytes from dask.delayed import delayed import rmm # NVTabular import nvtabular as nvt import nvtabular.ops as ops from nvtabular.io import Shuffle from nvtabular.utils import device_mem_size # - # Note that it is often a good idea to set-aside (fast) dedicated disk space for Dask "workers" to spill data and write logging information. To make things simple, we will perform all IO within a single `BASE_DIR` for this example. Make sure to reset this environment variable as desired. # + # Choose a "fast" root directory for this example BASE_DIR = os.environ.get("BASE_DIR", "./basedir") # Define and clean our worker/output directories dask_workdir = os.path.join(BASE_DIR, "workdir") demo_output_path = os.path.join(BASE_DIR, "demo_output") demo_dataset_path = os.path.join(BASE_DIR, "demo_dataset") # Ensure BASE_DIR exists if not os.path.isdir(BASE_DIR): os.mkdir(BASE_DIR) # Make sure we have a clean worker space for Dask if os.path.isdir(dask_workdir): shutil.rmtree(dask_workdir) os.mkdir(dask_workdir) # Make sure we have a clean output path if os.path.isdir(demo_output_path): shutil.rmtree(demo_output_path) os.mkdir(demo_output_path) # Get device memory capacity capacity = device_mem_size(kind="total") # - # ### Step 2: Deploy a Distributed-Dask Cluster # # Before we walk through the rest of this multi-GPU preprocessing example, it is important to reiterate that Dask-CuDF is used extensively within NVTabular. This essentially means that you do **not** need to do anything special to *use* Dask here. 
With that said, the default behavior of NVTabular is to to utilize Dask's ["synchronous"](https://docs.dask.org/en/latest/scheduling.html) task scheduler, which precludes distributed processing. In order to properly utilize a multi-GPU system, you need to deploy a `dask.distributed` *cluster*. # # There are many different ways to create a distributed Dask cluster. This notebook will focus only on the `LocalCUDACluster` API, which is provided by the RAPIDS [Dask-CUDA](https://github.com/rapidsai/dask-cuda) library. It is also recommended that you check out [this blog article](https://blog.dask.org/2020/07/23/current-state-of-distributed-dask-clusters) to see a high-level summary of the many other cluster-deployment utilities. # # For this example, we will assume that you want to perform preprocessing on a single machine with multiple GPUs. In this case, we can use `dask_cuda.LocalCUDACluster` to deploy a distributed cluster with each worker process being pinned to a distinct GPU. This class also provides our workers with mechanisms for device-to-host memory spilling (explained below), and (optionally) enables the use of NVLink and infiniband-based inter-process communication via UCX. # + # Deploy a Single-Machine Multi-GPU Cluster protocol = "tcp" # "tcp" or "ucx" visible_devices = "0,1,2,3" # Delect devices to place workers device_spill_frac = 0.9 # Spill GPU-Worker memory to host at this limit. # Reduce if spilling fails to prevent # device memory errors. 
cluster = None # (Optional) Specify existing scheduler port if cluster is None: cluster = LocalCUDACluster( protocol = protocol, CUDA_VISIBLE_DEVICES = visible_devices, local_directory = dask_workdir, device_memory_limit = capacity * device_spill_frac, ) # Create the distributed client client = Client(cluster) client # - # #### The Dask Diagnostics Dashboard # # If you created a new distributed cluster in the previous cell, the output should specify the address of a [diagnostics dashboard](https://docs.dask.org/en/latest/diagnostics-distributed.html) (e.g. **Dashboard**: http://IP:8787/status). You can also run `client.dashboard_link` to get the same information. If you have [Bokeh](https://bokeh.org/) installed in your environment, the scheduler will create this dashboard by default. If you click on the link, or paste the url in a web browser, you will see a page that looks something like the figure below. Note that you may need to update the IP address in the link if you are working on a remote machine. # # ![dask-dashboard.png](../../images/dask-dashboard.png) # # The Dask dashboard is typically the best way to visualize the execution progress and resource usage of a Multi-GPU NVTabular workflow. For [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/) users, the [Dask JupyterLab Extension](https://github.com/dask/dask-labextension) further integrates the same diagnostic figures into the notebook environment itself. # # #### Device-to-Host Memory Spilling # # One of the advantages of using [Dask-CUDA](https://github.com/rapidsai/dask-cuda) to deploy a distributed cluster is that the workers will move data between device memory and host memory, and between host memory and disk, to avoid out-of-memory (OOM) errors. To set the threshold for device-to-host spilling, a specific byte size can be specified with `device_memory_limit`. 
Since the worker can only consider the size of input data, and previously finished task output, this limit must be set lower than the actual GPU memory capacity. If the limit is set too high, temporary memory allocations within the execution of a task may lead to OOM. With that said, since spilling can dramatically reduce the overall performance of a workflow, a conservative `device_memory_limit` setting is only advised when it proves absolutely necessary (i.e. heavy spilling is deemed inevitable for a given workflow).
#
# #### Initializing Memory Pools
#
# Since allocating memory is often a performance bottleneck, it is usually a good idea to initialize a memory pool on each of our workers. When using a distributed cluster, we must use the `client.run` utility to make sure a function is executed on all available workers.

# +
# Initialize RMM pool on ALL workers
def _rmm_pool():
    """Re-initialize RMM on the calling worker with a pooled allocator.

    Runs on every worker via ``client.run`` below; using a memory pool
    avoids paying the raw cudaMalloc cost on each allocation.
    """
    rmm.reinitialize(
        pool_allocator=True,
        initial_pool_size=None, # Use default size
    )

client.run(_rmm_pool)
# -

# **Note**: If you have problems with this, it *may* be a `numba-0.51` problem. Try: `conda install -c conda-forge numba=0.50`
#
#
# ### Step 3: Create a "Toy" Parquet Dataset
# In order to illustrate the power of multi-GPU scaling, without requiring an excessive runtime, we can use the `cudf.datasets.timeseries` API to generate a largish (~20GB) toy dataset with Dask-CuDF.

# +
# %%time

# Write a "largish" dataset (~20GB).
# Change `write_count` and/or `freq` for larger or smaller dataset.
# Avoid re-writing dataset if it already exists.
write_count = 25
freq = "1s"
if not os.path.exists(demo_dataset_path):

    def _make_df(freq, i):
        """Build one partition of the toy dataset.

        ``seed=i`` makes each delayed partition's timeseries distinct;
        ``label`` is drawn randomly (no fixed seed) as a binary target.
        """
        df = cudf.datasets.timeseries(
            start="2000-01-01", end="2000-12-31", freq=freq, seed=i
        ).reset_index(drop=False)
        df["name"] = df["name"].astype("object")
        df["label"] = cp.random.choice(cp.array([0, 1], dtype="uint8"), len(df))
        return df

    # Build the partitions lazily, then write them all out in parallel.
    dfs = [delayed(_make_df)(freq, i) for i in range(write_count)]
    dask_cudf.from_delayed(dfs).to_parquet(demo_dataset_path, write_index=False)
    del dfs
# -

# ### Step 4: Create an NVTabular Dataset object
#
# As discussed above, the `nvt.Workflow` class requires data to be represented as an `nvt.Dataset`. This convention allows NVTabular to abstract away the raw format of the data, and convert everything to a consistent `dask_cudf.DataFrame` representation. Since the `Dataset` API effectively wraps functions like `dask_cudf.read_csv`, the syntax is very simple and the computational cost is minimal.
#
# **Important Dataset API Considerations**:
#
# - Can be initialized with the following objects:
#     - 1+ file/directory paths. An `engine` argument is required to specify the file format (unless file names are appended with `csv` or `parquet`)
#     - `cudf.DataFrame`. Internal `ddf` will have 1 partition.
#     - `pandas.DataFrame`. Internal `ddf` will have 1 partition.
#     - `pyarrow.Table`. Internal `ddf` will have 1 partition.
#     - `dask_cudf.DataFrame`. Internal `ddf` will be a shallow copy of the input.
#     - `dask.dataframe.DataFrame`. Internal `ddf` will be a direct pandas->cudf conversion of the input.
# - For file-based data initialization, the size of the internal `ddf` partitions will be chosen according to the following arguments (in order of precedence):
#     - `part_size`: Desired maximum size of each partition **in bytes**. Note that you can pass a string here, like `"2GB"`.
#     - `part_mem_fraction`: Desired maximum size of each partition as a **fraction of total GPU memory**.
# # **Note on Dataset Partitioning**: # The `part_size` and `part_mem_fraction` options will be used to specify the desired maximum partition size **after** coversion to CuDF, not the partition size in parquet format (which may be compressed and/or dictionary encoded). For the "parquet" engine, these parameters do not result in the direct mapping of a file byte-range to a partition. Instead, the first row-group in the dataset is converted to a `cudf.DataFrame`, and the size of that DataFrame is used to estimate the number of contiguous row-groups to assign to each partition. In the current "parquet" engine implementation, row-groups stored in different files will always be mapped to different partitions. # %%time # Create a Dataset # (`engine` argument optional if file names appended with `csv` or `parquet`) ds = nvt.Dataset(demo_dataset_path, engine="parquet", part_size="500MB") # Once your data is converted to a Dataset object, it can be converted to a `dask_cudf.DataFrame` using the `to_ddf` method. The wonderful thing about this DataFrame object, is that you are free to operate on it using a familiar CuDF/Pandas API. ds.to_ddf().head() # Note that the output of a Dataset (a `ddf`) can be used to initialize a new Dataset. This means we can use Dask-CuDF to perform complex ETL on our data before we process it in a Workflow. For example, although NVTabular does not support global shuffling transformations (yet), these operations **can** be performed before (and/or after) a Workflow. The catch here is that operations requiring the global movement of data between partitions can require more device memory than available. # Example of global shuffling outside an NVT Workflow ddf = ds.to_ddf().shuffle("id", ignore_index=True) ds = nvt.Dataset(ddf) ds.to_ddf() # Since global shuffling operations can lead to significant GPU-memory pressure, we will start with a simpler Dataset definition for this example. 
# +
# Drop the exploratory Dataset/ddf references before building the Dataset we
# will actually process, so their memory can be reclaimed.
del ds
del ddf

dataset = nvt.Dataset(demo_dataset_path, engine="parquet", part_mem_fraction=0.1)
# -

# Note that the default value for part_mem_fraction (0.125) is usually safe, but we will use a slightly smaller partition size for this example to be conservative.
#
# **Note**: If you have a system with limited device and/or host memory (less than 16GB on device, and less than 32GB on host), you may need to use an even smaller `part_mem_fraction` here.
#
# ### Step 5: Define our NVTabular Workflow
#
# Now that we have our Dask cluster up and running, we can use the NVTabular API as usual. For multi-GPU execution, the only requirement is that you specify a `client` when you initialize the NVTabular Workflow.
#
# **IMPORTANT**: You must pass `client` to `nvt.Workflow` to enable Multi-GPU execution!

# +
cat_features = ["name", "id"] >> ops.Categorify(
    out_path=demo_output_path,  # Path to write unique values used for encoding
)
cont_features = ["x", "y"] >> ops.Normalize()
workflow = nvt.Workflow(cat_features + cont_features + ["label", "timestamp"], client=client)
# -

# ### Step 6: Apply our Workflow

# %%time

shuffle = Shuffle.PER_WORKER  # Shuffle algorithm
out_files_per_proc = 8  # Number of output files per worker
workflow.fit_transform(dataset).to_parquet(
    output_path=os.path.join(demo_output_path,"processed"),
    shuffle=shuffle,
    out_files_per_proc=out_files_per_proc,
)

# For this (modestly sized) toy dataset, we get a great performance boost when we move from 1 to 2 V100 GPUs, and the workflow scales reasonably well to a full [DGX-1 system](https://www.nvidia.com/en-gb/data-center/dgx-systems/dgx-1/). Although the 8-GPU performance reflects a parallel efficiency of only 50% or so, higher efficiencies can be expected for larger datasets. In fact, recent [TPCx-BB benchmarking studies](https://medium.com/rapids-ai/no-more-waiting-interactive-big-data-now-32f7b903cf41) have clearly demonstrated that NVTabular's parallel backend, Dask-CuDF, can effectively scale to many V100 or A100-based nodes (utilizing more than 100 GPUs).
#
# **Note on Communication**:
# It is important to recognize that multi-GPU and multi-node scaling is typically much more successful with UCX support (enabling both NVLink and Infiniband communication).
#
# **Example Results**:
#
# **1 x 32GB V100 GPU**
# ```
# CPU times: user 5.74 s, sys: 3.87 s, total: 9.62 s
# Wall time: 50.9 s
# ```
#
# **2 x 32GB V100 GPUs**
# ```
# CPU times: user 6.64 s, sys: 3.53 s, total: 10.2 s
# Wall time: 24.3 s
# ```
#
# **8 x 32GB V100 GPUs**
# ```
# CPU times: user 6.84 s, sys: 3.73 s, total: 10.6 s
# Wall time: 13.5 s
# ```

# Now that we are done executing our Workflow, we can check the output data to confirm that everything is looking good.

dask_cudf.read_parquet(os.path.join(demo_output_path,"processed")).head()

# ### Step 7: (Optional) Follow-up Processing/Writing with dask_cudf
#
# Instead of using to_parquet to persist your processed dataset to disk, it is also possible to get a dask dataframe from the transformed dataset and perform follow-up operations with the Dask-CuDF API. For example, if you want to convert the entire dataset into a `groupby` aggregation, you could do something like the following.

# %%time

ddf = workflow.transform(dataset).to_ddf()
ddf = ddf.groupby(["name"]).max()  # Optional follow-up processing
ddf.to_parquet(os.path.join(demo_output_path, "dask_output"), write_index=False)

# As always, we can use either `nvt.Dataset` or `dask_cudf` directly to read back our data.
dask_cudf.read_parquet(os.path.join(demo_output_path, "dask_output")).compute()

# ## Notes on Shuffling
#
# NVTabular currently supports two shuffling options when writing output to disk:
#
# - `nvt.io.Shuffle.PER_PARTITION`
# - `nvt.io.Shuffle.PER_WORKER`
#
# For both these cases, the partitions of the underlying dataset/ddf are randomly ordered before any processing is performed. If `PER_PARTITION` is specified, each worker/process will also shuffle the rows within each partition before splitting and appending the data to a number (`out_files_per_proc`) of output files. Output files are distinctly mapped to each worker process. If `PER_WORKER` is specified, each worker will follow the same procedure as `PER_PARTITION`, but will re-shuffle each file after all data is persisted. This results in a full shuffle of the data processed by each worker. To improve performance, this option currently uses host-memory `BytesIO` objects for the intermediate persist stage. The general `PER_WORKER` algorithm is illustrated here:
#
# ![image.png](../../images/per_worker_shuffle.png)
#
examples/multi-gpu-toy-example/multi-gpu_dask.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # Import the modules import numpy as np import pandas as pd from pathlib import Path from sklearn.metrics import balanced_accuracy_score from sklearn.metrics import confusion_matrix from imblearn.metrics import classification_report_imbalanced import warnings warnings.filterwarnings('ignore') # - # --- # Read the CSV file from the Resources folder into a Pandas DataFrame lending_df = pd.read_csv("Resources/lending_data.csv") # Review the DataFrame lending_df.head() # + # Separate the y variable, the labels y = lending_df['loan_status'] # Separate the X variable, the features X = lending_df.loc[:, lending_df.columns != 'loan_status'] # - # Review the y variable Series y.head() # Review the X variable DataFrame X.head() # Check the balance of our target values y.value_counts() # + # Import the train_test_learn module from sklearn.model_selection import train_test_split # Split the data using train_test_split # Assign a random_state of 1 to the function X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 1) # - # --- # + # Import the LogisticRegression module from SKLearn from sklearn.linear_model import LogisticRegression # Instantiate the Logistic Regression model # Assign a random_state parameter of 1 to the model logistic_regression_model = LogisticRegression(random_state = 1) # Fit the model using training data logistic_regression_model.fit(X_train, y_train) # - # Make a prediction using the testing data predictions = logistic_regression_model.predict(X_test) # Print the balanced_accuracy score of the model balanced_accuracy_score(y_test, predictions) # Generate a confusion matrix for the model confusion_matrix(y_test, predictions) # + tags=[] # Print the classification report for the model 
print(classification_report_imbalanced(y_test, predictions))
# -

# **Question:** How well does the logistic regression model predict both the `0` (healthy loan) and `1` (high-risk loan) labels?
#
# **Answer:** Decently well, about 95% which is good but not incredible.

# ---

# +
# Import the RandomOverSampler module from imbalanced-learn
from imblearn.over_sampling import RandomOverSampler

# Instantiate the random oversampler model
# # Assign a random_state parameter of 1 to the model
random_oversampler = RandomOverSampler(random_state=1)

# Fit the original training data to the random_oversampler model
# (duplicates minority-class rows until both classes are equally represented)
X_resampled, y_resampled = random_oversampler.fit_resample(X_train, y_train)
# -

# Count the distinct values of the resampled labels data
y_resampled.value_counts()

# +
# Instantiate the Logistic Regression model
# Assign a random_state parameter of 1 to the model
model = LogisticRegression(random_state=1)

# Fit the model using the resampled training data
model.fit(X_resampled, y_resampled)

# Make a prediction using the testing data
y_pred = model.predict(X_test)
# -

# Print the balanced_accuracy score of the model
# (the original notebook evaluated this twice in a row; the redundant
# duplicate call has been removed)
balanced_accuracy_score(y_test, y_pred)

# Generate a confusion matrix for the model
print(confusion_matrix(y_test, y_pred))

# Print the classification report for the model
print(classification_report_imbalanced(y_test, y_pred))

# **Question:** How well does the logistic regression model, fit with oversampled data, predict both the `0` (healthy loan) and `1` (high-risk loan) labels?
#
# **Answer:** Very well, about 99%.
credit_risk_resampling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 4.3 希腊字母 # # 我们在初等数学中便已经学习到了一些常用的希腊字母,例如最常见的$\pi$(对应于`\pi`),圆周率$\pi$约等于3.14,圆的面积为$\pi r^{2}$、周长为$2\pi r$。在几何学中,我们习惯用各种希腊字母表示度数,如$\alpha$(对应于`\alpha`)、$\beta$(对应于`\beta`)、$\theta$(对应于`\theta`)、$\phi$(对应于`\phi`)、$\psi$(对应于`\psi`)、$\varphi$(对应于`\varphi`),使用希腊字母既方便,也容易区分于$x,y,z$等其他变量。 # # 实际上,这些希腊字母也可以用来作为变量,在概率论与数理统计中常常出现的变量就包括: # # - 正态分布中的$\mu$(命令为`\mu`)、$\sigma$(命令为`\sigma`); # - 泊松分布中的$\lambda$(命令为`\lambda`); # - 通常表示自由度的希腊字母为$\nu$(命令为`\nu`)。 # # 另外,在不等式中经常用到的希腊字母有$\delta$(命令为`\delta`)和$\epsilon$(命令为`\epsilon`)。除了这些,希腊字母还有$\gamma$(命令为`\gamma`)、$\eta$(命令为`\eta`)、$\kappa$(命令为`\kappa`)、$\rho$(命令为`\rho`)、$\tau$(命令为`\tau`)和$\omega$(命令为`\omega`)。当然,前面提到的这些希腊字母在用途上并没有严格的界定,很多时候,我们书写数学表达式时可以根据需要选用适当的希腊字母。 # # 【**例4-33**】书写椭圆$\frac{x^{2}}{a^{2}}+\frac{y^{2}}{b^{2}}=1$的面积公式$S=\pi ab$。 # # ```tex # \documentclass[12pt]{article} # \begin{document} # # $$S=\pi ab$$ % 椭圆面积公式 # # \end{document} # ``` # # 【**例4-34**】书写不等式$a^{\alpha}b^{\beta}\cdots k^{\kappa}l^{\lambda}\leq a\alpha+b\beta+\cdots+k\kappa+l\lambda$。 # # ```tex # \documentclass[12pt]{article} # \begin{document} # # $$a^{\alpha}b^{\beta}\cdots k^{\kappa}l^{\lambda}\leq a\alpha+b\beta+\cdots+k\kappa+l\lambda$$ # # \end{document} # ``` # # 【**例4-35**】书写不等式 # # $$\phi\left(\frac{x_{1}+x_{2}+\cdots+x_{n}}{n}\right)\leq\frac{\phi\left(x_{1}\right)+\phi\left(x_{2}\right)+\cdots+\phi\left(x_{n}\right)}{n}$$ # # ```tex # \documentclass[12pt]{article} # \begin{document} # # $$\phi\left(\frac{x_{1}+x_{2}+\cdots+x_{n}}{n}\right)\leq\frac{\phi\left(x_{1}\right)+\phi\left(x_{2}\right)+\cdots+\phi\left(x_{n}\right)}{n}$$ # # \end{document} # ``` # # 与英文字母类似的是,有些希腊字母不但有小写字母,还有大写字母,具体如下: # # - 命令`\Gamma`对应于希腊字母$\Gamma$,命令`\varGamma`对应于$\varGamma$; # - 命令`\Delta`对应于希腊字母$\Delta$,命令`\varDelta`对应于$\varDelta$; # - 
命令`\Theta`对应于希腊字母$\Theta$,命令`\varTheta`对应于$\varTheta$; # - 命令`\Lambda`对应于希腊字母$\Lambda$,命令`\varLambda`对应于$\varLambda$; # - 命令`\Pi`对应于希腊字母$\Pi$,命令`\varPi`对应于$\varPi$; # - 命令`\Sigma`对应于希腊字母$\Sigma$,命令`\varSigma`对应于$\varSigma$; # - 命令`\Phi`对应于希腊字母$\Phi$,命令`\varPhi`对应于$\varPhi$; # - 命令`\Omega`对应于希腊字母$\Omega$,命令`\varOmega`对应于$\varOmega$。 # # 从这些大写希腊字母中可以看到:大写希腊字母的命令是将小写希腊字母的命令首字母进行大写,但这些大写希腊字母与小写希腊字母的区别却不仅仅是尺寸不同;当大写希腊字母作为变量时,可以采用斜体字。 # # 【**例4-36**】书写$\Delta x+\Delta y$和$(i,j,k)\in\Omega$。 # # ```tex # \documentclass[12pt]{article} # \begin{document} # # $$\Delta x+\Delta y$$ # $$(i,j,k)\in\Omega$$ # # \end{document} # ``` # # 【回放】[**4.2 常用运算符号**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-4/section2.ipynb) # # 【继续】[**4.4 微积分**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-4/section4.ipynb) # ### License # # <div class="alert alert-block alert-danger"> # <b>This work is released under the MIT license.</b> # </div>
chapter-4/section3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Brymer-Meneses/PyTorch-Basics/blob/master/PyTorch_CatsVsDogs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="uyzFImKcavmE" # # Prerequisites # + id="p40PJHgxaDVU" # !wget https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip # + id="GWB4zzT2afgi" from zipfile import ZipFile # + id="Lr9aqUSYaeVb" zipPath = '/content/kagglecatsanddogs_3367a.zip' zip = ZipFile(zipPath, 'r') zip.extractall() # + id="dxuMviqdbcGy" import tensorflow as tf # + id="P7lyxrv9bYUo" outputId="4f8eb964-5ca8-4de6-8760-7961b454dbbc" colab={"base_uri": "https://localhost:8080/", "height": 35} import os num_skipped = 0 for folder_name in ("Cat", "Dog"): folder_path = os.path.join("PetImages", folder_name) for fname in os.listdir(folder_path): fpath = os.path.join(folder_path, fname) try: fobj = open(fpath, "rb") is_jfif = tf.compat.as_bytes("JFIF") in fobj.peek(10) finally: fobj.close() if not is_jfif: num_skipped += 1 # Delete corrupted image os.remove(fpath) print("Deleted %d images" % num_skipped) # + [markdown] id="gL0NHNRwa4wy" # # Modules # + id="JVf4hE8Oap3T" from torchvision.datasets import ImageFolder from sklearn.model_selection import train_test_split from torch.utils.data import Dataset, DataLoader from PIL import Image import matplotlib.pyplot as plt from torchvision import transforms # + id="clVTZthRb1f_" path = '/content/PetImages' dataset = ImageFolder(path) trainData, testData, trainLabel, testLabel = train_test_split(dataset.imgs, dataset.targets, test_size =0.2, random_state = 0) transform = transforms.Compose([ 
transforms.Resize((200,200)), transforms.ToTensor(), transforms.Normalize([0.5]*3, [0.5]*3) ]) # + id="EM2-aXLUb9qK" class ImageLoader(Dataset): def __init__(self, dataset, transform=None): self.dataset = dataset self.transform = transform def __getitem__(self, item): image = Image.open(self.dataset[item][0]) if transform is not None: return self.transform(image), self.dataset[item][1] return image, self.dataset[item][1] def __len__(self): return len(self.dataset) def checkChannel(self, dataset): datasetRGB = [] for index in range(len(dataset)): if Image.open(dataset[index][0]).getbands() == ('R', 'G', 'B'): datasetRGB.append(dataset[index]) return datasetRGB def getResizedImage(self, item): image = Image.open(self.dataset[item][0]) _, _, width, height = image.getbbox() factor = (0,0, width, width) if width > height else (0,0,height, height) return image.crop(factor) imageLoader = ImageLoader(trainData, transform) dataLoader = DataLoader(imageLoader, batch_size = 10, shuffle = True) data= iter(dataLoader) d = next(data) print(d) # + id="EicaR5HXtayQ" from torch.nn import Module, Conv2d, Linear, Flatten, MaxPool2d from torch.nn.functional import relu # + id="kcZQCb0bsvuP" class Network(Module): def __init__(self): super(Network, self).__init__() self.conv_1 = Conv2d(in_channels = 3, out_channels = 64, kernel_size = 5) self.conv_2 = Conv2d(in_channels = 64, out_channels = 128, kernel_size = 3) self.conv_3 = Conv2d(in_channels = 128, out_channels = 256, kernel_size = 3) self.maxPooling = MaxPool2d(kernel_size =4) self.fc1 = Linear(in_features = 256, out_features = 128) self.fc2 = Linear(in_features = 128, out_features =64) self.out = Linear(in_features = 64, out_features = 2) def forward(self): x = self.conv_1(x) x = self.maxPooling(x) x = relu(x) x = self.conv_2(x) x = self.maxPooling(x) x = relu(x) x = self.conv_3(x) x = self.maxPooling(x) x = relu(x) x = self.fc1(x) x = relu(x) x = self.fc2(x) x = relu(x) return self.out(x)
PyTorch_CatsVsDogs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rafi007akhtar/coursera-tensorflow/blob/master/Course_1_Part_6_Lesson_2_Notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="R6gHiH-I7uFa" colab_type="text"
# # Improving Computer Vision Accuracy using Convolutions
#
# In the previous lessons you saw how to do fashion recognition using a Deep Neural Network (DNN) containing three layers -- the input layer (in the shape of the data), the output layer (in the shape of the desired output) and a hidden layer. You experimented with the impact of different sizes of hidden layer, number of training epochs etc. on the final accuracy.
#
# For convenience, here's the entire code again. Run it and take a note of the test accuracy that is printed out at the end.
# + id="xcsRtq9OLorS" colab_type="code" outputId="bcffef9c-1cdf-40b4-b6b3-70aae27025b3" colab={"base_uri": "https://localhost:8080/", "height": 411} import tensorflow as tf mnist = tf.keras.datasets.fashion_mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images / 255.0 test_images=test_images / 255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=5) test_loss = model.evaluate(test_images, test_labels) # + [markdown] id="zldEXSsF8Noz" colab_type="text" # Your accuracy is probably about 89% on training and 87% on validation...not bad...But how do you make that even better? One way is to use something called Convolutions. I'm not going to details on Convolutions here, but the ultimate concept is that they narrow down the content of the image to focus on specific, distinct, details. # # If you've ever done image processing using a filter (like this: https://en.wikipedia.org/wiki/Kernel_(image_processing)) then convolutions will look very familiar. # # In short, you take an array (usually 3x3 or 5x5) and pass it over the image. By changing the underlying pixels based on the formula within that matrix, you can do things like edge detection. So, for example, if you look at the above link, you'll see a 3x3 that is defined for edge detection where the middle cell is 8, and all of its neighbors are -1. In this case, for each pixel, you would multiply its value by 8, then subtract the value of each neighbor. Do this for every pixel, and you'll end up with a new image that has the edges enhanced. 
# # This is perfect for computer vision, because often it's features that can get highlighted like this that distinguish one item for another, and the amount of information needed is then much less...because you'll just train on the highlighted features. # # That's the concept of Convolutional Neural Networks. Add some layers to do convolution before you have the dense layers, and then the information going to the dense layers is more focussed, and possibly more accurate. # # Run the below code -- this is the same neural network as earlier, but this time with Convolutional layers added first. It will take longer, but look at the impact on the accuracy: # + id="C0tFgT1MMKi6" colab_type="code" outputId="a84cea9d-305a-49ee-d7c7-6ac82a9e12ae" colab={"base_uri": "https://localhost:8080/", "height": 785} import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.fashion_mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images.reshape(60000, 28, 28, 1) training_images=training_images / 255.0 test_images = test_images.reshape(10000, 28, 28, 1) test_images=test_images/255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() model.fit(training_images, training_labels, epochs=5) test_loss = model.evaluate(test_images, test_labels) # + [markdown] id="uRLfZ0jt-fQI" colab_type="text" # It's likely gone up to about 93% on the training data and 91% on the validation data. # # That's significant, and a step in the right direction! 
# # Try running it for more epochs -- say about 20, and explore the results! But while the results might seem really good, the validation results may actually go down, due to something called 'overfitting' which will be discussed later. # # (In a nutshell, 'overfitting' occurs when the network learns the data from the training set really well, but it's too specialised to only that data, and as a result is less effective at seeing *other* data. For example, if all your life you only saw red shoes, then when you see a red shoe you would be very good at identifying it, but blue suade shoes might confuse you...and you know you should never mess with my blue suede shoes.) # # Then, look at the code again, and see, step by step how the Convolutions were built: # + [markdown] id="RaLX5cgI_JDb" colab_type="text" # Step 1 is to gather the data. You'll notice that there's a bit of a change here in that the training data needed to be reshaped. That's because the first convolution expects a single tensor containing everything, so instead of 60,000 28x28x1 items in a list, we have a single 4D list that is 60,000x28x28x1, and the same for the test images. If you don't do this, you'll get an error when training as the Convolutions do not recognize the shape. # # # # ``` # import tensorflow as tf # mnist = tf.keras.datasets.fashion_mnist # (training_images, training_labels), (test_images, test_labels) = mnist.load_data() # training_images=training_images.reshape(60000, 28, 28, 1) # training_images=training_images / 255.0 # test_images = test_images.reshape(10000, 28, 28, 1) # test_images=test_images/255.0 # ``` # # # + [markdown] id="SS_W_INc_kJQ" colab_type="text" # Next is to define your model. Now instead of the input layer at the top, you're going to add a Convolution. The parameters are: # # 1. The number of convolutions you want to generate. Purely arbitrary, but good to start with something in the order of 32 # 2. The size of the Convolution, in this case a 3x3 grid # 3. 
The activation function to use -- in this case we'll use relu, which you might recall is the equivalent of returning x when x>0, else returning 0 # 4. In the first layer, the shape of the input data. # # You'll follow the Convolution with a MaxPooling layer which is then designed to compress the image, while maintaining the content of the features that were highlighted by the convlution. By specifying (2,2) for the MaxPooling, the effect is to quarter the size of the image. Without going into too much detail here, the idea is that it creates a 2x2 array of pixels, and picks the biggest one, thus turning 4 pixels into 1. It repeats this across the image, and in so doing halves the number of horizontal, and halves the number of vertical pixels, effectively reducing the image by 25%. # # You can call model.summary() to see the size and shape of the network, and you'll notice that after every MaxPooling layer, the image size is reduced in this way. # # # ``` # model = tf.keras.models.Sequential([ # tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)), # tf.keras.layers.MaxPooling2D(2, 2), # ``` # # # + [markdown] id="RMorM6daADjA" colab_type="text" # Add another convolution # # # # ``` # tf.keras.layers.Conv2D(64, (3,3), activation='relu'), # tf.keras.layers.MaxPooling2D(2,2) # ``` # # # + [markdown] colab_type="text" id="b1-x-kZF4_tC" # Now flatten the output. After this you'll just have the same DNN structure as the non convolutional version # # ``` # tf.keras.layers.Flatten(), # ``` # # # + [markdown] id="qPtqR23uASjX" colab_type="text" # The same 128 dense layers, and 10 output layers as in the pre-convolution example: # # # # ``` # tf.keras.layers.Dense(128, activation='relu'), # tf.keras.layers.Dense(10, activation='softmax') # ]) # ``` # # # + [markdown] id="C0GSsjUhAaSj" colab_type="text" # Now compile the model, call the fit method to do the training, and evaluate the loss and accuracy from the test set. 
# # # # ``` # model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # model.fit(training_images, training_labels, epochs=5) # test_loss, test_acc = model.evaluate(test_images, test_labels) # print(test_acc) # ``` # # # # + [markdown] id="IXx_LX3SAlFs" colab_type="text" # # Visualizing the Convolutions and Pooling # # This code will show us the convolutions graphically. The print (test_labels[;100]) shows us the first 100 labels in the test set, and you can see that the ones at index 0, index 23 and index 28 are all the same value (9). They're all shoes. Let's take a look at the result of running the convolution on each, and you'll begin to see common features between them emerge. Now, when the DNN is training on that data, it's working with a lot less, and it's perhaps finding a commonality between shoes based on this convolution/pooling combination. # + id="f-6nX4QsOku6" colab_type="code" outputId="6b85ed93-6868-4c2c-b066-0808d6536878" colab={"base_uri": "https://localhost:8080/", "height": 68} print(test_labels[:100]) # + id="9FGsHhv6JvDx" colab_type="code" outputId="e144d639-cebc-4d0a-9c7a-8571f70d6159" colab={"base_uri": "https://localhost:8080/", "height": 349} import matplotlib.pyplot as plt f, axarr = plt.subplots(3,4) FIRST_IMAGE=0 SECOND_IMAGE=7 THIRD_IMAGE=26 CONVOLUTION_NUMBER = 1 from tensorflow.keras import models layer_outputs = [layer.output for layer in model.layers] activation_model = tf.keras.models.Model(inputs = model.input, outputs = layer_outputs) for x in range(0,4): f1 = activation_model.predict(test_images[FIRST_IMAGE].reshape(1, 28, 28, 1))[x] axarr[0,x].imshow(f1[0, : , :, CONVOLUTION_NUMBER], cmap='inferno') axarr[0,x].grid(False) f2 = activation_model.predict(test_images[SECOND_IMAGE].reshape(1, 28, 28, 1))[x] axarr[1,x].imshow(f2[0, : , :, CONVOLUTION_NUMBER], cmap='inferno') axarr[1,x].grid(False) f3 = activation_model.predict(test_images[THIRD_IMAGE].reshape(1, 28, 28, 1))[x] 
axarr[2,x].imshow(f3[0, : , :, CONVOLUTION_NUMBER], cmap='inferno') axarr[2,x].grid(False) # + [markdown] id="8KVPZqgHo5Ux" colab_type="text" # EXERCISES # # 1. Try editing the convolutions. Change the 32s to either 16 or 64. What impact will this have on accuracy and/or training time. # # 2. Remove the final Convolution. What impact will this have on accuracy or training time? # # 3. How about adding more Convolutions? What impact do you think this will have? Experiment with it. # # 4. Remove all Convolutions but the first. What impact do you think this will have? Experiment with it. # # 5. In the previous lesson you implemented a callback to check on the loss function and to cancel training once it hit a certain amount. See if you can implement that here! # + id="ZpYRidBXpBPM" colab_type="code" outputId="2bb866fe-3e84-4417-f2f2-ea851ab8ba09" colab={"base_uri": "https://localhost:8080/", "height": 513} import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images.reshape(60000, 28, 28, 1) training_images=training_images / 255.0 test_images = test_images.reshape(10000, 28, 28, 1) test_images=test_images/255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=10) test_loss, test_acc = model.evaluate(test_images, test_labels) print(test_acc) # + [markdown] id="arrjp3ghRB51" colab_type="text" # ------------------------------------------------------ # + [markdown] id="LjTVsS1WDfuz" colab_type="text" # SOLUTIONS # # + [markdown] id="7DINhpcvQLMF" colab_type="text" # Question 
1: # >Try editing the convolutions. Change the 32s to either 16 or 64. What impact will this have on accuracy and/or training time. # # Therefore: # Changed the convolutions to 64. # # Results: # - Change in accuracy # # <table> # <thead> # <th> </th> # <th> previous </th> # <th> new </th> # </thead> # <tbody> # <tr> # <th> train </th> # <td> 0.9987 </td> # <td> 0.9983 </td> # </tr> # <tr> # <th> test </th> # <td> 0.9847 </td> # <td> 0.9875 </td> # </tr> # </tbody> # </table> # # - Time taken per iteration is about 2 seconds more for training than that of 32 convolutions. # # As can be seen below: # # + id="9e2776FXEbu1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="e3583686-5d83-48b4-e1c5-8deac00793fb" model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=10) test_loss, test_acc = model.evaluate(test_images, test_labels) print(test_acc) # + [markdown] id="neT2ulGWQ-Re" colab_type="text" # ------------------------------------- # + [markdown] id="8CQXLRcqHLsT" colab_type="text" # Question 2: # > Remove the final Convolution. What impact will this have on accuracy or training time? # # There is only 1 convolution. Removing it would change it back to a DNN. Hence, skipping # + [markdown] id="SiF2SYmNRG2Y" colab_type="text" # ---------------------------------------------------------- # + [markdown] id="Fp26R8trHS9i" colab_type="text" # Question 3: # > How about adding more Convolutions? What impact do you think this will have? Experiment with it. # # Therefore: Adding two more convolution layers below, each having 32 convolutions. 
# # Results: # - Change in accuracy. (Previous column has entries of original results) # # <table> # <thead> # <th> </th> # <th> previous </th> # <th> new </th> # </thead> # <tbody> # <tr> # <th> train </th> # <td> 0.9987 </td> # <td> 0.9983 </td> # </tr> # <tr> # <th> test </th> # <td> 0.9925 </td> # <td> 0.9848 </td> # </tr> # </tbody> # </table> # # - Time taken per iteration is about 1 second more for training as compared to original time training. # # As can be seen below: # + id="ZIIl70eDEokw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="4a26e242-17a1-400e-988a-7c58ca271588" model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=10) test_loss, test_acc = model.evaluate(test_images, test_labels) print(test_acc) # + [markdown] id="24NqOoMgRJnj" colab_type="text" # --------------------------------------------------- # + [markdown] id="2qqyeBVdO7EW" colab_type="text" # Question 4: # > Remove all Convolutions but the first. What impact do you think this will have? Experiment with it. # # This will be equivalent to #1, hence skipping. 
# + [markdown] id="22ixjAymRVfc" colab_type="text"
# -----------------------------------------------------------

# + [markdown] id="xZmtOZlJJQo7" colab_type="text"
# Question 5:
# > In the previous lesson you implemented a callback to check on the loss function and to cancel training once it hit a certain amount. See if you can implement that here!
#
# Therefore: Added a callback function that stops training once 99.7% accuracy is reached.
#
# Other changes introduced:
# - increased the epochs to 50, so as to reach that accuracy in between.
# - added two more layers and increased the convolutions in each layer to 64.
#
# Results:
# - accuracy reached was 99.75% for training and 98.66% for testing.
# - the training stopped in the 15th epoch.
# - time to train per epoch was around 16s or 17s.
#
# As can be seen below.

# + id="WN8_sT4THwJ3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 595} outputId="5f3677be-e41a-4258-e84f-0f9c7871c584"
class myCallback(tf.keras.callbacks.Callback):
    """Stop training as soon as the training accuracy exceeds 99.7%."""

    def on_epoch_end(self, epoch, logs=None):
        # Keras passes the logs dict per epoch; avoid a shared mutable default.
        logs = logs or {}
        # TF 2.x reports the metric under 'accuracy'; TF 1.x used 'acc'.
        # Checking both (and guarding against None) avoids a TypeError when
        # the key is missing, which the original `logs.get('acc') > 0.997`
        # would raise on modern Keras.
        acc = logs.get('accuracy', logs.get('acc'))
        if acc is not None and acc > 0.997:
            print("\nReached 99.7% accuracy so cancelling training!")
            self.model.stop_training = True


callbacks = myCallback()

model = tf.keras.models.Sequential([
    # Only the first layer needs input_shape; it is ignored (and misleading)
    # on later layers, so it is specified just once here.
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# epochs is set high; the callback halts training once the accuracy target is met.
model.fit(training_images, training_labels, epochs=50, callbacks=[callbacks])
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(test_acc)

# + id="BZXXKcEyJmt0" colab_type="code" colab={}
Course_1_Part_6_Lesson_2_Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from matplotlib import pyplot as plt import numpy as np from qiskit.quantum_info.operators import Operator from qiskit_dynamics import solve_lmde from qiskit_dynamics.signals import Signal from qiskit_dynamics.models import HamiltonianModel # # In this demo # # We show how `qiskit_dynamics` can be used with the `jax` backend to perform transformations like just-in-time compiling and gradient computation. # + from qiskit_dynamics import dispatch from qiskit_dynamics.dispatch import Array from jax import config import jax jit = dispatch.wrap(jax.jit, decorator=True) grad = dispatch.wrap(jax.grad, decorator=True) config.update("jax_enable_x64", True) dispatch.set_default_backend('jax') def gaussian(amp, sig, t0, t): # Note: to enforce using the jax backend for the computation of the gaussian, wrap the # input to the numpy function in an Array amp = Array(amp) sig = Array(sig) t0 = Array(t0) t = Array(t) return amp * np.exp( -(t - t0)**2 / (2 * sig**2)) # - # # 1. `jit`: Just-in-time compiling a parameterized simulation # # Here we show we can speed up a parameterized simulation through just-in-time compiling. This is useful in situations such as: # - Simulating a variety of pulses in a parameterized family # - Simulating a system over a range of model parameters # # One way to do this is the following. First, construct a model with just operators. # + ##################### # construct operators ##################### r = 0.5 w = 1. 
X = Operator.from_label('X') Y = Operator.from_label('Y') Z = Operator.from_label('Z') operators = [2 * np.pi * w * Z/2, 2 * np.pi * r * X/2] ##################################### # construct model with just operators ##################################### hamiltonian = HamiltonianModel(operators=operators) # - # Next, define a function which takes the simulation input parameters and outputs the results. In this case, we parameterize a gaussian drive pulse, set it into the model, then solve and return the results. # # *Important note*: # - When applying `jax` transformations, it is important to ensure the function is _pure_, i.e. it has no side effects. As we need to set the signals into the model, to keep the function pure we first make a copy. # # Additional note: # - The `solve` function will automatically use the `scipy` solver. To make the full function jax compatible, we must specify a jax solver. Here `jax_odeint` is the equivalent of the `DOP853` algorithm in `scipy`. # + def sim_function(amp, sig): # define signals from parameters t0 = 3.5*sig T = 7*sig gaussian_envelope = lambda t: gaussian(amp, sig, t0, t) signals = [1., Signal(envelope=gaussian_envelope, carrier_freq=w)] # make copy and set signals ham_copy = hamiltonian.copy() ham_copy.signals = signals ham_copy.frame = ham_copy.get_drift() # simulate and return results # setting user_frame tells solve that states should be specified and returned in the frame # of the drift results = solve_lmde(ham_copy, y0=np.array([0., 1.], dtype=complex), t_span=[0,T], method='jax_odeint', atol=1e-10, rtol=1e-10) return results.y[-1] # default parameters for doing a pi pulse amp = 1. sig = 0.399128/r # - # Next, compile the function by calling `jax.jit`. fast_sim = jit(sim_function) # Run the simulation once. # # Note: the function is only compiled when it is called for the first time. Hence, the first call is slower due to this overhead. 
# %time fast_sim(2*amp, 2*sig).block_until_ready() # Call a second time: # %time fast_sim(amp, sig).block_until_ready() # For speed comparison, the same simulation using the `numpy` backend and the corresponding `scipy` solver takes about `200ms`. Hence, the compiled simulation is almost 2 orders of magnitude faster. # # 2. Automatic differentiation # # In addition to compilation, `jax` has a variety of function transformations for taking derivatives of functions. The most basic such function is `jax.grad`, which transforms a real scalar-valued function into one that computes its gradient. # # To demonstrate this, we modify the simulation function into one that outputs the excited state population (to convert it into something with a real/scalar output). # + def excited_state_pop(amp): yf = sim_function(amp, sig) return np.abs(Array(yf[0]))**2 excited_state_pop(amp) # - # Next, take the gradient. Here, we also compile the gradient. excited_state_grad = jit(grad(excited_state_pop)) # %time excited_state_grad(2 * amp).block_until_ready() # %time excited_state_grad(amp).block_until_ready() # Note: the derivative at the default `amp` value is near $0$ as the parameters used are near a $\pi$-pulse, and as such near a maximum of the population function. # # 3. State tracking # # Our wrapper for the `jax` integrators also supports the `t_eval` argument in the style of `scipy`'s `solve_ivp`. We can use this argument to view the state of the solution over an interval. # # Note: if `t_eval` is included, both `t_span` and `t_eval` will be handled with pure numpy, and so compilation will treat `t_span` as a static parameter. 
def sim_function(amp):
    """Simulate the gaussian-pulse experiment for a given amplitude.

    Returns the solution states evaluated on a grid of 100 evenly spaced
    times over the pulse duration (in the frame of the drift).
    """
    # Pulse geometry is fixed here; only the amplitude is a free parameter.
    pulse_width = 0.399128 / r
    center = 3.5 * pulse_width
    duration = 7 * pulse_width

    def envelope(t):
        return gaussian(amp, pulse_width, center, t)

    drive_signals = [1., Signal(envelope=envelope, carrier_freq=w)]

    # Work on a copy so the module-level model is left untouched, keeping
    # this function pure (required for jax transformations).
    model = hamiltonian.copy()
    model.signals = drive_signals
    model.frame = model.get_drift()

    # t_eval requests the state along a grid of 100 times; setting the frame
    # to the drift means states are specified/returned in the drift frame.
    sol = solve_lmde(model,
                     y0=np.array([0., 1.], dtype=complex),
                     t_span=[0, duration],
                     t_eval=np.linspace(0, duration, 100),
                     method='jax_odeint',
                     atol=1e-10,
                     rtol=1e-10)
    return sol.y.data


jitted = jit(sim_function)

# +
T = 7 * 0.399128 / r

# Sweep the pulse amplitude and plot the population difference
# |<0|psi>|^2 - |<1|psi>|^2 over the course of the pulse.
for amp in np.linspace(0, 1, 10):
    ys = jitted(amp)
    pop_diff = np.abs(ys[:, 0])**2 - np.abs(ys[:, 1])**2
    plt.plot(np.linspace(0, T, 100), np.real(pop_diff))
# -

# In the above oscillatory behaviour is due to the RWA not being entirely accurate in this system.
docs/tutorials/jax_backend_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3-azureml # kernelspec: # display_name: Python 3.6 - AzureML # language: python # name: python3-azureml # --- # # Image Classification # # The *Computer Vision* cognitive service provides useful pre-built models for working with images, but you'll often need to train your own model for computer vision. For example, suppose the Northwind Traders retail company wants to create an automated checkout system that identifies the grocery items customers want to buy based on an image taken by a camera at the checkout. To do this, you'll need to train a classification model that can classify the images to identify the item being purchased. # # ![A robot holding a clipboard, classifying pictures of an apple, a banana, and an orange](./images/image-classification.jpg) # # In Azure, you can use the ***Custom Vision*** cognitive service to train an image classification model based on existing images. There are two elements to creating an image classification solution. First, you must train a model to recognize different classes using existing images. Then, when the model is trained you must publish it as a service that can be consumed by applications. # # ## Create a Custom Vision resource # # To use the Custom Vision service, you need an Azure resource that you can use to *train* a model, and a resource with which you can *publish* it for applications to use. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. You can use the same Cognitive Services resource for each of these tasks, or you can use different resources (in the same region) for each task to manage costs separately. # # Use the following instructions to create a new **Custom Vision** resource. # # 1. 
In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription. # 2. Select the **&#65291;Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings: # - **Create options**: Both # - **Subscription**: *Your Azure subscription* # - **Resource group**: *Create a new resource group with a unique name* # - **Name**: *Enter a unique name* # - **Training location**: *Choose any available region* # - **Training pricing tier**: F0 # - **Prediction location**: *The same region as the training resource* # - **Prediction pricing tier**: F0 # # > **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one. # # 3. Wait for the resources to be created, and note that two Custom Vision resources are provisioned; one for training, and another for prediction. You can view these by navigating to the resource group where you created them. # # ## Create a Custom Vision project # # To train an object detection model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal. # # 1. Download and extract the training images from https://aka.ms/fruit-images. # 2. In another browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai). If prompted, sign in using the Microsoft account associated with your Azure subscription and agree to the terms of service. # 3. In the Custom Vision portal, create a new project with the following settings: # - **Name**: Grocery Checkout # - **Description**: Image classification for groceries # - **Resource**: *The Custom Vision resource you created previously* # - **Project Types**: Classification # - **Classification Types**: Multiclass (single tag per image) # - **Domains**: Food # 4. 
Click **\[+\] Add images**, and select all of the files in the **apple** folder you extracted previously. Then upload the image files, specifying the tag *apple*, like this: # # ![Upload apple with apple tag](./images/upload_apples.jpg) # # 5. Repeat the previous step to upload the images in the **banana** folder with the tag *banana*, and the images in the **orange** folder with the tag *orange*. # 6. Explore the images you have uploaded in the Custom Vision project - there should be 15 images of each class, like this: # # ![Tagged images of fruit - 15 apples, 15 bananas, and 15 oranges](./images/fruit.jpg) # # 7. In the Custom Vision project, above the images, click **Train** to train a classification model using the tagged images. Select the **Quick Training** option, and then wait for the training iteration to complete (this may take a minute or so). # 8. When the model iteration has been trained, review the *Precision*, *Recall*, and *AP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high. # # ## Test the model # # Before publishing this iteration of the model for applications to use, you should test it. # # 1. Above the performance metrics, click **Quick Test**. # 2. In the **Image URL** box, type `https://aka.ms/apple-image` and click &#10132; # 3. View the predictions returned by your model - the probability score for *apple* should be the highest, like this: # # ![An image with a class prediction of apple](./images/test-apple.jpg) # # 4. Close the **Quick Test** window. # # ## Publish and consume the image classification model # # Now you're ready to publish your trained model and use it from a client application. # # 9. Click **&#128504; Publish** to publish the trained model with the following settings: # - **Model name**: groceries # - **Prediction Resource**: *The prediction resource you created previously*. # 10. 
After publishing, click the *settings* (&#9881;) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id** and paste it into the code cell below (replacing **YOUR_PROJECT_ID**). # # ![Project ID in project settings](./images/cv_project_settings.jpg) # # > _**Note**: If you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource._ # # 11. At the top left of the **Project Settings** page, click the *Projects Gallery* (&#128065;) icon to return to the Custom Vision portal home page, where your project is now listed. # 12. On the Custom Vision portal home page, at the top right, click the *settings* (&#9881;) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (<u>not</u> the training resource) and copy its **Key** and **Endpoint** values to the code cell below, replacing **YOUR_KEY** and **YOUR_ENDPOINT**. # # ![Prediction resource key and endpoint in custom vision settings](./images/cv_settings.jpg) # # 13. Run the code cell below by clicking the **Run cell** (&#9655;) button (to the left of the cell) to set the variables to your project ID, key, and endpoint values. # + gather={"logged": 1599691949340} project_id = 'YOUR_PROJECT_ID' cv_key = 'YOUR_KEY' cv_endpoint = 'YOUR_ENDPOINT' model_name = 'groceries' # this must match the model name you set when publishing your model iteration (it's case-sensitive)! 
print('Ready to predict using model {} in project {}'.format(model_name, project_id)) # - # Now you can use your key and endpoint with a Custom Vision client to connect to your custom vision classification model. # # Run the following code cell to classifiy a selection of test images using your published model. # # > **Note**: Don't worry too much about the details of the code. It uses the Computer Vision SDK for Python to get a class prediction for each image in the /data/image-classification/test-fruit folder # + gather={"logged": 1599692327514} from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient from msrest.authentication import ApiKeyCredentials import matplotlib.pyplot as plt from PIL import Image import os # %matplotlib inline # Get the test images from the data/vision/test folder test_folder = os.path.join('data', 'image-classification', 'test-fruit') test_images = os.listdir(test_folder) # Create an instance of the prediction service credentials = ApiKeyCredentials(in_headers={"Prediction-key": cv_key}) custom_vision_client = CustomVisionPredictionClient(endpoint=cv_endpoint, credentials=credentials) # Create a figure to display the results fig = plt.figure(figsize=(16, 8)) # Get the images and show the predicted classes for each one print('Classifying images in {} ...'.format(test_folder)) for i in range(len(test_images)): # Open the image, and use the custom vision model to classify it image_contents = open(os.path.join(test_folder, test_images[i]), "rb") classification = custom_vision_client.classify_image(project_id, model_name, image_contents.read()) # The results include a prediction for each tag, in descending order of probability - get the first one prediction = classification.predictions[0].tag_name # Display the image with its predicted class img = Image.open(os.path.join(test_folder, test_images[i])) a=fig.add_subplot(len(test_images)/3, 3,i+1) a.axis('off') imgplot = plt.imshow(img) 
a.set_title(prediction) plt.show() # - # Hopefully, your image classification model has correctly identified the groceries in the images. # # ## Learn more # # The Custom Vision service offers more capabilities than we've explored in this exercise. For example, you can also use the Custom Vision service to create *object detection* models; which not only classify objects in images, but also identify *bounding boxes* that show the location of the object in the image. # # To learn more about the Custom Vision cognitive service, view the [Custom Vision documentation](https://docs.microsoft.com/azure/cognitive-services/custom-vision-service/home)
03b - Image Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Python Concepts for Data Science: List Comprehension

# ![List Comprehension](list_comprehension.png) Credit: [Buggy Programmer](http://buggyprogrammer.com)

# Lists are one of the most used data structures in Python as they allow us to store data in an easy-to-handle way. The elements of a list can be explicitly written out one after another by the programmer, but if the number of elements increases, this task can become quite tedious. One might need an automated list generation technique, hence the concept of list comprehension. List comprehension is a simple and powerful way of creating a list from any existing iterable object.
#
# In this post, I will first describe the different ways to create list comprehensions, with examples, then I will state when to avoid using it, as every concept, however powerful it might be, has its own limitations. I will also include the equivalent code using **for loops** to compare with list comprehension, the factor of comparison being the execution time.

# ## 1. Simple list comprehension

# ### General syntax of list comprehension

# A list comprehension is composed of the following items enclosed by square brackets:
# - An output
# - A collection
# - A condition and expression
#
# A simple list comprehension doesn't contain a condition. Let's create a simple list comprehension and its equivalent using a `for loop` and evaluate the runtime of each. For the execution time to be consistent, we will be dealing with long lists and only the first 10 elements of each list will be printed.

# +
import time

# number of iterations
n_iter = 100000

# list comprehension
start = time.time()
x = [i for i in range(n_iter)]
end = time.time()
print(f'x = {x[:10]}')
print(f'Execution time of list comprehension : {(end - start):.2f} seconds\n')

# For loop
start = time.time()
x = []
for i in range(n_iter):
    x.append(i)
end = time.time()
print(f'x = {x[:10]}')
print(f'Execution time of for loop : {(end - start):.2f} seconds')

# +
# list comprehension
start = time.time()
x = [i**2 for i in range(n_iter)]
end = time.time()
print(f'x = {x[:10]}')
print(f'Execution time of list comprehension : {(end - start):.2f} seconds\n')

# For loop
start = time.time()
x = []
for i in range(n_iter):
    x.append(i**2)
end = time.time()
print(f'x = {x[:10]}')
print(f'Execution time of for loop : {(end - start):.2f} seconds')
# -

# ## 2. List Comprehension with *If statement*

# ### a. Single *If statement*
#
# In the example below, we create a list of all the integers less than `n_iters` that are divisible by `3`.

# +
# list comprehension with if statement
start = time.time()
x = [i for i in range(n_iter) if i%3 == 0]
end = time.time()
print(f'x = {x[:10]}')
print(f'Execution time of list comprehension : {(end - start):.2f} seconds\n')

# Equivalent for loop
start = time.time()
x = []
for i in range(n_iter):
    if i%3 == 0:
        x.append(i)
end = time.time()
print(f'x = {x[:10]}')
print(f'Execution time of for loop : {(end - start):.2f} seconds')
# -

# ### b. Nested if statement
#
# Here the elements of the list must meet two different conditions at the same time. The elements of our list must not only be divisible by `3`, but also by `7`.

# +
# list comprehension with if statement
start = time.time()
x = [i for i in range(n_iter) if i%3 == 0 if i%7 == 0]
end = time.time()
print(f'x = {x[:10]}')
print(f'Execution time of list comprehension : {(end - start):.2f} seconds\n')

# Equivalent for loop
start = time.time()
x = []
for i in range(n_iter):
    if i%3 == 0:
        if i%7 == 0:
            x.append(i)
end = time.time()
print(f'x = {x[:10]}')
print(f'Execution time of for loop : {(end - start):.2f} seconds')
# -

# ## 3. List comprehension with *if...else* statement

# ### a. Single *if...else* statement
#
c = [x if x > 5 else x**2 for x in range(n_iter)]
c[:10]

# Equivalent to
c = []
for x in range(n_iter):
    if x > 5:
        c.append(x)
    else:
        c.append(x**2)
c[:10]

# ### b. Multiple *if...else* statement
#
# In the example below we create a list comprehension `divisors` with `3 if...else` statements that works as follows:
#
# for each element in `multiples` append
# - `'two'` if the element is divisible by `2`
# - `'three'` if the element is divisible by `3`
# - `'neither'` if the element is neither divisible by `2` nor by `3`
# - `'both'` if the element is divisible by both.

multiples = [0, 54, 86, 1, 5, 9, 2, 45, 6, 75, 23, 14, 5, 65, 81, 60]
divisors = ['two' if (x%2==0 and x%3!=0)
            else "three" if (x%3==0 and x%2!=0)
            else 'both' if (x%3==0 and x%2==0)
            else 'neither' for x in multiples]
print(divisors)

# +
# Equivalent
divisors = []
for x in multiples:
    if x%2 == 0 and x%3 != 0:
        divisors.append('two')
    elif x%3 == 0 and x%2 != 0:
        divisors.append("three")
    elif x%3 == 0 and x%2 == 0:
        divisors.append("both")
    else:
        divisors.append('neither')
print(divisors)
# -

# ## 4. List comprehension with nested *for loops*
#
# In the example below, we create a list `target` of tuples of elements from two source lists `src1` and `src2`.

src1 = [i for i in range(3)]
src2 = [j for j in range(2)]
target = [(x,y) for x in src1 for y in src2]
print(target)

# Equivalent
target = []
for x in src1:
    for y in src2:
        target.append((x,y))
print(target)
list_comprehension.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# Match disk / barred galaxies between IllustrisTNG (snapshot 99) and
# Illustris-1 (snapshot 135) and compare properties of the matched samples.

# +
import numpy as np
import h5py
import sys
sys.path.append('F:\Linux')
import illustris_python as il
from sklearn.neighbors import KDTree
import matplotlib.pyplot as plt


def find_id(all_id):
    # Reduce an (n, 5) table of candidate IDs to one ID per row.
    # Rows with no valid candidate stay -1; otherwise the LAST entry != -1
    # wins (there is no break, so later columns overwrite earlier ones).
    il1_id = np.zeros(len(all_id), dtype=int)
    for i in range(len(all_id)):
        il1_id[i] = -1
        for j in range(5):
            if all_id[i][j] != -1:
                il1_id[i] = all_id[i][j]
    return il1_id


def SubhaloMatch(tng_subID):
    '''
    Return the matched subhalo ID in Illustris-1 for a TNG subhalo ID.
    No match returns -1.

    Relies on module-level lookup tables being in scope; note that
    il1_HaloToSubdisk and il1_GroupSubhalo are defined elsewhere
    (not in this chunk) -- TODO confirm they are loaded before use.
    '''
    tng_GID = Groups_tng[tng_subID]
    il1_GID = tng_to_il1[tng_GID]
    if il1_GID == -1:
        return il1_GID
    try:
        # A single disk subhalo in the matched il1 group -> direct match.
        if len(il1_HaloToSubdisk[il1_GID]) == 1:
            return (il1_HaloToSubdisk[il1_GID])[0]
        il1_subIDs = il1_HaloToSubdisk[il1_GID]
    except KeyError:
        # No disk subhalos recorded for this group: fall back to all members.
        il1_subIDs = il1_GroupSubhalo[il1_GID]
    return SubPositionMatch(tng_subID, il1_subIDs)


def SubPositionMatch(tng_subID, il1_subIDs):
    '''
    If one dark matter halo has several center galaxies, pick the il1
    candidate whose DM mass is closest to the TNG subhalo's DM mass.
    (Despite the original docstring, a single subhalo ID is returned,
    not a list of distances.)
    '''
    mass = abs(tng_DMMass[tng_subID] - il1_DMMass[il1_subIDs])
    ind = np.where(mass == mass.min())[0][0]
    return il1_subIDs[ind]


def MatchedCentralGalaxy(tng_subID):
    '''
    Return the matched central galaxy in il1 (the GroupFirstSub of the
    matched group). Returns -1 when there is no group match or when the
    TNG subhalo is not itself a central.
    '''
    tng_GID = Groups_tng[tng_subID]
    il1_GID = tng_to_il1[tng_GID]
    if il1_GID == -1:
        return il1_GID
    if isCentral(tng_subID):
        return il1_GFSub[il1_GID]
    else:
        return -1


def HistValAndBin(nums, bins):
    # Histogram counts of `nums` for pre-computed bin edges `bins`;
    # the first bin is open below, inner bins are (lo, hi] intervals.
    val = []
    tmp = nums[nums < bins[1]]
    val.append(len(tmp))
    for i in range(1,len(bins)-1):
        tmp = nums[(nums > bins[i]) & (nums <= bins[i+1])]
        val.append(len(tmp))
    return val


# +
#match disk halo between TNG & illustris-1
tngdisk = np.load('f:/Linux/localRUN/diskID_4WP.npy')
il1disk = np.load('f:/Linux/localRUN/diskID_il1.npy')
tng_barred = np.load('f:/Linux/localRUN/barredID_4WP_TNG.npy')
il1_barred = np.load('f:/Linux/localRUN/barredID_il1.npy')
Groups_tng = il.func.loadSubhalos('TNG', 99, "SubhaloGrNr")
Groups_il1 = il.func.loadSubhalos('il1', 135, "SubhaloGrNr")
tng_diskGIDs = Groups_tng[tngdisk]
il1_diskGIDs = Groups_il1[il1disk]
tng_barredGID = Groups_tng[tng_barred]
il1_barredGID = Groups_il1[il1_barred]
#load TNG groups informations
cm_TNG = il.func.loadhalos('TNG', 99, 'GroupPos')
tng_subCM = il.func.loadSubhalos('TNG', 99, 'SubhaloCM')
# column 1 of GroupMassType = dark-matter component (same convention as
# the "#DM mass" SubhaloMassInRadType loads below)
mass_TNG = il.func.loadhalos('TNG', 99, 'GroupMassType')[:, 1]
#build illustris halo as a tree
cm_il1 = il.func.loadhalos('il1', 135, 'GroupPos')
il1_subCM = il.func.loadSubhalos('il1', 135, 'SubhaloCM')
mass_il1 = il.func.loadhalos('il1', 135, 'GroupMassType')[:, 1]
#Is centre galaxy
tng_GFSub = il.func.loadhalos('TNG', 99, 'GroupFirstSub')
il1_GFSub = il.func.loadhalos('il1', 135, 'GroupFirstSub')


def isCentral(haloID, simu='tng'):
    # True when the subhalo is the first (central) subhalo of its group.
    if simu == 'tng':
        GID = Groups_tng[haloID]
        GFSub = tng_GFSub[GID]
    else:
        GID = Groups_il1[haloID]
        GFSub = il1_GFSub[GID]
    if GFSub == haloID:
        return True
    else:
        return False


def findMatchSubhalo(haloID, simu='tng'):
    #input subhaloID in TNG, return matched subhaloID in il1
    GID = Groups_tng[haloID]
    try:
        il1_GID = tng_to_il1[GID]
    except:  # NOTE(review): bare except also swallows KeyboardInterrupt
        print('halo ', haloID,' no match.')
        return -1
    return il1_GFSub[il1_GID]


#SFR
tng_SFR = il.func.loadSubhalos('TNG', 99, "SubhaloSFRinHalfRad")
il1_SFR = il.func.loadSubhalos('il1', 135, "SubhaloSFRinHalfRad")
#DM mass
tng_DMMass = il.func.loadSubhalos('TNG', 99, "SubhaloMassInRadType")[:,1]
il1_DMMass = il.func.loadSubhalos('il1', 135, "SubhaloMassInRadType")[:,1]
#Stellar mass
tng_StellarMass = il.func.loadSubhalos('TNG', 99, "SubhaloMassInRadType")[:,4]
il1_StellarMass = il.func.loadSubhalos('il1', 135, "SubhaloMassInRadType")[:,4]
#Gas Fraction
tng_GasFraction = il.func.loadSubhalos('TNG', 99, "SubhaloMassInRadType")[:,0] / il.func.loadSubhalos('TNG', 99, "SubhaloMassInHalfRad")
il1_GasFraction = il.func.loadSubhalos('il1', 135, "SubhaloMassInRadType")[:,0] / il.func.loadSubhalos('il1', 135, "SubhaloMassInHalfRad")
# Subhalos with zero total mass divide to NaN; zero those out.
tng_GasFraction[np.isnan(tng_GasFraction)] = 0
il1_GasFraction[np.isnan(il1_GasFraction)] = 0
# -

# Spatial tree over Illustris-1 group centres for nearest-neighbour queries.
tree = KDTree(cm_il1, leaf_size = 2)

# +
#tngID, il1ID, dis, absMass
sameHalo = [[], [], [], []]
for groupID in np.unique(tng_diskGIDs):
    sameHalo[0].append([groupID])
    # 10 nearest il1 groups by centre position.
    dis, index = tree.query(cm_TNG[groupID].reshape(1, -1), k=10)
    il1_id = []
    distance = []
    absMass = []
    for i in range(10):
        # Keep a candidate only when DM masses agree within 50%;
        # rejected candidates are recorded as -1 (distance/mass kept).
        if abs(mass_TNG[groupID] - mass_il1[index[0, i]]) <= mass_TNG[groupID]*0.5:
            il1_id.append(index[0, i])
            distance.append(dis[0, i])
            absMass.append(abs(mass_TNG[groupID] - mass_il1[index[0, i]]))
        else:
            il1_id.append(-1)
            distance.append(dis[0, i])
            absMass.append(abs(mass_TNG[groupID] - mass_il1[index[0, i]]))
    sameHalo[1].append(il1_id)
    sameHalo[2].append(distance)
    sameHalo[3].append(absMass)
# NOTE(review): ragged nested lists — numpy >= 1.24 requires dtype=object here.
sameHalo = np.array(sameHalo)
# +
# Pre-computed TNG -> il1 match tables from earlier runs.
tng_to_il1 = np.load('f:/Linux/localRUN/tng_to_il1/tng_to_il1.npy',allow_pickle=True).item()
tng_bar_hostGID_matched = np.load('f:/Linux/localRUN/tng_to_il1/tng_bar_hostGID_matched.npy',allow_pickle=True)
tng_barred_matchedGID = np.load('f:/Linux/localRUN/tng_to_il1/tng_barred_matchedGID.npy',allow_pickle=True)
tng_disk_matchedGID = np.load('f:/Linux/localRUN/tng_to_il1/tng_disk_matchedGID.npy',allow_pickle=True)
tng_to_il1_matched_barred_centergalaxies = np.load('f:/Linux/localRUN/tng_to_il1/tng_to_il1_matched_barred_centergalaxies.npy',allow_pickle=True).item()
tng_barred_il1_nobarGID = np.load('f:/Linux/localRUN/tng_to_il1/tng_barred_il1_nobarGID.npy',allow_pickle=True)

#TNG disk subhalo match il1 subhalo
# Partition the TNG disk/barred samples by what their il1 counterpart is.
tng_matched_il1disk = {}
tng_matched_il1notdisk = {}
tng_bar_matched_il1bar = {}
tng_bar_matched_il1_nobardisk = {}
tng_bar_matched_il1_nobar_others = {}
for i in tngdisk:
    tmp = MatchedCentralGalaxy(i)
    if tmp != -1:
        if tmp in il1disk:
            tng_matched_il1disk[i] = tmp
        else:
            tng_matched_il1notdisk[i] = tmp
for i in tng_barred:
    tmp = MatchedCentralGalaxy(i)
    if tmp != -1:
        if tmp in il1_barred:
            tng_bar_matched_il1bar[i] = tmp
        elif tmp in il1disk:
            tng_bar_matched_il1_nobardisk[i] = tmp
        else:
            tng_bar_matched_il1_nobar_others[i] = tmp
# -

# TNG unbarred disks whose il1 counterpart is barred.
tng_nobar_matched_il1bar = {}
for i in tngdisk:
    if i not in tng_barred:
        tmp = MatchedCentralGalaxy(i)
        if tmp != -1:
            if tmp in il1_barred:
                tng_nobar_matched_il1bar[i] = tmp
print(len(tng_nobar_matched_il1bar))

len(tng_barred)

# +
#TNG barred galaxies matched Illustris-1 unbarred disk galaxies
#Gas Fraction
tng_M_barID = []
il1_M_nobar_diskID = []
for i in tng_bar_matched_il1_nobardisk.keys():
    tng_M_barID.append(i)
    il1_M_nobar_diskID.append(tng_bar_matched_il1_nobardisk[i])
tng_barGasF = tng_GasFraction[tng_M_barID]
il1_unbarGasF = il1_GasFraction[il1_M_nobar_diskID]
# plt.hist also supplies the bin edges (`bins`) reused for the TNG counts
# and by later cells.
n_il1, bins, o = plt.hist(il1_unbarGasF, rwidth=0.9)
n_tng = HistValAndBin(tng_barGasF, bins)
np.sum(n_il1), np.sum(n_tng)
# -

#Plot Gas Histogram
plt.bar(bins[:-1], n_tng, width=(bins[1] - bins[0])*0.9, align = 'edge',color='black', label='TNG barred galaxies')
plt.bar(bins[:-1], n_il1, width=(bins[1] - bins[0])*0.9, align = 'edge', alpha=0.5,color='y', label='il1 unbarred disk galaxies')
# Finish the gas-fraction histogram: TNG barred vs. il1 unbarred disks.
plt.title('TNG barred galaxies match Illustris-1 unbarred disk galaxies')
plt.xlabel('Gas Fraction')
plt.ylabel('N')
plt.legend()
#plt.savefig('f:/Linux/local_result/GasFraction_TNG_barred_il1_unbardisk.png',dpi=300)

# +
#Stellar Mass
# Catalog masses are in units of 1e10 Msun; convert to log10(Msun).
tng_barSM = np.log10(tng_StellarMass[tng_M_barID] * 10**10)
il1_unbarSM = np.log10(il1_StellarMass[il1_M_nobar_diskID] * 10**10)
SM_bins = np.arange(10, 13.5, 0.25)
SM_tng = HistValAndBin(tng_barSM, SM_bins)
SM_il1 = HistValAndBin(il1_unbarSM, SM_bins)
SM_tng,SM_il1
# +
#check tng barred galaxies stellar particles: flag poorly resolved hosts
tng_Stellar = il.func.loadSubhalos('TNG', 99, 'SubhaloLenType')[:,4]
smallhaloID = []
for i in tng_M_barID:
    if tng_Stellar[i] < 40000:
        smallhaloID.append(i)
# Well-resolved subsample (>= 40000 stellar particles) and its il1 partners.
tng_M_barID_big = []
il1_M_nobar_diskID_big = []
for i in tng_M_barID:
    if i not in smallhaloID:
        tng_M_barID_big.append(i)
        il1_M_nobar_diskID_big.append(tng_bar_matched_il1_nobardisk[i])
len(smallhaloID), len(tng_M_barID_big) , len(tng_M_barID), len(il1_M_nobar_diskID_big)
# +
tng_barGasF_big = tng_GasFraction[tng_M_barID_big]
il1_unbarGasF_big = il1_GasFraction[il1_M_nobar_diskID_big]
n_il1_big = HistValAndBin(il1_unbarGasF_big, bins)
n_tng_big = HistValAndBin(tng_barGasF_big, bins)
tng_frac = n_tng_big / np.sum(n_tng_big)
il1_frac = n_il1_big / np.sum(n_il1_big)
# In-place cumulative sum -> cumulative distribution for the right axis.
for i in range(1, len(tng_frac)):
    tng_frac[i] += tng_frac[i-1]
    il1_frac[i] += il1_frac[i-1]
#Plot Gas Histogram (counts on the left axis, cumulative fraction on the right)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.bar(bins[:-1], n_tng_big, width=(bins[1] - bins[0])*0.9, align = 'edge',color='black', label='TNG barred galaxies')
ax1.bar(bins[:-1], n_il1_big, width=(bins[1] - bins[0])*0.9, align = 'edge', alpha=0.5,color='y', label='il1 unbarred disk galaxies')
ax1.set_title('TNG barred galaxies match Illustris-1 unbarred disk galaxies')
ax1.set_xlabel('(Stellar particle > 40000 disk galaxies) Gas Fraction')
ax1.set_ylabel('N')
ax1.set_ylim(0,180)
ax1.legend()
p = (bins[1] - bins[0])*0.5
ax2.plot(bins[:-1] + p, tng_frac, label='TNG', color = 'b')
# Fix: legend label typo 'Illsutris-1' -> 'Illustris-1'.
ax2.plot(bins[:-1] + p, il1_frac, label='Illustris-1', color = 'r')
ax2.scatter(bins[:-1] + p, tng_frac, color='b')
ax2.scatter(bins[:-1] + p, il1_frac, color='r')
ax2.set_ylim(0,1.3)
ax2.legend(loc=2)
# plt.savefig('f:/Linux/local_result/GasFraction_TNG_barred_il1_unbardisk_40000.png',dpi=300)
# -

#Plot Stellar Mass Hist
plt.bar(SM_bins[:-1], SM_tng, width=(SM_bins[1] - SM_bins[0])*0.9, align = 'edge', color='k',label='TNG barred galaxies')
plt.bar(SM_bins[:-1], SM_il1, width=(SM_bins[1] - SM_bins[0])*0.9, align = 'edge', color='y', alpha = 0.7,label='il1 unbarred disk galaxies')
plt.title('TNG barred galaxies match Illustris-1 unbarred disk galaxies')
plt.xlabel('Stellar Mass')
plt.ylabel('N')
plt.legend()
#plt.savefig('f:/Linux/local_result/StellarMass_TNG_barred_il1_unbardisk.png',dpi=300)

# +
#SFR — bin edges come from the TNG histogram and are reused for il1.
tng_barSFR = tng_SFR[tng_M_barID]
il1_unbarSFR = il1_SFR[il1_M_nobar_diskID]
SFR_tng, SFR_bins, o = plt.hist(tng_barSFR, rwidth=0.9)
SFR_il1 = HistValAndBin(il1_unbarSFR, SFR_bins)
SFR_il1 = np.array(SFR_il1)
np.sum(SFR_tng), np.sum(SFR_il1)
# -

#Plot SFR Hist
plt.bar(SFR_bins[:-1], SFR_tng, width=(SFR_bins[1] - SFR_bins[0])*0.9, align = 'edge', color='k',label='TNG barred galaxies')
plt.bar(SFR_bins[:-1], SFR_il1, width=(SFR_bins[1] - SFR_bins[0])*0.9, align = 'edge', color='y', alpha = 0.7,label='il1 unbarred disk galaxies')
plt.title('TNG barred galaxies match Illustris-1 unbarred disk galaxies')
plt.xlabel('Star Formation Rate')
plt.ylabel('N')
plt.legend()
#plt.savefig('f:/Linux/local_result/SFR_TNG_barred_il1_unbardisk.png',dpi=300)

#TNG barred vs.
# Illustris-1 barred: build matched barred-barred ID lists.
tng_Matched_BID = []
il1_Matched_BID = []
for i in tng_bar_matched_il1bar.keys():
    tng_Matched_BID.append(i)
    il1_Matched_BID.append(tng_bar_matched_il1bar[i])

# +
#GasFraction
tng_MB_GasF = tng_GasFraction[tng_Matched_BID]
il1_MB_GasF = il1_GasFraction[il1_Matched_BID]
tng_GF = HistValAndBin(tng_MB_GasF, bins)
il1_GF = HistValAndBin(il1_MB_GasF, bins)
np.sum(tng_GF), np.sum(il1_GF)
# -

#Plot Gas Histogram
plt.bar(bins[:-1], tng_GF, width=(bins[1] - bins[0])*0.9, align = 'edge',color='black', label='TNG barred galaxies')
# Fix: this is the barred-vs-barred comparison; the legend wrongly said
# 'il1 unbarred disk galaxies' (copy-paste from the previous section).
plt.bar(bins[:-1], il1_GF, width=(bins[1] - bins[0])*0.9, align = 'edge', alpha=0.5,color='y', label='il1 barred galaxies')
plt.title('TNG barred galaxies match Illustris-1 barred galaxies')
plt.xlabel('Gas Fraction')
plt.ylabel('N')
plt.legend()
#plt.savefig('f:/Linux/local_result/GasFraction_TNG_barred_il1_unbardisk.png',dpi=300)

# +
#Stellar Mass
tng_M_barSM = np.log10(tng_StellarMass[tng_Matched_BID] * 10**10)
il1_M_barSM = np.log10(il1_StellarMass[il1_Matched_BID] * 10**10)
tng_M_SM = HistValAndBin(tng_M_barSM, SM_bins)
il1_M_SM = HistValAndBin(il1_M_barSM, SM_bins)
np.sum(tng_M_SM), np.sum(il1_M_SM), len(tng_Matched_BID)
# -

#Plot Stellar Mass Histogram
plt.bar(SM_bins[:-1], tng_M_SM, width=(SM_bins[1] - SM_bins[0])*0.9, align = 'edge',color='black', label='TNG barred galaxies')
# Fix: mislabeled legend (see above).
plt.bar(SM_bins[:-1], il1_M_SM, width=(SM_bins[1] - SM_bins[0])*0.9, align = 'edge', alpha=0.5,color='y', label='il1 barred galaxies')
plt.title('TNG barred galaxies match Illustris-1 barred galaxies')
plt.xlabel('Stellar Mass')
plt.ylabel('N')
plt.legend()
#plt.savefig('f:/Linux/local_result/GasFraction_TNG_barred_il1_unbardisk.png',dpi=300)

# +
#SFR
tng_M_SFR = tng_SFR[tng_Matched_BID]
il1_M_SFR = il1_SFR[il1_Matched_BID]
tng_M_SFR = HistValAndBin(tng_M_SFR, SFR_bins)
il1_M_SFR = HistValAndBin(il1_M_SFR, SFR_bins)

#Plot SFR Histogram (comment fixed: this is SFR, not stellar mass)
plt.bar(SFR_bins[:-1], tng_M_SFR, width=(SFR_bins[1] - SFR_bins[0])*0.9, align = 'edge',color='black', label='TNG barred galaxies')
plt.bar(SFR_bins[:-1], il1_M_SFR, width=(SFR_bins[1] - SFR_bins[0])*0.9, align = 'edge', alpha=0.5,color='y', label='il1 barred galaxies')
plt.title('TNG barred galaxies match Illustris-1 barred galaxies')
plt.xlabel('SFR')
plt.ylabel('N')
plt.legend()
# -

#TNG disk galaxies matched Illustris-1 not disk galaxies
tng_matched_il1_nodiskID = []
il1_matched_nodiskID = []
for i in tng_matched_il1notdisk.keys():
    tng_matched_il1_nodiskID.append(i)
    il1_matched_nodiskID.append(tng_matched_il1notdisk[i])
# Fix: index the mass arrays with the ID lists built just above.
# The original indexed tng_StellarMass with the dict itself (TypeError)
# and left the il1 index empty — `il1_StellarMass[]` — a SyntaxError.
tng_matched_nodisk_SM = tng_StellarMass[tng_matched_il1_nodiskID]
il1_matched_nodisk_SM = il1_StellarMass[il1_matched_nodiskID]

# +
#TNG-il1 all disk galaxies gas fraction
big = []
for i in tngdisk:
    if tng_Stellar[i] > 40000:
        big.append(i)
tng_gas = tng_GasFraction[big]
il1_gas = il1_GasFraction[il1disk]
n_tng_disk = HistValAndBin(tng_gas, bins)
n_il1_disk = HistValAndBin(il1_gas, bins)
tng_disk_frac = n_tng_disk / np.sum(n_tng_disk)
il1_disk_frac = n_il1_disk / np.sum(n_il1_disk)
# Fix: iterate over this cell's own array length (was len(tng_frac), a
# variable from an earlier cell that only coincidentally matches).
for i in range(1, len(tng_disk_frac)):
    tng_disk_frac[i] += tng_disk_frac[i-1]
    il1_disk_frac[i] += il1_disk_frac[i-1]
#Plot Gas Histogram
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.bar(bins[:-1], n_tng_disk, width=(bins[1] - bins[0])*0.9, align = 'edge',color='black', label='TNG disk galaxies')
ax1.bar(bins[:-1], n_il1_disk, width=(bins[1] - bins[0])*0.9, align = 'edge', alpha=0.5,color='y', label='il1 disk galaxies')
ax1.set_title('TNG / il1 disk galaxies Gas Fraction')
ax1.set_xlabel('(Stellar particle > 40000 disk galaxies) Gas Fraction')
ax1.set_ylabel('N')
# ax1.set_ylim(0,300)
ax1.legend()
p = (bins[1] - bins[0])*0.5
ax2.plot(bins[:-1] + p, tng_disk_frac, label='TNG', color = 'b')
# Fix: legend label typo 'Illsutris-1' -> 'Illustris-1'.
ax2.plot(bins[:-1] + p, il1_disk_frac, label='Illustris-1', color = 'r')
ax2.scatter(bins[:-1] + p, tng_disk_frac, color='b')
ax2.scatter(bins[:-1] + p, il1_disk_frac, color='r')
ax2.set_ylim(0,1.3)
ax2.legend(loc=2)
#plt.savefig('f:/Linux/local_result/TNG_il1_disk_galaxies_GasFraction.png',dpi=300) # - #angular momentum il1_stellarAng = np.load('f:/Linux/localRUN/il1_stellarAng.npy') tng_stellarAng = np.load('f:/Linux/localRUN/tng_StellarAng.npy') # + tng_Arg = [] for i in tng_barred: tng_Arg.append(np.where(tngdisk==i)[0][0]) tng_Arg = np.array(tng_Arg) il1_Arg = [] for i in il1_barred: il1_Arg.append(np.where(il1disk==i)[0][0]) il1_Arg = np.array(il1_Arg) # + il1_Jsp = il1_stellarAng / il1_StellarMass[il1disk, np.newaxis] tng_Jsp = tng_stellarAng / tng_StellarMass[tngdisk, np.newaxis] il1_barJsp = np.linalg.norm(il1_Jsp[il1_Arg], axis = 1) tng_barJsp = np.linalg.norm(tng_Jsp[tng_Arg], axis = 1) # - n, bins, o = plt.hist(il1_barJsp, 10, rwidth=0.9) n, bins, o = plt.hist(tng_barJsp, 10, rwidth=0.9)
JpytrNb/tng_to_il1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Rossmann Store Sales
#
# ## Goal: EDA
# We start with the Exploratory Data Analysis of the main metrics revealing present trends and patterns in the data, giving a solid foundation for the further causal analysis.
#
# In addition to using Prophet, I explore the use of one of the most robust and sophisticated algorithms, Extreme Gradient Boosting, for regression.

# ### Import Necessary Modules

# +
import warnings
warnings.filterwarnings("ignore")

# loading packages
# basic + dates
import numpy as np
import pandas as pd
from pandas import datetime

# data visualization
import matplotlib.pyplot as plt
import seaborn as sns # advanced vizs
# %matplotlib inline

# statistics
from statsmodels.distributions.empirical_distribution import ECDF

# time series analysis
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf

# prophet by Facebook
from fbprophet import Prophet

# machine learning: XGB
import xgboost as xgb
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from xgboost.sklearn import XGBRegressor #wrapper
# -

# ### Read Data

# +
# importing train data to learn
train = pd.read_csv('/mnt/disks/disk-1/data/rossmann/train.csv',
                    parse_dates = True, low_memory = False, index_col = 'Date')

# additional store data
store = pd.read_csv("/mnt/disks/disk-1/data/rossmann/store.csv",
                    low_memory = False)

# time series as indexes
train.index
# -

# ### Exploratory Data Analysis
#
# <li> this section will handle missing values and create new features for further analysis.

# first glance at the train set: head and tail
print("In total: ", train.shape)
train.head(5).append(train.tail(5))

# #### Data Dictionary
#
# - Open: an indicator for whether the store was open: 0 = closed, 1 = open.
# - Promo: indicates whether a store is running a promo on that day.
# - StateHoliday: indicates a state holiday. Normally all stores, with few exceptions, are closed on state holidays.
# - SchoolHoliday: indicates if the (Store, Date) was affected by the closure of public schools.

# <li> Since we are dealing with time series data it's a good idea to break up the date into its individual components.
# <li> Also, we can define a new feature, "sale per customer"

# +
# data extraction from the DatetimeIndex
train['Year'] = train.index.year
train['Month'] = train.index.month
train['Day'] = train.index.day
train['WeekOfYear'] = train.index.weekofyear

# adding new variable
train['SalePerCustomer'] = train['Sales']/train['Customers']
train['SalePerCustomer'].describe()
# -

# ### [What is ECDF](https://en.wikipedia.org/wiki/Empirical_distribution_function)
# <li> ECDF value at any specified value of the measured variable is the fraction of observations of the measured variable that are less than or equal to the specified value.

# +
sns.set_context('poster')
sns.set(style = "ticks")# to format into seaborn
c = 'black' # basic color for plots
plt.figure(figsize = (18, 12))

plt.subplot(311)
cdf = ECDF(train['Sales'])
plt.plot(cdf.x, cdf.y, label = "statmodels", color = c);
plt.xlabel('Sales'); plt.ylabel('ECDF');

# plot second ECDF
plt.subplot(312)
cdf = ECDF(train['Customers'])
plt.plot(cdf.x, cdf.y, label = "statmodels", color = c);
plt.xlabel('Customers');

# plot third ECDF
plt.subplot(313)
cdf = ECDF(train['SalePerCustomer'])
plt.plot(cdf.x, cdf.y, label = "statmodels", color = c);
plt.xlabel('Sale per Customer');
# -

# <li> About 20% of data has zero amount of sales/customers that we need to deal with and almost 80% of time daily amount of sales was less than 1000. So what about zero sales, is it only due to the fact that the store is closed?

# ### Missing values
# #### Closed stores with zero sales

# closed stores
print('Number of closed stores: {0}'.format(len(train[(train.Open == 0) & (train.Sales == 0)])))

# #### Open stores with zero sales

# opened stores with zero sales
zero_sales = train[(train.Open != 0) & (train.Sales == 0)]
# Fix: the message said 'Number of closed stores' but this cell counts OPEN
# stores with zero sales; also report the row count, not the shape tuple.
print('Number of open stores with zero sales: {0}'.format(zero_sales.shape[0]))
zero_sales.head(5)

# +
print("Closed stores and days with Zero sales won't be counted in the forecast.")
train = train[(train["Open"] != 0) & (train['Sales'] != 0)]
print("In total: ", train.shape)
# -

# #### Store Data Dictionary
# - Store: a unique Id for each store
# - StoreType: differentiates between 4 different store models: a, b, c, d
# - Assortment: describes an assortment level: a = basic, b = extra, c = extended
# - CompetitionDistance: distance in meters to the nearest competitor store
# - CompetitionOpenSince[Month/Year]: gives the approximate year and month of the time the nearest competitor was opened
# - Promo2: Promo2 is a continuing promotion for some stores: 0 = store is not participating, 1 = store is participating
# - Promo2Since[Year/Week]: describes the year and calendar week when the store started participating in Promo2
# - PromoInterval: describes the consecutive intervals Promo2 is started, naming the months the promotion is started. E.g. "Feb,May,Aug,Nov" means each round starts in February, May, August, November of any given year for that store

# missing values?
store.isnull().sum()

# ### Data Wrangling
# #### CompetitionDistance

# missing values in CompetitionDistance
len(store[pd.isnull(store['CompetitionDistance'])])

# fill NaN with a median value (skewed distribution)
store['CompetitionDistance'].fillna(store['CompetitionDistance'].median(), inplace = True)

# #### Promo2SinceWeek

# no promo = no information about the promo?
# Rows missing Promo2SinceWeek: confirm they are exactly the Promo2 == 0 stores.
a11 = store[pd.isnull(store.Promo2SinceWeek)]
a11[a11.Promo2 != 0].shape

# if there's no `Promo2` then there's no information about it and it can be replaced by zero.
# The same is true for `CompetitionOpenSinceMonth` and `CompetitionOpenSinceYear`.

# replace NA's by 0
store.fillna(0, inplace = True)

# +
# Join train and store by specifying inner join to ensure that only those observations
# that are present in both train and store sets are merged together
train_store = pd.merge(train, store, how = 'inner', on = 'Store')
print("In total: ", train_store.shape)
# -

# ### Store types
# Now let's examine different `StoreType` and distribution of `Sales` across them.

train_store.groupby('StoreType')['Sales'].describe()

# `StoreType` B has the highest average of Sales, albeit very little data compared to other stores. <br>
# Let's print the sum of `Sales` and `Customers` to see which `StoreType` is the most popular.

# Fix: select multiple columns from a GroupBy with a LIST — the bare
# `['Customers', 'Sales']` tuple form is deprecated/removed in modern pandas.
train_store.groupby('StoreType')[['Customers', 'Sales']].sum().sort_values(by='Sales', ascending=False)

# Clearly A, and D stores lead in terms of `Sales` and `Customers`. <br>

# sales trends
sns.set_style('darkgrid')
sns.set_context('paper')
_=sns.factorplot(data = train_store, x = 'Month', y = "Sales",
                 col = 'StoreType', # per store type in cols
                 palette = 'deep',
                 hue = 'StoreType',
                 row = 'Promo', # per promo in the store in rows
                 color = c)

# + slideshow={"slide_type": "-"}
# customer trends
sns.set_style('darkgrid')
sns.set_context('paper')
_=sns.factorplot(data = train_store, x = 'Month', y = "Customers",
                 col = 'StoreType', # per store type in cols
                 palette = 'deep',
                 hue = 'StoreType',
                 row = 'Promo', # per promo in the store in rows
                 color = c)
# -

# All store types follow the same trend but at different scales depending on the presence of the (first) promotion `Promo` and `StoreType` itself (case for B).
#
# _At this point, increasing Sales towards Christmas holidays is evident, but we'll talk about seasonality and trends later in the Time Series Analysis section._

# sale per customer trends
sns.set_style('darkgrid')
sns.set_context('paper')
_=sns.factorplot(data = train_store, x = 'Month', y = "SalePerCustomer",
                 col = 'StoreType', # per store type in cols
                 palette = 'deep',
                 hue = 'StoreType',
                 row = 'Promo', # per promo in the store in rows
                 color = c)

# Even though the plots above showed `StoreType` B as the most popular, in reality that is not true. The highest `SalePerCustomer` amount is observed at the `StoreType` D, ~ 12€ with `Promo` and ~ 10€ without a promo, and for `StoreType` A and C it is about 9€. <br>
# Based on low `SalePerCustomer` amount for `StoreType` B, we can attempt to describe the Cart: there are a lot of people who shop essentially for **small** things (or in a little quantity).

# customers
sns.set_style('darkgrid')
sns.set_context('paper')
_=sns.factorplot(data = train_store, x = 'Month', y = "Sales",
                 col = 'DayOfWeek', # per day of week in cols
                 palette = 'deep',
                 hue = 'StoreType',
                 row = 'StoreType', # per store type in rows
                 color = c)

# We see that stores of `StoreType` C are closed on Sundays, whereas others are open. Interestingly enough, stores of `StoreType` D are closed on Sundays only from October to December.
#
# By the way, which stores are open on Sundays?

# stores which are opened on Sundays
train_store[(train_store.Open == 1) & (train_store.DayOfWeek == 7)]['Store'].unique()

# +
# competition open time (in months)
train_store['CompetitionOpen'] = 12 * (train_store.Year - train_store.CompetitionOpenSinceYear) + \
        (train_store.Month - train_store.CompetitionOpenSinceMonth)

# Promo open time (in months; weeks / 4 approximates months)
train_store['PromoOpen'] = 12 * (train_store.Year - train_store.Promo2SinceYear) + \
        (train_store.WeekOfYear - train_store.Promo2SinceWeek) / 4.0

# replace NA's by 0
train_store.fillna(0, inplace = True)

# average PromoOpen time and CompetitionOpen time per store type
train_store.loc[:, ['StoreType', 'Sales', 'Customers', 'PromoOpen', 'CompetitionOpen']].groupby('StoreType').mean().sort_values(by='Sales', ascending=False)
# -

# `StoreType` B has the longest running period of promotion.

# ### Correlational Analysis
# We are finished with adding new variables to the data, so now we can check the overall correlations by plotting the `seaborn` heatmap:

# +
# Compute the correlation matrix
# exclude 'Open' variable
corr_all = train_store.drop('Open', axis = 1).corr()

# Generate a mask for the upper triangle
# Fix: the alias np.bool was removed in NumPy >= 1.24; the builtin `bool`
# is the documented replacement and behaves identically here.
mask = np.zeros_like(corr_all, dtype = bool)
mask[np.triu_indices_from(mask)] = True

# Set up the matplotlib figure
f, ax = plt.subplots(figsize = (11, 9))

# Draw the heatmap with the mask and correct aspect ratio
sns.set_context('poster')
sns.set_style('darkgrid')
sns.heatmap(corr_all, mask = mask, square = True, linewidths = .5, ax = ax, cmap = "BuPu")
plt.show()
# -

# We can identify a positive correlation between the amount of Sales and Customers of a store.<br>
# We can also observe a positive correlation between the fact that the store had a running promotion (`Promo` equal to 1) and amount of `Customers`.
# <br>
# Interestingly enough though, as soon as a store has consecutive promotions (`Promo2` equal to 1) the number of `Customers` and `Sales` seems to stay steady or even decrease, which is described by the pale negative correlation on the heatmap. <br>
# The same negative correlation is observed between the presence of the promotion in the store and the day of a week.

# sales by day of week, split by promotion status
sns.set_context('paper')
sns.set_style('darkgrid')
_=sns.factorplot(data = train_store, x = 'DayOfWeek', y = "Sales",
                 col = 'Promo',
                 row = 'Promo2',
                 hue = 'Promo2',
                 palette = 'RdPu')

# A few things to be noted from the above <br>
# In case of no active promotion, both `Promo` and `Promo2` equal to 0, `Sales` tend to peak on Sunday. Though we should note that `StoreType` C doesn't work on Sundays.<br>
# On the contrary, stores that run the promotion tend to make most of the `Sales` on Monday. This fact could be a good indicator for Rossmann marketing campaigns. The same trend is followed by the stores which have both promotions at the same time (`Promo` and `Promo2` are equal to 1). <br>
# `Promo2` alone doesn't seem to be correlated to any significant change in the `Sales` amount. <br>

# ## Summary of EDA
# `StoreType` A is the most crowded, with the highest sales.<br>
# `StoreType` D with the highest "**Sale per Customer**" indicates a strong Buyer Cart. An interesting thing to do will be to map these stores on the map to check whether they are urban or rural locations.<br>
# Low `SalePerCustomer` amount for `StoreType` B alludes to the possibility that people are essentially shopping for sundry items. An interesting thing to do will be to map these stores on the map to check whether they are urban or rural locations and compare it against locations from point 2. <br>
# Customers tend to buy more on Mondays when there's a single promotion running (`Promo`) and on Sundays when there is no promotion at all (both `Promo` and `Promo2` are equal to 0).<br>
# My goal is now to create a model which predicts 6 weeks of daily sales for 1,115 stores located across Germany. Reliable sales forecasts enable store managers to create effective staff schedules that increase productivity and motivation.
Capstone_2/Capstone_II_EDA_V2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Isopycnal displacement amplitudes derived from CODAR-region tidal
# (w) ellipse fits, plotted in plan view and along sections j = 30, 20, 35.

# +
import netCDF4 as nc
import matplotlib.pyplot as plt
import numpy as np
import datetime
# %matplotlib inline
from nowcast import analyze, research_VENUS
from salishsea_tools import viz_tools
import os
import scipy.io as sio
# -

grid = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc')
bathy = grid.variables['Bathymetry']

# NOTE: `mesh` is rebound to a QuadMesh by the plotting loops below.
mesh = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/mesh_mask_SalishSea2.nc')

# +
path = '/ocean/nsoontie/MEOPAR/TidalEllipseData/CODAR/'
data = sio.loadmat(os.path.join(path, 'CODAR_region_20141126_20150426_w'))
wstruc = data['wstruc']
lons=wstruc['lons'][0,0]
lats=wstruc['lats'][0,0]
depths=wstruc['deptht'][0,0]
# -


def cph2rps(freq):
    """Convert frequency in cycles per hours to radians per second"""
    return freq*2*np.pi/3600


def get_displacement(wstrc, const):
    """Return (amplitude, phase) of the isopycnal displacement for tidal
    constituent `const` (e.g. 'M2', 'K1') from a w-ellipse MATLAB struct.

    Displacement amplitude = w amplitude / angular frequency; the phase
    is shifted by +90 degrees accordingly. Invalid (NaN) entries are masked.
    """
    # Fix: use the `wstrc` argument — the original read the module-level
    # `wstruc` and silently ignored the parameter (all visible callers pass
    # `wstruc`, so results are unchanged).
    var = wstrc[const]
    freq = cph2rps(var[0,0]['freq'][0,0])
    amp = var[0, 0]['ampl'][0, 0][:]
    amp = np.ma.masked_invalid(amp)
    phase = var[0, 0]['phas'][0, 0][:]
    phase = np.ma.masked_invalid(phase)
    amp = amp/freq
    phase = phase+90
    return amp, phase


# +
# Section j = 30: map of M2 surface amplitude with the section line,
# then depth sections of K1 and M2 displacement.
j=30
vmin=0
vmax=30
fig,ax=plt.subplots(1,1,figsize=(5,5))
const='M2'
amp,phase = get_displacement(wstruc, const)
ax.pcolormesh(lons,lats,amp[:,:,0])
ax.plot([lons[0,j],lons[-1,j]],[lats[0,j],lats[-1,j]],'r')

fig,axs=plt.subplots(2,1,figsize=(10,5))
for const, ax in zip(['K1','M2'], axs):
    amp,phase = get_displacement(wstruc, const)
    mesh=ax.pcolormesh(np.arange(amp[:,j,:].shape[0]), depths, amp[:,j,:].T,vmin=vmin,vmax=vmax)
    ax.set_ylim([300,0])
    cbar=plt.colorbar(mesh,ax=ax)
    cbar.set_label('Isopycnal displacement (m)')
    ax.set_title(const)
# +
# Section j = 20 (reuses vmin/vmax from the cell above).
j=20
fig,ax=plt.subplots(1,1,figsize=(5,5))
const='M2'
amp,phase = get_displacement(wstruc, const)
ax.pcolormesh(lons,lats,amp[:,:,0])
ax.plot([lons[0,j],lons[-1,j]],[lats[0,j],lats[-1,j]],'r')

fig,axs=plt.subplots(2,1,figsize=(10,5))
for const, ax in zip(['K1','M2'], axs):
    amp,phase = get_displacement(wstruc, const)
    mesh=ax.pcolormesh(np.arange(amp[:,j,:].shape[0]), depths, amp[:,j,:].T,vmin=vmin,vmax=vmax)
    ax.set_ylim([300,0])
    cbar=plt.colorbar(mesh,ax=ax)
    cbar.set_label('Isopycnal displacement (m)')
    ax.set_title(const)
# +
# Section j = 35.
j=35
fig,ax=plt.subplots(1,1,figsize=(5,5))
const='M2'
amp,phase = get_displacement(wstruc, const)
ax.pcolormesh(lons,lats,amp[:,:,0])
ax.plot([lons[0,j],lons[-1,j]],[lats[0,j],lats[-1,j]],'r')

fig,axs=plt.subplots(2,1,figsize=(10,5))
for const, ax in zip(['K1','M2'], axs):
    amp,phase = get_displacement(wstruc, const)
    mesh=ax.pcolormesh(np.arange(amp[:,j,:].shape[0]), depths, amp[:,j,:].T,vmin=vmin,vmax=vmax)
    ax.set_ylim([300,0])
    cbar=plt.colorbar(mesh,ax=ax)
    cbar.set_label('Isopycnal displacement (m)')
    ax.set_title(const)
# -
notebooks/currents/Isopycnal displacement.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# LSTM intrusion-detection model: trains on 8 days of flow data, evaluates
# on the held-out day 01-03-2018, and reports a confusion matrix and ROC/AUC.

import os
import numpy as np
import itertools
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score

# Training days; the test day (01-03-2018) is loaded separately below.
dias = ["02-03-2018.csv", "21-02-2018.csv", "15-02-2018.csv", "16-02-2018.csv", "28-02-2018.csv", "14-02-2018.csv", "22-02-2018.csv", "23-02-2018.csv"]

dfTotal = pd.DataFrame()
for dia in dias:
    dfAux = pd.read_csv("../../../Dataset/" + dia, low_memory = False)
    dfAux = dfAux.drop([0,1])  # drop the first two (non-data) rows
    dfTotal = pd.concat([dfTotal, dfAux])
    print("Ficheiro " + dia + " carregado")

dfTotal

# Test day, restricted to the training columns (and their order).
dfAux = pd.read_csv("../../../Dataset/01-03-2018.csv", low_memory = False)
dfAux = dfAux.drop([0,1])
dfTeste = pd.DataFrame()
for colu in dfTotal.columns.tolist():
    dfTeste[colu] = dfAux[colu]
dfAux = None
dfTeste

input_label_Total = []
output_label_Total= []
input_label_Teste = []
output_label_Teste = []

# Fix: np.float was removed in NumPy >= 1.24; the builtin `float` is the
# documented replacement (both mean float64 here).
input_label_Total = np.array(dfTotal.loc[:, dfTotal.columns != "Label"]).astype(float)
output_label_Total = np.array(dfTotal["Label"])
# Binary target: 0 = benign traffic, 1 = any attack label.
out = []
for o in output_label_Total:
    if(o == "Benign"):out.append(0)
    else: out.append(1)
output_label_Total = out

import gc
dfTotal = None
gc.collect()

input_label_Teste = np.array(dfTeste.loc[:, dfTeste.columns != "Label"]).astype(float)
output_label_Teste = np.array(dfTeste["Label"])
out = []
for o in output_label_Teste:
    if(o == "Benign"):out.append(0)
    else: out.append(1)
output_label_Teste = out
dfTeste = None
out = None
gc.collect()

# Min-max scale to [0, 1] using the combined train+test statistics.
scaler = MinMaxScaler(feature_range=(0,1))
scaler.fit(np.concatenate((input_label_Total, input_label_Teste)))
input_label_Total = scaler.transform(input_label_Total)
input_label_Teste = scaler.transform(input_label_Teste)

input_label_Total, output_label_Total = shuffle(input_label_Total, output_label_Total)
input_label_Teste, output_label_Teste = shuffle(input_label_Teste, output_label_Teste)

# Compress each flow with a pre-trained autoencoder (18 features, matching
# the LSTM input_shape below).
encoder = keras.models.load_model('../Encoder.h5')
input_label_Total = encoder.predict(np.array(input_label_Total))
input_label_Teste = encoder.predict(np.array(input_label_Teste))

tamanhoSequencia = 10  # sliding-window length fed to the LSTM

# Build overlapping windows; each window takes the label of its last element.
inp = []
out = []
num = 0
for i in range(len(input_label_Total) - tamanhoSequencia + 1):
    aux = []
    for j in range(i, i + tamanhoSequencia):
        aux.append(input_label_Total[j])
    inp.append(aux)
    out.append(output_label_Total[i + tamanhoSequencia - 1])
input_label_Total = inp
output_label_Total = out
inp = None
out = None

model = keras.Sequential([
    keras.layers.LSTM(units = 16, input_shape = ((tamanhoSequencia,18)), return_sequences = True, use_bias = True),
    keras.layers.LSTM(units = 8, return_sequences = False, use_bias = True),
    keras.layers.Dense(units = 2, activation = "softmax")
])

model.compile(optimizer= keras.optimizers.Adam(learning_rate= 0.00025), loss="sparse_categorical_crossentropy", metrics=['accuracy'])

model.fit(x = np.array(input_label_Total), y = np.array(output_label_Total), validation_split= 0.1, epochs = 10, shuffle = True,verbose = 1)

input_label_Total = None
output_label_Total = None

model.save("continua.h5")

model = keras.models.load_model("continua.h5")

# Same windowing for the test day.
inp = []
out = []
num = 0
for i in range(len(input_label_Teste) - tamanhoSequencia + 1):
    aux = []
    for j in range(i, i + tamanhoSequencia):
        aux.append(input_label_Teste[j])
    inp.append(aux)
    out.append(output_label_Teste[i + tamanhoSequencia - 1])
input_label_Teste = inp
output_label_Teste = out
inp = None
out = None

# Predicted class per window (argmax over the 2 softmax outputs).
res = [np.argmax(resu) for resu in model.predict(np.array(input_label_Teste))]

cm = confusion_matrix(y_true = np.array(output_label_Teste).reshape(len(output_label_Teste)), y_pred = np.array(res))


def plot_confusion_matrix(cm, classes, normaliza = False, title = "Confusion matrix", cmap = plt.cm.Blues):
    """Render `cm` as an image with per-cell values and axis labels.

    normaliza=True converts each row to fractions before drawing.
    """
    # Fix: normalize BEFORE drawing, so the image, the printed matrix and
    # the per-cell texts all show the same data (the original normalized
    # after plt.imshow, leaving raw counts in the picture).
    if normaliza:
        cm = cm.astype('float') / cm.sum(axis = 1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print("Confusion matrix, without normalization")
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    thresh = cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i,j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


labels = ["Benign", "Malign"]
plot_confusion_matrix(cm = cm, classes = labels, title = "IDS")

output_label_Teste = np.array(output_label_Teste).reshape(len(output_label_Teste))
res = np.array(res)
fpr, tpr, _ = roc_curve(output_label_Teste, res)
auc = roc_auc_score(output_label_Teste, res)
plt.plot(fpr, tpr, label="auc=" + str(auc))
plt.legend(loc=4)
plt.show()
Variacoes_de_ataques/Geral_CNN_LSTM/LSTM/LSTM(01-03-2018).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# Download the latest CAMS global atmospheric-composition forecast for a
# variable/area of interest, convert the field to GeoTIFF and turn it into a
# Cloud-Optimised GeoTIFF.

# +
import cdsapi
import xarray as xr
import rioxarray as rxr
import numpy as np
import os
import datetime
import subprocess
import requests

# Suppress warnings
requests.packages.urllib3.disable_warnings(
    requests.packages.urllib3.exceptions.InsecureRequestWarning
)

# +
# API Key
os.environ['URL'] = 'https://ads.atmosphere.copernicus.eu/api/v2'
os.environ['KEY'] = '<uid>:<key>'

# Product and Variable name
os.environ['PRODUCT'] = 'cams-global-atmospheric-composition-forecasts'
os.environ['VARIABLE'] = 'particulate_matter_2.5um'

# Area of Interest
os.environ['WEST'] = '90.0'
os.environ['EAST'] = '150.0'
os.environ['NORTH'] = '15.0'
os.environ['SOUTH'] = '-15.0'
# -

url = os.environ.get('URL')
key = os.environ.get('KEY')
client = cdsapi.Client(url=url, key=key)


def download_latest_data(variable, forecast_hour):
    """Download `variable` from the latest usable CAMS forecast cycle.

    CAMS global forecasts are initialised at 00 and 12 UTC; a cycle is
    assumed usable ~10 hours after initialisation, so the "latest" base
    time is derived from (now - 10h).

    Parameters
    ----------
    variable : str
        CAMS variable name (e.g. 'particulate_matter_2.5um').
    forecast_hour : int
        Lead time in hours from the model base time.

    Returns
    -------
    str
        Name of the downloaded NetCDF file (<valid-time>.nc).
    """
    # datetime.utcnow() is deprecated (Python 3.12+); take an aware UTC
    # "now" and drop tzinfo so the arithmetic below stays naive-UTC.
    now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)

    # Nearest model initialisation, allowing ~10h dissemination delay.
    mod = now - datetime.timedelta(hours=10)
    obs = mod.date()
    # The 12 UTC cycle is usable from hour 12 onward.  The original test was
    # `> 12`, which wrongly fell back to the 00z run during 12:00-12:59.
    if mod.hour >= 12:
        base = datetime.datetime(obs.year, obs.month, obs.day, 12)
        time = '12:00'
    else:
        base = datetime.datetime(obs.year, obs.month, obs.day, 0)
        time = '00:00'
    print('Using forecast model initialized at', base.isoformat())

    # Valid (observed) datetime of the requested lead time.
    obs_time = base + datetime.timedelta(hours=forecast_hour)

    # Area of interest as [S, W, N, E] -- the order the CDS API expects.
    bbox = [
        float(os.environ.get('SOUTH')),
        float(os.environ.get('WEST')),
        float(os.environ.get('NORTH')),
        float(os.environ.get('EAST'))
    ]

    request = {
        'date': f'{obs.isoformat()}/{obs.isoformat()}',
        'variable': variable,
        'time': time,
        'leadtime_hour': str(forecast_hour),
        'area': bbox,
        'type': 'forecast',
        'format': 'netcdf',
    }

    fname = f'{obs_time.strftime("%Y%m%dT%H")}.nc'
    product = os.environ.get('PRODUCT')
    client.retrieve(product, request, fname)
    return fname


variable = os.environ.get('VARIABLE')
fname = download_latest_data(variable, forecast_hour=22)
ds = xr.open_dataset(fname).isel(time=0)

# +
variables = list(ds.data_vars)

for variable in variables:
    # Scale by 1e9 (presumably kg/m3 -> ug/m3 -- TODO confirm against the
    # CAMS product documentation).
    data = ds[variable] * 1e9
    date_string = os.path.splitext(fname)[0]
    out_fname = f'{variable}_{date_string}.tif'
    data.rio.set_spatial_dims('longitude', 'latitude', inplace=True)
    # set_crs() is deprecated in rioxarray; write_crs() is the supported API.
    data.rio.write_crs("EPSG:4326", inplace=True)
    data.rio.to_raster(out_fname)
    # Run the COG conversion without a shell to avoid injection via filenames,
    # and fail loudly if the conversion errors out.
    subprocess.run(['rio', 'cogeo', 'create', out_fname, out_fname], check=True)
CAMS/01_CDSAPI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9 (tensorflow) # language: python # name: tensorflow # --- # <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_05_4_dropout.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # # T81-558: Applications of Deep Neural Networks # **Module 5: Regularization and Dropout** # * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) # * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # # Module 5 Material # # * Part 5.1: Part 5.1: Introduction to Regularization: Ridge and Lasso [[Video]](https://www.youtube.com/watch?v=jfgRtCYjoBs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_05_1_reg_ridge_lasso.ipynb) # * Part 5.2: Using K-Fold Cross Validation with Keras [[Video]](https://www.youtube.com/watch?v=maiQf8ray_s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_05_2_kfold.ipynb) # * Part 5.3: Using L1 and L2 Regularization with Keras to Decrease Overfitting [[Video]](https://www.youtube.com/watch?v=JEWzWv1fBFQ&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_05_3_keras_l1_l2.ipynb) # * **Part 5.4: Drop Out for Keras to Decrease Overfitting** [[Video]](https://www.youtube.com/watch?v=bRyOi0L6Rs8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_05_4_dropout.ipynb) # * Part 
5.5: Benchmarking Keras Deep Learning Regularization Techniques [[Video]](https://www.youtube.com/watch?v=1NLBwPumUAs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_05_5_bootstrap.ipynb) # # # Google CoLab Instructions # # The following code ensures that Google CoLab is running the correct version of TensorFlow. try: # %tensorflow_version 2.x COLAB = True print("Note: using Google CoLab") except: print("Note: not using Google CoLab") COLAB = False # # Part 5.4: Drop Out for Keras to Decrease Overfitting # Hinton, Srivastava, Krizhevsky, Sutskever, & Salakhutdinov (2012) introduced the dropout regularization algorithm. [[Cite:srivastava2014dropout]](http://www.jmlr.org/papers/volume15/nandan14a/nandan14a.pdf) Although dropout works in a different way than L1 and L2, it accomplishes the same goal—the prevention of overfitting. However, the algorithm goes about the task by actually removing neurons and connections—at least temporarily. Unlike L1 and L2, no weight penalty is added. Dropout does not directly seek to train small weights. # Dropout works by causing hidden neurons of the neural network to be unavailable during part of the training. Dropping part of the neural network causes the remaining portion to be trained to still achieve a good score even without the dropped neurons. This decreases coadaption between neurons, which results in less overfitting. # # Most neural network frameworks implement dropout as a separate layer. Dropout layers function as a regular, densely connected neural network layer. The only difference is that the dropout layers will periodically drop some of their neurons during training. You can use dropout layers on regular feedforward neural networks. # # The program implements a dropout layer as a dense layer that can eliminate some of its neurons. 
Contrary to popular belief about the dropout layer, the program does not permanently remove these discarded neurons. A dropout layer does not lose any of its neurons during the training process, and it will still have exactly the same number of neurons after training. In this way, the program only temporarily masks the neurons rather than dropping them. # Figure 5.DROPOUT shows how a dropout layer might be situated with other layers. # # **Figure 5.DROPOUT: Dropout Regularization** # ![Dropout Regularization](https://raw.githubusercontent.com/jeffheaton/t81_558_deep_learning/master/images/class_9_dropout.png "Dropout Regularization") # # The discarded neurons and their connections are shown as dashed lines. The input layer has two input neurons as well as a bias neuron. The second layer is a dense layer with three neurons as well as a bias neuron. The third layer is a dropout layer with six regular neurons even though the program has dropped 50% of them. While the program drops these neurons, it neither calculates nor trains them. However, the final neural network will use all of these neurons for the output. As previously mentioned, the program only temporarily discards the neurons. # # During subsequent training iterations, the program chooses different sets of neurons from the dropout layer. Although we chose a probability of 50% for dropout, the computer will not necessarily drop three neurons. It is as if we flipped a coin for each of the dropout candidate neurons to choose if that neuron was dropped out. You must know that the program should never drop the bias neuron. Only the regular neurons on a dropout layer are candidates. # The implementation of the training algorithm influences the process of discarding neurons. The dropout set frequently changes once per training iteration or batch. The program can also provide intervals where all neurons are present. 
Some neural network frameworks give additional hyper-parameters to allow you to specify exactly the rate of this interval. # # Why dropout is capable of decreasing overfitting is a common question. The answer is that dropout can reduce the chance of a codependency developing between two neurons. Two neurons that develop a codependency will not be able to operate effectively when one is dropped out. As a result, the neural network can no longer rely on the presence of every neuron, and it trains accordingly. This characteristic decreases its ability to memorize the information presented to it, thereby forcing generalization. # # Dropout also decreases overfitting by forcing a bootstrapping process upon the neural network. Bootstrapping is a very common ensemble technique. We will discuss ensembling in greater detail in Chapter 16, “Modeling with Neural Networks.” Basically, ensembling is a technique of machine learning that combines multiple models to produce a better result than those achieved by individual models. Ensemble is a term that originates from the musical ensembles in which the final music product that the audience hears is the combination of many instruments. # # Bootstrapping is one of the most simple ensemble techniques. The programmer using bootstrapping simply trains a number of neural networks to perform exactly the same task. However, each of these neural networks will perform differently because of some training techniques and the random numbers used in the neural network weight initialization. The difference in weights causes the performance variance. The output from this ensemble of neural networks becomes the average output of the members taken together. This process decreases overfitting through the consensus of differently trained neural networks. # # Dropout works somewhat like bootstrapping. You might think of each neural network that results from a different set of neurons being dropped out as an individual member in an ensemble. 
As training progresses, the program creates more neural networks in this way. However, dropout does not require the same amount of processing as does bootstrapping. The new neural networks created are temporary; they exist only for a training iteration. The final result is also a single neural network, rather than an ensemble of neural networks to be averaged together. # # The following animation that shows how dropout works: [animation link](https://yusugomori.com/projects/deep-learning/dropout-relu) # + import pandas as pd from scipy.stats import zscore # Read the data set df = pd.read_csv( "https://data.heatonresearch.com/data/t81-558/jh-simple-dataset.csv", na_values=['NA','?']) # Generate dummies for job df = pd.concat([df,pd.get_dummies(df['job'],prefix="job")],axis=1) df.drop('job', axis=1, inplace=True) # Generate dummies for area df = pd.concat([df,pd.get_dummies(df['area'],prefix="area")],axis=1) df.drop('area', axis=1, inplace=True) # Missing values for income med = df['income'].median() df['income'] = df['income'].fillna(med) # Standardize ranges df['income'] = zscore(df['income']) df['aspect'] = zscore(df['aspect']) df['save_rate'] = zscore(df['save_rate']) df['age'] = zscore(df['age']) df['subscriptions'] = zscore(df['subscriptions']) # Convert to numpy - Classification x_columns = df.columns.drop('product').drop('id') x = df[x_columns].values dummies = pd.get_dummies(df['product']) # Classification products = dummies.columns y = dummies.values # - # Now we will see how to apply dropout to classification. 
# +
########################################
# Keras with dropout for Classification
########################################

import pandas as pd
import os
import numpy as np
from sklearn import metrics
from sklearn.model_selection import KFold
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras import regularizers

# 5-fold cross-validation with a deterministic shuffle.
kf = KFold(5, shuffle=True, random_state=42)

oos_y = []     # out-of-sample true labels (one-hot), per fold
oos_pred = []  # out-of-sample predicted class indices, per fold

for fold, (train, test) in enumerate(kf.split(x), start=1):
    print(f"Fold #{fold}")

    x_train, y_train = x[train], y[train]
    x_test, y_test = x[test], y[test]

    #kernel_regularizer=regularizers.l2(0.01),

    # Dense(50) -> Dropout(0.5) -> Dense(25, L1 activity reg.) -> softmax.
    model = Sequential([
        Dense(50, input_dim=x.shape[1], activation='relu'),   # Hidden 1
        Dropout(0.5),
        Dense(25, activation='relu',
              activity_regularizer=regularizers.l1(1e-4)),    # Hidden 2
        # Usually do not add dropout after final hidden layer
        # Dropout(0.5),
        Dense(y.shape[1], activation='softmax'),              # Output
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(x_train, y_train, validation_data=(x_test, y_test),
              verbose=0, epochs=500)

    oos_y.append(y_test)
    # raw probabilities to chosen class (highest probability)
    fold_pred = np.argmax(model.predict(x_test), axis=1)
    oos_pred.append(fold_pred)

    # Measure this fold's accuracy against the true class indices.
    score = metrics.accuracy_score(np.argmax(y_test, axis=1), fold_pred)
    print(f"Fold score (accuracy): {score}")

# Build the oos prediction list and calculate the error.
oos_y = np.concatenate(oos_y)
oos_pred = np.concatenate(oos_pred)
score = metrics.accuracy_score(np.argmax(oos_y, axis=1), oos_pred)
print(f"Final score (accuracy): {score}")

# Write the cross-validated prediction
oos_y = pd.DataFrame(oos_y)
oos_pred = pd.DataFrame(oos_pred)
oosDF = pd.concat([df, oos_y, oos_pred], axis=1)
#oosDF.to_csv(filename_write,index=False)
# -
t81_558_class_05_4_dropout.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sys
sys.path.append("/Users/xli27/GitHub/gym")  ## gym for jupyter

import gym
import numpy as np
import matplotlib.pyplot as plt
# -

# Import and initialize Mountain Car Environment
env = gym.make('MountainCar-v0')
env.reset()


def QLearning(env, learning, discount, epsilon, min_eps, episodes):
    """Tabular Q-learning on a discretised MountainCar-v0 state space.

    Parameters
    ----------
    env : gym.Env
        MountainCar-v0 environment (old gym API: reset() returns the state).
    learning : float
        Learning rate (alpha).
    discount : float
        Discount factor (gamma).
    epsilon : float
        Initial exploration rate; decayed linearly to `min_eps`.
    min_eps : float
        Floor for the exploration rate.
    episodes : int
        Number of training episodes.

    Returns
    -------
    list of float
        Mean total reward of each consecutive 100-episode window.
    """
    # Discretise position into 0.1-wide bins and velocity into 0.01-wide
    # bins, giving a small integer state grid (e.g. [19, 15]).
    num_states = (env.observation_space.high - env.observation_space.low) * np.array([10, 100])
    num_states = np.round(num_states, 0).astype(int) + 1

    # Q table with random init in [-1, 1].
    Q = np.random.uniform(low=-1, high=1,
                          size=(num_states[0], num_states[1], env.action_space.n))

    reward_list = []      # rewards within the current 100-episode window
    ave_reward_list = []  # one mean per completed window

    # Linear per-episode epsilon decay step.
    reduction = (epsilon - min_eps) / episodes

    for i in range(episodes):
        done = False
        tot_reward, reward = 0, 0
        state = env.reset()  # e.g. [-0.56237947  0.]

        # Discretise the initial state.
        state_adj = (state - env.observation_space.low) * np.array([10, 100])
        state_adj = np.round(state_adj, 0).astype(int)

        while done != True:
            # Render only the last five episodes.
            if i >= (episodes - 5):
                env.render()

            # Epsilon-greedy action selection.
            if np.random.random() < 1 - epsilon:
                action = np.argmax(Q[state_adj[0], state_adj[1]])
            else:
                action = np.random.randint(0, env.action_space.n)

            # Get next state and reward (old 4-tuple gym step API).
            state2, reward, done, info = env.step(action)

            # Discretise the successor state.
            state2_adj = (state2 - env.observation_space.low) * np.array([10, 100])
            state2_adj = np.round(state2_adj, 0).astype(int)

            if done and state2[0] >= 0.5:
                # Terminal success (reached the flag): no bootstrap term.
                Q[state_adj[0], state_adj[1], action] = reward
            else:
                # Standard Q-learning update.
                delta = learning * (reward
                                    + discount * np.max(Q[state2_adj[0], state2_adj[1]])
                                    - Q[state_adj[0], state_adj[1], action])
                Q[state_adj[0], state_adj[1], action] += delta

            tot_reward += reward
            state_adj = state2_adj

        # Decay epsilon.
        if epsilon > min_eps:
            epsilon -= reduction

        # Track rewards; the original tested `(i+1) % 100 == 0` twice in two
        # separate blocks -- merged here so the window mean is computed and
        # reported in one place.
        reward_list.append(tot_reward)
        if (i + 1) % 100 == 0:
            ave_reward = np.mean(reward_list)
            ave_reward_list.append(ave_reward)
            reward_list = []
            print('Episode {} Average Reward: {}'.format(i + 1, ave_reward))

    env.close()
    return ave_reward_list


# +
# Run Q-learning algorithm
rewards = QLearning(env, 0.2, 0.9, 0.8, 0, 5000)
# -

# Plot the 100-episode average reward curve.
plt.plot(100 * (np.arange(len(rewards)) + 1), rewards)
plt.xlabel('Episodes')
plt.ylabel('Average Reward')
plt.title('Average Reward vs Episodes')
xp_models/ql_moutain_car/xp_q_MountainCar_v0.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import re

# Sample sentence endings; uncomment one to try it.
# script_main = "'She was cute!'"
# script_main = "She was cute?"
# script_main = "'This was terrible!'"
script_main = "'This was terrible?'"

# Sentence-final punctuation (. ? !) immediately followed by a closing quote
# at the very end of the string.
#
# This fixes two defects in the original pattern
# "[\.+|\?+|\!+]{1}[\'+|\"+]{1}$":
#   * inside a character class, `|` and `+` are plain literals, so the class
#     silently also accepted "|" and "+" (and the `{1}` was redundant);
#   * re.match() anchors at the START of the string, so a $-anchored
#     two-character pattern could never match a full sentence -- re.search()
#     is required to find the match at the end.
# (The original also had an unreachable `else` arm containing the no-op
# expression "?", which has been removed.)
pattern = r"[.?!]['\"]$"

matchOB = re.search(pattern, script_main)
if matchOB is not None:
    print(matchOB.group())
else:
    print(matchOB)
# -

# re.compile + .match: anchored match at the start of the string.
pattern = r"ca"
text = "caabsacasca"
repatter = re.compile(pattern)
matchOB = repatter.match(text)
print(matchOB.group())

# Module-level re.match is equivalent to compiling and then matching.
pattern = r"ca"
text = "caabsacasca"
matchOB = re.match(pattern, text)
print(matchOB.group())
.ipynb_checkpoints/Regx test-checkpoint.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.4.0 # language: julia # name: julia-1.4 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Design and implementation of [Tulip.jl](https://github.com/ds4dm/Tulip.jl) # # <br /> # # <NAME> <br /> # <EMAIL> # # JuMP developper call - Aug. 28th, 2020 # # <br /> # # # Technical report: https://arxiv.org/pdf/2006.08814.pdf <br /> # Material: https://github.com/mtanneau/jump-dev_2020 # + slideshow={"slide_type": "skip"} using Pkg Pkg.activate(".") # + slideshow={"slide_type": "skip"} using LinearAlgebra using SparseArrays using Quadmath using Tulip using Tulip.TLA using Tulip.KKT const Krylov = Tulip.KKT.Krylov # + [markdown] slideshow={"slide_type": "subslide"} # ## Shameless advertising # # I'll be defending on October 2nd. # * Decomposition techniques in power systems # * Structured interior-point methods # * Disjunctive cuts in mixed-integer conic optimization # # Details coming at https://cerc-datascience.polymtl.ca/ # + [markdown] slideshow={"slide_type": "subslide"} # ## Overview # # This talk is about # 1. The motivation behind Tulip # 2. Design choices to allow specialized linear algebra # 3. Some lessons learned about internal data structures # + [markdown] slideshow={"slide_type": "slide"} # # Motivation # + [markdown] slideshow={"slide_type": "subslide"} # Linear programming problems: # * linear objective # * linear constraints # * all variables continuous # # In *standard form*: # # \begin{align} # (LP) \ \ \ \min_{x} \ \ \ # & c^{T}x\\ # & A x = b\\ # & x \geq 0 # \end{align} # # Typically solved with simplex or interior-point method (IPM). 
# + [markdown] slideshow={"slide_type": "subslide"} # ~~Almost~~ all IPM algorithms end solve _augmented systems_ # # $$ # \begin{bmatrix} # -\Theta^{-1} & A^{T}\\ # A & 0 # \end{bmatrix} # \begin{bmatrix} # \Delta x\\ # \Delta y # \end{bmatrix} # = # \begin{bmatrix} # b\\ # c # \end{bmatrix} # $$ # # * Symmetric, indefinite # * Can be reduced to normal equations $(A \Theta A^{T}) \Delta y = \xi$ # + [markdown] slideshow={"slide_type": "fragment"} # Tulip: _regularized_ augmented system # # $$ # \begin{bmatrix} # -(\Theta^{-1} + {\color{red}{R_{p}}}) & A^{T}\\ # A & {\color{red}{R_{d}}} # \end{bmatrix} # \begin{bmatrix} # \Delta x\\ # \Delta y # \end{bmatrix} # = # \begin{bmatrix} # b\\ # c # \end{bmatrix} # $$ # where $R_{p}, R_{d} \succ 0$. (see paper for the mathematical justification) # # * Symmetric quasi-definite (SQD) # * Amenable to $LDL^{T}$-type factorizations # + [markdown] slideshow={"slide_type": "subslide"} # Design of Tulip: exploit structure of $A$ to speed-up the resolution of # # $$ # \begin{bmatrix} # -(\Theta^{-1} + R_{p}) & A^{T}\\ # A & R_{d} # \end{bmatrix} # \begin{bmatrix} # \Delta x\\ # \Delta y # \end{bmatrix} # = # \begin{bmatrix} # b\\ # c # \end{bmatrix} # $$ # + [markdown] slideshow={"slide_type": "fragment"} # E.g.: # * Efficient data structures # * Specialized matrix-vector product # * Specialized factorization routines # + [markdown] slideshow={"slide_type": "subslide"} # 3 requirements to do that in practice: # 1. Develop structure-exploiting data structures & routines... # 2. ... and integrate them in the IPM algorithmic framework # 3. Convey structure information to the solver # # This presentation: 2. & 3. # + [markdown] slideshow={"slide_type": "slide"} # # Some examples # + [markdown] slideshow={"slide_type": "subslide"} # ## Using a different arithmetic # # The model's arithmetic is chosen at instantiation. 
# + slideshow={"slide_type": "fragment"} tlp = Tulip.Model{Float64}() # Double-precision Tulip.load_problem!(tlp, "dat/afiro.mps") Tulip.optimize!(tlp) Tulip.get_attribute(tlp, Tulip.ObjectiveValue()) # + slideshow={"slide_type": "fragment"} tlp = Tulip.Model{Float128}() # Quadruple-precision Tulip.load_problem!(tlp, "dat/afiro.mps") Tulip.optimize!(tlp) Tulip.get_attribute(tlp, Tulip.ObjectiveValue()) # + [markdown] slideshow={"slide_type": "fragment"} # Also available in MOI with `Tulip.Optimizer{T}` # ```julia # moi64 = Tulip.Optimizer{Float64}() # double precision # moi128 = Tulip.Optimizer{Float128}() # quad precision # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ## Using specialized linear algebra # + slideshow={"slide_type": "skip"} tlp = Tulip.Model{Float64}() # Double-precision Tulip.load_problem!(tlp, "dat/afiro.mps") tlp.params.OutputLevel = 1; # + [markdown] slideshow={"slide_type": "-"} # The user can specify: # * Which data structure for $A$ (via `MatrixOptions`) # * Which linear solver (via `KKTOptions`), i.e., # * Augmented system vs normal equations # * Direct vs Indirect # * Backend # # Multiple dispatch takes care of the rest. 
# + slideshow={"slide_type": "fragment"} # Dense linear algebra tlp.params.MatrixOptions = Tulip.TLA.MatrixOptions(Matrix) tlp.params.KKTOptions = KKT.SolverOptions(KKT.Dense_SymPosDef); # + [markdown] slideshow={"slide_type": "-"} # Information about the linear solver is displayed in the log # ``` # Linear solver options # Arithmetic : Float64 # Backend : LAPACK # System : Normal equations # ``` # + slideshow={"slide_type": "skip"} Tulip.optimize!(tlp) # + [markdown] slideshow={"slide_type": "subslide"} # ### Switching between linear solvers # + slideshow={"slide_type": "fragment"} # Solve the normal equations with CHOLMOD tlp.params.KKTOptions = KKT.SolverOptions( KKT.CholmodSolver; normal_equations=true ); # + slideshow={"slide_type": "skip"} Tulip.optimize!(tlp) # + slideshow={"slide_type": "fragment"} # Solve the augmented system with LDLFactorizations tlp.params.KKTOptions = KKT.SolverOptions( KKT.LDLFact_SymQuasDef ); # + slideshow={"slide_type": "skip"} Tulip.optimize!(tlp) # + slideshow={"slide_type": "fragment"} # Solve the normal equations with a Krylov method (PR #56) tlp.params.KKTOptions = KKT.SolverOptions( KKT.KrylovPDSolver; method=Krylov.minres # Use the MINRES method ); # + slideshow={"slide_type": "skip"} Tulip.optimize!(tlp) # + [markdown] slideshow={"slide_type": "subslide"} # ### Using custom routines # # A unit block-angular matrix has the form # # $$ # A = # \begin{bmatrix} # B_{0} & B_{1} & B_{2} & \dots & B_{R}\\ # 0 & e^{T}\\ # 0 & & e^{T}\\ # \vdots &&& \ddots \\ # 0 &&&& e^{T} # \end{bmatrix} # $$ # # The [UnitBlockAngular](https://github.com/mtanneau/UnitBlockAngular.jl) package defines # * Custom data structure `UnitBlockAngularMatrix <: AbstractMatrix` # * Custom linear solver `UnitBlockAngularFactor <: AbstractKKTSolver` # * Specialized matrix-vector and factorization routines # + slideshow={"slide_type": "skip"} tlp = Tulip.Model{Float64}() # Double-precision Tulip.load_problem!(tlp, "dat/DER_24_1024_43.mps") 
tlp.params.OutputLevel = 1 tlp.params.Presolve = 0; # + slideshow={"slide_type": "skip"} # Solve with default linear algebra Tulip.optimize!(tlp) # + slideshow={"slide_type": "subslide"} using UnitBlockAngular # Store A as a unit block-angular matrix with 24 linking constraints, # 72 linking variables, 6421 columns and 1024 blocks tlp.params.MatrixOptions = Tulip.TLA.MatrixOptions( UnitBlockAngularMatrix, m0=24, n0=72, n=6421 , R=1024 ) # Select custom linear solver tlp.params.KKTOptions = Tulip.KKT.SolverOptions(UnitBlockAngularFactor); # + slideshow={"slide_type": "skip"} tlp.params.Presolve = 0 Tulip.optimize!(tlp) # + [markdown] slideshow={"slide_type": "subslide"} # Default linear algebra # ``` # Backend : CHOLMOD # System : Augmented system # # Itn PObj DObj PFeas DFeas GFeas Mu Time # ... # 33 +1.4743650e+03 +1.4743650e+03 2.00e-10 4.38e-11 9.60e-10 5.1e-15 0.54 # Solver exited with status Trm_Optimal # ``` # + [markdown] slideshow={"slide_type": "fragment"} # Specialized linear algebra # ``` # Backend : UnitBlockAngular # System : Normal equations # # Itn PObj DObj PFeas DFeas GFeas Mu Time # ... # 33 +1.4743650e+03 +1.4743650e+03 2.01e-10 4.38e-11 9.60e-10 5.1e-15 0.14 # Solver exited with status Trm_Optimal # ``` # + [markdown] slideshow={"slide_type": "fragment"} # 265 lines of code --> up to 10x speedup on structured problems # + [markdown] slideshow={"slide_type": "slide"} # # Implementation # # Tulip's codebase breaks down into # # | module | #loc | # |:--|---:| # | Core | 594 # | Presolve | 1080 # | KKT | 609 # | IPM | 748 # | Interfaces | 1011 # | **Total** | **4240** # # (numbers are computed with `cloc` and include docstrings, but not comments) # + [markdown] slideshow={"slide_type": "fragment"} # $\rightarrow$ Specialized linear algebra is only ~5% of total code 😱 # + [markdown] slideshow={"slide_type": "subslide"} # ## Model representation # # 3 representations of the same LP. 
# + [markdown] slideshow={"slide_type": "fragment"} # * Original (user-facing) LP # # \begin{align} # \min_{x} \ \ \ # & c^{T}x + c_{0}\\ # s.t. \ \ \ # & l_{r} \leq Ax \leq u_{r}\\ # & l_{x} \leq x \leq u_{x} # \end{align} # # * Presolved problem: same as above, but some rows/columns removed # # * Standard form LP (internal) # # \begin{align} # \min_{\tilde{x}} \ \ \ # & \tilde{c}^{T} \tilde{x} + \tilde{c}_{0}\\ # s.t. \ \ \ # & \tilde{A} \tilde{x} = \tilde{b}\\ # & 0 \leq \tilde{x} \leq \tilde{u} # \end{align} # + [markdown] slideshow={"slide_type": "subslide"} # Internal workflow is # # 1. User inputs the model and calls `optimize!` # 2. Pre-solve and extract reduced problem # 3. Convert reduced problem to standard form # 4. Instantiate matrix # 5. Instantiate KKT solver # 6. Optimize # 7. Post-solve # + [markdown] slideshow={"slide_type": "subslide"} # ## Matrix instantiation # # The (standard form) matrix $A$ is instantiated by calling # ```julia # A = construct_matrix(Ta, m, n, aI, aJ, aV; kwargs...) # ``` # # This method needs to be extended for custom matrix types `Ta`. 
# + [markdown] slideshow={"slide_type": "fragment"} # Example for `SparseMatrixCSC`: # ```julia # construct_matrix( # ::Type{SparseMatrixCSC}, m::Int, n::Int, # aI::Vector{Int}, aJ::Vector{Int}, aV::Vector{Tv} # ) where{Tv<:Real} = sparse(aI, aJ, aV, m, n) # ``` # + slideshow={"slide_type": "subslide"} m, n = 2, 4 aI = [1, 2, 2] aJ = [1, 2, 3] aV = [1.1, 2.2, 2.3] A = construct_matrix(SparseMatrixCSC, m, n, aI, aJ, aV) # + slideshow={"slide_type": "-"} Matrix(A) # + [markdown] slideshow={"slide_type": "subslide"} # ## Linear systems # # # Each IPM iteration comprises # * One factorization (expensive) # * A few triangular solves # * Several matrix-vector products # * Many vector operations # # $\Longrightarrow$ Persistent data structures to re-use storage and factorization + dispatch # + [markdown] slideshow={"slide_type": "subslide"} # ### Current implementation # # Root type `AbstractKKTSolver` and 3 methods # * `KKT.setup` (once per `optimize!`): instantiate the KKT solver # * `KKT.update!(kkt, θ⁻¹, rp, rd)` (once per iteration): update data and factorization # * `KKT.solve!(Δx, Δy, kkt, ξp, ξd)` (multiple times per iteration): solve the augmented system # + [markdown] slideshow={"slide_type": "fragment"} # KKT solvers are chosen via the `KKTOptions` parameter # ```julia # tlp.params.KKTOptions = KKT.SolverOptions( # KKT.KrylovPDSolver; # method=Krylov.minres # ) # ``` # + [markdown] slideshow={"slide_type": "subslide"} # ### `KKT.setup` # # # Called after presolve and matrix instantiation as # # ```julia # KKT.setup(params.KKTOptions.Ts, A; params.KKTOptions.options...) # ``` # + [markdown] slideshow={"slide_type": "fragment"} # If no specialized dispatch, defaults to # ``` # setup(Ts, args...; kwargs...) = Ts(args...; kwargs...) # ``` # $\rightarrow$ `KKTOptions` is basically a factory # # Custom KKT solvers must implement # ```julia # MyKKTSolver(A; kwargs...) 
# ``` # + slideshow={"slide_type": "skip"} ?KKT.setup # + [markdown] slideshow={"slide_type": "subslide"} # ### `KKT.update!` # # Called once per IPM iteration (unless numerical issues) as # ```julia # KKT.update!(kkt, θ⁻¹, regP, regD) # ``` # # After calling `update!`, `kkt` can be used to solve the augmented system # # $$ # \begin{bmatrix} # -(\Theta^{-1} + R_{p}) & A^{T}\\ # A & R_{d} # \end{bmatrix} # \begin{bmatrix} # \Delta x\\ # \Delta y # \end{bmatrix} # = # \begin{bmatrix} # \xi_{p}\\ # \xi_{d} # \end{bmatrix} # $$ # # Primal-dual regularizations are handled by the IPM algorithm. # + [markdown] slideshow={"slide_type": "fragment"} # Direct methods: update the factorization # # Iterative methods: update the preconditioner # + slideshow={"slide_type": "skip"} ?KKT.update! # + [markdown] slideshow={"slide_type": "subslide"} # ### `KKT.solve!` # # Called multiple times per iteration. # Solves the augmented system. # + [markdown] slideshow={"slide_type": "slide"} # # Some lessons learned # + [markdown] slideshow={"slide_type": "fragment"} # User-facing LP # # \begin{align} # \min_{x} \ \ \ # & c^{T}x + c_{0}\\ # s.t. \ \ \ # & l_{r} \leq Ax \leq u_{r}\\ # & l_{x} \leq x \leq u_{x} # \end{align} # # Very similar in spirit to [MatrixOptInterface.jl](https://github.com/jump-dev/MatrixOptInterface.jl) # + [markdown] slideshow={"slide_type": "fragment"} # For fast build/modify & presolve, we need: # * Add/remove rows/columns/coefficients # * Access to individual coefficients (more rare) # * Ensure row/column index consistency # * Fast row-based _and_ column-based iteration over $A$ # + [markdown] slideshow={"slide_type": "subslide"} # The initial version stored coefficients in a `Dict`... # # which is very slow and actually not that convenient. 
# + [markdown] slideshow={"slide_type": "subslide"} # ## Current implementation # # Similar in principle to [SoPlex](https://soplex.zib.de/), we store two copies of $A$ in sync: # * Column-based: `acols::Vector{Col}` # * Row-based: `arows::Vector{Row}` # # Redundant but simplifies the access. # + [markdown] slideshow={"slide_type": "fragment"} # Each row/column is a (sorted) sparse vector to speed up modifications # ```julia # mutable struct RowOrCol{T} # nzind::Vector{Int} # nzval::Vector{T} # end # ``` # + [markdown] slideshow={"slide_type": "fragment"} # Row/column deletion is still slow because all the matrix has to shift. # + [markdown] slideshow={"slide_type": "subslide"} # ## Future versions (?) # # Improvement opportunities # 1. Batch modifications (currently defaults to individual calls) # 2. Recently discovered [DynamicSparseArrays.jl](https://github.com/atoptima/DynamicSparseArrays.jl) # 3. Also enable customization of these data structures, for, e.g.: # * Distributed models # * Operator-based modeling # * ... # + [markdown] slideshow={"slide_type": "fragment"} # ⚠️ Modifying this part will interfere with presolve. 
⚠️ # + [markdown] slideshow={"slide_type": "slide"} # # Future directions # + [markdown] slideshow={"slide_type": "subslide"} # ## (convex) Quadratic programming # + [markdown] slideshow={"slide_type": "fragment"} # Quadratic objective, linear constraints # # \begin{align} # (QP) \ \ \ \min_{x} \ \ \ # & \frac{1}{2} x^{T}Qx + c^{T}x\\ # & A x = b\\ # & x \geq 0 # \end{align} # + [markdown] slideshow={"slide_type": "fragment"} # Same algorithm, but augmented systems are of the form # # $$ # \begin{bmatrix} # -(Q + \Theta^{-1}) & A^{T}\\ # A & 0 # \end{bmatrix} # \begin{bmatrix} # \Delta x\\ # \Delta y # \end{bmatrix} # = # \begin{bmatrix} # b\\ # c # \end{bmatrix} # $$ # + [markdown] slideshow={"slide_type": "subslide"} # ## Iterative linear solvers 🚧 # # WIP # * Initial infrastructure using [Krylov.jl](https://github.com/JuliaSmoothOptimizers/Krylov.jl): cf [#56](https://github.com/ds4dm/Tulip.jl/pull/56) # * Changes to the IPM needed for speed & numerics # * Some research on good preconditioners is also needed. # + [markdown] slideshow={"slide_type": "fragment"} # ## Specialized solvers # # * Block-angular problems: large-scale stochastic LPs # * Integration with [StructJuMP.jl](https://github.com/StructJuMP/StructJuMP.jl) & [BlockDecomposition.jl](https://github.com/atoptima/BlockDecomposition.jl) # + [markdown] slideshow={"slide_type": "fragment"} # ## GPU support # # Most likely as part of a specialized linear solver, e.g., in combination with a Krylov method.
Tulip_JuMP-Dev.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="a43kzGAqldvF" colab_type="text" # # Using GANs on Fashion MNIST # + id="31jeVn-hw2OF" colab_type="code" colab={} # !mkdir images # + [markdown] id="k12WQC13loLE" colab_type="text" # ### Import Libraries # + id="2-U3XBQAmyeQ" colab_type="code" colab={} from __future__ import print_function, division import tensorflow as tf from tensorflow.python.keras.datasets import fashion_mnist from tensorflow.python.keras.layers import Input, Dense, Reshape, Flatten, Dropout from tensorflow.python.keras.layers.convolutional import UpSampling2D, Conv2D from tensorflow.python.keras.models import Sequential, Model from tensorflow.python.keras.optimizers import Adam from tensorflow.python.keras.layers import BatchNormalization, Activation, ZeroPadding2D from tensorflow.python.keras.layers.advanced_activations import LeakyReLU import matplotlib.pyplot as plt import sys import numpy as np # + [markdown] id="ODMHtpC_qpv1" colab_type="text" # ### Function for image visualisation # + id="b6Z5eCW6qwsT" colab_type="code" colab={} def sample_images(gen_imgs,img_name): r, c = 5, 5 gen_imgs = 0.5 * gen_imgs + 0.5 fig, axs = plt.subplots(r, c) cnt = 0 for i in range(r): for j in range(c): axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray') axs[i,j].axis('off') cnt += 1 fig.savefig("images/{}.png".format(img_name)) plt.show() plt.close() # + [markdown] id="mF9XuM6IqKKa" colab_type="text" # ### Download the dataset from the keras function # + id="DqjVjyW4pVJu" colab_type="code" colab={} (X_train, _), (_, _) = fashion_mnist.load_data() X_train = X_train / 127.5 - 1. 
X_train = np.expand_dims(X_train, axis=3) # + id="ejZVCGiHqTBW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="8bacfe24-199c-4a3c-d6c2-3ab1e897c9b9" sample_images(X_train,"train_images") # + [markdown] id="xQanq86Erqn_" colab_type="text" # ### DCGAN model # + id="tzzE1sh4rwS9" colab_type="code" colab={} class GAN(): def __init__(self): self.img_rows = 28 self.img_cols = 28 self.channels = 1 self.img_shape = (self.img_rows, self.img_cols, self.channels) self.latent_dim = 100 optimizer = Adam(0.0002, 0.5) # Build and compile the discriminator self.discriminator = self.build_discriminator() self.discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) # Build the generator self.generator = self.build_generator() # The generator takes noise as input and generates imgs z = Input(shape=(self.latent_dim,)) img = self.generator(z) # For the combined model we will only train the generator self.discriminator.trainable = False # The discriminator takes generated images as input and determines validity validity = self.discriminator(img) # The combined model (stacked generator and discriminator) # Trains the generator to fool the discriminator self.combined = Model(z, validity) self.combined.compile(loss='binary_crossentropy', optimizer=optimizer) def build_generator(self): model = Sequential([ Dense(256, input_dim=self.latent_dim), LeakyReLU(alpha=0.2), BatchNormalization(momentum=0.8), Dense(512), LeakyReLU(alpha=0.2), BatchNormalization(momentum=0.8), Dense(1024), LeakyReLU(alpha=0.2), BatchNormalization(momentum=0.8), Dense(np.prod(self.img_shape), activation='tanh'), Reshape(self.img_shape), ]) model.summary() noise = Input(shape=(self.latent_dim,)) img = model(noise) return Model(noise, img) def build_discriminator(self): model = Sequential([ Flatten(input_shape=self.img_shape), Dense(512), LeakyReLU(alpha=0.2), Dense(256), LeakyReLU(alpha=0.2), Dense(1, activation='sigmoid'), ]) model.summary() 
img = Input(shape=self.img_shape) validity = model(img) return Model(img, validity) # + id="d62y6cZ_nmjK" colab_type="code" colab={} def train(gan,gan_noise, epochs, batch_size=128, sample_interval=50): # Adversarial ground truths valid = np.ones((batch_size, 1)) fake = np.zeros((batch_size, 1)) for epoch in range(epochs): idx = np.random.randint(0, X_train.shape[0], batch_size) imgs = X_train[idx] noise = np.random.normal(0, 1, (batch_size, gan.latent_dim)) gen_imgs = gan.generator.predict(noise) # Train the discriminator d_loss_real = gan.discriminator.train_on_batch(imgs, valid) d_loss_fake = gan.discriminator.train_on_batch(gen_imgs, fake) d_loss = 0.5 * np.add(d_loss_real, d_loss_fake) # --------------------- # Train Generator # --------------------- noise = np.random.normal(0, 1, (batch_size, gan.latent_dim)) # Train the generator (to have the discriminator label samples as valid) g_loss = gan.combined.train_on_batch(noise, valid) # Plot the progress print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss)) # If at save interval => save generated image samples if epoch % sample_interval == 0: gen_imgs = gan.generator.predict(gan_noise) sample_images(gen_imgs,100000+epoch) # + [markdown] id="UT7MAGC4tZsR" colab_type="text" # ### Train GANs # + colab_type="code" id="pzk2Pql3xi86" colab={} gan = GAN() r, c = 5, 5 gan_noise = np.random.normal(0, 1, (r * c, gan.latent_dim)) train(gan,gan_noise, epochs=10000, batch_size=128, sample_interval=50) # + [markdown] id="ndizTeXmvhtS" colab_type="text" # ## Final result # + [markdown] id="iyqCktaV0SWd" colab_type="text" # **At epoch 0** # + id="d9uXQ2eq1Kdg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 305} outputId="d48c615f-8c06-40fb-9889-1fb817b4da22" Image(filename='images/100000.png') # + [markdown] colab_type="text" id="VpmxWqPe1Xgq" # **At epoch 2500** # + id="fg7lslJx1kkU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 
305} outputId="083d66d2-be41-4529-86d2-6845edd9d0ef" Image(filename='images/102500.png') # + [markdown] colab_type="text" id="7rfkNUzA1X43" # **At epoch 5000** # + id="zQrBAGdc1q6i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 305} outputId="6fce53d9-c3de-49d0-bed5-31cb3e000cc8" Image(filename='images/105000.png') # + [markdown] colab_type="text" id="EZjr-0GS1YG4" # **At epoch 7500** # + colab_type="code" outputId="303c9110-57fd-47d3-c7de-77cd26e13e72" id="N0T52olk1YHD" colab={"base_uri": "https://localhost:8080/", "height": 305} Image(filename='images/107500.png') # + [markdown] id="VpGVJcKT14md" colab_type="text" # **At epoch 10000** # + id="rAk3DTx9vRY0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="ef44afb5-cb22-4b21-f4f7-3a3679e01284" r, c = 5, 5 noise = np.random.normal(0, 1, (r * c, gan.latent_dim)) gen_imgs = gan.generator.predict(noise) sample_images(gen_imgs,100000+10000) # + [markdown] id="oUls2I6iDkdB" colab_type="text" # ## Generator and discrimnator structure # + id="bx8m1LhAKY7h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 885} outputId="e79e25db-8d82-461e-d980-cd628859290f" gan=GAN()
GAN_on_FASHION_MNIST.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Les procédures et fonctions # Lorsque les programmes sont plus longs, il est souvent plus judicieux de les découper en plusieurs parties. On crée alors des "morceaux" qui traitent un sous problème bien précis. On parle alors de : # * procedures # * fonctions # # Remarques : # * dans la pratique on confond souvent les deux. # * si le programme est encore plus gros on peut le découper en différents modules. # ### Les procédures # D'une façon générale, une **procédure** est une sorte de boite noire qui réalise une action donnée à partir d'un certains nombres d'entrées que l'on appelle des **paramètres**. # # Vous les avez déja utilisé sans le savoir. Par exemple, lorsqu'on utilise l'instruction *print("Bonjour les secondes")*, on appelle la procédure *print* qui prend comme paramètre le texte que l'on veut afficher et réalise l'action d'afficher à l'écran de l'utilisateur ce texte. Cependant nous ne savons pas précisément le contenu de cette boite. # # Dans notre cas, ce sera à nous de la créer pour pouvoir l'utiliser. Prenons un cas concret, nous souhaitons afficher le volume de deux pavés droits (ou plus) dont nous connaissons respectivement la largeur, la longueur et la hauteur. Nous voyons aisément que nous allons avoir besoin de créer une procédure permettant de calculer et d'afficher le volume. 
# + #************************************************************************************************ # Procédure permettant de calculer et d'afficher le volume d'un pavé droit les paramètres sont : # larg : la largeur de notre pavé # long : la longueur de notre pavé # haut : La hauteur de notre pavé # ces trois variables sont données à la procédure lors de l'appel (ce sont des paramètres) #************************************************************************************************ def Calcul_et_affichage_volume_pave(larg,long,haut): volume = larg * long *haut print("Le volume du pavé droit de largeur {} et de longueur {} et de hauteur {} est de {} mètres cubes".format(larg,long,haut, volume)) # - # Nous avons juste créé un "bout" de programme qui réalise notre action. Il nous maintenant à l'utiliser. C'est une procédure Calcul_et_affichage_volume_pave(3,5.5,2) Calcul_et_affichage_volume_pave(20.2,2,3) # A noter que les résultats ne sont pas conservés en mémoire ! (pas d'affectation de variable) # ### Les fonctions # De façon simple, une fonction est une procédure qui renvoie un résultat stockable dans une variable. Reprenons notre exemple précédent en contraignant simplement de calculer le volume de notre pavé dont nous connaissons la largeur, la longueur et la hauteur. 
#************************************************************************************************ # Procédure permettant de calculer et d'afficher le volume d'un pavé droit les paramètres sont : # larg : la largeur de notre pavé # long : la longueur de notre pavé # haut : La hauteur de notre pavé # ces trois variables sont données à la procédure lors de l'appel (ce sont des paramètres) #************************************************************************************************ def Calcul_Volume_Pave(larg, long,haut): volume = larg * long*haut return volume # Nous l'utilisons v1 = Calcul_Volume_Pave(3,5.5,4) print("Le volume du pavé droit {} mètres cubes".format(v1)) v2 = Calcul_Volume_Pave(20.2,2,3) print("Le volume du pavé droit {} mètres cubes".format(v2)) # Définissons maintenant une fonction "comme" en mathématiques. def f(x): return 3*x**2-2*x+1 # On utilise notre fonction : print(f(3)) # Représentons maintenant cette fonction : from matplotlib.pylab import plot X= [1,2,3,4,5] Y=[f(1),f(2),f(3),f(4),f(5)] plot(X,Y) # Remarque : pour gagner en efficacité on peut créer le X et le Y en utilisant une boucle ! # Dans le cas d'une fonction, les résultats sont gardés en mémoire et peuvent donc être réutilisés plus tard. # Par abus de langage, on rassemble souvent les procédures et fonctions sous le terme global de **fonctions**. # L'intérêt de ces manières de procéder est d'isoler et de compartimenter les problèmes. On comprend bien l'intérêt, et dans notre dernier exemple nous sommes très proches de la notion de fonction en mathématiques.
fonctions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

class node:
    """A binary tree node that tracks its depth (``level``) in the tree.

    ``val`` is never read by the algorithm below; it is kept as scratch
    space for compatibility with the original cell.
    """

    def __init__(self):
        self.level = 0   # depth of this node (root = 0)
        self.val = 0     # unused scratch value
        self.left = None
        self.right = None

    def addLeft(self):
        """Create the left child if absent and return it (one level deeper)."""
        if not self.left:
            self.left = node()
            self.left.level = self.level + 1
        return self.left

    def addRight(self):
        """Create the right child if absent and return it (one level deeper)."""
        if not self.right:
            self.right = node()
            self.right.level = self.level + 1
        return self.right


# +
# Minimum-camera coverage of a binary tree: place as few cameras as possible
# so every node is monitored; a camera monitors its parent, itself and its
# immediate children.  Greedy post-order DFS with three states:
#   0 -- node is NOT covered (its parent is forced to place a camera)
#   1 -- node is covered but holds no camera
#   2 -- node holds a camera
#
# The running total is named `cameras` instead of the original `sum`, which
# shadowed the builtin of the same name.
cameras = 0

def dfs(node):
    """Return the coverage state (0/1/2) of *node*, post-order.

    Increments the module-level ``cameras`` counter each time a camera is
    deployed.  The caller must add one extra camera if the root itself
    comes back as state 0 (uncovered).
    """
    global cameras
    if not node:
        # an empty subtree counts as "covered without a camera", so leaves
        # themselves fall through to state 0 and force a camera at their parent
        return 1
    l = dfs(node.left)
    r = dfs(node.right)
    if l == 0 or r == 0:
        # some child is not covered -> we must place the camera here
        cameras += 1
        return 2
    elif l == 2 or r == 2:
        # a child's camera already monitors this node
        return 1
    else:
        # both children are covered without cameras, so nothing monitors us;
        # report 0 and let the parent deploy a camera
        return 0
# -

root = node()
l1 = root.addLeft()
l1l = l1.addLeft()
l1R = l1.addRight()
l2l = l1l.addLeft()

# +
# case 1
# -

# ```
#        root               <- covered by its child's camera
#       /
#     l1                    <- camera (its right child was uncovered)
#    /   \
#  l1l    l1R               <- camera at l1l (its left child was uncovered)
#   /
# l2l
#
# total: 2 cameras
# ```

# if the root itself is left uncovered (dfs returns 0) we add one more camera
cameras = 0
if dfs(root) == 0:
    cameras += 1
print('outside dfs: ', cameras)

cameras  # 2

# +
# case 2
# -

root = node()
l = root.addLeft()
r = root.addRight()
ll = l.addLeft()
lr = l.addRight()
# NOTE(review): a stray `l11 = l1.addLeft()` was removed here -- it mutated
# the *case 1* tree left over from the previous cell and played no part in
# this example (the printed result is unchanged).

# ```
#       root               <- camera (its right child was uncovered)
#      /    \
#     l      r
#    / \
#  ll   lr                 <- camera at l (both its children were uncovered)
#
# total: 2 cameras
# ```

cameras = 0
if dfs(root) == 0:
    cameras += 1
print('outside dfs: ', cameras)

cameras  # 2
BinaryTreeCamera-least camera to cover binary tree (DFS).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # (Back_Propagation)= # # Chapter 9 -- Back Propagation # The ultimate goal of neural network, don't forget, is to find the best weight and bias. So when we obtain the predicted $\hat{y}$, we need to use it to compare with the actual result set $y$, and adjust weight and bias matrix $W$ in accordance. # The thought process is identical to the previous two layers neural network as we introduced before. However, we have three layers in this case, and potentially much more layers. We need to adjust and update the weight in each layer, instead of just one layer in the two layer example. # So we need to obtain the gradient of the cost function in order to update weights. Let's take the example of the first weight in the input layer in figure 8.1 in chapter 8. We need a longer chain rule to obtain the gradient because we have one more layer: # $$ # \frac{\delta C}{\delta w^{(1)}_{11}} = \frac{\delta C}{\delta\hat{y}}\frac{\delta\hat{y}}{\delta h_2} \frac{\delta h_2}{\delta h_1} \frac{\delta h_1}{\delta w^{(1)}_{11}} # \label{real chain rule} # $$ (eq9_1) # Note that the gradient of a weight is evaluated by the sum of all data/student ($y_i$), as explained in the previous example. # $$ # -\frac{\delta C}{w^{(1)}_{11}} = \frac{1}{m} \frac{\delta}{\delta w^{(1)}_{11}} [\sum_{i=1}^m [y_i*ln(\sigma(h_2)]+(1-y_i)*ln(1-\sigma(h_2))] # $$ (eq9_2) # For simplicity, we just derive the gradient for weight $w^{(1)}_{11}$ for a single $y$, and use for loop to sum up them up and divide by $m$. # $$ # -\frac{\delta C}{w^{(1)}_{11}} = \frac{\delta}{\delta w^{(1)}_{11}} [y*ln(\sigma(h_2)]+(1-y)*ln(1-\sigma(h_2))] # $$ (eq9_3) # Again, the $y$ is just a constant, so we put it outside of the differential equation. 
# $$ # -\frac{\delta C}{w^{(1)}_{11}} = y*\frac{\delta}{\delta w^{(1)}_{11}} ln(\sigma(h_2)]+ (1-y)*\frac{\delta}{\delta w^{(1)}_{11}}ln(1-\sigma(h_2)) # $$ (eq9_4) # # where $\sigma(h_2) = \hat{y}$. # We also know that # $$ # h_2 = \sigma(h_1)*w^{(2)}_{11} + \sigma(h_2)*w^{(2)}_{21} + 1*w^{(2)}_{31} # $$ (eq9_5) # and # $$ # h_1 = w^{(1)}_{11}*x_1 + w^{(1)}_{21}*x_2 + w^{(1)}_{31}*x_3 + w^{(1)}_{41}*1 # $$ (eq9_6) # So, for the parts that can be combined into the chain rule: # $$ # \frac{\delta C}{\delta \hat{y}} = \frac{1-y}{1-\hat{y}} - \frac{y}{\hat{y}} # $$ (eq9_7) # $$ # \frac{\delta \hat{y}}{\delta h_2} = \sigma^{'}(h_2)= \sigma(h_2)[1-\sigma(h_2)] # $$ (eq9_8) # $$ # \frac{\delta h_2}{\delta h_1} = w^{(2)}_{11}\sigma^{'}(h_1) # $$ (eq9_9) # $$ # \frac{\delta h_1}{\delta w^{(1)}_{11}} = x_1 # $$ (eq9_10) # As a result, we have the gradient of $w_{11}^{(1)}$ from the input layer to the hidden layer as follow # $$ # -\frac{\delta C}{w^{(1)}_{11}} = (\frac{1-y}{1-\hat{y}} - \frac{y}{\hat{y}})*\sigma^{'}(h_2)*w^{(2)}_{11}*\sigma^{'}(h_1)*x_1 # \label{inputToHidden} # $$ (eq9_11) # On the other hand, we also want to get the gradient of weight (e.g. $w_{11}^{(2)}$) from the hidden layer to the output layer: # $$ # \frac{\delta C}{\delta w^{(2)}_{11}} = \frac{\delta C}{\delta\hat{y}}\frac{\delta\hat{y}}{\delta h_2} \frac{\delta h_2}{\delta w^{(2)}_{11}} # $$ (eq9_12) # $$ # \frac{\delta C}{\delta w^{(2)}_{11}} = (\frac{1-y}{1-\hat{y}} - \frac{y}{\hat{y}})*\sigma^{'}(h_2) * \sigma(h_1) # \label{hiddenToOutput} # $$ (eq9_13) # We realise that the first two terms are the same in equation {eq}`eq9_11` and {eq}`eq9_13`, so we define an `Error Term' $\delta^{n-1}$ for simplicity ($n$ is the number of layers in the network, i.e. 
$n=3$):

# $$
# \delta^{n-1} = \frac{\delta C}{\delta \hat{y}}\frac{\delta \hat{y}}{\delta h_2} = (\frac{1-y}{1-\hat{y}} - \frac{y}{\hat{y}})*\sigma^{'}(h_2)
# $$ (eq9_14)

# So the weight $w_{11}^{(2)}$ from the hidden to output ($2nd$) layer in equation {eq}`eq9_13` can be expressed by the error term $\delta^{3-1}$ for the $2nd$ layer as

# $$
# \frac{\delta C}{\delta w^{(2)}_{11}} = \delta^{3-1} *\sigma(h_1)
# $$ (eq9_15)

# And the weight $w_{11}^{(1)}$ from the input to hidden layer in equation {eq}`eq9_11` can be written as

# $$
# \frac{\delta C}{\delta w^{(1)}_{11}} = \delta^{3-1} * w^{(2)}_{11} * \sigma^{'}(h_1)*x_1
# \label{inputToHidden1}
# $$ (eq9_16)

# To further simplify, we have another "Error Term" $\delta^{n-2}$ (i.e. $\delta^{3-2}$ for the $1st$ layer)

# $$
# \delta^{n-2} = \delta^{n-1} * w^{(2)}_{11} * \sigma^{'}(h_1)
# $$ (eq9_17)

# where $\delta^{n-2}$ belongs to the next layer to the left; in this 3-layer neural network example, it is used for the weights between the input and hidden layer. $\delta^{n-2}$ is obtained by multiplying $\delta^{n-1}$ by the corresponding weight $w^{(2)}_{11}$ of the current layer and by the derivative of sigma of the next layer to the left, $\sigma^{'}(h_1)$.

# So the equation {eq}`eq9_16` for weights in the $1st$ layer can be written as

# $$
# \frac{\delta C}{\delta w^{(1)}_{11}} = \delta^{3-2} *x_1
# \label{inputToHidden2}
# $$ (eq9_18)

# So in a sense, in the back propagation, we update the weights in layers closer to the output layer first and then update towards the input layer. Every time we move back one layer to the left, we compute that layer's error term $\delta^{n-2}$ from the error term $\delta^{n-1}$ of the layer to its right.

# For updating the weight from the hidden to the output ($2nd$) layer, we have

# $$
# w_i^{(2)'} = w_i^{(2)} - \eta \nabla C = w_i^{(2)} - \eta \frac{\delta C}{\delta w_i} = w_i^{(2)} - \eta \sum_{i=1}^m \delta^{3-1}\sigma(h_1)
# $$ (eq9_19)

# Remember that $\delta^{3-1}$ is a function of $y$, so we need to sum over all $y_i$ in the dataset.
# For updating the weight from the input to the hidden ($1st$) layer, we have

# $$
# w_i^{(1)'} = w_i^{(1)} - \eta \nabla C = w_i^{(1)} - \eta \frac{\delta C}{\delta w_i} = w_i^{(1)} - \eta \sum_{i=1}^m \delta^{3-2} x_1
# $$ (eq9_20)
notebooks/e_extra/pytorch_image_filtering_ml/Chapter 9 -- Back Propagation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

import pysal as ps
from pysal import weights as w
import numpy as np
import scipy.sparse as sp


# +
def OD(Wo, Wd):
    """Build an origin-destination spatial weights object.

    The OD weights are the Kronecker product of the origin weights and the
    destination weights, so the OD pair (o, d) neighbors (o', d') exactly
    when o neighbors o' in *Wo* and d neighbors d' in *Wd*.

    Parameters: Wo, Wd -- pysal W objects.  Returns a pysal W over all
    origin-destination pairs.
    """
    Wo = Wo.sparse
    Wd = Wd.sparse
    Ww = sp.kron(Wo, Wd)
    return w.WSP2W(w.WSP(Ww))
# -

origins = ps.weights.lat2W(4, 4)
dests = ps.weights.lat2W(4, 4)

Ww = OD(origins, dests)
Ww.transform = 'r'
# print() calls instead of Python 2 print statements: the parenthesised form
# is valid in both Python 2 and Python 3, the statement form only in 2
print(Ww.full()[0].shape)

flows = np.random.randint(0, 100, (4, 4))
np.fill_diagonal(flows, 0)
flows = flows.reshape((16, 1))
print(flows)

slag = ps.lag_spatial(Ww, flows)
print(slag)

origins.weights

import os
# NOTE(review): machine-specific hard-coded path -- adjust to your own pysal checkout
os.chdir('/Users/toshan/dev/pysal/pysal/weights')
from spintW import ODW

origins = ps.weights.lat2W(2, 2)
dests = ps.weights.lat2W(2, 2)

Ww = ODW(origins, dests)

Ww.full()[0]
pysal/contrib/spint/notebooks/OD_weights.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- """ Financial Machine Learning Part 0: Bars https://towardsdatascience.com/financial-machine-learning-part-0-bars-745897d4e4ba We must aim for a bar representation in which each bar contains the same amount of information, however time-based bars will oversample slow periods and undersample high activity periods. To avoid this problem, the idea is to sample observations as a function of market activity. """ # + """ Setup """ import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdates from datetime import datetime # raw trade data from https://public.bitmex.com/?prefix=data/trade/ data = pd.read_csv('data/trade_20181127.csv') data = data.append(pd.read_csv('data/trade_20181128.csv')) # add a few more days data = data.append(pd.read_csv('data/trade_20181129.csv')) data = data[data.symbol == 'XBTUSD'] # timestamp parsing data['timestamp'] = data.timestamp.map(lambda t: datetime.strptime(t[:-3], "%Y-%m-%dD%H:%M:%S.%f")) # - data.head() data['price'].plot() """ Time Bars """ def compute_vwap(df): q = df['foreignNotional'] p = df['price'] vwap = np.sum(p * q) / np.sum(q) df['vwap'] = vwap return df data_timeidx = data.set_index('timestamp') data_time_grp = data_timeidx.groupby(pd.Grouper(freq='15Min')) num_time_bars = len(data_time_grp) # comes in handy later data_time_vwap = data_time_grp.apply(compute_vwap) data_time_vwap.head() data_time_vwap['vwap'].plot() """ Tick Bars The idea behind tick bars is to sample observations every N transactions, aka "ticks", instead of fixed time buckets. This allows us to capture more information at times when many trades take place, and vice-versa. 
""" total_ticks = len(data) num_ticks_per_bar = total_ticks / num_time_bars num_ticks_per_bar = round(num_ticks_per_bar, -3) # round to the nearest thousand data_tick_grp = data.reset_index().assign(grpId=lambda row: row.index // num_ticks_per_bar) data_tick_vwap = data_tick_grp.groupby('grpId').apply(compute_vwap) data_tick_vwap.set_index('timestamp', inplace=True) total_ticks, num_time_bars, num_ticks_per_bar data_tick_vwap.head() data_time_vwap['vwap'].plot() data_tick_vwap['vwap'].plot() """ Volume Bars One shortcoming of tick bars is that not all trades are equal. Consider that an order to buy 1000 contracts is executed as one transaction, and 10 orders for 100 contracts will count for 10 transactions. It may make sense to sample observations for every N contracts exchanged independent of how many trades took place. """ data_cm_vol = data.assign(cmVol=data['homeNotional'].cumsum()) total_vol = data_cm_vol.cmVol.values[-1] vol_per_bar = total_vol / num_time_bars vol_per_bar = round(vol_per_bar, -2) # round to the nearest hundred data_vol_grp = data_cm_vol.assign(grpId=lambda row: row.cmVol // vol_per_bar) data_vol_vwap = data_vol_grp.groupby('grpId').apply(compute_vwap) data_vol_vwap.set_index('timestamp', inplace=True) data_vol_vwap.head() fig, ax = plt.subplots(figsize = (10,6)) ax.plot(data_time_vwap['vwap'], label='time bars') ax.plot(data_vol_vwap['vwap'], label='BTC volume bars') plt.title('XBT time and volume bars') # + data_time_vwap['vwap'].plot() data_vol_vwap['vwap'].plot() # + # code omitted for brevity # same as volume bars, except using data['foreignNotional'] instead of data['homeNotional'] # - """ Implementing Dollar Imbalance Bars """ def convert_tick_direction(tick_direction): if tick_direction in ('PlusTick', 'ZeroPlusTick'): return 1 elif tick_direction in ('MinusTick', 'ZeroMinusTick'): return -1 else: raise ValueError('converting invalid input: '+ str(tick_direction)) data_timeidx['tickDirection'] = 
data_timeidx.tickDirection.map(convert_tick_direction) # 1.2 Compute signed flows at each tick data_signed_flow = data_timeidx.assign(bv = data_timeidx.tickDirection * data_timeidx.size) # 2. Accumulate dollar imbalance bars from fast_ewma import _ewma abs_Ebv_init = np.abs(data_signed_flow['bv'].mean()) E_T_init = 500000 # 500000 ticks to warm up def compute_Ts(bvs, E_T_init, abs_Ebv_init): Ts, i_s = [], [] i_prev, E_T, abs_Ebv = 0, E_T_init, abs_Ebv_init n = bvs.shape[0] bvs_val = bvs.values.astype(np.float64) abs_thetas, thresholds = np.zeros(n), np.zeros(n) abs_thetas[0], cur_theta = np.abs(bvs_val[0]), bvs_val[0] for i in range(1, n): cur_theta += bvs_val[i] abs_theta = np.abs(cur_theta) abs_thetas[i] = abs_theta threshold = E_T * abs_Ebv thresholds[i] = threshold if abs_theta >= threshold: cur_theta = 0 Ts.append(np.float64(i - i_prev)) i_s.append(i) i_prev = i E_T = _ewma(np.array(Ts), window=np.int64(len(Ts)))[-1] abs_Ebv = np.abs( _ewma(bvs_val[:i], window=np.int64(E_T_init * 3))[-1] ) # window of 3 bars return Ts, abs_thetas, thresholds, i_s Ts, abs_thetas, thresholds, i_s = compute_Ts(data_signed_flow.bv, E_T_init, abs_Ebv_init) # Aggregate the ticks into groups based on computed boundaries n = data_signed_flow.shape[0] i_iter = iter(i_s + [n]) i_cur = i_iter.__next__() grpId = np.zeros(n) for i in range(1, n): if i <= i_cur: grpId[i] = grpId[i-1] else: grpId[i] = grpId[i-1] + 1 i_cur = i_iter.__next__() # Putting it all together: Dollar Imbalance Bars data_dollar_imb_grp = data_signed_flow.assign(grpId = grpId) data_dollar_imb_vwap = data_dollar_imb_grp.groupby('grpId').apply(compute_vwap).vwap
Python/bars.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Feature Selection in der Data Science Pipeline # ## *am Beispiel von medizinischen Diagnosen (Brustkrebs Diagnose)* # # Wir haben den öffentlich verfügbaren Datensatz Breast Cancer Wisconsin verwendet und vom UCI Machine Learning Repository heruntergeladen. # # Repository: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29 # ![](./data.png) # Unser Ziel ist es, herauszufinden, welche Merkmale bei der Vorhersage von bösartigem oder gutartigem Krebs am hilfreichsten sind, und zu klassifizieren, ob der Brustkrebs gutartig oder bösartig ist. # # Wir haben den öffentlich verfügbaren Datensatz Breast Cancer Wisconsin verwendet und vom UCI Machine Learning Repository heruntergeladen. # # Die typische Leistungsanalyse wird durchgeführt # # ![](conf_matrix.png) # + code_folding=[0] # Bibliotheken importieren from sklearn.datasets import load_breast_cancer from sklearn.datasets import make_classification from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from xgboost import XGBClassifier from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from cf_matrix import make_confusion_matrix from tensorflow.keras.models import Model from tensorflow.keras.layers import Input from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LeakyReLU from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.utils import plot_model from sklearn.model_selection import train_test_split, RepeatedStratifiedKFold, StratifiedKFold, GridSearchCV, cross_validate import matplotlib.pylab as pl import pandas as pd import numpy as np import 
sklearn.metrics as metrics import matplotlib.pyplot as plt from sklearn import preprocessing # + code_folding=[] # Download des Krebs-Datensatzes import seaborn as sns from sklearn import preprocessing (X, y) = load_breast_cancer(return_X_y=True, as_frame=True) # Überblick über die Daten X # - # ### Baseline: Entscheidungsbäumen Klassifikator # Baseline in der Performance mit Entscheidungsbäumen from sklearn.datasets import make_classification from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from xgboost import XGBClassifier from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from cf_matrix import make_confusion_matrix # Daten Skalierung t = MinMaxScaler() t.fit(X) X = t.transform(X) # Einfacher binärer Klassifikator model = XGBClassifier() # definieren Sie das Verfahren der Kreuzvalidierung cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=1) # Modell auswerten scores = cross_validate(estimator=model, X=X, y=y, cv=cv, n_jobs=-1, scoring=['accuracy', 'roc_auc', 'precision', 'recall', 'f1']) print('Accuracy: ', scores['test_accuracy'].mean()) print('Precision: ', scores['test_precision'].mean()) print('Recall: ', scores['test_recall'].mean()) print('F1: ', scores['test_f1'].mean(), '\n') # # Feature-Selection Methoden # # Es ist üblich, statistische Maße des Korrelationstyps zwischen Eingangs- und Ausgangsvariablen als Grundlage für die Feature Selection zu verwenden. # # Die Wahl der statistischen Maße hängt also stark von den Datentypen der Variablen ab. # # $\chi^2$ # # Für den ersten Ansatz berechnen wir die Chi-Quadrat-Statistik zwischen jedem nicht-negativen Feature und der Klasse. 
# # Dieser Wert kann verwendet werden, um die n_features-Merkmale mit den höchsten Werten für die Test-Chi-Quadrat-Statistik aus dem Eingabe-Merkmalsvektor relativ zu den Klassen auszuwählen. # # Erinnern Sie sich, dass der Chi-Quadrat-Test die Abhängigkeit zwischen stochastischen Variablen misst, so dass wir damit die Features entfernen, die am wahrscheinlichsten unabhängig von der Klasse sind und daher für die Klassifizierung irrelevant sind. # + code_folding=[] # Download des Krebs-Datensatzes import seaborn as sns from sklearn import preprocessing (X, y) = load_breast_cancer(return_X_y=True, as_frame=True) # + code_folding=[] # Chi-Quadrat Feature Selection from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 # Ermitteln der besten k = 20 Features chi_best = SelectKBest(chi2, k=20).fit(X, y) mask_features = chi_best.get_support() new_features = [] # The list of your K best features for bool, feature in zip(mask_features, X.columns): if bool: new_features.append(feature) X_new = chi_best.fit_transform(X, y) # Überblick über die filtrierte Daten new_features # - # chi-quadrat Feature Selection in der Performance mit XGBoost Modell # Einfacher binärer Klassifikator model = XGBClassifier() # definieren Sie das Verfahren der Kreuzvalidierung cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=1) # Modell auswerten scores = cross_validate(estimator=model, X=X_new, y=y, cv=cv, n_jobs=-1, scoring=['accuracy', 'roc_auc', 'precision', 'recall', 'f1']) print('Accuracy: ', scores['test_accuracy'].mean()) print('Precision: ', scores['test_precision'].mean()) print('Recall: ', scores['test_recall'].mean()) print('F1: ', scores['test_f1'].mean(), '\n') # # minimum-Redundancy-Maximum-Relevance (mRMR) # # Das Ziel ist es, eine Feature-Submenge auszuwählen, die die statistische Eigenschaft einer Ziel-Klassifikationsvariable am besten charakterisiert, unter der Einschränkung, dass diese Features untereinander so unähnlich 
wie möglich sind, aber der Klassifikationsvariable so wenig wie möglich ähnlich sind. # # Es gibt verschiedene Formen von mRMR, wobei "Relevanz" und "Redundanz" durch Mutual Information, Korrelation, t-Test/F-Test, Distanzen, etc. definiert wurden. # # + import pymrmr rel_feat = pymrmr.mRMR(X, 'MID', 20) X_new = X[X.columns.intersection(rel_feat)] rel_feat # - # mRMR Feature Selection in der Performance mit XGBoost Modell # Einfacher binärer Klassifikator model = XGBClassifier() # definieren Sie das Verfahren der Kreuzvalidierung cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=1) # Modell auswerten scores = cross_validate(estimator=model, X=X_new, y=y, cv=cv, n_jobs=-1, scoring=['accuracy', 'roc_auc', 'precision', 'recall', 'f1']) print('Accuracy: ', scores['test_accuracy'].mean()) print('Precision: ', scores['test_precision'].mean()) print('Recall: ', scores['test_recall'].mean()) print('F1: ', scores['test_f1'].mean(), '\n') # # Vergleichende Analyse der Feature Selection Methoden # # ![](evaluation.png) # Über diesen Github-Link können Sie auf den gesamten Code und die Daten des Projekts zugreifen. # # ![](download_code.png)
Feature_Selection__Demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="OaeSlvRlQxZr" colab_type="text"
# # Python for Loop

# + [markdown] id="mWRuZi5wQxZu" colab_type="text"
# The for loop in Python is used to iterate over a sequence (list, tuple, string) or other iterable objects.
#
# Iterating over a sequence is called traversal.

# + [markdown] id="HoHdlyZkQxZx" colab_type="text"
# # Syntax:

# + [markdown] id="8zrVIhSuQxZz" colab_type="text"
# for element in sequence :
#
#     Body of for

# + [markdown] id="MmQ0IYynQxZ1" colab_type="text"
# Here, element is the variable that takes the value of the item inside the sequence on each iteration.
#
# Loop continues until we reach the last item in the sequence.

# + [markdown] id="kEAa_ccxQxZ2" colab_type="text"
# # Flow Chart

# + [markdown] id="521Cq0xfQxZ4" colab_type="text"
# ![title](forLoop.jpg)

# + [markdown] id="OuwDO8SlQxZ6" colab_type="text"
# # Example

# + id="htEE3yt7QxZ8" colab_type="code" colab={} outputId="215cb822-3d89-4abb-8f79-636007e1c1c3"
# Find the product of all numbers present in a list.
lst = [10, 20, 30, 40, 50]

product = 1
# Iterating over the list.
for ele in lst:
    product *= ele

print("Product is: {}".format(product))

# + [markdown] id="3FMtLcozQxaG" colab_type="text"
# # range() function

# + [markdown] id="lVF4Vy6KQxaH" colab_type="text"
# We can generate a sequence of numbers using range() function. range(10) will generate numbers from 0 to 9 (10 numbers).
#
# We can also define the start, stop and step size as range(start,stop,step size). step size defaults to 1 if not provided.
#
# This function does not store all the values in memory, it would be inefficient. So it remembers the start, stop, step size and generates the next number on the go.

# + id="xp3D6jNTQxaI" colab_type="code" colab={} outputId="622ffafa-4ae9-4602-d6ae-02500828aa7a"
# Print the numbers 0..9.
for i in range(10):
    print(i)

# + id="HeGUHz5BQxaN" colab_type="code" colab={} outputId="a27f27fc-0629-40b1-9f98-a90e7faca0d1"
# Print numbers from 0 up to (but not including) 20 with a step size of 5.
# (The original comment said "from 1 to 20 with step size of 2", which did not
# match the code.)
for i in range(0, 20, 5):
    print(i)

# + id="6qX7nCCEQxaQ" colab_type="code" colab={} outputId="f20c8ea8-5ea5-4c64-ae23-665de980254a"
lst = ["satish", "srinu", "murali", "naveen", "bramha"]

# Iterate over the list using an index:
# for index in range(len(lst)):
#     print(lst[index])

# Iterating directly over the elements is the idiomatic way.
for ele in lst:
    print(ele)

# + [markdown] id="q4hvJ3n6QxaU" colab_type="text"
# # for loop with else

# + [markdown] id="tUGZf_sQQxaW" colab_type="text"
# A for loop can have an optional else block as well. The else part is executed if the items in the sequence used in for loop exhausts.
#
# break statement can be used to stop a for loop. In such case, the else part is ignored.
#
# Hence, a for loop's else part runs if no break occurs.

# + id="j0fE1yOGQxaX" colab_type="code" colab={} outputId="d29bfdbb-d235-4eba-8b5b-d9031772a474"
numbers = [1, 2, 3]

# Iterating over the list; the else clause runs because no break occurs.
for item in numbers:
    print(item)
else:
    print("no item left in the list")

# + id="FXDUZ8eaQxac" colab_type="code" colab={} outputId="367100da-0fa9-499c-d4c3-0990af072d93"
# The loop breaks at the first even number, so the else clause is skipped.
for item in numbers:
    print(item)
    if item % 2 == 0:
        break
else:
    print("no item left in the list")

# + [markdown] id="m689PmHOQxah" colab_type="text"
# # Python Program to display all prime numbers within an interval

# + id="5jmBW7vaQxai" colab_type="code" colab={} outputId="e3e354d9-986d-4687-edeb-edc5aadc77c8"
index1 = 20
index2 = 50

print("Prime numbers between {0} and {1} are :".format(index1, index2))

for num in range(index1, index2 + 1):  # default step size is 1
    if num > 1:
        isDivisible = False
        for index in range(2, num):
            if num % index == 0:
                isDivisible = True
                break  # one divisor is enough — same output, less work
        if not isDivisible:
            print(num)

# + id="JTmwS0CEQxam" colab_type="code" colab={}
Day 4/for_loop.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 4, "hidden": true, "row": 0, "width": 4}, "report_default": {}}}} # %matplotlib notebook from IPython.core.display import display, HTML display(HTML("<style>.container { width:90% !important; }</style>")) import warnings import os from gammaboard import GammaBoard, plot_migration_matrices warnings.filterwarnings("ignore") # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 32, "hidden": false, "row": 0, "width": 9}, "report_default": {}}}} gb = GammaBoard(os.environ['GAMMABOARD_DATA'], site='south', ref='performances') # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {}, "report_default": {}}}} gb.exp_box # + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {}, "report_default": {}}}} exp_name = list(gb.experiments_dict.keys())[0] exp = gb.experiments_dict[exp_name] exp.load_data() fig = plot_migration_matrices(exp, figsize=(20,4))
gammaboard/dashboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Regression Week 4: Ridge Regression (interpretation) # In this notebook, we will run ridge regression multiple times with different L2 penalties to see which one produces the best fit. We will revisit the example of polynomial regression as a means to see the effect of L2 regularization. In particular, we will: # * Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression # * Use matplotlib to visualize polynomial regressions # * Use a pre-built implementation of regression (GraphLab Create) to run polynomial regression, this time with L2 penalty # * Use matplotlib to visualize polynomial regressions under L2 regularization # * Choose best L2 penalty using cross-validation. # * Assess the final fit using test data. # # We will continue to use the House data from previous notebooks. (In the next programming assignment for this module, you will implement your own ridge regression learning algorithm using gradient descent.) # # Fire up graphlab create import graphlab # # Polynomial regression, revisited # We build on the material from Week 3, where we wrote the function to produce an SFrame with columns containing the powers of a given input. Copy and paste the function `polynomial_sframe` from Week 3: def polynomial_sframe(feature, degree): # Let's use matplotlib to visualize what a polynomial regression looks like on the house data. import matplotlib.pyplot as plt # %matplotlib inline sales = graphlab.SFrame('kc_house_data.gl/') # As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices. 
sales = sales.sort(['sqft_living','price']) # Let us revisit the 15th-order polynomial model using the 'sqft_living' input. Generate polynomial features up to degree 15 using `polynomial_sframe()` and fit a model with these features. When fitting the model, use an L2 penalty of `1e-5`: l2_small_penalty = 1e-5 # Note: When we have so many features and so few data points, the solution can become highly numerically unstable, which can sometimes lead to strange unpredictable results. Thus, rather than using no regularization, we will introduce a tiny amount of regularization (`l2_penalty=1e-5`) to make the solution numerically stable. (In lecture, we discussed the fact that regularization can also help with numerical stability, and here we are seeing a practical example.) # # With the L2 penalty specified above, fit the model and print out the learned weights. # # Hint: make sure to add 'price' column to the new SFrame before calling `graphlab.linear_regression.create()`. Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set=None` in this call. # ***QUIZ QUESTION: What's the learned value for the coefficient of feature `power_1`?*** # # Observe overfitting # Recall from Week 3 that the polynomial fit of degree 15 changed wildly whenever the data changed. In particular, when we split the sales data into four subsets and fit the model of degree 15, the result came out to be very different for each subset. The model had a *high variance*. We will see in a moment that ridge regression reduces such variance. But first, we must reproduce the experiment we did in Week 3. # First, split the sales data into four subsets of roughly equal size and call them `set_1`, `set_2`, `set_3`, and `set_4`. Use `.random_split` function and make sure you set `seed=0`. 
(semi_split1, semi_split2) = sales.random_split(.5,seed=0) (set_1, set_2) = semi_split1.random_split(0.5, seed=0) (set_3, set_4) = semi_split2.random_split(0.5, seed=0) # Next, fit a 15th degree polynomial on `set_1`, `set_2`, `set_3`, and `set_4`, using 'sqft_living' to predict prices. Print the weights and make a plot of the resulting model. # # Hint: When calling `graphlab.linear_regression.create()`, use the same L2 penalty as before (i.e. `l2_small_penalty`). Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set = None` in this call. # The four curves should differ from one another a lot, as should the coefficients you learned. # # ***QUIZ QUESTION: For the models learned in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature `power_1`?*** (For the purpose of answering this question, negative numbers are considered "smaller" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.) # # Ridge regression comes to rescue # Generally, whenever we see weights change so much in response to change in data, we believe the variance of our estimate to be large. Ridge regression aims to address this issue by penalizing "large" weights. (Weights of `model15` looked quite small, but they are not that small because 'sqft_living' input is in the order of thousands.) # # With the argument `l2_penalty=1e5`, fit a 15th-order polynomial model on `set_1`, `set_2`, `set_3`, and `set_4`. Other than the change in the `l2_penalty` parameter, the code should be the same as the experiment above. Also, make sure GraphLab Create doesn't create its own validation set by using the option `validation_set = None` in this call. # These curves should vary a lot less, now that you applied a high degree of regularization. 
# # ***QUIZ QUESTION: For the models learned with the high level of regularization in each of these training sets, what are the smallest and largest values you learned for the coefficient of feature `power_1`?*** (For the purpose of answering this question, negative numbers are considered "smaller" than positive numbers. So -5 is smaller than -3, and -3 is smaller than 5 and so forth.) # # Selecting an L2 penalty via cross-validation # Just like the polynomial degree, the L2 penalty is a "magic" parameter we need to select. We could use the validation set approach as we did in the last module, but that approach has a major disadvantage: it leaves fewer observations available for training. **Cross-validation** seeks to overcome this issue by using all of the training set in a smart way. # # We will implement a kind of cross-validation called **k-fold cross-validation**. The method gets its name because it involves dividing the training set into k segments of roughly equal size. Similar to the validation set method, we measure the validation error with one of the segments designated as the validation set. The major difference is that we repeat the process k times as follows: # # Set aside segment 0 as the validation set, and fit a model on rest of data, and evaluate it on this validation set<br> # Set aside segment 1 as the validation set, and fit a model on rest of data, and evaluate it on this validation set<br> # ...<br> # Set aside segment k-1 as the validation set, and fit a model on rest of data, and evaluate it on this validation set # # After this process, we compute the average of the k validation errors, and use it as an estimate of the generalization error. Notice that all observations are used for both training and validation, as we iterate over segments of data. # # To estimate the generalization error well, it is crucial to shuffle the training data before dividing them into segments. 
GraphLab Create has a utility function for shuffling a given SFrame. We reserve 10% of the data as the test set and shuffle the remainder. (Make sure to use `seed=1` to get consistent answer.) (train_valid, test) = sales.random_split(.9, seed=1) train_valid_shuffled = graphlab.toolkits.cross_validation.shuffle(train_valid, random_seed=1) # Once the data is shuffled, we divide it into equal segments. Each segment should receive `n/k` elements, where `n` is the number of observations in the training set and `k` is the number of segments. Since the segment 0 starts at index 0 and contains `n/k` elements, it ends at index `(n/k)-1`. The segment 1 starts where the segment 0 left off, at index `(n/k)`. With `n/k` elements, the segment 1 ends at index `(n*2/k)-1`. Continuing in this fashion, we deduce that the segment `i` starts at index `(n*i/k)` and ends at `(n*(i+1)/k)-1`. # With this pattern in mind, we write a short loop that prints the starting and ending indices of each segment, just to make sure you are getting the splits right. # + n = len(train_valid_shuffled) k = 10 # 10-fold cross-validation for i in xrange(k): start = (n*i)/k end = (n*(i+1))/k-1 print i, (start, end) # - # Let us familiarize ourselves with array slicing with SFrame. To extract a continuous slice from an SFrame, use colon in square brackets. For instance, the following cell extracts rows 0 to 9 of `train_valid_shuffled`. Notice that the first index (0) is included in the slice but the last index (10) is omitted. train_valid_shuffled[0:10] # rows 0 to 9 # Now let us extract individual segments with array slicing. Consider the scenario where we group the houses in the `train_valid_shuffled` dataframe into k=10 segments of roughly equal size, with starting and ending indices computed as above. # Extract the fourth segment (segment 3) and assign it to a variable called `validation4`. 
# To verify that we have the right elements extracted, run the following cell, which computes the average price of the fourth segment. When rounded to nearest whole number, the average should be $536,234. print int(round(validation4['price'].mean(), 0)) # After designating one of the k segments as the validation set, we train a model using the rest of the data. To choose the remainder, we slice (0:start) and (end+1:n) of the data and paste them together. SFrame has `append()` method that pastes together two disjoint sets of rows originating from a common dataset. For instance, the following cell pastes together the first and last two rows of the `train_valid_shuffled` dataframe. n = len(train_valid_shuffled) first_two = train_valid_shuffled[0:2] last_two = train_valid_shuffled[n-2:n] print first_two.append(last_two) # Extract the remainder of the data after *excluding* fourth segment (segment 3) and assign the subset to `train4`. # To verify that we have the right elements extracted, run the following cell, which computes the average price of the data with fourth segment excluded. When rounded to nearest whole number, the average should be $539,450. print int(round(train4['price'].mean(), 0)) # Now we are ready to implement k-fold cross-validation. Write a function that computes k validation errors by designating each of the k segments as the validation set. It accepts as parameters (i) `k`, (ii) `l2_penalty`, (iii) dataframe, (iv) name of output column (e.g. `price`) and (v) list of feature names. The function returns the average validation error using k segments as validation sets. # # * For each i in [0, 1, ..., k-1]: # * Compute starting and ending indices of segment i and call 'start' and 'end' # * Form validation set by taking a slice (start:end+1) from the data. # * Form training set by appending slice (end+1:n) to the end of slice (0:start). 
# * Train a linear model using training set just formed, with a given l2_penalty # * Compute validation error using validation set just formed def k_fold_cross_validation(k, l2_penalty, data, output_name, features_list): # Once we have a function to compute the average validation error for a model, we can write a loop to find the model that minimizes the average validation error. Write a loop that does the following: # * We will again be aiming to fit a 15th-order polynomial model using the `sqft_living` input # * For `l2_penalty` in [10^1, 10^1.5, 10^2, 10^2.5, ..., 10^7] (to get this in Python, you can use this Numpy function: `np.logspace(1, 7, num=13)`.) # * Run 10-fold cross-validation with `l2_penalty` # * Report which L2 penalty produced the lowest average validation error. # # Note: since the degree of the polynomial is now fixed to 15, to make things faster, you should generate polynomial features in advance and re-use them throughout the loop. Make sure to use `train_valid_shuffled` when generating polynomial features! # ***QUIZ QUESTIONS: What is the best value for the L2 penalty according to 10-fold validation?*** # You may find it useful to plot the k-fold cross-validation errors you have obtained to better understand the behavior of the method. # + # Plot the l2_penalty values in the x axis and the cross-validation error in the y axis. # Using plt.xscale('log') will make your plot more intuitive. # - # Once you found the best value for the L2 penalty using cross-validation, it is important to retrain a final model on all of the training data using this value of `l2_penalty`. This way, your final model will be trained on the entire dataset. # ***QUIZ QUESTION: Using the best L2 penalty found above, train a model using all training data. What is the RSS on the TEST data of the model you learn with this L2 penalty? ***
course_materials/course_2/05_ridge-regression/05_programming-assignment-1/week-4-ridge-regression-assignment-1-blank.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="zdjHLuBr2SvD" dataset=[['i1','i2','i5'], ['i2','i4'], ['i2','i3'], ['i1','i2','i4'], ['i1','i3'], ['i2','i3'], ['i1','i3'], ['i1','i2','i3','i5'], ['i1','i2','i3']] # + id="4g75Cf_m8Jbe" import pandas as pd # + id="Txt2JT3i8QFm" from mlxtend.preprocessing import TransactionEncoder # + colab={"base_uri": "https://localhost:8080/", "height": 311} id="yDyxdLbo8hSL" outputId="6aecd254-f288-43c5-a3f8-ac55d8bb91d6" te=TransactionEncoder() te_ary=te.fit(dataset).transform(dataset) df=pd.DataFrame(te_ary,columns=te.columns_) df # + colab={"base_uri": "https://localhost:8080/", "height": 428} id="nRI2KCRt9eFb" outputId="75c08f0c-9c45-4932-b62e-7b75bca1c86b" from mlxtend.frequent_patterns import apriori apriori(df,min_support=0.22) # + colab={"base_uri": "https://localhost:8080/", "height": 428} id="EkcV_neu-D2g" outputId="ca550999-6905-48ca-9e67-ff5f5a76abad" apriori(df,min_support=0.22,use_colnames=True) # + colab={"base_uri": "https://localhost:8080/", "height": 428} id="K8ekV_OX-m_S" outputId="870f0122-2414-4288-a0b5-a9af1c0e63e5" frequent_itemsets=apriori(df,min_support=0.2,use_colnames=True) frequent_itemsets['length']=frequent_itemsets['itemsets'].apply(lambda x: len(x)) frequent_itemsets # + colab={"base_uri": "https://localhost:8080/", "height": 223} id="pzszRh5R_ahX" outputId="b336d608-cd39-4c8c-abd1-94e1fd1b082f" frequent_itemsets[(frequent_itemsets['length']==2) & (frequent_itemsets['support']>=0.22)] # + [markdown] id="lfP6SP5BAF9n" # ASSOCIATION RULE MINING # + id="5HbjAW8CAELb" from mlxtend.frequent_patterns import association_rules # + colab={"base_uri": "https://localhost:8080/", "height": 223} id="2VLNIyqvB6Jd" outputId="c5c8c7dd-2507-46ee-997f-5c19c9daae47" rules=association_rules(frequent_itemsets) rules
Assignment_7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Banking and Unemployment # --- # + # Dependencies import numpy as np import pandas as pd import matplotlib.pyplot as plt import requests import time from census import Census from us import states # census key from config import (census_key, gkey) # Census API Key c = Census(census_key, year=2013) # - # ## Data Retrieval # + # Run Census Search to retrieve data on all zip codes (2013 ACS5 Census) # See: https://github.com/CommerceDataService/census-wrapper for library documentation # See: https://gist.github.com/afhaque/60558290d6efd892351c4b64e5c01e9b for labels census_data = c.acs5.get(("B01003_001E", "B23025_005E"), { 'for': 'zip code tabulation area:*'}) # Convert to DataFrame census_pd = pd.DataFrame(census_data) # Column Reordering census_pd = census_pd.rename(columns={"B01003_001E": "Population", "B23025_005E": "Unemployment Count", "zip code tabulation area": "Zipcode"}) # Add in Employment Rate (Employment Count / Population) # Final DataFrame # Visualize # - # ## Combine Data # + # Import the original data we analyzed earlier. Use dtype="object" to match other # Visualize # + # Merge the two data sets along zip code # Save the revised Data Frame as a csv # Visualize # - # ## Heatmap of poverty rate # + # Configure gmaps with API key # + # Store 'Lat' and 'Lng' into locations # Convert Poverty Rate to float and store # HINT: be sure to handle NaN values # - # Create a poverty Heatmap layer # Convert bank rate to list # Create bank symbol layer # Create a combined map
3/Activities/10-Stu_BankDeserts_Heatmap/Unsolved/Banking_Deserts_HeatMap.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Collision Avoidance - Data Collection
#
# If you ran through the basic motion notebook, hopefully you're enjoying how easy it can be to make your Jetbot move around! That's very cool! But what's even cooler, is making JetBot move around all by itself!
#
# This is a super hard task, that has many different approaches but the whole problem is usually broken down into easier sub-problems. It could be argued that one of the most
# important sub-problems to solve, is the problem of preventing the robot from entering dangerous situations! We're calling this *collision avoidance*.
#
# In this set of notebooks, we're going to attempt to solve the problem using deep learning and a single, very versatile, sensor: the camera. You'll see how with a neural network, camera, and the NVIDIA Jetson Nano, we can teach the robot a very useful behavior!
#
# The approach we take to avoiding collisions is to create a virtual "safety bubble" around the robot. Within this safety bubble, the robot is able to spin in a circle without hitting any objects (or other dangerous situations like falling off a ledge).
#
# Of course, the robot is limited by what's in its field of vision, and we can't prevent objects from being placed behind the robot, etc. But we can prevent the robot from entering these scenarios itself.
#
# The way we'll do this is super simple:
#
# First, we'll manually place the robot in scenarios where its "safety bubble" is violated, and label these scenarios ``blocked``. We save a snapshot of what the robot sees along with this label.
#
# Second, we'll manually place the robot in scenarios where it's safe to move forward a bit, and label these scenarios ``free``. Likewise, we save a snapshot along with this label.
#
# That's all that we'll do in this notebook; data collection. Once we have lots of images and labels, we'll upload this data to a GPU enabled machine where we'll *train* a neural network to predict whether the robot's safety bubble is being violated based off of the image it sees. We'll use this to implement a simple collision avoidance behavior in the end :)
#
# > IMPORTANT NOTE: When JetBot spins in place, it actually spins about the center between the two wheels, not the center of the robot chassis itself. This is an important detail to remember when you're trying to estimate whether the robot's safety bubble is violated or not. But don't worry, you don't have to be exact. If in doubt it's better to lean on the cautious side (a big safety bubble). We want to make sure JetBot doesn't enter a scenario that it couldn't get out of by turning in place.

# ### Display live camera feed
#
# So let's get started. First, let's initialize and display our camera like we did in the *teleoperation* notebook.
#
# > Our neural network takes a 224x224 pixel image as input. We'll set our camera to that size to minimize the filesize of our dataset (we've tested that it works for this task).
# > In some scenarios it may be better to collect data in a larger image size and downscale to the desired size later.

# +
import traitlets
import ipywidgets.widgets as widgets
from IPython.display import display
from jetbot import Camera, bgr8_to_jpeg

camera = Camera.instance(width=224, height=224, flipmode=2)

image = widgets.Image(format='jpeg', width=224, height=224)  # this width and height doesn't necessarily have to match the camera

# Keep the widget in sync with the camera, converting BGR frames to JPEG.
camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)

display(image)
# -

# Awesome, next let's create a few directories where we'll store all our data. We'll create a folder ``dataset`` that will contain two sub-folders ``free`` and ``blocked``,
# where we'll place the images for each scenario.

# +
import os

blocked_dir = 'dataset/blocked'
free_dir = 'dataset/free'

# we have this "try/except" statement because these next functions can throw an error if the directories exist already
try:
    os.makedirs(free_dir)
    os.makedirs(blocked_dir)
except FileExistsError:
    # Typo fixed in this message ("becasue" -> "because").
    print('Directories not created because they already exist')
# -

# If you refresh the Jupyter file browser on the left, you should now see those directories appear. Next, let's create and display some buttons that we'll use to save snapshots
# for each class label. We'll also add some text boxes that will display how many images of each category that we've collected so far. This is useful because we want to make
# sure we collect about as many ``free`` images as ``blocked`` images. It also helps to know how many images we've collected overall.

# +
button_layout = widgets.Layout(width='128px', height='64px')
free_button = widgets.Button(description='add free', button_style='success', layout=button_layout)
blocked_button = widgets.Button(description='add blocked', button_style='danger', layout=button_layout)
free_count = widgets.IntText(layout=button_layout, value=len(os.listdir(free_dir)))
blocked_count = widgets.IntText(layout=button_layout, value=len(os.listdir(blocked_dir)))

display(widgets.HBox([free_count, free_button]))
display(widgets.HBox([blocked_count, blocked_button]))
# -

# Right now, these buttons won't do anything. We have to attach functions to save images for each category to the buttons' ``on_click`` event. We'll save the value
# of the ``Image`` widget (rather than the camera), because it's already in compressed JPEG format!
#
# To make sure we don't repeat any file names (even across different machines!) we'll use the ``uuid`` package in python, which defines the ``uuid1`` method to generate
# a unique identifier. This unique identifier is generated from information like the current time and the machine address.

# +
from uuid import uuid1

def save_snapshot(directory):
    """Write the current JPEG frame (``image.value``) to a uniquely named file in *directory*."""
    image_path = os.path.join(directory, str(uuid1()) + '.jpg')
    with open(image_path, 'wb') as f:
        f.write(image.value)

def save_free():
    """Save a snapshot to the ``free`` folder and refresh its counter widget."""
    global free_dir, free_count
    save_snapshot(free_dir)
    free_count.value = len(os.listdir(free_dir))

def save_blocked():
    """Save a snapshot to the ``blocked`` folder and refresh its counter widget."""
    global blocked_dir, blocked_count
    save_snapshot(blocked_dir)
    blocked_count.value = len(os.listdir(blocked_dir))

# attach the callbacks, we use a 'lambda' function to ignore the
# parameter that the on_click event would provide to our function
# because we don't need it.
free_button.on_click(lambda x: save_free())
blocked_button.on_click(lambda x: save_blocked())
# -

# Great! Now the buttons above should save images to the ``free`` and ``blocked`` directories. You can use the Jupyter Lab file browser to view these files!
#
# Now go ahead and collect some data
#
# 1. Place the robot in a scenario where it's blocked and press ``add blocked``
# 2. Place the robot in a scenario where it's free and press ``add free``
# 3. Repeat 1, 2
#
# > REMINDER: You can move the widgets to new windows by right clicking the cell and clicking ``Create New View for Output``. Or, you can just re-display them
# > together as we will below
#
# Here are some tips for labeling data
#
# 1. Try different orientations
# 2. Try different lighting
# 3. Try varied object / collision types; walls, ledges, objects
# 4. Try different textured floors / objects; patterned, smooth, glass, etc.
#
# Ultimately, the more data we have of scenarios the robot will encounter in the real world, the better our collision avoidance behavior will be. It's important
# to get *varied* data (as described by the above tips) and not just a lot of data, but you'll probably need at least 100 images of each class (that's not a science, just a helpful tip here).
# But don't worry, it goes pretty fast once you get going :)

display(image)
display(widgets.HBox([free_count, free_button]))
display(widgets.HBox([blocked_count, blocked_button]))

# ## Next
#
# Once you've collected enough data, we'll need to copy that data to our GPU desktop or cloud machine for training. First, we can call the following *terminal* command to compress
# our dataset folder into a single *zip* file.
#
# > The ! prefix indicates that we want to run the cell as a *shell* (or *terminal*) command.
#
# > The -r flag in the zip command below indicates *recursive* so that we include all nested files, the -q flag indicates *quiet* so that the zip command doesn't print any output

# !zip -r -q dataset.zip dataset

# You should see a file named ``dataset.zip`` in the Jupyter Lab file browser. You should download the zip file using the Jupyter Lab file browser by right clicking and selecting ``Download``.
#
# Next, we'll need to upload this data to our GPU desktop or cloud machine (we refer to this as the *host*) to train the collision avoidance neural network. We'll assume that you've set up your training
# machine as described in the JetBot WiKi. If you have, you can navigate to ``http://<host_ip_address>:8888`` to open up the Jupyter Lab environment running on the host. The notebook you'll need to open there is called ``collision_avoidance/train_model.ipynb``.
#
# So head on over to your training machine and follow the instructions there! Once your model is trained, we'll return to the robot Jupyter Lab environment to use the model for a live demo!
notebooks/collision_avoidance/data_collection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Self-Driving Car Engineer Nanodegree # # # ## Project: **Finding Lane Lines on the Road** # *** # In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below. # # Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right. # # In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project. # # --- # Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image. 
# # **Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".** # # --- # **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.** # # --- # # <figure> # <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" /> # <figcaption> # <p></p> # <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p> # </figcaption> # </figure> # <p></p> # <figure> # <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" /> # <figcaption> # <p></p> # <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p> # </figcaption> # </figure> # **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. 
Also, consult the forums for more troubleshooting tips.** # ## Import Packages #importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 # %matplotlib inline # ## Read in an Image # + #reading in an image image = mpimg.imread('test_images/solidWhiteRight.jpg') #printing out some stats and plotting print('This image is:', type(image), 'with dimensions:', image.shape) plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray') # - # ## Ideas for Lane Detection Pipeline # **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:** # # `cv2.inRange()` for color selection # `cv2.fillPoly()` for regions selection # `cv2.line()` to draw lines on an image given endpoints # `cv2.addWeighted()` to coadd / overlay two images # `cv2.cvtColor()` to grayscale or change color # `cv2.imwrite()` to output images to file # `cv2.bitwise_and()` to apply a mask to an image # # **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!** # ## Helper Functions # Below are some helper functions to help get you started. They should look familiar from the lesson! 
# + import math import statistics ONE_DEGREE = np.pi / 180 def grayscale(img): """Applies the Grayscale transform This will return an image with only one color channel but NOTE: to see the returned image as grayscale (assuming your grayscaled image is called 'gray') you should call plt.imshow(gray, cmap='gray')""" return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Or use BGR2GRAY if you read an image with cv2.imread() # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) def canny(img, low_threshold, high_threshold): """Applies the Canny transform""" return cv2.Canny(img, low_threshold, high_threshold) def gaussian_blur(img, kernel_size): """Applies a Gaussian Noise kernel""" return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) def region_of_interest(img, vertices): """ Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. """ #defining a blank mask to start with mask = np.zeros_like(img) #defining a 3 channel or 1 channel color to fill the mask with depending on the input image if len(img.shape) > 2: channel_count = img.shape[2] # i.e. 3 or 4 depending on your image ignore_mask_color = (255,) * channel_count else: ignore_mask_color = 255 #filling pixels inside the polygon defined by "vertices" with the fill color cv2.fillPoly(mask, vertices, ignore_mask_color) #returning the image only where mask pixels are nonzero masked_image = cv2.bitwise_and(img, mask) return masked_image def draw_lines(img, lines, color=[255, 0, 0], thickness=2): """ NOTE: this is the function you might want to use as a starting point once you want to average/extrapolate the line segments you detect to map out the full extent of the lane (going from the result shown in raw-lines-example.mp4 to that shown in P1_example.mp4). Think about things like separating line segments by their slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left line vs. the right line. 
Then, you can average the position of each of the lines and extrapolate to the top and bottom of the lane. This function draws `lines` with `color` and `thickness`. Lines are drawn on the image inplace (mutates the image). If you want to make the lines semi-transparent, think about combining this function with the weighted_img() function below """ for line in lines: for x1,y1,x2,y2 in line: cv2.line(img, (x1, y1), (x2, y2), color, thickness) def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap): """ `img` should be the output of a Canny transform. Returns an image with hough lines drawn. """ lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap) line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) draw_lines(line_img, lines) return line_img # Python 3 has support for cool math symbols. def weighted_img(img, initial_img, α=0.8, β=1., γ=0.): """ `img` is the output of the hough_lines(), An image with lines drawn on it. Should be a blank image (all black) with lines drawn on it. `initial_img` should be the image before any processing. The result image is computed as follows: initial_img * α + img * β + γ NOTE: initial_img and img must be the same shape! 
""" return cv2.addWeighted(initial_img, α, img, β, γ) def bounded_line_properties(line, y_bound): """Returns properties of the given line as a tuple of floats, members of which could be inf.""" x1, y1, x2, y2 = line[0] if x2 - x1 == 0: slope = float('inf') else: slope = float(y2 - y1) / float(x2 - x1) y_intercept = y1 - slope * x1 if slope == 0: x_intercept = float('inf') x_bound_intercept = float('inf') else: x_intercept = -y_intercept / slope x_bound_intercept = (y_bound - y_intercept) / slope return slope, y_intercept, x_intercept, x_bound_intercept def smooth_line(lines): """Return a line (slope, x_bound) that is a smooth representation of the given list of lines.""" x_bound_intercept = int(statistics.median([line[4] for line in lines])) slope = statistics.median([line[1] for line in lines]) return slope, x_bound_intercept def lines_intersection(right_line, left_line): """Lines intersect where x = (b2 - b1) / (m1 - m2).""" m1, b1, _ = right_line m2, b2, _ = left_line if m1 == m2: return float('inf'), float('inf') x_intersect = int((b2 - b1) / (m1 - m2)) y_intersect = int(m1 * x_intersect + b1) return x_intersect, y_intersect class LaneBoundaryZone(object): """Identify the current path of travel.""" BUCKET_SIZE = 20 MIN_LANE_LINE_CLUSTER_SIZE = 3 LANE_FRAME_HIST_SIZE = 15 ROI_BOUNDARY_FUZZ = 35 MAX_LANE_SLOPE = 5 MIN_LANE_SLOPE = 0.3 def __init__(self, height, width): # Zone dimensions. self.width = width self.height = height # Pre-calculate these handy quantities. self.width_minus1 = width - 1 self.height_minus1 = height - 1 self.halfwidth = self.width // 2 self.halfheight = self.height // 2 # Params for canny edge detector step (based on gradient, change in pixel values). # # Minimum gradient threshold, below which, pixels are ignored. # # Integer self.canny_low_gradient = 92 # # Gradient threshold at which an edge is determined. Anything between the low/hi # thresholds is ok if next to an edge pixel. 
# # Integer self.canny_hi_gradient = 309 # Kernel size for the blur step. # # Integer self.blur_kernel = 3 # Params for the Hough line detector step (based on point representation space). # Resolution of the radius param in pixels during Hough line detection. # # Integer self.hough_rho = 1 # # Resolution of the angle (in degrees Pi/180) during Hough line detection. # # Integer self.hough_theta = 1 # # The minimum number of intersections to detect a line. # # Integer self.hough_threshold = 51 # # The minimum number of points that can form a line. # # Integer self.hough_min_line_len = 35 # # The maximum gap between two points to be considered in the same line. # # Integer self.hough_max_line_gap = 16 self.lane_line_bucket_cnt = self.width // self.BUCKET_SIZE + 1 self.lane_line_bucket_cnt_half = self.lane_line_bucket_cnt // 2 self.horizon = self.halfheight self.horizon_max = self.height * 0.67 self.right_roi = self.width_minus1 self.right_roi_min = self.width * 0.67 self.left_roi = 0 self.left_roi_max = self.width * 0.33 self.hist_idx = 0 self.right_lane_bound_hist = [None for i in range(self.LANE_FRAME_HIST_SIZE)] self.right_lane_upper_bound_hist = [None for i in range(self.LANE_FRAME_HIST_SIZE)] self.left_lane_bound_hist = [None for i in range(self.LANE_FRAME_HIST_SIZE)] self.left_lane_upper_bound_hist = [None for i in range(self.LANE_FRAME_HIST_SIZE)] self.horizon_hist = [None for i in range(self.LANE_FRAME_HIST_SIZE)] self.right_lane_slope_hist = [None for i in range(self.LANE_FRAME_HIST_SIZE)] self.left_lane_slope_hist = [None for i in range(self.LANE_FRAME_HIST_SIZE)] def set_horizon(self, horizon): if 0 <= horizon <= self.horizon_max: self.horizon = horizon def set_right_roi(self, roi): if self.right_roi_min <= roi <= self.width_minus1: self.right_roi = roi def set_left_roi(self, roi): if 0 <= roi <= self.left_roi_max: self.left_roi = roi def bucket_line(self, line, buckets): """Bucket here, as in verb - throw the line into an appropriate bucket based on where 
it projects onto the bottom of the purview. """ line_props = bounded_line_properties(line, self.height_minus1) slope, y_intercept, x_intercept, x_bound_intercept = line_props # Cheating here by discarding lines with infinite slope, which otherwise should be considered, but they'll # start mucking up further calculations, mean, median, and throw exceptions on int(...), etc. if (abs(slope) <= self.MAX_LANE_SLOPE and abs(slope) >= self.MIN_LANE_SLOPE and 0 <= x_bound_intercept < self.width and ( x_bound_intercept > self.halfwidth and slope > 0 or x_bound_intercept < self.halfwidth and slope < 0)): bucket_idx = int(x_bound_intercept / self.BUCKET_SIZE) buckets[bucket_idx].append((line, slope, int(y_intercept), int(x_intercept), int(x_bound_intercept))) def closest_cluster(self, buckets): """Return the contents of the first group of non-empty buckets (cluster) that satisfy a minimum cluster size.""" bucket_group = [] for bucket in buckets: if len(bucket) > 0: bucket_group.extend(bucket) else: if len(bucket_group) >= self.MIN_LANE_LINE_CLUSTER_SIZE: return bucket_group bucket_group = [] return [] def project_lanes(self, right_lane, left_lane): """Given properties of a right and left lane, project them onto the purview with specific endpoints bounded by the horizon. Update region of interest (roi) properties based on projections. 
""" right_proj = None left_proj = None if right_lane is not None and left_lane is not None: x_intersect, y_intersect = lines_intersection(right_lane, left_lane) if 0 <= x_intersect < self.width and 0 <= y_intersect < self.height: self.set_horizon(y_intersect) ave_right_x, ave_left_x, ave_horizon = self.smooth_top_points(x_intersect, x_intersect, self.horizon) self.set_horizon(ave_horizon) right_proj = ((ave_right_x, self.horizon), (right_lane[2], self.height_minus1)) self.set_right_roi(right_lane[2]) if ave_right_x > right_lane[2]: self.set_right_roi(ave_right_x) left_proj = ((ave_left_x, self.horizon), (left_lane[2], self.height_minus1)) self.set_left_roi(left_lane[2]) if ave_left_x < left_lane[2]: self.set_left_roi(ave_left_x) else: print("intersection out of bounds") elif right_lane is not None: # Horizon won't change if either lane is missing. r_slope, r_y_intercept, r_x_bound = right_lane x1 = int((self.horizon - r_y_intercept) / r_slope) ave_right_x, ave_left_x, ave_horizon = self.smooth_top_points(x1, None, self.horizon) self.set_horizon(ave_horizon) right_proj = ((ave_right_x, self.horizon), (r_x_bound, self.height_minus1)) self.set_right_roi(r_x_bound) if ave_right_x > r_x_bound: self.set_right_roi(ave_right_x) elif left_lane is not None: # Horizon won't change if either lane is missing. 
l_slope, l_y_intercept, l_x_bound = left_lane x1 = int((self.horizon - l_y_intercept) / l_slope) ave_right_x, ave_left_x, ave_horizon = self.smooth_top_points(None, x1, self.horizon) self.set_horizon(ave_horizon) left_proj = ((ave_left_x, self.horizon), (l_x_bound, self.height_minus1)) self.set_left_roi(l_x_bound) if ave_left_x > l_x_bound: self.set_left_roi(ave_left_x) else: _, _, ave_horizon = self.smooth_top_points(None, None, self.horizon) self.set_horizon(ave_horizon) return right_proj, left_proj def smooth_lane_hist(self, slope, x_bound, slope_hist, bound_hist): """Smooth lanes from frame to frame in the video by averaging over a history of lanes in previous frames.""" if self.hist_idx >= self.LANE_FRAME_HIST_SIZE: self.hist_idx = 0 slope_hist[self.hist_idx] = slope bound_hist[self.hist_idx] = x_bound slope_usable_hist = [e for e in slope_hist if e is not None] bound_usable_hist = [e for e in bound_hist if e is not None] # Reset roi if we're not getting any new signals. if not slope_usable_hist: self.left_roi = 0 self.right_roi = self.width_minus1 self.horizon = self.halfheight return None med_x_bound = int(statistics.median(bound_usable_hist)) med_slope = statistics.median(slope_usable_hist) y_intercept = int(self.height_minus1 - med_slope * med_x_bound) return med_slope, y_intercept, med_x_bound def smooth_dim_hist(self, dim, dim_hist): """Smooth dimensions from frame to frame in the video by averaging over the history of previous frames.""" dim_hist[self.hist_idx] = dim dim_usable_hist = [e for e in dim_hist if e is not None] if not dim_usable_hist: return None med_dim = int(statistics.median(dim_usable_hist)) return med_dim def smooth_top_points(self, x_right, x_left, horizon): """Smooth the horizon based on the history of horizon positions.""" return (self.smooth_dim_hist(x_right, self.right_lane_upper_bound_hist), self.smooth_dim_hist(x_left, self.left_lane_upper_bound_hist), self.smooth_dim_hist(horizon, self.horizon_hist)) def mask_roi(self, img): 
"""Apply a mask of the region of interest onto the given image and return the masked image.""" ##at The right way to do this is to slice the image at roi boundaries and operate only with that slice, then ##at simply translate final coordinates back and forth between between the roi system and the reference point ##at which is the source of the data. For now, will add in extra fuzz to the boundaries. left_roi_bound = self.left_roi - self.ROI_BOUNDARY_FUZZ if left_roi_bound < 0: left_roi_bound = 0 right_roi_bound = self.right_roi + self.ROI_BOUNDARY_FUZZ if right_roi_bound > self.width_minus1: right_roi_bound = self.width_minus1 return region_of_interest(img, np.array([[(left_roi_bound, self.horizon), (left_roi_bound, self.height_minus1), (right_roi_bound, self.height_minus1), (right_roi_bound, self.horizon)]], dtype='int32')) def locate_lane_bounds(self, image): """Entry point to locating the immediately bounding lanes of the road.""" # Image processing pipeline. img = grayscale(image) img = self.mask_roi(img) img = canny(img, self.canny_low_gradient, self.canny_hi_gradient) img = gaussian_blur(img, self.blur_kernel) lines = cv2.HoughLinesP(img, self.hough_rho, ONE_DEGREE * self.hough_theta, self.hough_threshold, np.array([]), minLineLength=self.hough_min_line_len, maxLineGap=self.hough_max_line_gap) # With some quality lines extracted from the image, we're ready to find our lanes. # First, bucketize the lines based on where they project onto the bottom of the screen. line_buckets = [[] for i in range(self.lane_line_bucket_cnt)] for line in lines: for x1, y1, x2, y2 in line: self.bucket_line(line, line_buckets) # With prominent lanes in the image, the lines should have formed clusters around the lanes. Take the first # qualifying cluster immediately to the left and to the right of the center line of the image. 
right_bounding_lines = self.closest_cluster(line_buckets[self.lane_line_bucket_cnt_half:]) left_bounding_lines = self.closest_cluster(reversed(line_buckets[:self.lane_line_bucket_cnt_half])) # For each choice cluster, smooth them out into right and left lane lines. slope = None x_bound = None if right_bounding_lines: slope, x_bound = smooth_line(right_bounding_lines) right_lane = self.smooth_lane_hist(slope, x_bound, self.right_lane_slope_hist, self.right_lane_bound_hist) slope = None x_bound = None if left_bounding_lines: slope, x_bound = smooth_line(left_bounding_lines) left_lane = self.smooth_lane_hist(slope, x_bound, self.left_lane_slope_hist, self.left_lane_bound_hist) right_proj, left_proj = self.project_lanes(right_lane, left_lane) self.hist_idx += 1 # Draw any lanes found over the original image. lane_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) if right_proj is not None: cv2.line(lane_img, right_proj[0], right_proj[1], [255, 0, 0], 5) if left_proj is not None: cv2.line(lane_img, left_proj[0], left_proj[1], [255, 0, 0], 5) overlay_lanes = weighted_img(lane_img, image) return overlay_lanes # - # ## Test Images # # Build your pipeline to work on the images in the directory "test_images" # **You should make sure your pipeline works well on these images before you try the videos.** import os os.listdir("test_images/") # ## Build a Lane Finding Pipeline # # # Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report. # # Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters. # TODO: Build your pipeline that will draw lane lines on the test_images # then save them to the test_images_output directory. 
image = mpimg.imread('test_images/solidYellowCurve.jpg') img = grayscale(image) #plt.imsave('test_images_output/solidYellowCurve-grayscale.jpg', img, cmap='gray') #plt.imshow(img, cmap='gray') img = canny(img, 92, 309) #plt.imsave('test_images_output/solidYellowCurve-canny.jpg', img, cmap='gray') img = gaussian_blur(img, 3) #plt.imsave('test_images_output/solidYellowCurve-blur.jpg', img, cmap='gray') img = hough_lines(img, 1, np.pi/180, 51, 35, 16) #plt.imshow(img, cmap='gray') overlay = weighted_img(img, image) plt.imshow(overlay) plt.imsave('test_images_output/solidYellowCurve-hough.jpg', img) #plt.savefig('test_images_output/solidYellowCurve.jpg') # ## Test on Videos # # You know what's cooler than drawing lanes over images? Drawing lanes over video! # # We can test our solution on two provided videos: # # `solidWhiteRight.mp4` # # `solidYellowLeft.mp4` # # **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** # # **If you get an error that looks like this:** # ``` # NeedDownloadError: Need ffmpeg exe. 
# You can download it by calling: # imageio.plugins.ffmpeg.download() # ``` # **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.** # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML def process_image(image): # NOTE: The output you return should be a color image (3 channel) for processing video below # TODO: put your pipeline here, # you should return the final output (image where lines are drawn on lanes) img = grayscale(image) img = canny(img, 92, 309) img = gaussian_blur(img, 3) img = hough_lines(img, 1, np.pi/180, 51, 35, 16) overlay = weighted_img(img, image) return overlay # Let's try the one with the solid white lane on the right first ... white_output = 'test_videos_output/solidYellowLeft.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5) clip1 = VideoFileClip("test_videos/solidYellowLeft.mp4") white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!! # %time white_clip.write_videofile(white_output, audio=False) # Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice. 
HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(white_output)) # ## Improve the draw_lines() function # # **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".** # # **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.** # Now for the one with the solid yellow lane on the left. This one's more tricky! 
yellow_output = 'test_videos_output/solidWhiteRight-solidLanes.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5) clip2 = VideoFileClip('test_videos/solidWhiteRight.mp4') sample_img = clip2.get_frame(0) zone = LaneBoundaryZone(sample_img.shape[0], sample_img.shape[1]) yellow_clip = clip2.fl_image(zone.locate_lane_bounds) # %time yellow_clip.write_videofile(yellow_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(yellow_output)) # ## Writeup and Submission # # If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file. # # ## Optional Challenge # # Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project! 
challenge_output = 'test_videos_output/challenge.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5) clip3 = VideoFileClip('test_videos/challenge.mp4') sample_img = clip3.get_frame(0) zone = LaneBoundaryZone(sample_img.shape[0], sample_img.shape[1]) challenge_clip = clip3.fl_image(zone.locate_lane_bounds) # %time challenge_clip.write_videofile(challenge_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(challenge_output))
P1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# DGA detector: trains a hybrid LSTM + CNN binary classifier that labels a
# domain name as DGA-generated (1) or benign (0). Positives come from a
# known-DGA feed (dga.txt); negatives are domains that stayed in the Tranco
# top-1M list across (nearly) the whole sampled date range.

# +
import random
from datetime import date, datetime, timedelta

import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.metrics  # explicit: `import sklearn` alone does not guarantee the submodule is loaded
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import datasets, layers, models
from tqdm import tqdm
from tranco import Tranco

# Grow GPU memory on demand instead of reserving it all up front.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.InteractiveSession(config=config)
# -

####################### Prepare Dataset #######################

# Positive class: DGA domains. Data lines of dga.txt have 6 whitespace
# separated fields with the domain in the second one; '#' lines are comments.
with open("dga.txt", "r") as f:
    content = f.readlines()
dga = list(set(
    l.split()[1]
    for l in content
    if not l.startswith("#") and len(l.split()) == 6
))
print(len(dga), dga[:30])

# +
# Negative class: domains present in the Tranco top 1M on at least
# MIN_PRESENCE_DAYS of the sampled dates (presumed stable/benign).
t = Tranco(cache=True, cache_dir='.tranco')


def daterange(start_date, end_date):
    """Yield every date from start_date (inclusive) to end_date (exclusive)."""
    for n in range(int((end_date - start_date).days)):
        yield start_date + timedelta(n)


start_date = datetime(2020, 4, 20)
end_date = datetime(2020, 6, 20) + timedelta(days=1)

MIN_PRESENCE_DAYS = 30  # a domain must appear on this many days to count as benign

occurrence_counter = dict()
for single_date in tqdm(list(daterange(start_date, end_date))):
    tranco_1M = set(t.list(date=single_date.strftime("%Y-%m-%d")).top(1000000))
    for d in tranco_1M:
        if d not in occurrence_counter:
            occurrence_counter[d] = 0
        occurrence_counter[d] += 1

benign = [k for k, v in occurrence_counter.items() if v >= MIN_PRESENCE_DAYS]
benign = list(set(benign) - set(dga))  # never let a DGA domain leak into the benign set
print(len(benign), benign[:30])
# -

# +
# Encode each domain as a padded sequence of per-character integer ids.
# Id 0 is reserved for padding, hence the +1 offsets below.
X = np.array(dga + benign)
Y = np.array([1] * len(dga) + [0] * len(benign))

valid_chars = {x: idx + 1 for idx, x in enumerate(set(''.join(X)))}
max_features = len(valid_chars) + 1
max_len = max(len(i) for i in dga + benign)

X = [[valid_chars[y] for y in x] for x in X]
X = tf.keras.preprocessing.sequence.pad_sequences(X, maxlen=max_len)
print(X[0])

x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
print(len(x_train), len(x_test))
print(valid_chars, max_len, max_features)
# -

# +
# Two parallel branches over a shared character embedding:
#   block 1: LSTM(128) -> dropout -> dense -> sigmoid
#   block 2: three parallel 1-D convolutions (kernel sizes 2/4/6) ->
#            global max pooling -> elementwise sum -> dropout -> dense -> sigmoid
domain_input = tf.keras.Input(shape=(max_len,), name='text_input')
input_layer = layers.Embedding(max_features, 128, input_length=max_len)(domain_input)

lstm = layers.LSTM(128)(input_layer)
lstm = layers.Dropout(0.5)(lstm)
lstm = layers.Dense(1)(lstm)
block_1_output = layers.Activation('sigmoid')(lstm)

conv_a = layers.Conv1D(15, 2, activation='relu')(input_layer)
conv_b = layers.Conv1D(15, 4, activation='relu')(input_layer)
conv_c = layers.Conv1D(15, 6, activation='relu')(input_layer)
pool_a = layers.GlobalMaxPooling1D()(conv_a)
pool_b = layers.GlobalMaxPooling1D()(conv_b)
pool_c = layers.GlobalMaxPooling1D()(conv_c)
flattened = layers.add([pool_a, pool_b, pool_c])
drop = layers.Dropout(0.5)(flattened)
dense = layers.Dense(1)(drop)
block_2_output = layers.Activation("sigmoid")(dense)

# NOTE(review): summing two sigmoids yields outputs in [0, 2], while
# binary_crossentropy expects probabilities in [0, 1]; layers.average would
# keep the range valid. Left unchanged to preserve the trained behavior.
output = layers.add([block_1_output, block_2_output])
model = tf.keras.Model(domain_input, output)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# -

tf.keras.utils.plot_model(model, to_file='model.png', show_shapes=True)

# +
# Stop once training loss has not improved for 3 consecutive epochs.
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
history = model.fit(x_train, y_train, batch_size=128, epochs=100,
                    validation_split=0.1, callbacks=[early_stopping])
# -

# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['train', 'validate'])
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')

# Evaluate on the held-out split; threshold the (summed-sigmoid) score at 0.5.
y_test_pred = model.predict(x_test)
sklearn.metrics.confusion_matrix(y_test, y_test_pred > .5)
print(sklearn.metrics.classification_report(y_test, y_test_pred > .5))

# +
# ROC curve over the raw scores (threshold-free view of the classifier).
fpr, tpr, threshold = sklearn.metrics.roc_curve(y_test, y_test_pred)
roc_auc = sklearn.metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label=f'AUC = {roc_auc:0.2f}')
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# -
dga-detection.ipynb
# +
"""
Checking Imported Data III - DataFrame Labels
"""

import numpy as np
import pandas as pd

# Load the 130k-row wine review dataset shipped next to this exercise.
wine_reviews = pd.read_csv('../winemag-data-130k.csv')

# Access the labels on the rows of data.
print(wine_reviews.index)

# Access the labels on the columns of data.
print(wine_reviews.columns)

# Return the labels for the rows and columns in wine_reviews in one command.
# .axes returns [row labels, column labels] as a two-element list.
print(wine_reviews.axes)
pset_pandas1_wine_reviews/check_imported_data/nb/p3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: nlp
#     language: python
#     name: nlp
# ---

# Ethnicity classifier for ICAO-style face images: crops faces out of the
# LAOFIW dataset with MTCNN, trains a fastai resnet34 on the crops, and then
# uses the trained model to split another dataset by predicted class.

# +
#default_exp ethnicity_icao

# +
#export
from fastai import *
from fastai.vision import *   # provides Path, Image, DataLoader, progress_bar, models, etc. used below
from pathlib import PosixPath
import cv2
from fastai.callbacks.hooks import *
from fastai.callbacks import *
from fastai.utils.mem import *
from facelib.core import save_inference, load_inference
from torchvision.models import vgg16_bn
from torchvision import models as torchmodels


def save_torch_inference(model, path):
    # Persist only the weights (state dict), not the full pickled model.
    torch.save(model.state_dict(), path)


def load_torch_inference(model, path, device=torch.device('cpu')):
    # Load weights into an already-constructed model and switch it to
    # inference mode (disables dropout/batch-norm training behavior).
    model.load_state_dict(torch.load(path, map_location=device))
    model.eval()
# -

laofiw_images_dir = Path('/data/faces/LAOFIW/images/')  # raw input images
images_dir = Path('../data/icao/ethnicity/')            # cropped output, one folder per class

# +
from facenet_pytorch import MTCNN

# Face detector/cropper producing 224x224 crops; post_process=False keeps
# pixel values in [0, 255] (hence the /255. before saving below).
mtcnn = MTCNN(
    image_size=224, margin=0, min_face_size=20,
    thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=False,
    device='cuda'
)

from torchvision import datasets


class ImagesWithPath(datasets.ImageFolder):
    # ImageFolder variant that also returns the source file path with each
    # sample, so a crop can be saved under its original file name.
    def __getitem__(self, index):
        original_tuple = super().__getitem__(index)
        path = self.imgs[index][0]
        return (original_tuple + (path,))


dataset = ImagesWithPath(str(laofiw_images_dir.parent))
dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
# collate_fn unwraps the single-sample batch so the loop gets (img, label, path).
loader = DataLoader(dataset, collate_fn=lambda x: x[0], num_workers=1)
# -


def create_cropped_images():
    """Crop every face and save it under images_dir/<ethnicity>/<name>.

    The class folder name is taken from the second-to-last '_'-separated
    token of the file name. Returns the file names for which detection or
    saving failed.
    """
    not_found_names = []
    for x, y, path in progress_bar(loader):
        name = Path(path).name
        ethnicity = name.split('_')[-2]
        # NOTE(review): bare except treats ANY failure (no face detected,
        # missing output folder, ...) as "not found" — consider narrowing.
        try:
            Image(mtcnn(x)/255.).save(str(images_dir/ethnicity/name))
        except:
            not_found_names.append(Path(path).name)
    return not_found_names


not_found_names = create_cropped_images()
# not_found_names = list(filter(lambda p: not (images_dir/p).exists(), [path.name for path in get_image_files(laofiw_images_dir)]))
len(not_found_names)

# # Data

# Light augmentation: no vertical flips (faces), mild lighting/zoom/rotation.
tfms = get_transforms(flip_vert=False, max_lighting=0.1, max_zoom=1.05, max_warp=0., max_rotate=7)


def get_data(src, size, tfms=get_transforms(), bs=4):
    """Build a normalized DataBunch from a split/labelled fastai item list.

    NOTE(review): the `tfms` default is evaluated once at definition time;
    harmless here because every call below passes `tfms` explicitly.
    """
    data = (src.transform(tfms, size=size)
            .databunch(bs=bs)
            .normalize(imagenet_stats))
    return data


# 90/10 random train/validation split; labels come from the class folders.
src = (ImageList.from_folder('../data/icao/ethnicity/').split_by_rand_pct(0.1, seed=42).label_from_folder())
data = get_data(src, 224, bs=64, tfms=tfms)
data

# acc_02 = partial(accuracy_thresh, thresh=0.2)
# f_score = partial(fbeta, thresh=0.2)
learner = cnn_learner(data, models.resnet34, metrics=[accuracy])
learner.data = get_data(src, 224, bs=64, tfms=tfms)
learner.fit(10, 1e-3)
# Second training round with the same data but a much smaller batch size.
learner.data = get_data(src, 224, bs=8, tfms=tfms)
learner.fit(10, 1e-3)

# Inspect validation performance: worst losses, confusion pairs, matrix.
interp = ClassificationInterpretation.from_learner(learner)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
interp.most_confused()
interp.plot_top_losses(3*4, figsize=(15, 11), largest=True)
interp.plot_confusion_matrix(figsize=(4, 4), dpi=60)

# ## Split other datasets

# Copy every image the trained model labels 'black' from the CelebA-based
# mouth-open set into a separate folder, preserving the last two path parts.
icao_label_path = Path('../data/icao/mouthopen_celeba/')
out_label_path = Path('../data/icao/mouthopen_celeba_black/')

errors = 0
for path in progress_bar(get_image_files(icao_label_path, recurse=True)):
    try:
        im_label_path = Path('/'.join(str(path).split('/')[-2:]))
        im = open_image(path)
        o, _, out_tensor = learner.predict(im)
        if o.obj == 'black':
            im.save(out_label_path/im_label_path)
    except:
        # NOTE(review): bare except only counts failures; nothing is logged.
        errors += 1
errors
nbs/09_icao_ethnicity.ipynb