text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` import torch import pandas as pd import numpy as np import sklearn from collections import Counter from sklearn.utils import Bunch from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report from itertools import combinations import re import os import torch.nn as nn import matplotlib.pyplot as plt ``` # Data Loading ``` path = r"E:\github\movie_hatespeech_detection\data\fox_news\fox_news.csv" df = pd.read_csv(path, index_col=0) df = df.rename(columns={'class': 'label'}) df['label'] = df['label'].replace({2:1}) df = df.append({'comment': 'I love you', 'label': 0}, ignore_index=True) df = df.append({'comment': 'I hate you', 'label': 1}, ignore_index=True) df.tail() path = r'E:\github\movie_hatespeech_detection\data\movies_for_training\all_movies.csv' movie_data = pd.read_csv(path, index_col=0) movie_data.head() print(df.label.value_counts()) df.label.value_counts().plot(kind='pie', subplots=True, autopct='%1.0f%%', title='Hate Speech Distribution') ``` ## Data Splitting ``` def split_dataset(df, seed): df = df.copy() test = df.loc[1513:1514] df.drop(df.tail(1).index, inplace=True) train = df.sample(frac=1, random_state=seed) return train.comment.values, train.label.values, test.comment, test.label categories = [0,1] seed = 11 train, train_targets, test, test_targets = split_dataset(df, seed=seed) train_size = len(train) test_size = len(test) print(train_size) print(test_size) def calculate_dataset_class_distribution(targets, categories): df = pd.DataFrame({'category':targets}) s = df.category.value_counts(normalize=True) s = s.reindex(categories) return [s.index[0], s[0]], [s.index[1], s[1]] train_class_distribution = calculate_dataset_class_distribution(train_targets, categories) test_class_distribution = calculate_dataset_class_distribution(test_targets, categories) print(train_class_distribution) print(test_class_distribution) train_ds = Bunch(data=train, target=train_targets) test_ds = Bunch(data=test, 
target=test_targets) ``` ## Buidling the Model ``` # Getting all the vocabularies and indexing to a unique position vocab = Counter() #Indexing words from the training data for text in train_ds.data: for word in text.split(' '): vocab[word.lower()]+=1 #Indexing words from the training data for text in test_ds.data: for word in text.split(' '): vocab[word.lower()]+=1 for text in movie_data.text.values: for word in text.split(' '): vocab[word.lower()]+=1 total_words = len(vocab) def get_word_2_index(vocab): word2index = {} for i,word in enumerate(vocab): word2index[word.lower()] = i return word2index word2index = get_word_2_index(vocab) print(len(word2index)) print(word2index["the"]) # Showing the index of 'the' print (total_words) # define the network class News_20_Net(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(News_20_Net, self).__init__() self.layer_1 = nn.Linear(input_size,hidden_size, bias=True).cuda() self.relu = nn.ReLU().cuda() self.layer_2 = nn.Linear(hidden_size, hidden_size, bias=True).cuda() self.output_layer = nn.Linear(hidden_size, num_classes, bias=True).cuda() # accept input and return an output def forward(self, x): out = self.layer_1(x) out = self.relu(out) out = self.layer_2(out) out = self.relu(out) out = self.output_layer(out) return out def get_batch(df,i,batch_size): batches = [] results = [] # Split into different batchs, get the next batch texts = df.data[i*batch_size:i*batch_size+batch_size] # get the targets categories = df.target[i*batch_size:i*batch_size+batch_size] #print(categories) for text in texts: # Dimension, 196609 layer = np.zeros(total_words,dtype=float) for word in text.split(' '): layer[word2index[word.lower()]] += 1 batches.append(layer) # We have 5 categories for category in categories: #print(category) index_y = -1 if category == 0: index_y = 0 elif category == 1: index_y = 1 elif category == 2: index_y = 2 results.append(index_y) # the training and the targets return 
np.array(batches),np.array(results) # Parameters learning_rate = 0.001 num_epochs = 8 batch_size = 32 display_step = 1 # ADDED will multiplied by 10 # Network Parameters hidden_size = 100 # 1st layer and 2nd layer number of features input_size = total_words # Words in vocab num_classes = len(categories) # Categories: "graphics","space","baseball","guns", "christian" ``` ## Training ``` results = [] news_net = News_20_Net(input_size, hidden_size, num_classes) # Loss and Optimizer criterion = nn.CrossEntropyLoss() # This includes the Softmax loss function optimizer = torch.optim.Adam(news_net.parameters(), lr=learning_rate) # Train the Model for epoch in range(num_epochs): # determine the number of min-batches based on the batch size and size of training data total_batch = int(len(train_ds.data)/batch_size) # Loop over all batches for i in range(total_batch): batch_x,batch_y = get_batch(train_ds,i,batch_size) articles = torch.cuda.FloatTensor(batch_x, device='cuda') labels = torch.cuda.LongTensor(batch_y, device='cuda') # Forward + Backward + Optimize optimizer.zero_grad() # zero the gradient buffer outputs = news_net(articles) loss = criterion(outputs, labels) loss.backward() optimizer.step() if (i+1) % display_step == 0: result = 'Epoch [%d/%d], Step [%d/%d], Loss: %.4f'%(epoch+1, num_epochs, i+1, len(train_ds.data)/batch_size, loss.data) results.append({'Epoch': epoch+1, 'Step': i+1, 'Loss': loss.data.item()}) if (i+1) % (display_step*10) == 0: print({'Epoch': epoch+1, 'Step': i+1, 'Loss': loss.data.item()}) ``` ## Validation ``` # Test the Model correct = 0 total = 0 total_test_data = len(test_ds.target) iterates = total_test_data/batch_size # ignore last (<batch_size) batch all_total = [] all_correct = [] labels_all = [] predicted_all = [] for i in range(int(iterates)): batch_x_test,batch_y_test = get_batch(test_ds,i,batch_size) articles = torch.FloatTensor(batch_x_test).to('cuda') labels = torch.LongTensor(batch_y_test).to('cuda') outputs = news_net(articles) 
_, predicted = torch.max(outputs.data, 1) labels_all.extend([x.item() for x in labels]) predicted_all.extend([x.item() for x in predicted]) report = classification_report(labels_all, predicted_all, output_dict=True) df_report = pd.DataFrame(report).transpose() df_report.round(2) ``` ---- ## Classication of Movies ``` def annotate_df(movie_df): utterances = movie_df.text.values predictions = [] batch = [] for text in utterances: # Dimension, 196609 layer = np.zeros(total_words,dtype=float) for word in text.split(' '): layer[word2index[word.lower()]] += 1 batch.append(layer) texts = torch.FloatTensor(batch).to('cuda') outputs = news_net(texts) _, predicted = torch.max(outputs.data, 1) predictions.extend([x.item() for x in predicted]) result = [] for i, pred in enumerate(predictions): result.append({'index': i, 'label_bow_fox_news': pred}) result_df = pd.DataFrame(result) movie_df = movie_df.merge(result_df, right_index=True, left_index=True) return movie_df result_df = annotate_df(movie_data) result_for_sana = result_df[['text', 'label_bow_fox_news']] result_df result_df.label_bow_fox_news.value_counts() result_df.majority_answer.value_counts() def get_classifications_results(df): df['majority_answer'] = df['majority_answer'].replace({2:1}) labels_all = df.majority_answer.values predicted_all = df.label_bow_fox_news.values results_classification = classification_report(labels_all, predicted_all, output_dict=True) df_report = pd.DataFrame(results_classification).transpose() return df_report get_classifications_results(result_df).round(2) ```
github_jupyter
# Example: Swiss Referenda We propose in this notebook an example of how to use the `predikon` library to make vote predictions. The data is a subsample (10%) of Swiss referenda results. The full dataset can be found in the [submatrix-factorization](https://github.com/indy-lab/submatrix-factorization/blob/master/data/munvoteinfo.pkl) repo. ## Imports ``` import numpy as np from predikon import LogisticSubSVD, GaussianSubSVD, WeightedAveraging DATA_PATH = '../tests/data/' ``` ## Load Data Each entry `data[i,j]` is the percentage of "yes" in region `i` for referendum `j`. A region in this dataset is a Swiss municipality. The `weights` are the number of valid votes in each municipality. The `outcomes` are the aggregate national outcomes for each referendum. ``` data = np.loadtxt(f'{DATA_PATH}/data.csv', dtype=np.float, delimiter=',') weights = np.loadtxt(f'{DATA_PATH}/weights.csv', dtype=np.int, delimiter=',') outcomes = np.loadtxt(f'{DATA_PATH}/outcomes.csv', dtype=np.float, delimiter=',') ``` ## Prepare Data The matrix `Y` contains historical data up to vote `V`. The vector `y` contains the vote results for the vote we would like to make predictions. ``` Y, y = data[:, :-1], data[:, -1] ytrue = outcomes[-1] R, V = Y.shape print(f'Number of regions: {R:>3}') print(f'Number of votes: {V:>3}') ``` ## Set Observations Set which regions are observed. The unobserved regional results are `nan`. ``` # Fix the seed for reproducibility. np.random.seed(200) # Random permutation of the regions. inds = np.random.permutation(R) # Proportion of observed results. p = 0.1 # Number of observations (10 %). n = int(np.ceil(R * p)) # Set observations. obs = inds[:n] # Define new vector of (partial) regional results. ynew = np.array([np.nan] * R) ynew[obs] = y[obs] ``` ## Evaluate Models We evaluate three models: 1. A weighted average baseline 2. Our algorithm with a Gaussian likelihood 3. 
Our algorithm with a Bernoulli likelihood We set the latent dimensions `D=10` and the regularizer `reg=1e-5`. We report the predicted aggregated outcome, and we compare it against the true aggregate outcome. An aggregate outcome is the weighted average of the regional observations and the regional predictions, where the weight is the number of valid votes in each region. ``` # Hyperparameters: number of latent dimensions and regularizers. D, reg = 10, 1e-5 # Define models. base = WeightedAveraging(Y, weighting=weights) gaus = GaussianSubSVD(Y, weighting=weights, n_dim=D, add_bias=True, l2_reg=reg) bern = LogisticSubSVD(Y, weighting=weights, n_dim=D, add_bias=True, l2_reg=reg) for model in [base, gaus, bern]: print(model) # Predict missing results. pred = model.fit_predict(ynew) # Compute aggregate outcome. ypred = 1/np.sum(weights) * np.sum(weights.dot(pred)) print(f' Predicted outcome: {ypred*100:.2f}%') print(f' True outcome: {ytrue*100:.2f}%') print(f' Absolute diff.: {np.abs(ypred - ytrue)*100:.4f}\n') ```
github_jupyter
## Macrorheology As the material model is based on microscopic parameters and not on macroscopic parameters (as e.g. the bulk stiffness), the material parameters cannot directly be measured using a rheometer. Instead the rheological experiments are "simulated" on the material model and the resulting curves can be used to fit the material parameters so that the the "simulated" rheological experiments on the material model fit the measured rheological response of the material. Here, we describe three different rheological experiments that can be simulated on the material model. - Shear Rheometer - Stretch Thinning - Extensional Rheometer The stretch experiment is needed to reliably fit later on the buckling of the material and either the Shear Rheometer or the Extensional Rheometer experiment can be used to fit the fiber stiffness and the strain stiffening. This section first describes the functions that simulate these experiments on the material model and the next section explains how these functions can be used to fit the material parameters from experimental data. 
### Shear Rheometer ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt from saenopy import macro from saenopy.materials import SemiAffineFiberMaterial material = SemiAffineFiberMaterial(900, 0.0004, 0.0075, 0.033) print(material) gamma = np.arange(0.005, 0.3, 0.0001) x, y = macro.getShearRheometerStress(gamma, material) plt.loglog(x, y, "-", lw=3, label="model") plt.xlabel("strain") plt.ylabel("shear stress [Pa]") plt.show() ``` ### Stretcher ``` import numpy as np import matplotlib.pyplot as plt from saenopy import macro from saenopy.materials import SemiAffineFiberMaterial material = SemiAffineFiberMaterial(900, 0.0004, 0.0075, 0.033) print(material) lambda_h = np.arange(1-0.05, 1+0.07, 0.01) lambda_v = np.arange(0, 1.1, 0.001) x, y = macro.getStretchThinning(lambda_h, lambda_v, material) plt.plot(x, y, lw=3, label="model") plt.xlabel("horizontal stretch") plt.ylabel("vertical contraction") plt.ylim(0, 1.2) plt.xlim(0.9, 1.2) plt.show() ``` ### Extensional Rheometer ``` import numpy as np import matplotlib.pyplot as plt from saenopy import macro from saenopy.materials import SemiAffineFiberMaterial, LinearMaterial material = SemiAffineFiberMaterial(900, 0.0004, 0.0075, 0.033) print(material) epsilon = np.arange(1, 1.17, 0.0001) x, y = macro.getExtensionalRheometerStress(epsilon, material) plt.plot(x, y, lw=3, label="model") plt.xlabel("strain") plt.ylabel("stress [Pa]") plt.show() ``` ## Fitting material parameters ``` from saenopy import macro import numpy as np # example data, stress-strain curves for collagen of three different concentrations data0_6 = np.array([[4.27e-06,-2.26e-03],[1.89e-02,5.90e-01],[3.93e-02,1.08e+00],[5.97e-02,1.57e+00],[8.01e-02,2.14e+00],[1.00e-01,2.89e+00],[1.21e-01,3.83e+00],[1.41e-01,5.09e+00],[1.62e-01,6.77e+00],[1.82e-01,8.94e+00],[2.02e-01,1.17e+01],[2.23e-01,1.49e+01],[2.43e-01,1.86e+01],[2.63e-01,2.28e+01],[2.84e-01,2.71e+01]]) data1_2 = 
np.array([[1.22e-05,-1.61e-01],[1.71e-02,2.57e+00],[3.81e-02,4.69e+00],[5.87e-02,6.34e+00],[7.92e-02,7.93e+00],[9.96e-02,9.56e+00],[1.20e-01,1.14e+01],[1.40e-01,1.35e+01],[1.61e-01,1.62e+01],[1.81e-01,1.97e+01],[2.02e-01,2.41e+01],[2.22e-01,2.95e+01],[2.42e-01,3.63e+01],[2.63e-01,4.43e+01],[2.83e-01,5.36e+01],[3.04e-01,6.37e+01],[3.24e-01,7.47e+01],[3.44e-01,8.61e+01],[3.65e-01,9.75e+01],[3.85e-01,1.10e+02],[4.06e-01,1.22e+02],[4.26e-01,1.33e+02]]) data2_4 = np.array([[2.02e-05,-6.50e-02],[1.59e-02,8.46e+00],[3.76e-02,1.68e+01],[5.82e-02,2.43e+01],[7.86e-02,3.34e+01],[9.90e-02,4.54e+01],[1.19e-01,6.11e+01],[1.40e-01,8.16e+01],[1.60e-01,1.06e+02],[1.80e-01,1.34e+02],[2.01e-01,1.65e+02],[2.21e-01,1.96e+02],[2.41e-01,2.26e+02]]) # hold the buckling parameter constant, as it cannot be deferimed well with shear experiments ds0 = 0.0004 # minimize 3 shear rheometer experiments with different collagen concentration and, therefore, different k1 parameters # but keep the other paramters the same parameters, plot = macro.minimize([ [macro.getShearRheometerStress, data0_6, lambda p: (p[0], ds0, p[3], p[4])], [macro.getShearRheometerStress, data1_2, lambda p: (p[1], ds0, p[3], p[4])], [macro.getShearRheometerStress, data2_4, lambda p: (p[2], ds0, p[3], p[4])], ], [900, 1800, 12000, 0.013, 0.1], ) # print the resulting parameters print(parameters) # and plot the results plot() ``` To fit all parameters, experiments of different types should be combined, e.g. a shear rheological experiment and a stretching experiment. 
``` from saenopy import macro import numpy as np # example data, stress-strain curves and a stretch experiment shear = np.array([[7.50e-03,2.78e-01],[1.25e-02,4.35e-01],[1.75e-02,6.44e-01],[2.25e-02,7.86e-01],[2.75e-02,9.98e-01],[3.25e-02,1.13e+00],[3.75e-02,1.52e+00],[4.25e-02,1.57e+00],[4.75e-02,1.89e+00],[5.25e-02,2.10e+00],[5.75e-02,2.46e+00],[6.25e-02,2.67e+00],[6.75e-02,3.15e+00],[7.25e-02,3.13e+00],[7.75e-02,3.83e+00],[8.25e-02,4.32e+00],[8.75e-02,4.35e+00],[9.25e-02,4.78e+00],[9.75e-02,5.45e+00],[1.02e-01,5.87e+00],[1.07e-01,6.16e+00],[1.13e-01,6.89e+00],[1.17e-01,7.89e+00],[1.22e-01,8.28e+00],[1.28e-01,9.13e+00],[1.33e-01,1.06e+01],[1.38e-01,1.10e+01],[1.42e-01,1.27e+01],[1.47e-01,1.39e+01],[1.52e-01,1.53e+01],[1.58e-01,1.62e+01],[1.63e-01,1.78e+01],[1.68e-01,1.89e+01],[1.72e-01,2.03e+01],[1.77e-01,2.13e+01],[1.82e-01,2.23e+01],[1.88e-01,2.38e+01],[1.93e-01,2.56e+01],[1.98e-01,2.78e+01],[2.03e-01,3.02e+01],[2.07e-01,3.28e+01],[2.12e-01,3.55e+01],[2.17e-01,3.83e+01],[2.23e-01,4.13e+01],[2.28e-01,4.48e+01],[2.33e-01,4.86e+01],[2.37e-01,5.27e+01],[2.42e-01,5.64e+01],[2.47e-01,6.08e+01],[2.53e-01,6.48e+01],[2.58e-01,6.93e+01],[2.63e-01,7.44e+01],[2.68e-01,7.89e+01],[2.73e-01,8.40e+01],[2.78e-01,8.91e+01],[2.82e-01,9.41e+01],[2.87e-01,1.01e+02],[2.92e-01,1.07e+02],[2.97e-01,1.12e+02],[3.02e-01,1.19e+02],[3.07e-01,1.25e+02],[3.12e-01,1.32e+02],[3.18e-01,1.39e+02],[3.23e-01,1.45e+02],[3.28e-01,1.53e+02],[3.33e-01,1.60e+02],[3.38e-01,1.67e+02],[3.43e-01,1.76e+02],[3.47e-01,1.83e+02],[3.52e-01,1.90e+02],[3.57e-01,1.99e+02],[3.62e-01,2.06e+02],[3.67e-01,2.15e+02],[3.72e-01,2.23e+02],[3.78e-01,2.31e+02],[3.83e-01,2.40e+02],[3.88e-01,2.48e+02],[3.93e-01,2.56e+02],[3.98e-01,2.56e+02],[4.03e-01,2.73e+02],[4.07e-01,2.77e+02],[4.12e-01,2.86e+02],[4.17e-01,2.97e+02],[4.22e-01,3.08e+02],[4.27e-01,3.15e+02],[4.32e-01,3.25e+02],[4.38e-01,3.33e+02],[4.43e-01,3.39e+02],[4.48e-01,3.51e+02],[4.53e-01,3.59e+02],[4.58e-01,3.69e+02],[4.63e-01,3.76e+02],[4.68e-01,3.83e+02],[4.72e-01,3
.93e+02],[4.77e-01,3.97e+02],[4.82e-01,4.04e+02],[4.87e-01,4.13e+02],[4.92e-01,4.18e+02],[4.97e-01,4.31e+02],[5.02e-01,4.38e+02],[5.07e-01,4.25e+02],[5.12e-01,4.48e+02],[5.17e-01,4.49e+02],[5.22e-01,4.56e+02],[5.27e-01,4.66e+02],[5.32e-01,4.70e+02],[5.37e-01,4.76e+02],[5.42e-01,4.82e+02],[5.47e-01,4.89e+02],[5.52e-01,4.99e+02],[5.57e-01,5.01e+02],[5.62e-01,5.06e+02],[5.68e-01,5.14e+02],[5.73e-01,5.15e+02],[5.78e-01,5.21e+02],[5.83e-01,5.28e+02],[5.88e-01,5.30e+02],[5.93e-01,5.38e+02],[5.98e-01,5.40e+02],[6.03e-01,5.41e+02],[6.08e-01,5.38e+02],[6.13e-01,5.39e+02],[6.18e-01,5.50e+02],[6.23e-01,5.56e+02],[6.27e-01,5.59e+02],[6.32e-01,5.68e+02],[6.37e-01,5.69e+02],[6.42e-01,5.70e+02],[6.47e-01,5.79e+02],[6.52e-01,5.78e+02],[6.57e-01,5.80e+02],[6.62e-01,5.83e+02],[6.67e-01,5.83e+02],[6.72e-01,5.89e+02],[6.77e-01,5.86e+02],[6.82e-01,5.88e+02],[6.88e-01,5.91e+02],[6.93e-01,5.86e+02],[6.98e-01,5.91e+02],[7.03e-01,5.91e+02],[7.08e-01,5.87e+02],[7.13e-01,5.89e+02],[7.18e-01,5.88e+02],[7.23e-01,5.89e+02],[7.28e-01,5.89e+02],[7.33e-01,5.81e+02],[7.38e-01,5.85e+02],[7.43e-01,5.86e+02],[7.48e-01,5.78e+02],[7.52e-01,5.78e+02],[7.57e-01,5.79e+02],[7.62e-01,5.76e+02],[7.67e-01,5.74e+02],[7.72e-01,5.70e+02],[7.77e-01,5.73e+02],[7.82e-01,5.70e+02],[7.87e-01,5.66e+02],[7.92e-01,5.69e+02],[7.97e-01,5.59e+02],[8.02e-01,5.50e+02],[8.07e-01,5.52e+02],[8.12e-01,5.52e+02],[8.18e-01,5.59e+02],[8.23e-01,5.58e+02],[8.28e-01,5.57e+02],[8.33e-01,5.59e+02],[8.38e-01,5.53e+02],[8.43e-01,5.55e+02],[8.48e-01,5.56e+02],[8.53e-01,5.49e+02],[8.58e-01,5.50e+02],[8.63e-01,5.47e+02],[8.68e-01,5.22e+02],[8.73e-01,5.44e+02],[8.77e-01,5.36e+02],[8.82e-01,5.38e+02],[8.87e-01,5.33e+02],[8.92e-01,5.28e+02],[8.97e-01,5.30e+02],[9.02e-01,5.23e+02],[9.07e-01,5.22e+02],[9.12e-01,5.18e+02],[9.17e-01,5.12e+02],[9.22e-01,5.11e+02],[9.27e-01,5.05e+02],[9.32e-01,5.03e+02],[9.38e-01,4.99e+02],[9.43e-01,4.88e+02],[9.48e-01,4.86e+02],[9.53e-01,4.82e+02],[9.58e-01,4.73e+02],[9.63e-01,4.70e+02],[9.68e-01,4.61e+02],[9.73e-01,4
.56e+02],[9.78e-01,4.51e+02],[9.83e-01,4.41e+02],[9.88e-01,4.36e+02],[9.93e-01,4.22e+02],[9.98e-01,4.09e+02],[1.00e+00,4.07e+02]])[:50] stretch = np.array([[9.33e-01,1.02e+00],[9.40e-01,1.01e+00],[9.47e-01,1.02e+00],[9.53e-01,1.02e+00],[9.60e-01,1.02e+00],[9.67e-01,1.01e+00],[9.73e-01,1.01e+00],[9.80e-01,1.01e+00],[9.87e-01,1.01e+00],[9.93e-01,1.00e+00],[1.00e+00,1.00e+00],[1.01e+00,9.89e-01],[1.01e+00,9.70e-01],[1.02e+00,9.41e-01],[1.03e+00,9.00e-01],[1.03e+00,8.46e-01],[1.04e+00,7.76e-01],[1.05e+00,6.89e-01],[1.05e+00,6.02e-01],[1.06e+00,5.17e-01],[1.07e+00,4.39e-01],[1.07e+00,3.74e-01],[1.08e+00,3.17e-01],[1.09e+00,2.72e-01],[1.09e+00,2.30e-01],[1.10e+00,2.02e-01]]) # fit all 4 parameters simultaneously to two experiments parameters, plot = macro.minimize([ [macro.getShearRheometerStress, shear, lambda p: (p[0], p[1], p[2], p[3])], [macro.getStretchThinning, stretch, lambda p: (p[0], p[1], p[2], p[3])], ], [900, 0.0004, 0.075, 0.33], ) # print the resulting parameters print(parameters) # and plot the results plot() ```
github_jupyter
<h2>Python NumPy</h2> #To install do pip install numpy <b>What is a Python NumPy?</b> NumPy is a Python package which stands for โ€˜Numerical Pythonโ€™. It is the core library for scientific computing, which contains a powerful n-dimensional array object, provide tools for integrating C, C++ etc. It is also useful in linear algebra, random number capability etc. NumPy array can also be used as an efficient multi-dimensional container for generic data. <b>NumPy Array:</b> Numpy array is a powerful N-dimensional array object which is in the form of rows and columns. We can initialize numpy arrays from nested Python lists and access it elements. ![image.png](attachment:image.png) Here, I have different elements that are stored in their respective memory locations. It is said to be two dimensional because it has rows as well as columns. In the above image, we have 3 columns and 4 rows available. <h3>Single & Multi dimensional Numpy Array:</h3> ``` import numpy as np a=np.array([1,2,3]) print(a) #Multi-dimensional Array: a=np.array([(1,2,3),(4,5,6)]) print(a) ``` <h3>Python NumPy Array v/s List</h3> We use python numpy array instead of a list because of the below three reasons: Less Memory Fast Convenient The very first reason to choose python numpy array is that it occupies less memory as compared to list. Then, it is pretty fast in terms of execution and at the same time it is very convenient to work with numpy. So these are the major advantages that python numpy array has over list. ``` import numpy as np import time import sys S= range(1000) print(sys.getsizeof(S)*len(S)) D= np.arange(1000) print(D.size*D.itemsize) ``` The above output shows that the memory allocated by list (denoted by S) is 14000 whereas the memory allocated by the numpy array is just 4000. From this, you can conclude that there is a major difference between the two and this makes python numpy array as the preferred choice over list. 
``` #python numpy array is faster and more convenient when compared to list import time import sys SIZE = 1000000 L1= range(SIZE) L2= range(SIZE) A1= np.arange(SIZE) A2=np.arange(SIZE) start= time.time() result=[x+y for x,y in zip(L1,L2)] print((time.time()-start)*1000) start=time.time() result= A1+A2 print((time.time()-start)*1000) ``` In the above code, we have defined two lists and two numpy arrays. Then, we have compared the time taken in order to find the sum of lists and sum of numpy arrays both. If you see the output of the above program, there is a significant change in the two values. List took 380ms whereas the numpy array took almost 49ms. Hence, numpy array is faster than list. Now, if you noticed we had run a โ€˜forโ€™ loop for a list which returns the concatenation of both the lists whereas for numpy arrays, we have just added the two array by simply printing A1+A2. Thatโ€™s why working with numpy is much easier and convenient when compared to the lists. <h3>Python NumPy Operations</h3> ``` #ndim: import numpy as np a = np.array([(1,2,3),(4,5,6),(8,9,10)]) print(a.ndim) #Since the output is 2, it is a two-dimensional array (multi dimension). ``` ![numpy-array-150x148.jpg](attachment:numpy-array-150x148.jpg) <b>itemsize:</b> You can calculate the byte size of each element. In the below code, I have defined a single dimensional array and with the help of โ€˜itemsizeโ€™ function, we can find the size of each element. ``` import numpy as np a = np.array([(1,2,3)]) print(a.itemsize) ``` <b>dtype:</b> You can find the data type of the elements that are stored in an array. So, if you want to know the data type of a particular element, you can use โ€˜dtypeโ€™ function which will print the datatype along with the size. In the below code, I have defined an array where I have used the same function. 
``` import numpy as np a = np.array([(1,1,1)]) print(a.dtype) #Calculating size and shape of an array import numpy as np a = np.array([(1,2,3,4,5,6),(2,3,4,5,6,7)]) print(a.size) print(a.shape) ``` <b>reshape:</b> Reshape is when you change the number of rows and columns which gives a new view to an object. ![image.png](attachment:image.png) As you can see in the above image, we have 3 columns and 2 rows which has converted into 2 columns and 3 rows ``` import numpy as np a = np.array([(8,9,10),(11,12,13)]) print('Old -->',a) a=a.reshape(3,2) print('New-->',a) ``` <b>slicing:</b> Slicing is basically extracting particular set of elements from an array. This slicing operation is pretty much similar to the one which is there in the list as well. ![image.png](attachment:image.png) ``` #We have an array and we need a particular element (say 3) out of a given array. import numpy as np a=np.array([(1,2,3,4),(3,4,5,6)]) print(a[0,2]) #Here, the array(1,2,3,4) is your index 0 and (3,4,5,6) is index 1 of the python numpy array. #Therefore, we have printed the second element from the zeroth index. #letโ€™s say we need the 2nd element from the zeroth and first index of the array import numpy as np a=np.array([(1,2,3,4),(3,4,5,6)]) print(a[0:,2]) # Here colon represents all the rows, including zero. #Now to get the 2nd element, weโ€™ll call index 2 from both of the rows which gives us the value 3 and 5 respectively. import numpy as np a=np.array([(8,9),(10,11),(12,13)]) print(a[0:2,1]) #As you can see in the above code, only 9 and 11 gets printed. Now when I have written 0:2, this does not include the second index of the third row of an array. #Therefore, only 9 and 11 gets printed else you will get all the elements i.e [9 11 13]. ``` <b>linspace:</b> This is another operation in python numpy which returns evenly spaced numbers over a specified interval. ``` import numpy as np a=np.linspace(10,1,5) print(a) #it has printed 10 values between 1 to 3. 
``` <b>Min, max, mean, sum ,Square Root, Standard Deviationetc</b> ``` import numpy as np a= np.array([19,23,56,10,19,76,84,90,12]) print(a.min()) print(a.max()) print(a.sum()) print(a.mean()) print(np.sqrt(a)) print(np.std(a)) a=np.array([(8,9),(10,11),(12,13)]) print(a.min()) print(a.max()) print(a.sum()) print(a.mean()) print(np.sqrt(a)) print(np.std(a)) ``` <b>Calculating mean, median with numpy inbuilt functions</b> ``` import numpy as np # 1D array arr = [20, 2, 7, 1, 34,45,67] print("arr : ", arr) print("arr : ", np.mean(arr)) print("median of arr : ", np.median(arr)) import numpy as np # 2D array arr = [[14, 17, 12, 33, 44], [15, 6, 27, 8, 19], [23, 2, 54, 1, 4, ]] # median of the flattened array print("\nmedian of arr, axis = None : ", np.median(arr)) print("\nmean of arr, axis = None : ", np.mean(arr)) # median along the axis = 0 print("\nmedian of arr, axis = 0 : ", np.median(arr, axis = 0)) print("\nmean of arr, axis = 0 : ", np.mean(arr, axis = 0)) # median along the axis = 1 print("\nmedian of arr, axis = 1 : ", np.median(arr, axis = 1)) print("\nmean of arr, axis = 1 : ", np.mean(arr, axis = 1)) out_arr = np.arange(3) print("\nout_arr : ", out_arr) print("median of arr, axis = 1 : ", np.median(arr, axis = 1, out = out_arr)) ``` <b>Addition Operation</b> ``` #You can perform more operations on numpy array i.e addition, subtraction,multiplication and division of the two matrices. import numpy as np x= np.array([(1,2,3),(3,4,5)]) y= np.array([(1,2,3),(3,4,5)]) print(x+y) print(x-y) print(x*y) print(x/y) ``` <b>Vertical & Horizontal Stacking</b> if you want to concatenate two arrays and not just add them, you can perform it using two ways โ€“ vertical stacking and horizontal stacking. ``` import numpy as np x= np.array([(1,2,3),(3,4,5)]) y= np.array([(1,2,3),(3,4,5)]) print(np.vstack((x,y))) print(np.hstack((x,y))) ``` <b>ravel</b> There is one more operation where you can convert one numpy array into a single column i.e ravel. 
``` import numpy as np x= np.array([(1,2,3),(3,4,5)]) print(x.ravel()) ``` <b>Python Numpy Special Functions</b> ``` #There are various special functions available in numpy such as sine, cosine, tan, log etc import numpy as np import matplotlib.pyplot as plt x= np.arange(0,3*np.pi,0.1) y=np.sin(x) plt.plot(x,y) plt.show() import numpy as np import matplotlib.pyplot as plt x= np.arange(0,3*np.pi,0.1) y=np.cos(x) plt.plot(x,y) plt.show() #Exp a= np.array([1,2,3]) print(np.exp(a)) #log import numpy as np import matplotlib.pyplot as plt a= np.array([1,2,3]) print(np.log(a)) ``` <b>Creating Identity matrix,zero matrix , matrix multiplication using numpy</b> ``` #Identity matrix import numpy as np # 2x2 matrix with 1's on main diagnol b = np.identity(2, dtype = float) print("Matrix b : \n", b) a = np.identity(4) print("\nMatrix a : \n", a) #Zero matrix import numpy as np # 2x2 matrix with 1's on main diagnol b = np.zeros((2,2), dtype = float) print("Matrix b : \n", b) a = np.zeros((4,4)) print("\nMatrix a : \n", a) #Matrix multiplication a = np.array([[1, 0],[0, 1]]) b = np.array([[4, 1],[2, 2]]) np.matmul(a, b) #Matrix transpose x = np.arange(4).reshape((2,2)) x np.transpose(x) ``` <h2>An Introduction to Pandas in Python</h2> Pandas is a software library written for the Python programming language. It is used for data manipulation and analysis. It provides special data structures and operations for the manipulation of numerical tables and time series. Pandas is the name for a Python module, which is rounding up the capabilities of Numpy, Scipy and Matplotlab. The word pandas is an acronym which is derived from "Python and data analysis" and "panel data". 
``` #pip install pandas import pandas as pd ``` <h3>Data structures in pandas</h3> <b>Dataframe and series</b> <b>A DataFrame is a two-dimensional array of values with both a row and a column index.</b> <b>A Series is a one-dimensional array of values with an index.</b> ![1_o5c599ueURBTZWDGmx1SiA.png](attachment:1_o5c599ueURBTZWDGmx1SiA.png) If it looks like the picture on the left is also present in the picture on the right, youโ€™re right! Where a DataFrame is the entire dataset, including all rows and columns โ€” a Series is essentially a single column within that DataFrame. <h3>Series</h3> A Series is a one-dimensional labelled array-like object. It is capable of holding any data type, e.g. integers, floats, strings, Python objects, and so on. It can be seen as a data structure with two arrays: one functioning as the index, i.e. the labels, and the other one contains the actual data ``` import pandas as pd S = pd.Series([11, 28, 72, 3, 5, 8]) print(S) ``` We haven't defined an index in our example, but we see two columns in our output: The right column contains our data, whereas the left column contains the index. Pandas created a default index starting with 0 going to 5, which is the length of the data minus 1. ``` print(S.index) print(S.values) ``` <b>Difference between Numpy array and Series</b> There is often some confusion about whether Pandas is an alternative to Numpy, SciPy and Matplotlib. The truth is that it is built on top of Numpy. This means that Numpy is required by pandas. Scipy and Matplotlib on the other hand are not required by pandas but they are extremely useful. That's why the Pandas project lists them as "optional dependency". ``` import numpy as np X = np.array([11, 28, 72, 3, 5, 8]) print(X) print(S.values) # both are the same type: print(type(S.values), type(X)) #What is the actual difference fruits = ['apples', 'oranges', 'cherries', 'pears'] #We can define Series objects with individual indices(We can use arbitrary indices.) 
quantities = [20, 33, 52, 10] S = pd.Series(quantities, index=fruits) print(S) #add two series with the same indices, we get a new series with the same index and the correponding values will be added fruits = ['apples', 'oranges', 'cherries', 'pears'] S = pd.Series([20, 33, 52, 10], index=fruits) S2 = pd.Series([17, 13, 31, 32], index=fruits) print(S + S2) print("sum of S: ", sum(S)) #The indices do not have to be the same for the Series addition. The index will be the "union" of both indices. #If an index doesn't occur in both Series, the value for this Series will be NaN fruits = ['peaches', 'oranges', 'cherries', 'pears'] fruits2 = ['raspberries', 'oranges', 'cherries', 'pears'] S = pd.Series([20, 33, 52, 10], index=fruits) S2 = pd.Series([17, 13, 31, 32], index=fruits2) print(S + S2) #indices can be completely different, as in the following example. #We have two indices. One is the Turkish translation of the English fruit names: fruits = ['apples', 'oranges', 'cherries', 'pears'] fruits_tr = ['elma', 'portakal', 'kiraz', 'armut'] S = pd.Series([20, 33, 52, 10], index=fruits) S2 = pd.Series([17, 13, 31, 32], index=fruits_tr) print(S + S2) ``` <h3>Series indexing</h3> ``` print('Single Indexing',S['apples']) print('@@@@@@@@@@@@@@@@') print(r'Multi Indexing ',S[['apples', 'oranges', 'cherries']]) ``` <h3>pandas.Series.apply</h3> The function "func" will be applied to the Series and it returns either a Series or a DataFrame, depending on "func". Parameter Meaning func a function, which can be a NumPy function that will be applied to the entire Series or a Python function that will be applied to every single value of the series convert_dtype A boolean value. If it is set to True (default), apply will try to find better dtype for elementwise function results. If False, leave as dtype=object args Positional arguments which will be passed to the function "func" additionally to the values from the series. 
**kwds Additional keyword arguments will be passed as keywords to the function ``` #Ex S.apply(np.log) # Let's assume, we have the following task. The test the amount of fruit for every kind. #If there are less than 50 available, we will augment the stock by 10: S.apply(lambda x: x if x > 50 else x+10 ) S>30 #Conditioning in a series S[S>30] "apples" in S #Creating Series Objects from Dictionaries cities = {"London": 8615246, "Berlin": 3562166, "Madrid": 3165235, "Rome": 2874038, "Paris": 2273305, "Vienna": 1805681, "Bucharest": 1803425, "Hamburg": 1760433, "Budapest": 1754000, "Warsaw": 1740119, "Barcelona": 1602386, "Munich": 1493900, "Milan": 1350680} city_series = pd.Series(cities) print(city_series) ``` <h3>Handling missing data in pandas</h3> One problem in dealing with data analysis tasks consists in missing data. Pandas makes it as easy as possible to work with missing data. ``` my_cities = ["London", "Paris", "Zurich", "Berlin", "Stuttgart", "Hamburg"] my_city_series = pd.Series(cities, index=my_cities) my_city_series ``` Due to the Nan values the population values for the other cities are turned into floats. 
There is no missing data in the following examples, so the values are int: ``` my_cities = ["London", "Paris", "Berlin", "Hamburg"] my_city_series = pd.Series(cities, index=my_cities) my_city_series #Finding whether a data is null or not my_cities = ["London", "Paris", "Zurich", "Berlin", "Stuttgart", "Hamburg"] my_city_series = pd.Series(cities, index=my_cities) print(my_city_series.isnull()) print(my_city_series.notnull()) #Drop the nulls print(my_city_series.dropna()) #Fill the nulls print(my_city_series.fillna(0)) missing_cities = {"Stuttgart":597939, "Zurich":378884} my_city_series.fillna(missing_cities) #Still the values are not integers, we can convert it into int my_city_series = my_city_series.fillna(0).astype(int) print(my_city_series) #Pandas next topics will be continued in week4 classes l1=[1,2,3] l2=[10,4,5] list(zip(l1,l2)) ``` <h3>Pandas Dataframes</h3> ``` #Creating a pandas dataframe from list of lists import pandas as pd Df = pd.DataFrame(data = [ ['NJ', 'Towaco', 'Square'], ['CA', 'San Francisco', 'Oval'], ['TX', 'Austin', 'Triangle'], ['MD', 'Baltimore', 'Square'], ['OH', 'Columbus', 'Hexagon'], ['IL', 'Chicago', 'Circle']], columns = ['State', 'City', 'Shape']) Df #Creating DataFrame from dict of narray/lists import pandas as pd # intialise data of lists. data = {'Name':['Tom', 'nick', 'krish', 'jack'], 'Age':[20, 21, 19, 18]} # Create DataFrame df = pd.DataFrame(data) df #Creates a indexes DataFrame using arrays. import pandas as pd # initialise data of lists. data = {'Name':['Tom', 'Jack', 'nick', 'juli'], 'marks':[99, 98, 95, 90]} # Creates pandas DataFrame. df = pd.DataFrame(data, index =['rank1', 'rank2', 'rank3', 'rank4']) # print the data df l1=[1,2,3] l2=[10,4,5] list(zip(l1,l2)) #Creating DataFrame using zip() function. #Two lists can be merged by using list(zip()) function. Now, create the pandas DataFrame by calling pd.DataFrame() function. 
import pandas as pd # List1 Name = ['tom', 'krish', 'nick', 'juli'] # List2 Age = [25, 30, 26, 22] # get the list of tuples from two lists. # and merge them by using zip(). list_of_tuples = list(zip(Name, Age)) # Assign data to tuples. list_of_tuples print(list_of_tuples) # Converting lists of tuples into # pandas Dataframe. df = pd.DataFrame(list_of_tuples, columns = ['Name', 'Age']) # Print data. df #Creating DataFrame from Dicts of series. import pandas as pd # Intialise data to Dicts of series. d = {'one' : pd.Series([10, 20, 30, 40], index =['a', 'b', 'c', 'd']), 'two' : pd.Series([10, 20, 30, 40], index =['a', 'b', 'c', 'd'])} # creates Dataframe. df = pd.DataFrame(d) # print the data. df #Load data from csv import pandas as pd df = pd.read_csv('RegularSeasonCompactResults.csv') #Head,tail df.head(5) df.dtypes df[['Wscore', 'Lscore']].head() #Shape of dataset df.shape #Columns in dataset df.columns #we can call the describe() function to see statistics like mean, min, etc about each column of the dataset. df.describe() #Max ,min,mean,median df.max() df['Wscore'].max() df['Wscore'].argmax()#Let's say we want to actually see the game(row) where this max score happened. #We can call the argmax() function to identify the row index df.iloc[[df['Wscore'].argmax()]] #Let's take this a step further. Let's say you want to know the game with the highest scoring winning team (this is what we just calculated), #but you then want to know how many points the losing team scored. 
df.iloc[[df['Wscore'].argmax()]]['Lscore'] df[df['Wscore'] > 150] #Extracting only values df.values df.values[0][1] #Dataframe Iteration df.isnull().sum() #https://github.com/jonathanrocher/pandas_tutorial/blob/master/analyzing_and_manipulating_data_with_pandas_manual.pdf #https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html ``` <h3>Append,concatenate and merge dataframes</h3> <h3>Append a dataframe</h3> ``` #Creating an empty dataframe dfObj = pd.DataFrame(columns=['User_ID', 'UserName', 'Action']) print( dfObj, sep='\n') dfObj # Append Dataframe by adding dictionaries dfObj = dfObj.append({'User_ID': 23, 'UserName': 'Riti', 'Action': 'Login'}, ignore_index=True) dfObj = dfObj.append({'User_ID': 24, 'UserName': 'Aadi', 'Action': 'Logout'}, ignore_index=True) dfObj = dfObj.append({'User_ID': 25, 'UserName': 'Jack', 'Action': 'Login'}, ignore_index=True) print("Dataframe Contens ", dfObj, sep='\n') #Create an complete empty DataFrame without any column name or indices dfObj = pd.DataFrame() print(dfObj) # Append columns to the Empty DataFrame dfObj['UserName'] = ['Riti', 'Aadi', 'Jack'] dfObj['Name'] = ['Riti', 'Aadi', 'Jack'] #dfObj['Name'] = ['Riti', 'Aadi', 'Jack'] print("Dataframe Contents ", dfObj, sep='\n') #Automatically a range index is been formed #Create an empty Dataframe with column names & row indices but no data dfObj = pd.DataFrame(columns=['User_ID', 'UserName', 'Action'], index=['a', 'b', 'c']) print("Empty Dataframe", dfObj, sep='\n') #Appending the data directly to the indices dfObj.loc['a'] = [23, 'Riti', 'Login'] dfObj.loc['b'] = [24, 'Aadi', 'Logout'] dfObj.loc['c'] = [25, 'Jack', 'Login'] print("Dataframe Contents ", dfObj, sep='\n') #Appending rows with for loop import pandas as pd cols = ['Zip'] lst = [] zip = 32100 for a in range(10): lst.append([zip]) zip = zip + 1 df = pd.DataFrame(lst, columns=cols) print(df) ``` <h3>Concatenate dataframe</h3> ``` #Concatenate two columns of dataframe in pandas python import pandas as 
pd import numpy as np #Create a DataFrame df1 = { 'State':['Arizona','Georgia','Newyork','Indiana','Florida'], 'State_code':['AZ','GG','NY','IN','SL'], 'Score':[62,47,55,74,31]} df1 = pd.DataFrame(df1,columns=['State','State_code','Score']) print(df1) #Concatenate two string columns pandas:Letโ€™s concatenate two columns of dataframe with โ€˜+โ€™ as shown below df1['state_and_code'] = df1['State'] + df1['State_code'] print(df1) #Concatenate two string columns with space df1['state_and_code'] = df1['State'] +' '+ df1['State_code'] print(df1) #Concatenate String and numeric column df1['code_and_score'] = df1["State_code"]+ "-" + df1["Score"].map(str) print(df1) df2=df1.copy() #Concating two dataframes # Stack the DataFrames on top of each other, by default it is vertical concatenation df3 = pd.concat([df1, df2], axis=0) print(df3) print('*' * 100) # Place the DataFrames side by side df4 = pd.concat([df1, df3.head(5)], axis=1) print(df4) #Concatenating with index resetting df3 = pd.concat([df1, df2], axis=0,ignore_index=True) print(df3) #Concatenating pandas dataframes using .append() dataflair_A = pd.DataFrame([['a', 1], ['b', 2]], columns=['letter', 'number']) dataflair_B = pd.DataFrame([['c', 3], ['d', 4]], columns=['letter', 'number']) result = dataflair_A.append(dataflair_B) result print('dataflair_A ->') print(dataflair_A) print('dataflair_B ->') print(dataflair_B) print('Result ->') print(result) ``` <h3>Merge dataframes</h3> #Why โ€œMergeโ€? Youโ€™d have probably encountered multiple data tables that have various bits of information that you would like to see all in one place โ€” one dataframe in this case. And this is where the power of merge comes in to efficiently combine multiple data tables together in a nice and orderly fashion into a single dataframe for further analysis. โ€œMergingโ€ two datasets is the process of bringing two datasets together into one, and aligning the rows from each based on common attributes or columns. 
The words โ€œmergeโ€ and โ€œjoinโ€ are used relatively interchangeably in Pandas and other languages. Despite the fact that Pandas has both โ€œmergeโ€ and โ€œjoinโ€ functions, essentially they both do the similar things. ![1_RQnYfkGBTsA28WDdkudzvw.png](attachment:1_RQnYfkGBTsA28WDdkudzvw.png) To understand pd.merge, letโ€™s start with a simple line of code as below. What this line of code does is to merge two dataframes โ€” left_dfand right_df โ€” into one based on their values with the samecolumn_name available in both dataframes. With the how='inner', this will perform inner merge to only combine values in the column_name that match. pd.merge(left_df, right_df, on='column_name', how='inner' Since the method how has different parameters (by default Pandas uses inner), weโ€™ll look into different parameters (left, right, inner, outer) and their use cases. Quick loot at data: user_usage โ€” A first dataset containing users monthly mobile usage statistics. user_device โ€” A second dataset containing details of an individual โ€œuseโ€ of the system, with dates and device information. android_device โ€” A third dataset with device and manufacturer data, which lists all Android devices and their model code ``` user_usage = pd.read_csv(r"D:\Data_Science\Batch1_Lessons\user_usage.csv") user_device = pd.read_csv(r"D:\Data_Science\Batch1_Lessons\user_device.csv") android_device = pd.read_csv(r"D:\Data_Science\Batch1_Lessons\android_devices.csv") user_usage.head(5) user_device.shape user_device.head(5) # INNER Merge #Pandas uses โ€œinnerโ€ merge by default. This keeps only the common values in both the left and right dataframes for the merged data. #In our case, only the rows that contain use_id values that are common between user_usage and user_device remain in the merged data โ€” inner_merge. 
inner_merge = pd.merge(user_usage,user_device, on='use_id',how='inner') inner_merge.head() inner_merge.shape android_device.head(5) ``` Itโ€™s important to note here that: The column name use_id is shared between the user_usage and user_device. The device column of user_device and Model column of the android_device dataframe contain common codes ![join-types-merge-names.jpg](attachment:join-types-merge-names.jpg) ``` #LEFT Merge #Keep every row in the left dataframe. #Where there are missing values of the โ€œonโ€ variable in the right dataframe, add empty / NaN values in the result. left_merge = pd.merge(user_usage,user_device, on='use_id',how='left') left_merge.head() left_merge.tail() ``` As expected, the column use_id has already been merged together. We also see that the empty values are replaced by NaN in the right dataframe โ€” user_device. ``` #RIGHT Merge #To perform the right merge, we just repeat the code above by simply changing the parameter of how from left to right. right_merge = pd.merge(user_usage,user_device, on='use_id',how='right') right_merge.head() right_merge.head() ``` This time, we see that the empty values are replaced by NaN in the left dataframe โ€” user_usage. ``` inner_merge.tail() ``` Although the โ€œinnerโ€ merge is used by Pandas by default, the parameter inner is specified above to be explicit. With the operation above, the merged data โ€” inner_merge has different size compared to the original left and right dataframes (user_usage & user_device) as only common values are merged. ``` #Finally, we have โ€œouterโ€ merge. #The โ€œouterโ€ merge combines all the rows for left and right dataframes with NaN when there are no matched values in the rows. 
outer_merge = pd.merge(user_usage,user_device, on='use_id',how='outer',indicator=True) outer_merge.head() outer_merge.iloc[[0,1,200,201,350,351]] ``` To further illustrate how the โ€œouterโ€ merge works, we purposely specify certain rows of the outer_merge to understand where the rows originate from. For the 1st and 2th rows, the rows come from both the dataframes as they have the same values of use_id to be merged. For the 3rd and 4th rows, the rows come from the left dataframe as the right dataframe doesnโ€™t have the common values of use_id. For the 5th and 6th rows, the rows come from the right dataframe as the left dataframe doesnโ€™t have the common values of use_id. ``` #Merge Dataframes with Different Column Names #So weโ€™ve talked about how to merge data using different ways โ€” left, right, inner, and outer. #But the method on only works for the same column name in the left and right dataframes. #Therefore, we use left_on and right_on to replace the method on as shown below. left_merge = pd.merge(user_device,android_device, left_on='device',right_on='Model',how='left',indicator=True) left_merge.head() #Here weโ€™ve merged user_device with android_device since they both contain common codes in their columns โ€” device and Model respectively. 
``` <h2>A Brief Introduction to matplotlib for Data Visualization</h2> ``` #Install matplotlib in python python3 -m pip install matplotlib ``` <h3>Data import and modules import</h3> ``` #We have to import pyplot to have an matlab like graphical environment and mlines to draw lines on a plot import matplotlib.pyplot as plt import matplotlib.lines as mlines #Lets import the data and work on it import numpy as np import matplotlib.pyplot as plt import matplotlib.cbook as cbook with cbook.get_sample_data('goog.npz') as datafile: price_data = np.load(datafile)['price_data'].view(np.recarray) price_data = price_data[-250:] # get the most recent 250 trading days type(price_data) #We then transform the data in a way that is done quite often for time series, etc. #We find the difference, $d_i$, between each observation and the one before it: delta1 = np.diff(price_data.adj_close) / price_data.adj_close[:-1] #We can also look at the transformations of different variables, such as volume and closing price: # Marker size in units of points^2 volume = (15 * price_data.volume[:-2] / price_data.volume[0])**2 close = 0.003 * price_data.close[:-2] / 0.003 * price_data.open[:-2] ``` To actually plot this data, you can use the subplots() functions from plt (matplotlib.pyplot). By default this generates the area for the figure and the axes of a plot. Here we will make a scatter plot of the differences between successive days. To elaborate, x is the difference between day i and the previous day. y is the difference between day i+1 and the previous day (i): ``` fig, ax = plt.subplots() ax.scatter(delta1[:-1], delta1[1:], c=close, s=volume, alpha=0.5) ax.set_xlabel(r'$\Delta_i$', fontsize=15) ax.set_ylabel(r'$\Delta_{i+1}$', fontsize=15) ax.set_title('Volume and percent change') ax.grid(True) fig.tight_layout() plt.show() #plt.show() displays the plot for us. 
``` <h3>Adding a Line</h3> ``` #We can add a line to this plot by providing x and y coordinates as lists to a Line2D instance: import matplotlib.lines as mlines fig, ax = plt.subplots() line = mlines.Line2D([-.15,0.25], [-.07,0.09], color='red') ax.add_line(line) # reusing scatterplot code ax.scatter(delta1[:-1], delta1[1:], c=close, s=volume, alpha=0.5) ax.set_xlabel(r'$\Delta_i$', fontsize=15) ax.set_ylabel(r'$\Delta_{i+1}$', fontsize=15) ax.set_title('Volume and percent change') ax.grid(True) fig.tight_layout() plt.show() ``` <h3>Plotting Histograms</h3> To plot a histogram, we follow a similar process and use the hist() function from pyplot. We will generate 10000 random data points, x, with a mean of 100 and standard deviation of 15. The hist function takes the data, x, number of bins, and other arguments such as density, which normalizes the data to a probability density, or alpha, which sets the transparency of the histogram. We will also use the library mlab to add a line representing a normal density function with the same mean and standard deviation: ``` import numpy as np import matplotlib.mlab as mlab import matplotlib.pyplot as plt mu, sigma = 100, 15 x = mu + sigma*np.random.randn(10000) # the histogram of the data n, bins, patches = plt.hist(x, 30, density=1, facecolor='blue', alpha=0.75) # add a 'best fit' line y = mlab.normpdf( bins, mu, sigma) l = plt.plot(bins, y, 'r--', linewidth=4) plt.xlabel('IQ') plt.ylabel('Probability') plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$') plt.axis([40, 160, 0, 0.03]) plt.grid(True) plt.show() ``` <h3>Bar Charts</h3> While histograms helped us with visual densities, bar charts help us view counts of data. To plot a bar chart with matplotlib, we use the bar() function. This takes the counts and data labels as x and y, along with other arguments. 
As an example, we could look at a sample of the number of programmers that use different languages: ``` import numpy as np import matplotlib.pyplot as plt objects = ('Python', 'C++', 'Java', 'Perl', 'Scala', 'Lisp') y_pos = np.arange(len(objects)) performance = [10,8,6,4,2,1] plt.bar(y_pos, performance, align='center', alpha=0.5) plt.xticks(y_pos, objects) plt.ylabel('Usage') plt.title('Programming language usage') plt.show() ``` <h3>Boxplot</h3> ``` import numpy as np import matplotlib.pyplot as plt # Fixing random state for reproducibility np.random.seed(19680801) # fake up some data spread = np.random.rand(50) * 100 center = np.ones(25) * 50 flier_high = np.random.rand(10) * 100 + 100 flier_low = np.random.rand(10) * -100 data = np.concatenate((spread, center, flier_high, flier_low)) fig1, ax1 = plt.subplots() ax1.set_title('Basic Plot') ax1.boxplot(data) ``` <h3>Subplots</h3> <b>The subplot() function allows you to plot different things in the same figure. In the following script, sine and cosine values are plotted.</b> ``` import numpy as np import matplotlib.pyplot as plt # Compute the x and y coordinates for points on sine and cosine curves x = np.arange(0, 3 * np.pi, 0.1) y_sin = np.sin(x) y_cos = np.cos(x) # Set up a subplot grid that has height 2 and width 1, # and set the first such subplot as active. plt.subplot(2, 1, 1) # Make the first plot plt.plot(x, y_sin) plt.title('Sine') # Set the second subplot as active, and make the second plot. plt.subplot(2, 1, 2) plt.plot(x, y_cos) plt.title('Cosine') # Show the figure. 
plt.show() plt.figure(figsize=(10,4), dpi=120) # 10 is width, 4 is height # Left hand side plot plt.subplot(1,2,1) # (nRows, nColumns, axes number to plot) plt.plot([1,2,3,4,5], [1,2,3,4,10], 'go') # green dots plt.title('Scatterplot Greendots') plt.xlabel('X'); plt.ylabel('Y') plt.xlim(0, 6); plt.ylim(0, 12) # Right hand side plot plt.subplot(1,2,2) plt.plot([1,2,3,4,5], [2,3,4,5,11], 'b*') # blue stars plt.title('Scatterplot Bluestars') plt.xlabel('X'); plt.ylabel('Y') plt.xlim(0, 6); plt.ylim(0, 12) plt.show() ``` #matplotlib.pyplot.subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs)[source] shape : sequence of 2 ints Shape of grid in which to place axis. First entry is number of rows, second entry is number of columns. loc : sequence of 2 ints Location to place axis within grid. First entry is row number, second entry is column number. rowspan : int Number of rows for the axis to span to the right. colspan : int Number of columns for the axis to span downwards. fig : Figure, optional Figure to place axis in. Defaults to current figure. **kwargs Additional keyword arguments are handed to add_subplot. 
``` import pandas as pd # Setup the subplot2grid Layout fig = plt.figure(figsize=(10, 5)) ax1 = plt.subplot2grid((2,4), (0,0)) ax2 = plt.subplot2grid((2,4), (0,1)) ax3 = plt.subplot2grid((2,4), (0,2)) ax4 = plt.subplot2grid((2,4), (0,3)) ax5 = plt.subplot2grid((2,4), (1,0), colspan=2) ax6 = plt.subplot2grid((2,4), (1,2)) ax7 = plt.subplot2grid((2,4), (1,3)) # Input Arrays n = np.array([0,1,2,3,4,5]) x = np.linspace(0,5,10) xx = np.linspace(-0.75, 1., 100) # Scatterplot ax1.scatter(xx, xx + np.random.randn(len(xx))) ax1.set_title("Scatter Plot") # Step Chart ax2.step(n, n**2, lw=2) ax2.set_title("Step Plot") # Bar Chart ax3.bar(n, n**2, align="center", width=0.5, alpha=0.5) ax3.set_title("Bar Chart") # Fill Between ax4.fill_between(x, x**2, x**3, color="steelblue", alpha=0.5); ax4.set_title("Fill Between"); # Time Series dates = pd.date_range('2018-01-01', periods = len(xx)) ax5.plot(dates, xx + np.random.randn(len(xx))) ax5.set_xticks(dates[::30]) ax5.set_xticklabels(dates.strftime('%Y-%m-%d')[::30]) ax5.set_title("Time Series") # Box Plot ax6.boxplot(np.random.randn(len(xx))) ax6.set_title("Box Plot") # Histogram ax7.hist(xx + np.random.randn(len(xx))) ax7.set_title("Histogram") fig.tight_layout() # Credits to # Matplotlib tutorial # Nicolas P. Rougier # https://github.com/rougier/matplotlib-tutorial ```
github_jupyter
# Soring, searching, and counting ``` import numpy as np np.__version__ author = 'kyubyong. longinglove@nate.com' ``` ## Sorting Q1. Sort x along the second axis. ``` x = np.array([[1,4],[3,1]]) out = np.sort(x, axis=1) x.sort(axis=1) assert np.array_equal(out, x) print out ``` Q2. Sort pairs of surnames and first names and return their indices. (first by surname, then by name). ``` surnames = ('Hertz', 'Galilei', 'Hertz') first_names = ('Heinrich', 'Galileo', 'Gustav') print np.lexsort((first_names, surnames)) ``` Q3. Get the indices that would sort x along the second axis. ``` x = np.array([[1,4],[3,1]]) out = np.argsort(x, axis=1) print out ``` Q4. Create an array such that its fifth element would be the same as the element of sorted x, and it divide other elements by their value. ``` x = np.random.permutation(10) print "x =", x print "\nCheck the fifth element of this new array is 5, the first four elements are all smaller than 5, and 6th through the end are bigger than 5\n", out = np.partition(x, 5) x.partition(5) # in-place equivalent assert np.array_equal(x, out) print out ``` Q5. Create the indices of an array such that its third element would be the same as the element of sorted x, and it divide other elements by their value. ``` x = np.random.permutation(10) print "x =", x partitioned = np.partition(x, 3) indices = np.argpartition(x, 3) print "partitioned =", partitioned print "indices =", partitioned assert np.array_equiv(x[indices], partitioned) ``` ## Searching Q6. Get the maximum and minimum values and their indices of x along the second axis. ``` x = np.random.permutation(10).reshape(2, 5) print "x =", x print "maximum values =", np.max(x, 1) print "max indices =", np.argmax(x, 1) print "minimum values =", np.min(x, 1) print "min indices =", np.argmin(x, 1) ``` Q7. Get the maximum and minimum values and their indices of x along the second axis, ignoring NaNs. 
``` x = np.array([[np.nan, 4], [3, 2]]) print "maximum values ignoring NaNs =", np.nanmax(x, 1) print "max indices =", np.nanargmax(x, 1) print "minimum values ignoring NaNs =", np.nanmin(x, 1) print "min indices =", np.nanargmin(x, 1) ``` Q8. Get the values and indices of the elements that are bigger than 2 in x. ``` x = np.array([[1, 2, 3], [1, 3, 5]]) print "Values bigger than 2 =", x[x>2] print "Their indices are ", np.nonzero(x > 2) assert np.array_equiv(x[x>2], x[np.nonzero(x > 2)]) assert np.array_equiv(x[x>2], np.extract(x > 2, x)) ``` Q9. Get the indices of the elements that are bigger than 2 in the flattend x. ``` x = np.array([[1, 2, 3], [1, 3, 5]]) print np.flatnonzero(x) assert np.array_equiv(np.flatnonzero(x), x.ravel().nonzero()) ``` Q10. Check the elements of x and return 0 if it is less than 0, otherwise the element itself. ``` x = np.arange(-5, 4).reshape(3, 3) print np.where(x <0, 0, x) ``` Q11. Get the indices where elements of y should be inserted to x to maintain order. ``` x = [1, 3, 5, 7, 9] y = [0, 4, 2, 6] np.searchsorted(x, y) ``` ## Counting Q12. Get the number of nonzero elements in x. ``` x = [[0,1,7,0,0],[3,0,0,2,19]] print np.count_nonzero(x) assert np.count_nonzero(x) == len(x[x!=0]) ```
github_jupyter
# Introduction Machine learning competitions are a great way to improve your data science skills and measure your progress. In this exercise, you will create and submit predictions for a Kaggle competition. You can then improve your model (e.g. by adding features) to improve and see how you stack up to others taking this micro-course. The steps in this notebook are: 1. Build a Random Forest model with all of your data (**X** and **y**) 2. Read in the "test" data, which doesn't include values for the target. Predict home values in the test data with your Random Forest model. 3. Submit those predictions to the competition and see your score. 4. Optionally, come back to see if you can improve your model by adding features or changing your model. Then you can resubmit to see how that stacks up on the competition leaderboard. ## Recap Here's the code you've written so far. Start by running it again. ``` # Code you have previously used to load data import pandas as pd from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeRegressor # Set up code checking import os if not os.path.exists("../input/train.csv"): os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv") os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv") from learntools.core import binder binder.bind(globals()) from learntools.machine_learning.ex7 import * # Path of the file to read. 
# We changed the directory structure to simplify submitting to a competition
iowa_file_path = '../input/train.csv'

home_data = pd.read_csv(iowa_file_path)
# Create target object and call it y
y = home_data.SalePrice
# Create X
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]

# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)

# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit Model
iowa_model.fit(train_X, train_y)

# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE when not specifying max_leaf_nodes: {:,.0f}".format(val_mae))

# Using best value for max_leaf_nodes
iowa_model = DecisionTreeRegressor(max_leaf_nodes=100, random_state=1)
iowa_model.fit(train_X, train_y)
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE for best value of max_leaf_nodes: {:,.0f}".format(val_mae))

# Define the model. Set random_state to 1
rf_model = RandomForestRegressor(random_state=1)
rf_model.fit(train_X, train_y)
rf_val_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(rf_val_predictions, val_y)

print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_val_mae))
```

# Creating a Model For the Competition

Build a Random Forest model and train it on all of **X** and **y**.

```
# To improve accuracy, create a new Random Forest model which you will train on all training data
rf_model_on_full_data = ____

# fit rf_model_on_full_data on all data from the training data
____
```

# Make Predictions

Read the file of "test" data.
And apply your model to make predictions ``` # path to file you will use for predictions test_data_path = '../input/test.csv' # read test data file using pandas test_data = ____ # create test_X which comes from test_data but includes only the columns you used for prediction. # The list of columns is stored in a variable called features test_X = ____ # make predictions which we will submit. test_preds = ____ # The lines below shows how to save predictions in format used for competition scoring # Just uncomment them. #output = pd.DataFrame({'Id': test_data.Id, # 'SalePrice': test_preds}) #output.to_csv('submission.csv', index=False) ``` Before submitting, run a check to make sure your `test_preds` have the right format. ``` #%%RM_IF(PROD)%% rf_model_on_full_data = RandomForestRegressor() rf_model_on_full_data.fit(X, y) test_data_path = '../input/test.csv' test_data = pd.read_csv(test_data_path) test_X = test_data[features] test_preds = rf_model_on_full_data.predict(test_X) step_1.assert_check_passed() # Check your answer step_1.check() # step_1.solution() ``` # Test Your Work To test your results, you'll need to join the competition (if you haven't already). So open a new window by clicking on [this link](https://www.kaggle.com/c/home-data-for-ml-course). Then click on the **Join Competition** button. ![join competition image](https://i.imgur.com/wLmFtH3.png) Next, follow the instructions below: #$SUBMIT_TO_COMP$ # Continuing Your Progress There are many ways to improve your model, and **experimenting is a great way to learn at this point.** The best way to improve your model is to add features. Look at the list of columns and think about what might affect home prices. Some features will cause errors because of issues like missing values or non-numeric data types. The **[Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning)** micro-course will teach you how to handle these types of features. 
You will also learn to use **xgboost**, a technique giving even better accuracy than Random Forest. # Other Micro-Courses The **[Pandas](https://kaggle.com/Learn/Pandas)** micro-course will give you the data manipulation skills to quickly go from conceptual idea to implementation in your data science projects. You are also ready for the **[Deep Learning](https://kaggle.com/Learn/Deep-Learning)** micro-course, where you will build models with better-than-human level performance at computer vision tasks.
github_jupyter
### Your very own neural network In this notebook, we're going to build a neural network using naught but pure numpy and steel nerves. It's going to be fun, I promise! ![img](https://s27.postimg.org/vpui4r5n7/cartoon-2029952_960_720.png) ``` # use the preloaded keras datasets and models ! mkdir -p ~/.keras/datasets ! mkdir -p ~/.keras/models ! ln -s $(realpath ../readonly/keras/datasets/*) ~/.keras/datasets/ ! ln -s $(realpath ../readonly/keras/models/*) ~/.keras/models/ from __future__ import print_function import numpy as np np.random.seed(42) ``` Here goes our main class: a layer that can .forward() and .backward(). ``` class Layer: """ A building block. Each layer is capable of performing two things: - Process input to get output: output = layer.forward(input) - Propagate gradients through itself: grad_input = layer.backward(input, grad_output) Some layers also have learnable parameters which they update during layer.backward. """ def __init__(self): """Here you can initialize layer parameters (if any) and auxiliary stuff.""" # A dummy layer does nothing pass def forward(self, input): """ Takes input data of shape [batch, input_units], returns output data [batch, output_units] """ # A dummy layer just returns whatever it gets as input. return input def backward(self, input, grad_output): """ Performs a backpropagation step through the layer, with respect to the given input. To compute loss gradients w.r.t input, you need to apply chain rule (backprop): d loss / d x = (d loss / d layer) * (d layer / d x) Luckily, you already receive d loss / d layer as input, so you only need to multiply it by d layer / d x. If your layer has parameters (e.g. 
dense layer), you also need to update them here using d loss / d layer """ # The gradient of a dummy layer is precisely grad_output, but we'll write it more explicitly num_units = input.shape[1] d_layer_d_input = np.eye(num_units) return np.dot(grad_output, d_layer_d_input) # chain rule ``` ### The road ahead We're going to build a neural network that classifies MNIST digits. To do so, we'll need a few building blocks: - Dense layer - a fully-connected layer, $f(X)=W \cdot X + \vec{b}$ - ReLU layer (or any other nonlinearity you want) - Loss function - crossentropy - Backprop algorithm - a stochastic gradient descent with backpropageted gradients Let's approach them one at a time. ### Nonlinearity layer This is the simplest layer you can get: it simply applies a nonlinearity to each element of your network. ``` class ReLU(Layer): def __init__(self): """ReLU layer simply applies elementwise rectified linear unit to all inputs""" pass def forward(self, input): """Apply elementwise ReLU to [batch, input_units] matrix""" # <your code. Try np.maximum> def backward(self, input, grad_output): """Compute gradient of loss w.r.t. ReLU input""" relu_grad = input > 0 return grad_output*relu_grad # some tests from util import eval_numerical_gradient x = np.linspace(-1,1,10*32).reshape([10,32]) l = ReLU() grads = l.backward(x,np.ones([10,32])/(32*10)) numeric_grads = eval_numerical_gradient(lambda x: l.forward(x).mean(), x=x) assert np.allclose(grads, numeric_grads, rtol=1e-3, atol=0),\ "gradient returned by your layer does not match the numerically computed gradient" ``` #### Instant primer: lambda functions In python, you can define functions in one line using the `lambda` syntax: `lambda param1, param2: expression` For example: `f = lambda x, y: x+y` is equivalent to a normal function: ``` def f(x,y): return x+y ``` For more information, click [here](http://www.secnetix.de/olli/Python/lambda_functions.hawk). ### Dense layer Now let's build something more complicated. 
Unlike nonlinearity, a dense layer actually has something to learn. A dense layer applies affine transformation. In a vectorized form, it can be described as: $$f(X)= W \cdot X + \vec b $$ Where * X is an object-feature matrix of shape [batch_size, num_features], * W is a weight matrix [num_features, num_outputs] * and b is a vector of num_outputs biases. Both W and b are initialized during layer creation and updated each time backward is called. ``` class Dense(Layer): def __init__(self, input_units, output_units, learning_rate=0.1): """ A dense layer is a layer which performs a learned affine transformation: f(x) = <W*x> + b """ self.learning_rate = learning_rate # initialize weights with small random numbers. We use normal initialization, # but surely there is something better. Try this once you got it working: http://bit.ly/2vTlmaJ self.weights = np.random.randn(input_units, output_units)*0.01 self.biases = np.zeros(output_units) def forward(self,input): """ Perform an affine transformation: f(x) = <W*x> + b input shape: [batch, input_units] output shape: [batch, output units] """ return #<your code here> def backward(self,input,grad_output): # compute d f / d x = d f / d dense * d dense / d x # where d dense/ d x = weights transposed grad_input = #<your code here> # compute gradient w.r.t. weights and biases grad_weights = #<your code here> grad_biases = #<your code here> assert grad_weights.shape == self.weights.shape and grad_biases.shape == self.biases.shape # Here we perform a stochastic gradient descent step. # Later on, you can try replacing that with something better. self.weights = self.weights - self.learning_rate * grad_weights self.biases = self.biases - self.learning_rate * grad_biases return grad_input ``` ### Testing the dense layer Here we have a few tests to make sure your dense layer works properly. You can just run them, get 3 "well done"s and forget they ever existed. ... or not get 3 "well done"s and go fix stuff. 
If that is the case, here are some tips for you:

* Make sure you compute gradients for W and b as __sum of gradients over batch__, not mean over gradients. Grad_output is already divided by batch size.
* If you're debugging, try saving gradients in class fields, like "self.grad_w = grad_w" or print first 3-5 weights. This helps debugging.
* If nothing else helps, try ignoring tests and proceed to network training. If it trains alright, you may be off by something that does not affect network training.

```
# Check that a fresh Dense layer is sensibly initialized.
l = Dense(128, 150)

assert -0.05 < l.weights.mean() < 0.05 and 1e-3 < l.weights.std() < 1e-1,\
    "The initial weights must have zero mean and small variance. "\
    "If you know what you're doing, remove this assertion."
assert -0.05 < l.biases.mean() < 0.05, "Biases must be zero mean. Ignore if you have a reason to do otherwise."

# To test the outputs, we explicitly set weights with fixed values. DO NOT DO THAT IN ACTUAL NETWORK!
l = Dense(3,4)

x = np.linspace(-1,1,2*3).reshape([2,3])
l.weights = np.linspace(-1,1,3*4).reshape([3,4])
l.biases = np.linspace(-1,1,4)

# Expected values are X @ W + b computed by hand for the fixed weights above.
assert np.allclose(l.forward(x),np.array([[ 0.07272727,  0.41212121,  0.75151515,  1.09090909],
                                          [-0.90909091,  0.08484848,  1.07878788,  2.07272727]]))
print("Well done!")

# To test the grads, we use gradients obtained via finite differences
from util import eval_numerical_gradient

x = np.linspace(-1,1,10*32).reshape([10,32])
# learning_rate=0 so backward() does not mutate the weights we are testing against.
l = Dense(32,64,learning_rate=0)

numeric_grads = eval_numerical_gradient(lambda x: l.forward(x).sum(),x)
grads = l.backward(x,np.ones([10,64]))

assert np.allclose(grads,numeric_grads,rtol=1e-3,atol=0), "input gradient does not match numeric grad"
print("Well done!")

#test gradients w.r.t. params
def compute_out_given_wb(w,b):
    # Forward pass with fixed (w, b); used as f(w) / f(b) for numeric differentiation.
    l = Dense(32,64,learning_rate=1)
    l.weights = np.array(w)
    l.biases = np.array(b)
    x = np.linspace(-1,1,10*32).reshape([10,32])
    return l.forward(x)

def compute_grad_by_params(w,b):
    # One backward step; with learning_rate=1 the parameter deltas equal the gradients.
    l = Dense(32,64,learning_rate=1)
    l.weights = np.array(w)
    l.biases = np.array(b)
    x = np.linspace(-1,1,10*32).reshape([10,32])
    l.backward(x,np.ones([10,64]) / 10.)
    return w - l.weights, b - l.biases

w,b = np.random.randn(32,64), np.linspace(-1,1,64)

numeric_dw = eval_numerical_gradient(lambda w: compute_out_given_wb(w,b).mean(0).sum(),w )
numeric_db = eval_numerical_gradient(lambda b: compute_out_given_wb(w,b).mean(0).sum(),b )
grad_w,grad_b = compute_grad_by_params(w,b)

assert np.allclose(numeric_dw,grad_w,rtol=1e-3,atol=0), "weight gradient does not match numeric weight gradient"
assert np.allclose(numeric_db,grad_b,rtol=1e-3,atol=0), "weight gradient does not match numeric weight gradient"

print("Well done!")
```

### The loss function

Since we want to predict probabilities, it would be logical for us to define softmax nonlinearity on top of our network and compute loss given predicted probabilities. However, there is a better way to do so.

If you write down the expression for crossentropy as a function of softmax logits (a), you'll see:

$$ loss = - log \space {e^{a_{correct}} \over {\underset i \sum e^{a_i} } } $$

If you take a closer look, ya'll see that it can be rewritten as:

$$ loss = - a_{correct} + log {\underset i \sum e^{a_i} } $$

It's called Log-softmax and it's better than naive log(softmax(a)) in all aspects:
* Better numerical stability
* Easier to get derivative right
* Marginally faster to compute

So why not just use log-softmax throughout our computation and never actually bother to estimate probabilities.

Here you are! We've defined the both loss functions for you so that you could focus on neural network part.
```
def softmax_crossentropy_with_logits(logits,reference_answers):
    """Compute crossentropy from logits[batch,n_classes] and ids of correct answers"""
    # log-softmax form of crossentropy: -a_correct + log(sum_i exp(a_i)), per sample
    logits_for_answers = logits[np.arange(len(logits)),reference_answers]
    xentropy = - logits_for_answers + np.log(np.sum(np.exp(logits),axis=-1))
    return xentropy

def grad_softmax_crossentropy_with_logits(logits,reference_answers):
    """Compute crossentropy gradient from logits[batch,n_classes] and ids of correct answers"""
    # d loss / d logits = (softmax - onehot(answer)) / batch_size
    ones_for_answers = np.zeros_like(logits)
    ones_for_answers[np.arange(len(logits)),reference_answers] = 1

    softmax = np.exp(logits) / np.exp(logits).sum(axis=-1,keepdims=True)

    return (- ones_for_answers + softmax) / logits.shape[0]

# sanity check of the analytic gradient against a numeric one
logits = np.linspace(-1,1,500).reshape([50,10])
answers = np.arange(50)%10

softmax_crossentropy_with_logits(logits,answers)
grads = grad_softmax_crossentropy_with_logits(logits,answers)
numeric_grads = eval_numerical_gradient(lambda l: softmax_crossentropy_with_logits(l,answers).mean(),logits)

assert np.allclose(numeric_grads,grads,rtol=1e-3,atol=0), "The reference implementation has just failed. Someone has just changed the rules of math."
```

### Full network

Now let's combine what we've just built into a working neural network. As we announced, we're gonna use this monster to classify handwritten digits, so let's get them loaded.

```
import matplotlib.pyplot as plt
%matplotlib inline

from preprocessed_mnist import load_dataset
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(flatten=True)

plt.figure(figsize=[6,6])
for i in range(4):
    plt.subplot(2,2,i+1)
    plt.title("Label: %i"%y_train[i])
    plt.imshow(X_train[i].reshape([28,28]),cmap='gray');
```

We'll define network as a list of layers, each applied on top of previous one. In this setting, computing predictions and training becomes trivial.
``` network = [] network.append(Dense(X_train.shape[1],100)) network.append(ReLU()) network.append(Dense(100,200)) network.append(ReLU()) network.append(Dense(200,10)) def forward(network, X): """ Compute activations of all network layers by applying them sequentially. Return a list of activations for each layer. Make sure last activation corresponds to network logits. """ activations = [] input = X # <your code here> assert len(activations) == len(network) return activations def predict(network,X): """ Compute network predictions. """ logits = forward(network,X)[-1] return logits.argmax(axis=-1) def train(network,X,y): """ Train your network on a given batch of X and y. You first need to run forward to get all layer activations. Then you can run layer.backward going from last to first layer. After you called backward for all layers, all Dense layers have already made one gradient step. """ # Get the layer activations layer_activations = forward(network,X) layer_inputs = [X]+layer_activations #layer_input[i] is an input for network[i] logits = layer_activations[-1] # Compute the loss and the initial gradient loss = softmax_crossentropy_with_logits(logits,y) loss_grad = grad_softmax_crossentropy_with_logits(logits,y) # <your code: propagate gradients through the network> return np.mean(loss) ``` Instead of tests, we provide you with a training loop that prints training and validation accuracies on every epoch. If your implementation of forward and backward are correct, your accuracy should grow from 90~93% to >97% with the default network. ### Training loop As usual, we split data into minibatches, feed each such minibatch into the network and update weights. 
```
from tqdm import trange

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    # Yield (inputs, targets) minibatches; the final partial batch is dropped
    # by the range bound below.
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.random.permutation(len(inputs))
    for start_idx in trange(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]

from IPython.display import clear_output
train_log = []
val_log = []

for epoch in range(25):
    for x_batch,y_batch in iterate_minibatches(X_train,y_train,batchsize=32,shuffle=True):
        train(network,x_batch,y_batch)

    # Full-dataset accuracy after each epoch (train and validation).
    train_log.append(np.mean(predict(network,X_train)==y_train))
    val_log.append(np.mean(predict(network,X_val)==y_val))

    clear_output()
    print("Epoch",epoch)
    print("Train accuracy:",train_log[-1])
    print("Val accuracy:",val_log[-1])
    plt.plot(train_log,label='train accuracy')
    plt.plot(val_log,label='val accuracy')
    plt.legend(loc='best')
    plt.grid()
    plt.show()
```

### Peer-reviewed assignment

Congradulations, you managed to get this far! There is just one quest left undone, and this time you'll get to choose what to do.

#### Option I: initialization
* Implement Dense layer with Xavier initialization as explained [here](http://bit.ly/2vTlmaJ)

To pass this assignment, you must conduct an experiment showing how xavier initialization compares to default initialization on deep networks (5+ layers).

#### Option II: regularization
* Implement a version of Dense layer with L2 regularization penalty: when updating Dense Layer weights, adjust gradients to minimize

$$ Loss = Crossentropy + \alpha \cdot \underset i \sum {w_i}^2 $$

To pass this assignment, you must conduct an experiment showing if regularization mitigates overfitting in case of abundantly large number of neurons. Consider tuning $\alpha$ for better results.

#### Option III: optimization
* Implement a version of Dense layer that uses momentum/rmsprop or whatever method worked best for you last time.
Most of those methods require persistent parameters like momentum direction or moving average grad norm, but you can easily store those params inside your layers. To pass this assignment, you must conduct an experiment showing how your chosen method performs compared to vanilla SGD. ### General remarks _Please read the peer-review guidelines before starting this part of the assignment._ In short, a good solution is one that: * is based on this notebook * runs in the default course environment with Run All * its code doesn't cause spontaneous eye bleeding * its report is easy to read. _Formally we can't ban you from writing boring reports, but if you bored your reviewer to death, there's no one left alive to give you the grade you want._ ### Bonus assignments As a bonus assignment (no points, just swag), consider implementing Batch Normalization ([guide](https://gab41.lab41.org/batch-normalization-what-the-hey-d480039a9e3b)) or Dropout ([guide](https://medium.com/@amarbudhiraja/https-medium-com-amarbudhiraja-learning-less-to-learn-better-dropout-in-deep-machine-learning-74334da4bfc5)). Note, however, that those "layers" behave differently when training and when predicting on the test set. * Dropout: * During training: drop units randomly with probability __p__ and multiply everything by __1/(1-p)__ * During final prediction: do nothing; pretend there's no dropout * Batch normalization * During training, it subtracts mean-over-batch and divides by std-over-batch and updates mean and variance. * During final prediction, it uses accumulated mean and variance.
github_jupyter
## Hospital Preparation ``` import requests import math import pprint import time import numpy as np import pandas as pd import urllib # List from ATC EMS raw data ems_hospitals = ["South Austin Hospital", "Seton Northwest", "North Austin Hospital", "Baylor Scott & White - Austin", "Baylor Scott & White - Lakeway", "Seton Southwest", "Dell Childrens Med Ctr", "Dell Seton Med Ctr", "Seton Med Ctr", "Saint Davids Med Ctr", "Westlake Hospital", "Heart Hospital"] pprint.pprint(ems_hospitals) # Setup and test Bing Maps API querying BingMapsKey = "AgmNkDXiPX65nDUv6MUa0gRtHyrrGAMuD5n8mNQfl72ISI6xGZA37clQ0OcP56y-" # Example call = requests.get("https://dev.virtualearth.net/REST/v1/LocalSearch/?query=hospital&userLocation=47.602038,-122.333964&key=" + BingMapsKey) pprint.pprint(call.json()) # Test Find a location by query query = ems_hospitals[0] encoded_query = urllib.parse.quote(query) call = requests.get("http://dev.virtualearth.net/REST/v1/Locations?query=" + encoded_query + "&key=" + BingMapsKey) pprint.pprint(call.json()) # Test local search query = ems_hospitals[3] area = "30.2672,-97.7431" if("Lakeway" in query): area = "30.365307, -97.976154" call = requests.get("https://dev.virtualearth.net/REST/v1/LocalSearch/?query=" + query + "&userLocation=" + area + "&maxResults=25&key=" + BingMapsKey) pprint.pprint(call.json()) hospital_data_list = [] for h in ems_hospitals: query = h area = "30.2672,-97.7431" if("Lakeway" in query): area = "30.365307, -97.976154" call = requests.get("https://dev.virtualearth.net/REST/v1/LocalSearch/?query=" + query + "&userLocation=" + area + "&maxResults=25&key=" + BingMapsKey) i = 0 while(1): hospital_data = call.json()['resourceSets'][0]['resources'][i] if("Animal" not in hospital_data['name'] and "Clinic" not in hospital_data['name']): hospital_data_list.append(hospital_data) break else: i += 1 # Verify data pprint.pprint(ems_hospitals) print() #pprint.pprint(hospital_data_list) for i in range(len(hospital_data_list)): 
print(hospital_data_list[i]['name']) #print(hospital_data_list[i]['point']['coordinates']) # Export data to csv hospital_names = [] hospital_coords = [] for i in range(len(hospital_data_list)): hospital_names.append(hospital_data_list[i]['name']) hospital_coords.append(hospital_data_list[i]['point']['coordinates']) hospital_locations = np.array(hospital_coords) dataset = pd.DataFrame({'hospital_name': hospital_names, "longitude": hospital_locations[:,1], "latitude": hospital_locations[:,0]}) print(dataset) dataset.to_csv('ems_hospitals.csv', index = False) ```
github_jupyter
``` import pandas as pd, numpy as np from scipy import stats stations=pd.read_csv('data/stations.csv').set_index('ID') c='ro' df=pd.read_csv('data/'+c+'_ds.csv') #daily data # df=pd.read_csv('data/'+c+'_hs.csv') #high_res data df['time']=pd.to_datetime(df['time']) df['year']=df['time'].dt.year df['month']=df['time'].dt.month df['day']=df['time'].dt.day df['hour']=df['time'].dt.hour df=df.set_index('time') df=df.sort_index() df.groupby('year').nunique()['ID'].plot() history=df.groupby('ID').nunique()['year'].sort_values(ascending=False) history=pd.DataFrame(history).join(stations) history.head() nepi=pd.read_excel(c+'/idojaras_'+c+'.xlsx') Setup plot params import matplotlib.pyplot as plt import seaborn as sns from matplotlib.collections import PolyCollection %matplotlib inline import matplotlib as mpl import matplotlib.font_manager as font_manager path = 'KulimPark-Regular.ttf' path2 = 'Symbola.ttf' prop = font_manager.FontProperties(fname=path) prop2 = font_manager.FontProperties(fname=path2) color_ax='#E7CFBC' color_bg='#FFF4EC' color_obs_right0='#F2B880' color_obs_left0=color_ax color_pred_right0='#C98686' color_pred_left0='#966B9D' color_pred_talalt0='#59c687' color_pred_nem_talalt0='#c95498' font_size=12 s=40 obs_talalt_glyph0='โ˜…' obs_nem_talalt_glyph0='โ˜†' pred_talalt_glyph0='โœ”๏ธ' pred_nem_talalt_glyph0='โœ–๏ธ' title_icon_right={'Temp':'โ˜ผ','Wind':'โ–บ','Hail':'โ–ฒ','Snow':'โ–ฒ','Snow Depth':'โ–ฒ','Rain':'โ˜”๏ธ','Visib':'โ˜€๏ธ'} title_icon_left={'Temp':'โ„๏ธ','Wind':'โ—„','Hail':'โ–ผ','Snow':'โ–ผ','Snow Depth':'โ–ผ','Rain':'โ˜‚๏ธ','Visib':'โ˜๏ธ'} title_icon={'Temp':'โ™จ๏ธ','Rain':'โ˜‚๏ธ','Hail':'โ„๏ธ','Snow':'โ›ท๏ธ','Snow Depth':'โ›„๏ธ','Wind':'โ˜˜','Cloud':'โ˜๏ธ','Visib':'โ˜€๏ธ'} def get_data(data,th): a1=pd.DataFrame(data[data<=th]) a1['g']='left' a2=pd.DataFrame(data[data>th]) a2['g']='right' a3=pd.concat([a1,a2]) a3['x']='x' return a1,a2,a3 def violin_plot(data,th,ax,color_left,color_right): a=0.3 a1,a2,a3=get_data(data,th) if 
len(a1)==1: a11=pd.DataFrame(a1) a11['x']='x' a11[a1.columns[0]]=[a1[a1.columns[0]].values[0]*1.1] a3=pd.concat([a3,a11]) ax.axvline(0,color=color_ax) if a3.nunique()['g']>1: sns.violinplot(y=a1.columns[0], x='x',hue='g', data=a3, split=True, ax=ax, inner=None,linewidth=1, scale="count", saturation=1) ax.get_children()[0].set_color(matplotlib.colors.colorConverter.to_rgba(color_left, alpha=a)) ax.get_children()[0].set_edgecolor(color_left) ax.get_children()[1].set_color(matplotlib.colors.colorConverter.to_rgba(color_right, alpha=a)) ax.get_children()[1].set_edgecolor(color_right) ax.legend().remove() else: if len(a1)>0: w=a1 c=color_left else: w=a2 c=color_right sns.violinplot(y=w.columns[0], data=w, ax=ax, inner=None,linewidth=1, scale="count", saturation=1) ax.set_xlim([-1,0]) ax.get_children()[0].set_color(matplotlib.colors.colorConverter.to_rgba(c, alpha=a)) ax.get_children()[0].set_edgecolor(c) spine_plot(datum,nep['Mondรกs'].strip(),mondas,nep['Jelentรฉs'].strip(),nep['Kondรญciรณ'],nep['Mennyisรฉg'], observation_ts,observation_th,prediction_ts,prediction_th) def setup_axes(): fig,axes=plt.subplots(1,3,figsize=(8,5),gridspec_kw={'width_ratios': [1, 3, 1]}) axi_top= axes[2].inset_axes([0.1, 0.65, 1, 0.3]) axi_top.axis('off') axi_bottom= axes[2].inset_axes([0.1, 0, 1, 0.5]) axi_bottom.axis('off') axes[0].axis('off') axes[1].axis('off') axes[2].axis('off') axes[0]=axes[0].inset_axes([0, 0.15, 1, 0.85]) axes[1]=axes[1].inset_axes([0, 0.15, 1, 0.85]) axes[0].axis('off') axes[1].axis('off') return fig, axes, axi_top, axi_bottom def stem_plot(data,ax,color,s=s): data=pd.DataFrame(data) x=data.index y=data[data.columns[0]].values for i,e in enumerate(y): ax.plot([0,e],[x[i],x[i]],color=color) ax.scatter(y,x,s,color=color,zorder=10) def stem2_plot(data,th,ax,color_left,color_right,s=s,axv_color=None): if axv_color==None:axv_color=color_right a1,a2,a3=get_data(data,th) stem_plot(a1,ax,color_left,s) stem_plot(a2,ax,color_right,s) ax.axvline(0,color=color_ax) #if th!=0: 
if True: ax.axvline(th,color=axv_color,ls='--',zorder=5) def icons_plot(axes,kondicio,mennyiseg,observation_th,prediction_th): ylim=axes[0].get_ylim() xlim=axes[1].get_xlim() y_max_coord=ylim[0]+(ylim[1]-ylim[0])*1.05 y_max_coord2=ylim[0]+(ylim[1]-ylim[0])*1.05 #1.04 x_icon_coord_shift=(xlim[1]-xlim[0])*0.1 axes[0].text(observation_th, y_max_coord, title_icon[kondicio], horizontalalignment='center', color=color_obs_right0, fontproperties=prop2, fontsize=font_size*1.5) axes[1].text(prediction_th, y_max_coord, title_icon[mennyiseg], horizontalalignment='center', color=color_ax, fontproperties=prop2, fontsize=font_size*1.5) axes[1].text(prediction_th+x_icon_coord_shift, y_max_coord2, title_icon_right[mennyiseg], horizontalalignment='center', color=color_pred_right, fontproperties=prop2, fontsize=font_size*1.5) axes[1].text(prediction_th-x_icon_coord_shift, y_max_coord2, title_icon_left[mennyiseg], horizontalalignment='center', color=color_pred_left, fontproperties=prop2, fontsize=font_size*1.5) def talalat_plot_line(axes,n_prediction_ts_good,n_prediction_ts_bad, n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt, observation_th,prediction_th): ylim=axes[0].get_ylim() xlim=axes[0].get_xlim() y_max_coord=ylim[0]+(ylim[1]-ylim[0])*(-0.07) x_icon_coord_shift=(xlim[1]-xlim[0])*0.1 x_icon_coord_shift2=(xlim[1]-xlim[0])*0.27 axes[0].text(observation_th+x_icon_coord_shift, y_max_coord, obs_talalt_glyph, horizontalalignment='center', color=color_obs_right, fontproperties=prop2) axes[0].text(observation_th-x_icon_coord_shift, y_max_coord, obs_nem_talalt_glyph, horizontalalignment='center', color=color_obs_left, fontproperties=prop2) axes[0].text(observation_th+x_icon_coord_shift2, y_max_coord, n_prediction_ts_good, horizontalalignment='center', color=color_obs_right, fontproperties=prop) axes[0].text(observation_th-x_icon_coord_shift2, y_max_coord, n_prediction_ts_bad, horizontalalignment='center', color=color_obs_left, fontproperties=prop) axes[0].text(observation_th, 
y_max_coord, '|', horizontalalignment='center', color=color_obs_right0, fontproperties=prop,fontsize=19) xlim=axes[1].get_xlim() x_icon_coord_shift=(xlim[1]-xlim[0])*0.04 x_icon_coord_shift2=(xlim[1]-xlim[0])*0.1 axes[1].text(prediction_th+x_icon_coord_shift, y_max_coord, pred_talalt_glyph, horizontalalignment='center', color=color_pred_talalt, fontproperties=prop2) axes[1].text(prediction_th-x_icon_coord_shift, y_max_coord, pred_nem_talalt_glyph, horizontalalignment='center', color=color_pred_nem_talalt, fontproperties=prop2) axes[1].text(prediction_th+x_icon_coord_shift2, y_max_coord, n_prediction_ts_good_talalt, horizontalalignment='center', color=color_pred_talalt, fontproperties=prop) axes[1].text(prediction_th-x_icon_coord_shift2, y_max_coord, n_prediction_ts_good_nem_talalt, horizontalalignment='center', color=color_pred_nem_talalt, fontproperties=prop) axes[1].text(prediction_th, y_max_coord, '|', horizontalalignment='center', color=color_pred_right, fontproperties=prop,fontsize=19) y_max_coord=ylim[0]+(ylim[1]-ylim[0])*(-0.14) axes[0].text(observation_th, y_max_coord, 'feltรฉtel', horizontalalignment='center', color=color_obs_right0, fontproperties=prop) axes[1].text(prediction_th, y_max_coord, 'jรณslat', horizontalalignment='center', color=color_pred_right, fontproperties=prop) y_max_coord=ylim[0]+(ylim[1]-ylim[0])*(-0.13) x_coord_shift=prediction_th+(prediction_th-xlim[0])*(-0.4) axes[1].annotate('', xy=(x_coord_shift, y_max_coord),xycoords='data',annotation_clip=False, xytext=(xlim[0], y_max_coord),arrowprops=dict(arrowstyle= '->',color=color_obs_right0)) def talalat_plot_violin(axes,n_prediction_ts_good,n_prediction_ts_bad,n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt): y_icon_obs=0.65 y_icon_pred=0.5 if color_obs_right==color_obs_right0: x=0.72 else: x=0.47 axes[2].text(0.72, y_icon_obs, obs_talalt_glyph, horizontalalignment='center', color=color_obs_right, fontproperties=prop2) axes[2].text(0.9, y_icon_obs,n_prediction_ts_good, 
horizontalalignment='center', color=color_obs_right, fontproperties=prop) axes[2].text(0.47, y_icon_obs, obs_nem_talalt_glyph, horizontalalignment='center', color=color_obs_left, fontproperties=prop2) axes[2].text(0.29, y_icon_obs, n_prediction_ts_bad, horizontalalignment='center', color=color_obs_left, fontproperties=prop) axes[2].text(0.72, y_icon_pred, pred_talalt_glyph, horizontalalignment='center', color=color_pred_talalt, fontproperties=prop2) axes[2].text(0.9, y_icon_pred, n_prediction_ts_good_talalt, horizontalalignment='center', color=color_pred_talalt, fontproperties=prop) axes[2].text(0.47, y_icon_pred, pred_nem_talalt_glyph, horizontalalignment='center', color=color_pred_nem_talalt, fontproperties=prop2) axes[2].text(0.29, y_icon_pred, n_prediction_ts_good_nem_talalt, horizontalalignment='center', color=color_pred_nem_talalt, fontproperties=prop) axes[2].annotate('', xy=(0.59, y_icon_pred*1.04),xycoords='data', xytext=(x, y_icon_obs*0.98),arrowprops=dict(arrowstyle= '->',color=color_obs_right0)) def talalat_plot(axes,ns,observation_th,prediction_th): n_prediction_ts_good,n_prediction_ts_bad,n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt=ns talalat_plot_line(axes,n_prediction_ts_good,n_prediction_ts_bad, n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt, observation_th,prediction_th) talalat_plot_violin(axes,n_prediction_ts_good,n_prediction_ts_bad, n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt) def year_plot(data,ax,k): y=data.values x=data.index ex=max(y)-min(y) text_off=abs(ex*k) text_align='left' if y[0]<0: text_off=-text_off text_align='right' ax.text(y[0]+text_off, x[0], str(x[0]), horizontalalignment=text_align, verticalalignment='center', color=color_ax, fontproperties=prop) text_off=abs(text_off) text_align='left' if y[-1]<0: text_off=-text_off text_align='right' ax.text(y[-1]+text_off, x[-1], str(x[-1]), horizontalalignment=text_align, verticalalignment='center', color=color_ax, fontproperties=prop) def 
spine_plot(datum,title,mondas,jelentes,kondicio,mennyiseg, observation_ts,observation_th,prediction_ts,prediction_th): #data prediction_ts_good=prediction_ts.loc[observation_ts[observation_ts>observation_th].index] prediction_ts_bad=prediction_ts.loc[observation_ts[observation_ts<=observation_th].index] n_prediction_ts_good=len(prediction_ts_good) n_prediction_ts_bad=len(prediction_ts_bad) if color_obs_right0!=color_obs_right: prediction_ts_good,prediction_ts_bad=prediction_ts_bad,prediction_ts_good prediction_ts_good_nem_talalt,prediction_ts_good_talalt,\ prediction_ts_good_joined=get_data(prediction_ts_good,prediction_th) n_prediction_ts_good_talalt=len(prediction_ts_good_talalt) n_prediction_ts_good_nem_talalt=len(prediction_ts_good_nem_talalt) ns=[n_prediction_ts_good,n_prediction_ts_bad,n_prediction_ts_good_talalt,n_prediction_ts_good_nem_talalt] #plots fig, axes, axi_top, axi_bottom=setup_axes() stem2_plot(observation_ts,observation_th,axes[0],color_obs_left,color_obs_right,s/2,color_obs_right0) stem2_plot(prediction_ts_good,prediction_th,axes[1],color_pred_left,color_pred_right) stem_plot(prediction_ts_bad,axes[1],color_ax) violin_plot(observation_ts,observation_th,axi_top,color_obs_left,color_obs_right) violin_plot(prediction_ts_good,prediction_th,axi_bottom,color_pred_left,color_pred_right) #icons icons_plot(axes,kondicio,mennyiseg,observation_th,prediction_th) #talalat talalat_plot(axes,ns,observation_th,prediction_th) #years year_plot(observation_ts,axes[0],0.09) year_plot(prediction_ts,axes[1],0.03) #titles len_ratio=0.15*(-1+(len(jelentes.split(',')[0])/len(jelentes.split(',')[1]))) fig.text(0.5+len_ratio,0.04,jelentes.split(',')[0]+',',color=color_obs_right0, fontproperties=prop,fontsize=font_size*0.7,horizontalalignment='right') if color_pred_talalt==color_pred_talalt0: color_pred_side=color_pred_right else: color_pred_side=color_pred_left fig.text(0.5+len_ratio,0.04,jelentes.split(',')[1],color=color_pred_side, 
fontproperties=prop,fontsize=font_size*0.7,horizontalalignment='left') if n_prediction_ts_good_nem_talalt>=n_prediction_ts_good_talalt: color_title=color_pred_nem_talalt verdict=pred_nem_talalt_glyph else: color_title=color_pred_talalt verdict=pred_talalt_glyph plt.suptitle(title,y=0.11,color=color_title,fontproperties=prop,fontsize=font_size) fig.text(0.96,0.04,verdict, fontproperties=prop2, horizontalalignment='right', color=color_title, fontsize=font_size*2, ) fig.text(0.04,0.045, datum, fontproperties=prop, horizontalalignment='left', color=color_obs_right0, fontsize=font_size*2, ) plt.savefig(c+'/'+str(mondas)+'.png',dpi=300, facecolor=color_bg) plt.show() def filter_data(dz,observation_range,prediction_range): dgs=[] dhs=[] for year in range(int(dz.min()['year']),int(dz.max()['year'])): k=0 from_date=pd.to_datetime(str(year)+'-'+str(observation_range[k].month)+'-'+str(observation_range[k].day)) from_pred=pd.to_datetime(str(year)+'-'+str(prediction_range[k].month)+'-'+str(prediction_range[k].day)) k=1 to_date=pd.to_datetime(str(year)+'-'+str(observation_range[k].month)+'-'+str(observation_range[k].day)) to_pred=pd.to_datetime(str(year)+'-'+str(prediction_range[k].month)+'-'+str(prediction_range[k].day)) if to_pred<to_date: to_pred+=pd.to_timedelta('1Y') dg=dz.loc[from_date:] dg=dg[:to_date] dg['pyear']=year dgs.append(dg) dh=dz.loc[from_pred:] dh=dh[:to_pred] dh['pyear']=year dhs.append(dh) return pd.concat(dgs),pd.concat(dhs) dz=df.groupby(['time']).mean() dz['year']=dz['year'].astype(int) dz.head() def set_direction(kondicio, mennyiseg): if kondicio: color_obs_right=color_obs_right0 color_obs_left=color_obs_left0 obs_talalt_glyph='โ˜…' obs_nem_talalt_glyph='โ˜†' else: color_obs_right=color_obs_left0 color_obs_left=color_obs_right0 obs_talalt_glyph='โ˜†' obs_nem_talalt_glyph='โ˜…' if mennyiseg: color_pred_talalt=color_pred_talalt0 color_pred_nem_talalt=color_pred_nem_talalt0 pred_talalt_glyph='โœ”๏ธ' pred_nem_talalt_glyph='โœ–๏ธ' else: 
color_pred_talalt=color_pred_nem_talalt0 color_pred_nem_talalt=color_pred_talalt0 pred_talalt_glyph='โœ–๏ธ' pred_nem_talalt_glyph='โœ”๏ธ' return color_obs_right,color_obs_left,obs_talalt_glyph,obs_nem_talalt_glyph,\ color_pred_talalt,color_pred_nem_talalt,pred_talalt_glyph,pred_nem_talalt_glyph def get_sign(sign,key): positive=True if (('-' in sign) or ('+' in sign)): if sign=='-': positive=False elif sign=='+': positive=True elif (('<' in sign) or ('>' in sign)): if '<' in sign: positive=False elif '>' in sign: positive=True return positive universal_normalize=['XTEMP','XVSB','XSPD'] def get_ts_data(data,key,sign): ts=data.groupby('year').mean()[key] if (('-' in sign) or ('+' in sign)): th=ts.mean() else: th=float(sign[1:]) if key in universal_normalize: th-=ts.mean() ts-=ts.mean() return ts,th def get_comp_data(observation_data,obs_key,obs_sign,prediction_data,pred_key,pred_sign): ertek_sign=True irany_sign=True observation_ts=observation_data.groupby('year').mean()[obs_key] prediction_ts=prediction_data.groupby('year').mean()[pred_key] prediction_th=observation_ts.mean() observation_ts-=observation_ts.mean() observation_th=observation_ts.min()*1.01 prediction_th-=prediction_ts.mean() prediction_ts-=prediction_ts.mean() if obs_sign=='A': if pred_sign=='A': observation_th=0 prediction_th=0 else: irany_sign=False return observation_ts,observation_th,prediction_ts,prediction_th,ertek_sign,irany_sign mennyiseg_key={'Temp':'XTEMP','Snow Depth':'XSD','Wind':'XSPD','Rain':'YPCP','Visib':'XVSB', 'Snow':'YSNW','Hail':'YHAL'} nepi=pd.read_excel(c+'/idojaras_'+c+'.xlsx') mondasok=nepi['ID'].values # mondasok=range(60,61) mondasok=[55] for mondas in mondasok: nep=nepi.loc[mondas] if str(nep['Mennyisรฉg'])!='nan': print(mondas) obs_key=mennyiseg_key[nep['Kondรญciรณ']] pred_key=mennyiseg_key[nep['Mennyisรฉg']] observation_range=[nep['Dรกtum:mettล‘l']+pd.to_timedelta('-1D'),nep['Dรกtum:meddig']+pd.to_timedelta('+2D')] 
prediction_range=[nep['Periรณdus:mettล‘l'],nep['Periรณdus:meddig']+pd.to_timedelta('+1D')] observation_data,prediction_data=filter_data(dz,observation_range,prediction_range) #comparison if str(nep['ร‰rtรฉk']) in ['A','B']: observation_ts,observation_th,prediction_ts,prediction_th,ertek_sign,irany_sign=\ get_comp_data(observation_data,obs_key,nep['ร‰rtรฉk'],\ prediction_data,pred_key,nep['Irรกny']) #time series else: ertek_sign=get_sign(nep['ร‰rtรฉk'],obs_key) irany_sign=get_sign(nep['Irรกny'],pred_key) observation_ts,observation_th=get_ts_data(observation_data,obs_key,nep['ร‰rtรฉk']) prediction_ts,prediction_th=get_ts_data(prediction_data,pred_key,nep['Irรกny']) color_obs_right,color_obs_left,obs_talalt_glyph,obs_nem_talalt_glyph,\ color_pred_talalt,color_pred_nem_talalt,pred_talalt_glyph,pred_nem_talalt_glyph=\ set_direction(ertek_sign, irany_sign) datum=str(nep['Dรกtum:mettล‘l'].month)+'.'+str(nep['Dรกtum:mettล‘l'].day)+'.' datum=str(nep['Dรกtums'])[:3]+'. '+str(nep['Dรกtum:mettล‘l'].day) spine_plot(datum,nep['Mondรกs'].strip(),mondas,nep['Jelentรฉs'].strip(),nep['Kondรญciรณ'],nep['Mennyisรฉg'], observation_ts,observation_th,prediction_ts,prediction_th) ```
github_jupyter
class Env:
    """Namespace of constants for CIFAR-10 training (used like a read-only dict)."""
    # --- data loading ---
    batch_size = 20      # samples per mini-batch
    valid_size = 0.2     # fraction of the training set held out for validation
    num_workers = 0      # DataLoader worker processes (0 = load in main process)
    # Normalize each RGB channel from [0, 1] to [-1, 1].
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # --- runtime / training ---
    train_on_gpu = torch.cuda.is_available()  # move tensors to CUDA when available
    epochs = 20
    valid_loss_min = np.Inf  # best validation loss observed so far
    # CIFAR-10 class names, indexed by label id.
    classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# Visualize one batch of training images.
def imshow(img):
    """Un-normalize a CHW image (normalized with mean=std=0.5) and display it as HWC."""
    img = img / 2 + 0.5  # invert transforms.Normalize((0.5,...), (0.5,...))
    plt.imshow(np.transpose(img, (1, 2, 0)))

dataiter = iter(train_loader)
# `dataiter.next()` was removed from DataLoader iterators; use the builtin next().
images, labels = dataiter.next() if hasattr(dataiter, 'next') else next(dataiter)
images = images.numpy()

fig = plt.figure(figsize=(20, 4))
for idx in np.arange(20):
    # add_subplot expects integer row/column counts: use 20 // 2, not 20 / 2.
    ax = fig.add_subplot(2, 20 // 2, idx + 1, xticks=[], yticks=[])
    imshow(images[idx])
    ax.set_title(Env.classes[labels[idx]])
The more convolutional layers you include, the more complex patterns in color and shape a model can detect. It's suggested that your final model include 2 or 3 convolutional layers as well as linear layers + dropout in between to avoid overfitting. It's good practice to look at existing research and implementations of related models as a starting point for defining your own models. You may find it useful to look at [this PyTorch classification example](https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py) or [this, more complex Keras example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py) to help decide on a final structure. #### Output volume for a convolutional layer To compute the output size of a given convolutional layer we can perform the following calculation (taken from [Stanford's cs231n course](http://cs231n.github.io/convolutional-networks/#layers)): > We can compute the spatial size of the output volume as a function of the input volume size (W), the kernel/filter size (F), the stride with which they are applied (S), and the amount of zero padding used (P) on the border. The correct formula for calculating how many neurons define the output_W is given by `(Wโˆ’F+2P)/S+1`. For example for a 7x7 input and a 3x3 filter with stride 1 and pad 0 we would get a 5x5 output. With stride 2 we would get a 3x3 output. 
class NeuralNet(nn.Module):
    """CNN for 32x32x3 CIFAR-10 images: three conv -> ReLU -> 2x2 max-pool
    blocks followed by a dropout-regularized two-layer classifier head.
    """

    def __init__(self):
        super(NeuralNet, self).__init__()
        # padding=1 with 3x3 kernels preserves spatial size; pooling halves it,
        # so 32 -> 16 -> 8 -> 4 across the three blocks.
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 4 * 4, 500)  # 64 maps of 4x4 after three pools
        self.fc2 = nn.Linear(500, 10)          # one logit per CIFAR-10 class
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        # Three identical conv blocks applied in sequence.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.pool(F.relu(conv(x)))
        x = torch.flatten(x, 1)  # equivalent to x.view(-1, 64 * 4 * 4)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        return self.fc2(x)
# Evaluate the trained model on the held-out test set: overall loss plus
# per-class and overall accuracy.
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))

model.eval()
# iterate over test data
for data, target in test_loader:
    # move tensors to GPU if CUDA is available
    if Env.train_on_gpu:
        data, target = data.cuda(), target.cuda()
    # forward pass: compute predicted outputs by passing inputs to the model
    output = model(data)
    # calculate the batch loss
    loss = criterion(output, target)
    # update test loss (weighted by batch size so the mean below is exact)
    test_loss += loss.item()*data.size(0)
    # convert output probabilities to predicted class
    _, pred = torch.max(output, 1)
    # compare predictions to true label
    correct_tensor = pred.eq(target.data.view_as(pred))
    # atleast_1d guards against squeeze() collapsing a batch of 1 to a 0-d array
    correct = np.atleast_1d(np.squeeze(correct_tensor.numpy()) if not Env.train_on_gpu else np.squeeze(correct_tensor.cpu().numpy()))
    # calculate test accuracy for each object class; iterate the ACTUAL batch
    # size: the final batch may be smaller than Env.batch_size, which would
    # raise an IndexError with range(Env.batch_size)
    for i in range(target.size(0)):
        label = target.data[i]
        class_correct[label] += correct[i].item()
        class_total[label] += 1

# average test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))

for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            Env.classes[i], 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (Env.classes[i]))

print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
github_jupyter
# Lecture 10 (https://bit.ly/intro_python_10) Today: * Finishing up sequences: * Iterators vs. lists * Generators and the yield keyword * Modules: * Some useful modules * Hierarchical namespaces * Making your own modules * The main() function * PEP 8 (v. briefly) * A revisit to debugging, now that we're writing longer programs: * Looking at different error types (syntax, runtime, logical) # Generators vs. lists ``` # Recall that range can be used to iterate through a sequence of numbers: for i in range(10): print("i is", i) # We can convert range to a list x = list(range(10)) # Makes a list [ 0, 1, ... 9 ] print(x) ``` **But isn't range a list to start with?** ``` # No! x = range(10) print(x) # So what is the type of range: x = range(10) # So what is a range? print(type(x)) ``` A range, or (as we'll see in a minute) generator function, is a promise to produce a sequence when asked. Essentially, you can think of it like a function you can call repeatedly to get successive values from an underlying sequence, e.g. 1, 2, ... etc. Why not just make a list? In a word: memory. ``` x = list(range(100)) # This requires allocating memory to store 100 integers print(x) x = range(100) # This does not make the list, so the memory for the list is never allocated. print(x) # This requires only the memory for j, i and the Python system # Compute the sum of integers from 1 (inclusive) to 100 (exclusive) j = 0 for i in range(100): j += i print(j) # Alternatively, this requires memory for j, i and the list of 100 integers: j = 0 for i in list(range(100)): j += i print(j) ``` * Range, as an iterator, is the promise to produce a sequence of integers, but this does not require they all exist in memory at the same time. * With a list, however, by definition, all the elements are present in memory. * As a general guide, if we can be "lazy", and avoid ever building a complete sequence in memory, then we should be lazy about evaluation of sequences. 
def make_numbers(m):
    """Generator yielding the integers 0, 1, ..., m-1 one at a time."""
    counter = 0
    while counter < m:
        yield counter
        counter += 1
def make_random_ints(num, lower_bound, upper_bound):
    """ Generate a list containing num random ints between lower_bound
        and upper_bound. upper_bound is an open bound.
    """
    generator = random.Random()  # fresh, unseeded random number generator
    values = []
    for _ in range(num):
        values.append(generator.randrange(lower_bound, upper_bound))
    return values
**Import from** * You can also import a specific function, class or object from a module into your program's namespace using the import from syntax: ``` from math import sqrt sqrt(2.0) # Now sqrt is a just a function in the current program's name space, # no dot notation required ``` If you want to import all the functions from a module you can use: ``` from math import * # Import all functions from math # But, this is generally a BAD IDEA, because you need to be sure # this doesn't bring in things that will collide with other things # used by the program log(10) #etc. ``` More useful is the "as" modifier ``` from math import sqrt as square_root # This imports the sqrt function from math # but names it square_root. This is useful if you want to abbreviate a long function # name, or if you want to import two separate things with the same name square_root(2.0) ``` # Challenge 3 ``` # Write a statement to import the 'beep' function from the 'curses' module ``` # Writing your own modules You can write your own modules. * Create a file whose name is x.py, where x is the name of the module you want to create. * Edit x.py to contain the stuff you want * Create a new python file, call it y.py, in the same directory as x.py and include "import x" at the top of y.py. (NOTE: do demo) **Packages** Packages are collections of modules, organized hierarchically (and accessed using the dot notation). Beyond the scope here, but you can look more at environment setup to create your own "packages". If you're curious see: https://docs.python.org/3/tutorial/modules.html#packages # The main() function * You may write a program and then want to reuse some of the functions by importing them into another program. In this case you are treating the original program as a module. * The problem is that when you import a module it is executed. * Question: How do you stop the original program from running when you import it as a module? 
* Answer: By putting the logic for the program in a "main()", which is only called if the program is being run by user, not imported as a module. ``` def some_useful_function(): """Defines a function that would be useful to other programs outside of main""" pass def main(): x = input() print("python main function, x is:", x) # Put the program logic in this function if __name__ == '__main__': # This will only be true # when the program is executed by a user main() print(__name__) # The name of the current module type(__name__) ``` **Live demo!** # PEP8: Use Style It is easy to rush and write poorly structured, hard-to-read code. However, generally, this proves a false-economy, resulting in longer debug cycles, a larger maintenance burden (like, what was I thinking?) and less code reuse. Although many sins have nothing to do with the cosmetics of the code, some can be fixed by adopting a consistent, sane set of coding conventions. Python did this with Python Enhancement Proposal (PEP) 8: https://www.python.org/dev/peps/pep-0008/ Some things PEP-8 covers: * use 4 spaces (instead of tabs) for indentation - you can make your text editor do this (insert spaces for tabs) * limit line length to 78 characters * when naming identifiers, use CamelCase for classes (weโ€™ll get to those) and lowercase_with_underscores for functions and variables * place imports at the top of the file * keep function definitions together * use docstrings to document functions * use two blank lines to separate function definitions from each other * keep top level statements, including function calls, together at the bottom of the program # Debugging Revisited We mentioned earlier that a lot of programming is debugging. Now we're going to debug programs and understand the different errors you can get. 
There are three principle types of error: - syntax errors - runtime errors - semantic/logical errors # Syntax Errors * when what you've written is not valid Python ``` # Syntax errors - when what you've written is not valid Python for i in range(10) print(i) # What's wrong with this? # Syntax errors - when what you've written is not valid Python for i in range(10): print(i) # What's wrong with this? # Syntax errors - when what you've written is not valid Python for i in range(10): """ This loop will print stuff "" print(i) # Syntax errors - when what you've written is not valid Python # (note, this kind of print statement was legal in Python 2.XX and earlier) print "Forgetting parentheses" ``` # Runtime Errors * when the program crashes during runtime because it tries to do something invalid ``` # Runtime errors - when the program errors out during runtime because it # tries to do something invalid print("This is an integer: " + 10) # Runtime errors - when the program errors out during runtime because it # tries to do something invalid assert 1 + 1 == 3 ``` # Semantic Errors (aka Logical Errors) * when the program runs and exits without error, but produces an unexpected result ``` # Semantic errors - when the program runs and exits without error, # but produces an unexpected result j = int(input("Input a number: ")) x = 1 for i in range(1, j): # should be range(1, j+1): x = x * i print(str(j) + " factorial is " + str(x)) ``` In my experience syntax errors are easy to fix, runtime errors are generally solvable fast, but semantic errors can take the longest time to fix **Debug strategies** To debug a failing program, you can: * Use print statements dotted around the code to figure out what code is doing at specific points of time (remember to remove / comment these out when you're done!) * Use a debugger - this allows you to step through execution, line-by-line, seeing what the program is up to at each step. 
(PyCharm has a nice interface to the Python debugger) * Write unit-tests for individual parts of the code * Use assert to check that expected properties are true during runtime * Stare hard at it! Semantic errors will generally require you to question your program's logic. # Challenge 4 See if you can get this to work: ``` import time # Try debugging the following - a number guessing program # It has all three types of errors print("Think of a number from 1 to 100") time.sleep(3) min = 1 max = 100 while max == min i = (min + max) // 2 answer = input("Is your number greater than " + str(i) + " Type YES or NO: ") assert answer == "YES" or answer == "YES" # Check the value is what we expect if answer == "YES": min = i+1 else: max = i print("Your number is: " + str(min)) ``` # Reading Open book chapter 12: http://openbookproject.net/thinkcs/python/english3e/modules.html # Homework ZyBook Reading 10
github_jupyter
``` import numpy as np from tifffile import imread, imsave from glob import glob import random import tqdm from matplotlib import pyplot as plt from sklearn.feature_extraction import image X = sorted(glob("/Users/prakash/Desktop/flywing/images/*.tif")) Y = sorted(glob("/Users/prakash/Desktop/flywing/gt/*.tif")) X = list(map(imread,X)) Y = list(map(imread,Y)) plt.subplot(121); plt.imshow(X[9],cmap='gray'); plt.axis('off'); plt.title('Raw image'); plt.show() rng = np.random.RandomState(42) ind = rng.permutation(len(X)) n_test = int(round(0.2*len(X))) ind_pretrn, ind_test = ind[:-n_test], ind[-n_test:] X_test, Y_test = [X[i] for i in ind_test] , [Y[i] for i in ind_test] X_pretrn, Y_pretrn = [X[i] for i in ind_pretrn] , [Y[i] for i in ind_pretrn] print('number of images: %3d' % len(X)) print('- training+validation: %3d' % len(X_pretrn)) print('- test: %3d' % len(X_test)) for i in range(len(X_test)): imsave('/Users/prakash/Desktop/flywing/test/images/'+str(i)+'.tif', X_test[i]) imsave('/Users/prakash/Desktop/flywing/test/gt/'+str(i)+'.tif', Y_test[i]) count =0 for i in range (len(X_pretrn)): patchesimages = image.extract_patches_2d(X_pretrn[i], patch_size=(128,128), max_patches=10, random_state=0) patchesmasks = image.extract_patches_2d(Y_pretrn[i], patch_size=(128,128), max_patches=10, random_state=0) for j in range(0, np.shape(patchesimages)[0]): imsave('/Users/prakash/Desktop/flywing/patches/images/'+str(count).zfill(4)+'.tif', patchesimages[j]) imsave('/Users/prakash/Desktop/flywing/patches/gt/'+str(count).zfill(4)+'.tif', patchesmasks[j]) count+=1 X_pretrn= sorted(glob('/Users/prakash/Desktop/flywing/patches/images/*.tif')) Y_pretrn= sorted(glob('/Users/prakash/Desktop/flywing/patches/gt/*.tif')) X_test = sorted(glob('/Users/prakash/Desktop/flywing/test/images/*.tif')) Y_test = sorted(glob('/Users/prakash/Desktop/flywing/test/gt/*.tif')) X_pretrn = list(map(imread,X_pretrn)) Y_pretrn = list(map(imread,Y_pretrn)) X_test = list(map(imread,X_test)) Y_test = 
def noisy(image, sigma):
    """Return a float32 copy of `image` corrupted with additive zero-mean
    Gaussian noise of standard deviation `sigma`.

    The input 2-D array is not modified; the result has the same shape.
    """
    rows, cols = image.shape
    img = np.array(image).astype(np.float32)
    # np.random.normal already returns a (rows, cols) array, so the original
    # follow-up reshape was a no-op and has been dropped.
    gauss = np.random.normal(0, sigma, (rows, cols))
    return img + gauss
np.savez_compressed('/Users/prakash/Desktop/flywing/test/test_data_n10.npz', X_test=X_test10, Y_test=Y_test) np.savez_compressed('/Users/prakash/Desktop/flywing/train/train_data_n20.npz', X_train=X_train20, Y_train=Y_train, X_val = X_val20, Y_val = Y_val) np.savez_compressed('/Users/prakash/Desktop/flywing/test/test_data_n20.npz', X_test=X_test20, Y_test=Y_test) ```
github_jupyter
# Decision Trees and Random Forests in Python This is the code for the lecture video which goes over tree methods in Python. Reference the video lecture for the full explanation of the code! Jose also wrote a [blog post](https://medium.com/@josemarcialportilla/enchanted-random-forest-b08d418cb411#.hh7n1co54) explaining the general logic of decision trees and random forests which you can check out. ## Import Libraries ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline ``` ## Get the Data ``` df = pd.read_csv('kyphosis.csv') df.head() ``` - This dataset essentially represents a number of patients who had Kyphosis, which is a spinal condition, and then they had an operation, and that operation was a corrective spinal surgery. And this dataframe basically represents whether or not, the kyphosis condition was present after the operation. - Age : Age of person in months. This is data on children and their age being in months. - Number : Number of vertebrae involved in the operation. - Start : Number of first or top most vertebrae that was operated on. ## EDA We'll just check out a simple pairplot for this small dataset. ``` sns.pairplot(df,hue='Kyphosis',palette='Set1') ``` ## Train Test Split Let's split up the data into a training set and a test set! ``` from sklearn.model_selection import train_test_split X = df.drop('Kyphosis',axis=1) y = df['Kyphosis'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30) ``` ## Decision Trees We'll start just by training a single decision tree. ``` from sklearn.tree import DecisionTreeClassifier dtree = DecisionTreeClassifier() dtree.fit(X_train,y_train) ``` ## Prediction and Evaluation Let's evaluate our decision tree. 
from IPython.display import Image
# sklearn.externals.six was removed in scikit-learn 0.23+; the stdlib io
# module provides the same StringIO class.
from io import StringIO
from sklearn.tree import export_graphviz
import pydotplus

# Feature names: every column except the first ('Kyphosis' target).
features = list(df.columns[1:])
features

import os
# Make the Graphviz binaries discoverable on Windows.
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'

dot_data = StringIO()
export_graphviz(dtree, out_file=dot_data, feature_names=features, filled=True, rounded=True)

graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("tree.pdf")
graph.write_jpeg("tree.jpeg")
github_jupyter
### OCP Data Preprocessing Tutorial This notebook provides an overview of converting ASE Atoms objects to PyTorch Geometric Data objects. To better understand the raw data contained within OC20, check out the following tutorial first: https://github.com/Open-Catalyst-Project/ocp/blob/master/docs/source/tutorials/data_playground.ipynb ``` from ocpmodels.preprocessing import AtomsToGraphs import ase.io from ase.build import bulk from ase.build import fcc100, add_adsorbate, molecule from ase.constraints import FixAtoms from ase.calculators.emt import EMT from ase.optimize import BFGS ``` ### Generate toy dataset: Relaxation of CO on Cu ``` adslab = fcc100("Cu", size=(2, 2, 3)) ads = molecule("CO") add_adsorbate(adslab, ads, 3, offset=(1, 1)) cons = FixAtoms(indices=[atom.index for atom in adslab if (atom.tag == 3)]) adslab.set_constraint(cons) adslab.center(vacuum=13.0, axis=2) adslab.set_pbc(True) adslab.set_calculator(EMT()) dyn = BFGS(adslab, trajectory="CuCO_adslab.traj", logfile=None) dyn.run(fmax=0, steps=1000) raw_data = ase.io.read("CuCO_adslab.traj", ":") print(len(raw_data)) ``` ### Convert Atoms object to Data object The AtomsToGraphs class takes in several arguments to control how Data objects created: - max_neigh (int): Maximum number of neighbors a given atom is allowed to have, discarding the furthest - radius (float): Cutoff radius to compute nearest neighbors around - r_energy (bool): Write energy to Data object - r_forces (bool): Write forces to Data object - r_distances (bool): Write distances between neighbors to Data object - r_edges (bool): Write neigbhor edge indices to Data object - r_fixed (bool): Write indices of fixed atoms to Data object ``` a2g = AtomsToGraphs( max_neigh=50, radius=6, r_energy=True, r_forces=True, r_distances=False, r_edges=True, r_fixed=True, ) data_objects = a2g.convert_all(raw_data, disable_tqdm=True) data = data_objects[0] data data.atomic_numbers data.cell data.edge_index #neighbor idx, source idx from 
torch_geometric.utils import degree # Degree corresponds to the number of neighbors a given node has. Note there is no more than max_neigh neighbors for # any given node. degree(data.edge_index[1]) data.fixed data.force data.pos data.y ``` ### Adding additional info to your Data objects In addition to the above information, the OCP repo requires several other pieces of information for your data to work with the provided trainers: - sid (int): A unique identifier for a particular system. Does not affect your model performance, used for prediction saving - fid (int) (S2EF only): If training for the S2EF task, your data must also contain a unique frame identifier for atoms objects coming from the same system. - tags (tensor): Tag information - 0 for adsorbate, 1 for surface, 2 for subsurface. Optional, can be used for training. Other information may be added here as well if you choose to incorporate other information in your models/frameworks ``` data_objects = [] for idx, system in enumerate(raw_data): data = a2g.convert(system) data.fid = idx data.sid = 0 # All data points come from the same system, arbitrarily define this as 0 data_objects.append(data) data = data_objects[100] data data.sid data.fid ``` Resources: - https://github.com/Open-Catalyst-Project/ocp/blob/6604e7130ea41fabff93c229af2486433093e3b4/ocpmodels/preprocessing/atoms_to_graphs.py - https://github.com/Open-Catalyst-Project/ocp/blob/master/scripts/preprocess_ef.py
github_jupyter
``` import numpy as onp import jax.numpy as np from jax import random, vmap from jax.config import config config.update("jax_enable_x64", True) from scipy.optimize import minimize from pyDOE import lhs import matplotlib.pyplot as plt from matplotlib import rc from scipy.interpolate import griddata from jaxbo.models import MultipleIndependentOutputsGP, GP from jaxbo.utils import normalize, normalize_constraint, compute_w_gmm from jaxbo.test_functions import * from jax.scipy.stats import norm import jaxbo.acquisitions as acquisitions from jaxbo.input_priors import uniform_prior, gaussian_prior onp.random.seed(1234) # Example from # https://asmedigitalcollection.asme.org/mechanicaldesign/article/141/12/121001/975244?casa_token=45A-r7iV9IUAAAAA:ji-aHZ_T_HQ5Q1xgNxloqrG2LjOpFkXMItdWnuGH9d02MysONc3VTfrtM8GSB5oTdE2jcQ # Section 4, and constraint in section 4.2 def f(x): x1, x2 = x[0], x[1] a = 1.0 b = 5.1 / (4*np.pi**2) c = 5 / np.pi r = 6 s = 10 t = 1 / (8*np.pi) f = a * (x2 - b*x1**2 + c*x1 -r)**2 + s * (1-t) * np.cos(x1) + s return f def constraint1(x): x1, x2 = (x[0]-2.5)/7.5, (x[1] - 7.5)/7.5 g1 = (4 - 2.1*x1**2 + 1./3*x1**4)*x1**2 + x1*x2 + (-4+4*x2**2)*x2**2 + 3*np.sin(6*(1-x1)) + 3*np.sin(6*(1-x2)) return g1 - 6. 
# Dimension of the problem dim = 2 # Boundary of the domain lb = np.array([-5.0, 0.0]) ub = np.array([10.0, 15.0]) bounds = {'lb': lb, 'ub': ub} # Visualization of the function and constraints in 2D grid nn = 100 xx = np.linspace(lb[0], ub[0], nn) yy = np.linspace(lb[1], ub[1], nn) XX, YY = np.meshgrid(xx, yy) X_star = np.concatenate([XX.flatten()[:,None], YY.flatten()[:,None]], axis = 1) y_f_star = vmap(f)(X_star) y1_c_star = vmap(constraint1)(X_star) Y_f_star = griddata(onp.array(X_star), onp.array(y_f_star), (onp.array(XX), onp.array(YY)), method='cubic') Y1_c_star = griddata(onp.array(X_star), onp.array(y1_c_star), (onp.array(XX), onp.array(YY)), method='cubic') plt.figure(figsize = (16, 5)) plt.subplot(1, 2, 1) fig = plt.contourf(XX, YY, Y_f_star) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Exact objective') plt.colorbar(fig) plt.subplot(1, 2, 2) fig = plt.contourf(XX, YY, Y1_c_star) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'constraint1') plt.colorbar(fig) # Visualize the feasible domain and the location of the best value of this problem judge1 = (y1_c_star >= 0) total_judge = judge1 valid_index = np.where(total_judge) #print(valid_index) valid_x = X_star[valid_index] valid_y = y_f_star[valid_index] #print(valid_x.shape, valid_y.shape) idx_best = np.argmin(valid_y) x_best = valid_x[idx_best] y_best = valid_y[idx_best] plt.figure(figsize = (6,4)) fig = plt.contourf(XX, YY, Y_f_star) plt.plot(valid_x[:,0], valid_x[:, 1], 'r.', markersize = 2.) plt.plot(x_best[0], x_best[1], 'y.', markersize = 8.) 
plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Exact objective') plt.colorbar(fig) print("best y", y_best, "best x", x_best) true_x = x_best true_y = y_best # Problem settings # Number of initial data for objective and constraints N_f = 20 N_c = 50 noise_f = 0.00 noise_c = 0.01 nIter = 10 # Define prior distribution p_x = uniform_prior(lb, ub) # JAX-BO setting options = {'kernel': 'RBF', 'input_prior': p_x, 'constrained_criterion': 'LCBC', 'criterion': 'LW_CLSF', 'kappa': 2.0, 'nIter': nIter} gp_model = MultipleIndependentOutputsGP(options) # JAX-BO setting for constraint options_constraint = {'kernel': 'RBF', 'criterion': 'LW_CLSF', 'input_prior': p_x, 'kappa': 2.0, 'nIter': nIter} gp_model_constraint = GP(options_constraint) # Domain bounds (already defined before where we visualized the data) bounds = {'lb': lb, 'ub': ub} # Initial training data for objective X_f = lb + (ub-lb)*lhs(dim, N_f) y_f = vmap(f)(X_f) y_f = y_f + noise_f*y_f_star.std(0)*onp.random.normal(0, 1, size=y_f.shape) # Initial training data for constraints X_c = lb + (ub-lb)*lhs(dim, N_c) y1_c = vmap(constraint1)(X_c) y1_c = y1_c + noise_c*y1_c_star.std(0)*onp.random.normal(0, 1, size=y1_c.shape) # Visualize the initial data for objective and constraints plt.figure(figsize = (10,5)) plt.subplot(1, 2, 1) fig = plt.contourf(XX, YY, Y_f_star) plt.plot(X_f[:,0], X_f[:,1], 'ro', label = "Initial objective data") plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Exact objective') plt.colorbar(fig) plt.subplot(1, 2, 2) fig = plt.contourf(XX, YY, Y1_c_star) plt.plot(X_c[:,0], X_c[:,1], 'bo', label = "Initial constraint data") plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'constraint1') plt.colorbar(fig) plt.legend() # Main Bayesian optimization loop rng_key = random.PRNGKey(0) for it in range(options['nIter']): print('-------------------------------------------------------------------') print('------------------------- Iteration %d/%d -------------------------' % (it+1, 
options['nIter'])) print('-------------------------------------------------------------------') # Fetch normalized training data (for objective and all the constraints) norm_batch_f, norm_const_f = normalize(X_f, y_f, bounds) norm_batch_c1, norm_const_c1 = normalize(X_c, y1_c, bounds) # Define a list using the normalized data and the normalizing constants norm_batch_list = [norm_batch_f, norm_batch_c1] norm_const_list = [norm_const_f, norm_const_c1] # Train GP model with 100 random restart print('Train GP...') rng_key = random.split(rng_key, 2)[0] opt_params_list = gp_model.train(norm_batch_list, rng_key, num_restarts = 10) # Fit GMM if options['constrained_criterion'] == 'LW_LCBC' or options['constrained_criterion'] == 'LW_CLSF' or options['constrained_criterion'] == 'LW-US': print('Fit GMM...') rng_key = random.split(rng_key)[0] kwargs = {'params': opt_params_list, 'batch': norm_batch_list, 'norm_const': norm_const_list, 'bounds': bounds, 'rng_key': rng_key} gmm_vars = gp_model.fit_gmm(**kwargs, N_samples = 10000) else: gmm_vars = None # Find the next acquisition point with 50 random restart print('Computing next acquisition point (objective)...') kwargs = {'params': opt_params_list, 'batch': norm_batch_list, 'norm_const': norm_const_list, 'bounds': bounds, 'kappa': options['kappa'], 'gmm_vars': gmm_vars, 'rng_key': rng_key} # Acquire objective data new_X_f,_,_ = gp_model.constrained_compute_next_point_lbfgs(num_restarts=50, **kwargs) new_y_f = vmap(f)(new_X_f) # This is the output of the solver for generating the objective function new_y_f = new_y_f + noise_f*y_f_star.std(0)*onp.random.normal(new_y_f.shape) #################### Fit GP for constraint ################## # Fetch transformed data for only constraint norm_batch_c1, norm_const_c1 = normalize_constraint(X_c, y1_c, bounds) # Train GP model print('Train GP...') rng_key = random.split(rng_key)[0] opt_params = gp_model_constraint.train(norm_batch_c1, rng_key, num_restarts = 50) # Fit GMM if 
options_constraint['criterion'] == 'LW-LCB' or options_constraint['criterion'] == "LW_CLSF": print('Fit GMM...') rng_key = random.split(rng_key)[0] kwargs = {'params': opt_params, 'batch': norm_batch_c1, 'norm_const': norm_const_c1, 'bounds': bounds, 'kappa': gp_model_constraint.options['kappa'], 'rng_key': rng_key} gmm_vars = gp_model_constraint.fit_gmm(**kwargs, N_samples = 10000) else: gmm_vars = None # Compute next point via minimizing the acquisition function print('Computing next acquisition point...') kwargs = {'params': opt_params, 'batch': norm_batch_c1, 'norm_const': norm_const_c1, 'bounds': bounds, 'kappa': gp_model_constraint.options['kappa'], 'gmm_vars': gmm_vars, 'rng_key': rng_key} # Acquire constraint data new_X_c,_,_ = gp_model_constraint.compute_next_point_lbfgs(num_restarts=50, **kwargs) new_y1_c = vmap(constraint1)(new_X_c) # This is the output of the solver for generating the constraint1 functions new_y1_c = new_y1_c + noise_c*y1_c_star.std(0)*onp.random.normal(new_y1_c.shape) # # Augment training data print('Updating data-set...') X_f = np.concatenate([X_f, new_X_f], axis = 0) X_c = np.concatenate([X_c, new_X_c], axis = 0) y_f = np.concatenate([y_f, new_y_f], axis = 0) y1_c = np.concatenate([y1_c, new_y1_c], axis = 0) # # Print current best print('True location: ({}), True value: {}'.format(true_x, true_y)) print('New location: ({}), New value: {}'.format(new_X_f, new_y_f)) # # Making prediction on the posterior objective and all constraints mean, std = gp_model.predict(X_star, **kwargs) mean = onp.array(mean * norm_const_list[-1]["sigma_y"] + norm_const_list[-1]["mu_y"]) Y1_c_pred = griddata(onp.array(X_star), mean, (onp.array(XX), onp.array(YY)), method='cubic') # Visualize the final outputs kwargs = {'params': opt_params_list, 'batch': norm_batch_list, 'norm_const': norm_const_list, 'bounds': bounds, 'kappa': gp_model.options['kappa'], 'rng_key': rng_key, 'gmm_vars': gmm_vars} # Making prediction on the posterior objective and all 
constraints mean, std = gp_model.predict_all(X_star, **kwargs) mean = onp.array(mean) std = onp.array(std) mean[0:1,:] = mean[0:1,:] * norm_const_list[0]['sigma_y'] + norm_const_list[0]['mu_y'] std[0:1,:] = std[0:1,:] * norm_const_list[0]['sigma_y'] # Compute the weight if options['constrained_criterion'] == 'LW_LCBC': w_pred = compute_w_gmm(X_star, **kwargs) # Compute the upper and lower bounds of the posterior distributions lower = mean - 2.0*std upper = mean + 2.0*std print(mean.shape, std.shape, lower.shape, upper.shape) # Evaluate the acquisition function acq_fn1 = lambda x: gp_model.constrained_acquisition(x, **kwargs) LW_LCBCacq = vmap(acq_fn1)(X_star) # Compute the ratio and weights derived by the constraints and convert everything into numpy for plotting ratio1 = mean[1,:] / std[1,:] weight1 = norm.cdf(mean[1,:]/std[1,:]) LW_LCBCacq = onp.array(LW_LCBCacq) mean = onp.array(mean) std = onp.array(std) ratio1 = onp.array(ratio1) weight1 = onp.array(weight1) y_f_pred = onp.array(mean[0,:]) y1_c_pred = onp.array(mean[1,:]) y_f_std = onp.array(std[0,:]) try: w_pred = onp.array(w_pred) except: w_pred = onp.ones_like(y_f_std) kappa = 2. # Convert the numpy variable into grid data for visualization Y_f_pred = griddata(onp.array(X_star), y_f_pred, (onp.array(XX), onp.array(YY)), method='cubic') Y1_c_pred = griddata(onp.array(X_star), y1_c_pred, (onp.array(XX), onp.array(YY)), method='cubic') Y_f_std = griddata(onp.array(X_star), y_f_std, (onp.array(XX), onp.array(YY)), method='cubic') Ratio1 = griddata(onp.array(X_star), ratio1, (onp.array(XX), onp.array(YY)), method='cubic') Weight1 = griddata(onp.array(X_star), weight1, (onp.array(XX), onp.array(YY)), method='cubic') LW_LCBCacq = griddata(onp.array(X_star), LW_LCBCacq.flatten(), (onp.array(XX), onp.array(YY)), method='cubic') W_pred = griddata(onp.array(X_star), w_pred.flatten(), (onp.array(XX), onp.array(YY)), method='cubic') LCBacq = Y_f_pred - 3. 
- kappa*Y_f_std # Visualization plt.figure(figsize = (16,10)) plt.subplot(2, 4, 1) fig = plt.contourf(XX, YY, Y1_c_star) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Exact constraint1') plt.colorbar(fig) plt.subplot(2, 4, 2) fig = plt.contourf(XX, YY, Y1_c_pred) plt.plot(X_c[:,0], X_c[:,1], 'r.') plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Pred constraint1') plt.colorbar(fig) plt.subplot(2, 4, 3) fig = plt.contourf(XX, YY, Ratio1) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Ratio1') plt.colorbar(fig) plt.subplot(2, 4, 4) fig = plt.contourf(XX, YY, np.clip(Weight1, 0, np.inf)) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Weight1') plt.colorbar(fig) plt.subplot(2, 4, 5) fig = plt.contourf(XX, YY, Y_f_star) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Exact objective') plt.colorbar(fig) plt.subplot(2, 4, 6) fig = plt.contourf(XX, YY, Y_f_pred) plt.plot(X_f[:,0], X_f[:,1], 'r.') plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Pred objective') plt.colorbar(fig) plt.subplot(2, 4, 7) fig = plt.contourf(XX, YY, LCBacq) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'LCB') plt.colorbar(fig) plt.subplot(2, 4, 8) fig = plt.contourf(XX, YY, LW_LCBCacq) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'LW_LCBC') plt.colorbar(fig) # Data we collected and the ground truth plt.figure(figsize = (15, 5)) plt.subplot(1, 3, 1) fig = plt.contourf(XX, YY, Y_f_star) plt.plot(valid_x[:,0], valid_x[:, 1], 'r.', markersize = 2.) plt.plot(true_x[0], true_x[1], 'k.', markersize = 10.) plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Exact objective') plt.colorbar(fig) plt.subplot(1, 3, 2) fig = plt.contourf(XX, YY, Y_f_pred) plt.plot(X_f[:,0], X_f[:,1], 'r.') plt.plot(true_x[0], true_x[1], 'k.', markersize = 10.) 
plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Pred objective') plt.colorbar(fig) plt.subplot(1, 3, 3) fig = plt.contourf(XX, YY, W_pred) plt.plot(X_f[:,0], X_f[:,1], 'r.') plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.title(r'Pred output weight') plt.colorbar(fig) ```
github_jupyter
# Basic Example ## Vector Addition Add a fixed value to an array with numbers in the range [0..99]. The example uses the vector addition kernel included in the [hello world](https://github.com/Xilinx/Vitis_Accel_Examples/tree/63bae10d581df40cf9402ed71ea825476751305d/hello_world) application of the [Vitis Accel Examples' Repository](https://github.com/Xilinx/Vitis_Accel_Examples/tree/63bae10d581df40cf9402ed71ea825476751305d). ![vadd](img/vadd.png "Vector Addition") See below for a [breakdown of the code](#Step-by-step-walkthrough-of-the-example). ``` import pynq import numpy as np # program the device ol = pynq.Overlay("intro.xclbin") vadd = ol.vadd_1 # allocate buffers size = 1024*1024 in1_vadd = pynq.allocate((1024, 1024), np.uint32) in2_vadd = pynq.allocate((1024, 1024), np.uint32) out = pynq.allocate((1024, 1024), np.uint32) # initialize input in1_vadd[:] = np.random.randint(low=0, high=100, size=(1024, 1024), dtype=np.uint32) in2_vadd[:] = 200 # send data to the device in1_vadd.sync_to_device() in2_vadd.sync_to_device() # call kernel vadd.call(in1_vadd, in2_vadd, out, size) # get data from the device out.sync_from_device() # check results msg = "SUCCESS!" if np.array_equal(out, in1_vadd + in2_vadd) else "FAILURE!" print(msg) # clean up del in1_vadd del in2_vadd del out ol.free() ``` ## Step-by-step walkthrough of the example ### Overlay download First, let's import `pynq`, download the overlay, and assign the vadd kernel IP to a variable called `vadd`. ``` import pynq ol = pynq.Overlay("intro.xclbin") vadd = ol.vadd_1 ``` ### Buffers allocation Let's first take a look at the signature of the vadd kernel. To do so, we use the `.signature` property. The accelerator takes two input vectors, the output vector, and the vectors' size as arguments ``` vadd.signature ``` Data types in the signature that have the *pointer* (`*`) qualifier represent *buffers* that must be allocated in memory. 
Non-pointer data types represent registers and are set directly when the kernel is executed with `.call()`. Buffers allocation is carried by [`pynq.allocate`](https://pynq.readthedocs.io/en/v2.5/pynq_libraries/allocate.html), which provides the same interface as a [`numpy.ndarray`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html). The `numpy.ndarray` constructor represents the low-level API to instantiate multidimensional arrays in NumPy. ```python import numpy as np foo = np.ndarray(shape=(10,), dtype=int) ``` The `pynq.allocate` API provides a buffer object that can be used to interact with both host and device buffers. Host and FPGA buffers here are transparently managed, and the user is only presented with a single interface for both. The user is only asked to explicitly sync host and FPGA buffers before and after a kernel call through the `.sync_to_device()` and `.sync_from_device()` API, as will be shown later. If you are familiar with the PYNQ embedded API `sync_to_device` and `sync_from_device` are the mirrored buffer equivalent to `flush` and `invalidate` functions used for cache-coherent buffers. In this case we're going to create 3 1024x1024 arrays, two input and one output. Since the kernel uses unsigned integers we specify `u4` as data type when performing allocation, which is shorthand for `numpy.uint32`, as explained in the [`numpy.dtypes`](https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html#arrays-dtypes) documentation. ``` size = 1024*1024 in1_vadd = pynq.allocate((1024, 1024), 'u4') in2_vadd = pynq.allocate((1024, 1024), 'u4') out = pynq.allocate((1024, 1024), 'u4') ``` We can use numpy to easily initialize one of the input arrays with random data, with numbers in the range [0, 100). We instead set all the elements of the second input array to a fixed value so we can see at a glance whether the addition was successful. 
``` import numpy as np in1_vadd[:] = np.random.randint(low=0, high=100, size=(1024, 1024), dtype='u4') in2_vadd[:] = 200 ``` ### Run the kernel Before we can start the kernel we need to make sure that the buffers are synced to the FPGA card. We do this by calling `.sync_to_device()` on each of our input arrays. To start the accelerator, we can use the `.call()` function and pass the kernel arguments. The function will take care of correctly setting the `register_map` of the IP and send the start signal. We pass the arguments to `.call()` following the `.signature` we previously inspected. Once the kernel has completed, we can `.sync_from_device()` the output buffer to ensure that data from the FPGA is transferred back to the host memory. We use the `%%timeit` magic to get the average execution time. This magic will automatically decide how many runs to perform to get a reliable average. ``` %%timeit in1_vadd.sync_to_device() in2_vadd.sync_to_device() vadd.call(in1_vadd, in2_vadd, out, size) out.sync_from_device() ``` Finally, let's compare the FPGA results with software, using [`numpy.array_equal`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.array_equal.html) ``` np.array_equal(out, in1_vadd + in2_vadd) ``` ## Cleaning up Finally, we have to deallocate the buffers and free the FPGA context using `Overlay.free`. In case buffers are used as output of a cell, we will have to use the [`%xdel`](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-xdel) magic to also remove any reference to these buffers in Jupyter/IPython. IPython holds on to references of cell outputs so a standard `del` isnโ€™t sufficient to remove all references to the array and hence trigger the memory to be freed. The same effect can also be achieved by *shutting down* the notebook. ``` %xdel in1_vadd %xdel in2_vadd %xdel out ol.free() ``` Copyright (C) 2020 Xilinx, Inc
github_jupyter
# Querying JSON Databases This notebook demonstrates how to query hierarchical databases with our toy query language BQL. We start with loading a demo JSON dataset with data on departments and employees of the city of Chicago ([source](https://data.cityofchicago.org/Administration-Finance/Current-Employee-Names-Salaries-and-Position-Title/xzkq-xp2w)). In general, we can treat any JSON document with a regular structure as a hierarchical database. ``` %cd -q .. from citydb_json import citydb ``` This is the structure of the `citydb` database: ``` { "departments": [ { "name": ..., "employees": [ { "name": ..., "surname": ..., "position": ..., "salary": ... }, ... ] }, ... ] } ``` The top-level **City** object has the following fields: * `departments`: an array of department objects. **Department** objects have the following fields: * `name`: the name of the department. * `employees`: an array of employee objects. **Employee** objects have the following fields: * `name`: employee's first name. * `surname`: employee's last name. * `position`: employee's title. * `salary`: annual salary of the employee. Next, we import the BQL library. ``` from bql import * ``` The BQL query language is embedded in Python, which means any BQL query is a regular Python function which maps JSON input to JSON output. We call such functions _JSON combinators_. Two trivial examples of JSON combinators are: * `Const(val)`, which maps all input to the same output value; * `Here()`, which returns its input unchanged. ``` C = Const(42) C(None), C(42), C([1, 2, 3]) I = Here() I(None), I(42), I([1, 2, 3]) ``` More impressive is combinator `Field(name)` that extracts a field value from a JSON object. ``` F = Field('x') F({'x': 24, 'y': 42}) ``` By composing two field extractors, we can build a query that produces **the names of all departments**. 
``` Departments = Field('departments') Name = Field('name') Dept_Names = Departments >> Name dept_names = Dept_Names(citydb) dept_names [:5] ``` What does the `>>` operator do exactly? Fundamentally, `(A >> B)` composes `A` and `B` by sending the output of `A` to the input of `B`. $$ (A \gg B):\; x \;\overset{A}{\longmapsto}\; y \;\overset{B}{\longmapsto}\; z \quad \text{(where $y = A(x),\, z = B(y)$)} $$ However, if we directly apply this rule to evaluate the expression ``(Departments >> Name)(citydb)``, we will fail because `citydb['departments']['name']` does not exist. To make this work, we need to clarify the composition rule. Namely, expression `(A >> B)(x)`, when `A(x)` is an array, applies `B` to _each_ element of the array. $$ (A \gg B):\; x \;\overset{A}{\longmapsto}\; [y_1,\, y_2,\, \ldots] \;\overset{B}{\longmapsto}\; [z_1,\, z_2,\, \ldots] \quad \text{(when $A(x) = [y_1,\, y_2\, \ldots],\, B(y_k) = z_k$)} $$ Moreover, when `B` itself produces array values, all `B` outputs are combined into one array, which becomes the output of `(A >> B)`. $$ (A \gg B):\; x \;\overset{A}{\longmapsto}\; [y_1,\, y_2,\, \ldots] \;\overset{B}{\longmapsto}\; [z_{11},\, z_{12},\, \ldots\, z_{21},\, z_{22},\, \ldots] \quad \text{(when also $B(y_k)$ are arrays $[z_{k1},\, z_{k2},\, \ldots]$)} $$ The last feature is used when we list **the names of all employees**. ``` Employees = Field('employees') Empl_Names = Departments >> Employees >> Name empl_names = Empl_Names(citydb) empl_names [:5] ``` Dual to `Field(name)`, combinator `Select(...)` *constructs* JSON objects. Parameters of `Select(...)` are combinators that construct object fields. Here is a trivial example. ``` S = Select(x=Const(42), y=Here()) S(24) ``` Let us use `Select(...)` to generate **the name and the number of employees for each department**. 
``` Depts_With_Size = Departments >> Select(name=Name, size=Count(Employees)) depts_with_size = Depts_With_Size(citydb) depts_with_size [:5] ``` Here, combinator `Count(Employees)` returns the length of the `employees` array. In general, `Count(F)` lets `F` process its input expecting the output of `F` to be an array, then returns the length of the array. $$ \operatorname{Count}(F):\; x \;\overset{F}{\longmapsto}\; [y_1,\, y_2,\, \ldots\, y_N] \;\overset{\operatorname{len}}{\longmapsto}\; N $$ (You may've expected `Employees >> Count()`, but that'd make operator `>>` non-associative). Array combinators such as `Count(...)` are called *aggregate combinators*. The following aggregate combinators are defined in BQL: `Count()`, `Min()`, `Max()`, `First()`. ``` Num_Depts = Count(Departments) Num_Depts(citydb) Salary = Field('salary') Top_Salary = Max(Departments >> Employees >> Salary) Top_Salary(citydb) One_Empl = First(Departments >> Employees) One_Empl(citydb) Three_Depts = First(Departments >> Name, Const(3)) Three_Depts(citydb) Half_Depts = First(Departments >> Name, Count(Departments)//2) Half_Depts(citydb) ``` Combinator `Filter(P)` applies predicate `P` to its input. If the predicate condition is not satisfied, the input is dropped, otherwise it is returned unchanged. Let us use `Filter()` to find **the departments with more than 1000 employees**. ``` Size = Field('size') Large_Depts = Depts_With_Size >> Filter(Size > 1000) Large_Depts(citydb) ``` Here, combinator `Depts_With_Size`, which adds `size` field to each department object, is composed with combinator `Filter(Size > 1000)`, which gathers the departments that satisfy condition `Size > 1000`. In the following example, we use `Filter()` to find **the number of employees whose annual salary exceeds 200k**. 
``` Num_Well_Paid_Empls = \ Count(Departments >> Employees >> Filter(Salary >= 200000)) Num_Well_Paid_Empls(citydb) ``` Now suppose we'd like to find **the number of employees with salary in a certain range**, but we don't know the range in advance. In this case, we can construct a *parameterized query*. ``` Min_Salary = Ref('min_salary') Max_Salary = Ref('max_salary') Num_Empls_By_Salary_Range = \ Count(Departments >> Employees >> Filter((Salary >= Min_Salary) & (Salary < Max_Salary))) ``` To run the `Num_Empls_By_Salary_Range` query, we need to supply it with parameters `min_salary` and `max_salary`. ``` Num_Empls_By_Salary_Range(citydb, {'min_salary': 200000, 'max_salary': 1000000}) Num_Empls_By_Salary_Range(citydb, {'min_salary': 100000, 'max_salary': 200000}) Num_Empls_By_Salary_Range(citydb, {'min_salary': 0, 'max_salary': 100000}) ``` The query knows which parameters it needs. ``` Num_Empls_By_Salary_Range.refs() ``` The last feature we discuss here is an ability to assign parameter values dynamically. Consider a query: find **the top salary for each department**. It could be easily implemented using `Max()` aggregate. ``` Depts_With_Max_Salary = \ Departments >> Select(name=Name, max_salary=Max(Employees >> Salary)) Depts_With_Max_Salary(citydb) [:5] ``` Now let us ask a slightly different question: find **the employees with the highest salary at their department**. We may try to use the `Filter()` combinator as follows. ``` Highest_Paid_Empls_By_Dept = \ Departments >> Employees >> Filter(Salary == Max_Salary) ``` But the filter condition `(Salary == Max_Salary)` is problematic since we cannot supply `max_salary` as a query parameter. Instead it must be calculated dynamically for each department. The `Given(...)` combinator does exactly that. 
``` Highest_Paid_Empls_By_Dept = \ Departments >> \ Given( Employees >> Filter(Salary == Max_Salary), max_salary=Max(Employees >> Salary)) Highest_Paid_Empls_By_Dept(citydb) [:5] ``` Notably, `Highest_Paid_Empls_By_Dept` requires no parameters despite the fact that its definition refers to `max_salary`. ``` Highest_Paid_Empls_By_Dept.refs() ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Data augmentation <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/images/data_augmentation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/images/data_augmentation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/images/data_augmentation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/images/data_augmentation.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Overview This tutorial demonstrates manual image manipulations and augmentation using `tf.image`. Data augmentation is a common technique to improve results and avoid overfitting, see [Overfitting and Underfitting](../keras/overfit_and_underfit.ipynb) for others. 
## Setup ``` !pip install git+https://github.com/tensorflow/docs import urllib import tensorflow as tf from tensorflow.keras.datasets import mnist from tensorflow.keras import layers AUTOTUNE = tf.data.experimental.AUTOTUNE import tensorflow_docs as tfdocs import tensorflow_docs.plots import tensorflow_datasets as tfds import PIL.Image import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['figure.figsize'] = (12, 5) import numpy as np ``` Let's check the data augmentation features on an image and then augment a whole dataset later to train a model. Download [this image](https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg), by Von.grzanka, for augmentation. ``` image_path = tf.keras.utils.get_file("cat.jpg", "https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg") PIL.Image.open(image_path) ``` Read and decode the image to tensor format. ``` image_string=tf.io.read_file(image_path) image=tf.image.decode_jpeg(image_string,channels=3) ``` A function to visualize and compare the original and augmented image side by side. ``` def visualize(original, augmented): fig = plt.figure() plt.subplot(1,2,1) plt.title('Original image') plt.imshow(original) plt.subplot(1,2,2) plt.title('Augmented image') plt.imshow(augmented) ``` ## Augment a single image ### Flipping the image Flip the image either vertically or horizontally. ``` flipped = tf.image.flip_left_right(image) visualize(image, flipped) ``` ### Grayscale the image Grayscale an image. ``` grayscaled = tf.image.rgb_to_grayscale(image) visualize(image, tf.squeeze(grayscaled)) plt.colorbar() ``` ### Saturate the image Saturate an image by providing a saturation factor. ``` saturated = tf.image.adjust_saturation(image, 3) visualize(image, saturated) ``` ### Change image brightness Change the brightness of image by providing a brightness factor. 
``` bright = tf.image.adjust_brightness(image, 0.4) visualize(image, bright) ``` ### Rotate the image Rotate an image by 90 degrees. ``` rotated = tf.image.rot90(image) visualize(image, rotated) ``` ### Center crop the image Crop the image from center up to the image part you desire. ``` cropped = tf.image.central_crop(image, central_fraction=0.5) visualize(image,cropped) ``` See the `tf.image` reference for details about available augmentation options. ## Augment a dataset and train a model with it Train a model on an augmented dataset. Note: The problem solved here is somewhat artificial. It trains a densely connected network to be shift invariant by jittering the input images. It's much more efficient to use convolutional layers instead. ``` dataset, info = tfds.load('mnist', as_supervised=True, with_info=True) train_dataset, test_dataset = dataset['train'], dataset['test'] num_train_examples= info.splits['train'].num_examples ``` Write a function to augment the images. Map it over the dataset. This returns a dataset that augments the data on the fly. ``` def convert(image, label): image = tf.image.convert_image_dtype(image, tf.float32) # Cast and normalize the image to [0,1] return image, label def augment(image,label): image,label = convert(image, label) image = tf.image.convert_image_dtype(image, tf.float32) # Cast and normalize the image to [0,1] image = tf.image.resize_with_crop_or_pad(image, 34, 34) # Add 6 pixels of padding image = tf.image.random_crop(image, size=[28, 28, 1]) # Random crop back to 28x28 image = tf.image.random_brightness(image, max_delta=0.5) # Random brightness return image,label BATCH_SIZE = 64 # Only use a subset of the data so it's easier to overfit, for this tutorial NUM_EXAMPLES = 2048 ``` Create the augmented dataset. ``` augmented_train_batches = ( train_dataset # Only train on a subset, so you can quickly see the effect. .take(NUM_EXAMPLES) .cache() .shuffle(num_train_examples//4) # The augmentation is added here.
.map(augment, num_parallel_calls=AUTOTUNE) .batch(BATCH_SIZE) .prefetch(AUTOTUNE) ) ``` And a non-augmented one for comparison. ``` non_augmented_train_batches = ( train_dataset # Only train on a subset, so you can quickly see the effect. .take(NUM_EXAMPLES) .cache() .shuffle(num_train_examples//4) # No augmentation. .map(convert, num_parallel_calls=AUTOTUNE) .batch(BATCH_SIZE) .prefetch(AUTOTUNE) ) ``` Setup the validation dataset. This doesn't change whether or not you're using the augmentation. ``` validation_batches = ( test_dataset .map(convert, num_parallel_calls=AUTOTUNE) .batch(2*BATCH_SIZE) ) ``` Create and compile the model. The model is a two layered, fully-connected neural network without convolution. ``` def make_model(): model = tf.keras.Sequential([ layers.Flatten(input_shape=(28, 28, 1)), layers.Dense(4096, activation='relu'), layers.Dense(4096, activation='relu'), layers.Dense(10) ]) model.compile(optimizer = 'adam', loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) return model ``` Train the model, **without** augmentation: ``` model_without_aug = make_model() no_aug_history = model_without_aug.fit(non_augmented_train_batches, epochs=50, validation_data=validation_batches) ``` Train it again with augmentation: ``` model_with_aug = make_model() aug_history = model_with_aug.fit(augmented_train_batches, epochs=50, validation_data=validation_batches) ``` ## Conclusion: In this example the augmented model converges to an accuracy ~95% on validation set. This is slightly higher (+1%) than the model trained without data augmentation. ``` plotter = tfdocs.plots.HistoryPlotter() plotter.plot({"Augmented": aug_history, "Non-Augmented": no_aug_history}, metric = "accuracy") plt.title("Accuracy") plt.ylim([0.75,1]) ``` In terms of loss, the non-augmented model is obviously in the overfitting regime. The augmented model, while a few epoch slower, is still training correctly and clearly not overfitting. 
``` plotter = tfdocs.plots.HistoryPlotter() plotter.plot({"Augmented": aug_history, "Non-Augmented": no_aug_history}, metric = "loss") plt.title("Loss") plt.ylim([0,1]) ```
github_jupyter
# Lab 03: Estimate proportions using SGD Task: Debug some code to use stochastic gradient descent to estimate two proportions. # Scenario Suppose I have two boxes (A and B), each of which have a bunch of small beads in them. Peeking inside, it looks like there are 3 different colors of beads (red, orange, and yellow), but the two boxes have very different colors. Each box has a lever on it. When I push the lever, a bead comes out of the box. (We can assume it's a random one, and we'll put the bead back in the box it came from so we don't lose beads.) My friend suggests we play a game: they'll pick a box and press the lever a few times; I have to guess what color beads are going to come out. But I complain that I'm never going to be able to guess 100% correctly, since the boxes have mixtures of beads in them. So here's what they propose: I can spread out my one guess among the different colors, e.g., 0.5 for red and 0.25 for orange or yellow--as long as they add up to 1. Okay...sounds good? Even though there's no way I could count the number of each color bead in each box (way too many!), I think I can do well at this game after a few rounds. What do you think? ## Setup ``` import torch from torch import tensor import matplotlib.pyplot as plt %matplotlib inline torch.manual_seed(10); ``` ### 1. Define the true (hidden) proportions Define the true proportions of the 3 colors in each box. ``` boxes = tensor([ [600, 550, 350], [100, 1300, 100] ]).float() ``` ### 2. Define how we're going to get observations. Here's how the friend is going to pick which box. We'll get to see which box they pick. 
``` def pick_box(): return int(torch.rand(1) < .5) pick_box() def draw_beads(box, num_beads): return torch.multinomial(boxes[box], num_beads, replacement=True) example_beads = draw_beads(box=0, num_beads=5); example_beads ``` # Task The code below plays this game, but it encounters some major problems: it crashes, and even once you fix the crashes, it still doesn't learn the correct proportions. Debug the code below so that running `get_guesses` gives a good estimate of the true proportions of each color in the given box. **Mathy Notes**: * Guessing the true proportions for each box minimizes the cross-entropy loss between observations and guesses (in expectation). So your loss function should be cross-entropy (the negative log of the probability given to the observed sample). * To ensure that the guesses are valid probability distributions, I recommend you store the *logits* instead of *probabilities*. The `softmax` function turns logits into probabilities. (The `log_softmax` function turns logits into log-probabilities aka logprobs.) # Solution First, let's compute the true proportions: divide the counts (in `boxes`) by the total number of beads in each box. Use `sum`, and pass `keepdim=True` ``` # your code here # boxes.sum(___) # boxes / _____ ``` ### 3. Define how we're going to make a guess ``` params = tensor([ [.25, .4, .35], [1/3, 1/3, 1/3]]) def get_guess(box): guesses_for_box = params[box] return guesses_for_box # <-- you will need to change this line to ensure that the result is a valid probability distribution example_guess = get_guess(0); example_guess ``` ### 4. Define how score is computed. We can get the probabilities of the actual beads using an indexing trick. For example: ``` example_guess[example_beads] def score_guesses(guess, beads): # <-- note that this is a "score" (higher = better)... you may want to change it to be a "loss" (lower = better). 
probs_for_observed_beads = guess[beads] return probs_for_observed_beads.mean() # <-- you will need to change this line so that we're using cross-entropy loss score_guesses(example_guess, example_beads) ``` ### 5. Use stochastic gradient descent to learn the proportions. ``` params = torch.ones((2, 3)) / 3.0 params.requires_grad_() scores = [] for i in range(50): box = pick_box() # friend picks a box my_guess = get_guess(box) # I make a guess # Check that my guess is valid. assert (my_guess > 0).all() assert (my_guess.sum() - 1.0).abs() < .01 beads = draw_beads(box, 10) # friend draws a bunch of beads score = score_guesses(my_guess, beads) # friend computes my score scores.append(score.item()) # I figure out how I should have guessed differently score.backward() params.data -= params.grad # Plot the scores plt.plot(scores) # Show the proportions. These should be very close to the true proportions. torch.stack([get_guess(box=0), get_guess(box=1)]) ```
github_jupyter
## 04. Explain the Otimizer in Detail ### Instancing a Optimizer Object ๅœจBasic Tutorialไธญ๏ผŒๆˆ‘ไปฌ็Ÿฅ้“UltraOptไธญๆœ‰ๅฆ‚ไธ‹ไผ˜ๅŒ–ๅ™จ๏ผš |ไผ˜ๅŒ–ๅ™จ|ๆ่ฟฐ| |-----|---| |ETPE| Embedding-Tree-Parzen-Estimator, ๆ˜ฏUltraOptไฝœ่€…่‡ชๅˆ›็š„ไธ€็งไผ˜ๅŒ–็ฎ—ๆณ•๏ผŒๅœจTPE็ฎ—ๆณ•[<sup>[4]</sup>](#refer-anchor-4)็š„ๅŸบ็ก€ไธŠๅฏน็ฑปๅˆซๅ˜้‡้‡‡็”จEmbedding้™็ปดไธบไฝŽ็ปด่ฟž็ปญๅ˜้‡๏ผŒ<br>ๅนถๅœจๅ…ถไป–็š„ไธ€ไบ›ๆ–น้ขไนŸๅšไบ†ๆ”น่ฟ›ใ€‚ETPEๅœจๆŸไบ›ๅœบๆ™ฏไธ‹่กจ็Žฐๆฏ”HyperOpt็š„TPE็ฎ—ๆณ•่ฆๅฅฝใ€‚ | |Forest |ๅŸบไบŽ้šๆœบๆฃฎๆž—็š„่ดๅถๆ–ฏไผ˜ๅŒ–็ฎ—ๆณ•ใ€‚ๆฆ‚็އๆจกๅž‹ๅผ•็”จไบ†`scikit-optimize`[<sup>[1]</sup>](#refer-anchor-1)ๅŒ…็š„`skopt.learning.forest`ๆจกๅž‹[<sup>[2]</sup>](#refer-anchor-2)๏ผŒ<br>ๅนถๅ€Ÿ้‰ดไบ†`SMAC3`[<sup>[3]</sup>](#refer-anchor-3)ไธญ็š„ๅฑ€้ƒจๆœ็ดขๆ–นๆณ•| |GBRT| ๅŸบไบŽๆขฏๅบฆๆๅ‡ๅ›žๅฝ’ๆ ‘(Gradient Boosting Resgression Tree)็š„่ดๅถๆ–ฏไผ˜ๅŒ–็ฎ—ๆณ•๏ผŒ<br>ๆฆ‚็އๆจกๅž‹ๅผ•็”จไบ†`scikit-optimize`ๅŒ…็š„`skopt.learning.gbrt`ๆจกๅž‹ | |Random| ้šๆœบๆœ็ดขใ€‚ | ๆˆ‘ไปฌๅœจ่ฐƒ็”จ`ultraopt.fmin`ๅ‡ฝๆ•ฐ่ฟ›่กŒไผ˜ๅŒ–ๆ—ถ๏ผŒๅœจไฝฟ็”จไผ˜ๅŒ–ๅ™จ้ป˜่ฎคๅ‚ๆ•ฐ็š„ๆƒ…ๅ†ตไธ‹๏ผŒๅฏไปฅๅชไผ ๅ…ฅไผ˜ๅŒ–ๅ™จ็š„ๅๅญ—๏ผŒๅฆ‚๏ผš ```python from ultraopt import fmin result = fmin(evaluate_function, config_space, optimizer="ETPE") ``` ไฝ†ๅฆ‚ๆžœๆˆ‘ไปฌ่ฆ่ฐƒๆ•ดไผ˜ๅŒ–ๅ™จ็š„ๅ‚ๆ•ฐ๏ผŒๅฆ‚ไฟฎๆ”น`ForestOptimizer`้šๆœบๆฃฎๆž—ๆ ‘็š„ไธชๆ•ฐ๏ผŒๆˆ– `ETPEOptimizer` ็š„ไธ€ไบ›ๆถ‰ๅŠๅ†ทๅฏๅŠจๅ’Œ้‡‡ๆ ทไธชๆ•ฐ็š„ๅ‚ๆ•ฐๆ—ถ๏ผŒๅฐฑ้œ€่ฆไปŽ`ultraopt.optimizer`ๅŒ…ไธญๅผ•ๅ…ฅ็›ธๅบ”็š„ไผ˜ๅŒ–็ฑป๏ผŒๅนถๅฏนๅ…ถๅฎžไพ‹ๅŒ–ไธบไผ˜ๅŒ–ๅ™จๅฏน่ฑก ๅผ•ๅ…ฅ`fmin`ๅ’Œไฝ ้œ€่ฆไฝฟ็”จ็š„ไผ˜ๅŒ–ๅ™จ็ฑป๏ผš ``` from ultraopt import fmin from ultraopt.optimizer import ETPEOptimizer from ultraopt.optimizer import RandomOptimizer from ultraopt.optimizer import ForestOptimizer from ultraopt.optimizer import GBRTOptimizer ``` ๅผ•ๅ…ฅไธ€ไธชๆต‹่ฏ•็”จ็š„้…็ฝฎ็ฉบ้—ดๅ’Œ่ฏ„ไปทๅ‡ฝๆ•ฐ๏ผš ``` from ultraopt.tests.mock import config_space, evaluate ``` ๆ นๆฎ่‡ชๅทฑๆƒณ่ฆ็š„ๅ‚ๆ•ฐๅฎžไพ‹ๅŒ–ไธ€ไธช`ForestOptimizer`๏ผš ``` optimizer = ForestOptimizer(n_estimators=20) # 
ไปฃ็†ๆจกๅž‹ไธบ20ๆฃตๆ ‘็š„้šๆœบๆฃฎๆž— ``` ๆŠŠไผ˜ๅŒ–ๅ™จๅฏน่ฑกไผ ๅ…ฅfminๅ‡ฝๆ•ฐ๏ผŒๅผ€ๅง‹ไผ˜ๅŒ–่ฟ‡็จ‹ ``` fmin(evaluate, config_space, optimizer) ``` ### Implement an Optimization Process Outside of fmin ไผ˜ๅŒ–ๅ™จๆœ‰3ไธช้‡่ฆ็š„ๅ‡ฝๆ•ฐ๏ผš - `initialize(config_space, ...` : ๅ‚ๆ•ฐไธบ้…็ฝฎ็ฉบ้—ด็ญ‰๏ผŒๅˆๅง‹ๅŒ–ไผ˜ๅŒ–ๅ™จ - `ask(n_points=None , ...` : ่ฏทๆฑ‚ไผ˜ๅŒ–ๅ™จๆŽจ่`n_points`ไธช้…็ฝฎ๏ผˆ้ป˜่ฎคไธบ1ไธช๏ผ‰ - `tell(config, loss, ...)` : ๅ‘Š็Ÿฅไผ˜ๅŒ–ๅ™จ๏ผŒไน‹ๅ‰ๆŽจ่้…็ฝฎ`config`็š„ๅฅฝๅ๏ผˆๆŸๅคฑ`loss`่ถŠๅฐ่ถŠๅฅฝ๏ผ‰ ไผ˜ๅŒ–ๅ™จ็š„่ฟ่กŒๆต็จ‹ๅฆ‚ไธ‹ๅ›พๆ‰€็คบ๏ผš ``` from graphviz import Digraph; g = Digraph() g.node("config space", shape="ellipse"); g.node("optimizer", shape="box") g.node("config", shape="ellipse"); g.node("loss", shape="circle"); g.node("evaluator", shape="box") g.edge("config space", "optimizer", label="initialize"); g.edge("optimizer", "config", label="<<b>ask</b>>", color='blue') g.edge("config","evaluator" , label="send to"); g.edge("evaluator","loss" , label="evaluate") g.edge("config", "optimizer", label="<<b>tell</b>>", color='red'); g.edge("loss", "optimizer", label="<<b>tell</b>>", color='red') g.graph_attr['rankdir'] = 'LR'; g ``` ๅ›พไธญ็š„`evaluator`่ฏ„ไปทๅ™จไผšๅฏน`config`้…็ฝฎ่ฟ›่กŒ่ฏ„ไปท๏ผŒ็„ถๅŽ่ฟ”ๅ›žไธ€ไธชๆŸๅคฑ`loss`ใ€‚ ไธพไธชไพ‹ๅญ๏ผŒๅœจAutoML้—ฎ้ข˜ไธญ๏ผŒ่ฏ„ไปทๅ™จ็š„ๅทฅไฝœๆต็จ‹ๅฆ‚ไธ‹๏ผš 1. ๅฐ†config่ฝฌๅŒ–ไธบไธ€ไธชๆœบๅ™จๅญฆไน ๅฎžไพ‹ 2. ๅœจ่ฎญ็ปƒ้›†ไธŠๅฏนๆœบๅ™จๅญฆไน ๅฎžไพ‹่ฟ›่กŒ่ฎญ็ปƒ 3. ๅœจ้ชŒ่ฏ้›†ไธŠๅพ—ๅˆฐ็›ธๅบ”็š„่ฏ„ไปทๆŒ‡ๆ ‡ 4. ๅฏน่ฏ„ไปทๆŒ‡ๆ ‡่ฟ›่กŒๅค„็†๏ผŒไฝฟๅ…ถ`่ถŠๅฐ่ถŠๅฅฝ`๏ผŒ่ฟ”ๅ›ž`loss` ๅ…ทไฝ“็š„่ฏ„ไปทๅ™จๆˆ‘ไปฌไผšๅœจไธ‹ไธชๆ•™็จ‹ไธญๅฎž็Žฐใ€‚ๅœจๅญฆไน ไบ†่ฟ™ไบ›็Ÿฅ่ฏ†ๅŽ๏ผŒ ๆˆ‘ไปฌ่ƒฝๅฆ่„ฑ็ฆป`fmin`ๅ‡ฝๆ•ฐ๏ผŒ่‡ชๅทฑๅฎž็Žฐไธ€ไธชไผ˜ๅŒ–่ฟ‡็จ‹ๅ‘ข๏ผŸ็ญ”ๆกˆๆ˜ฏๅฏไปฅ็š„ใ€‚ ๅœจUltraOpt็š„่ฎพ่ฎกๅ“ฒๅญฆไธญ๏ผŒไผ˜ๅŒ–ๅ™จๅช้œ€่ฆๅ…ทๅค‡ไธŠ่ฟฐ็š„3ไธชๆŽฅๅฃ๏ผŒ่ฏ„ไปทๅ™จๅ’Œๅˆ†ๅธƒๅผ็ญ–็•ฅ็š„่ฎพ่ฎก้ƒฝๅฏไปฅ็”ฑ็”จๆˆทๅฎŒๆˆใ€‚ #### ๅ…ˆไปŽไธ€ๆฌกๅพช็Žฏไธญไฝ“ไผšๆ•ดไธช่ฟ‡็จ‹ > Step 1. 
้ฆ–ๅ…ˆๅฎžไพ‹ๅŒ–ๅ’Œๅˆๅง‹ๅŒ–ไผ˜ๅŒ–ๅ™จ ``` optimizer = ETPEOptimizer() optimizer.initialize(config_space) ``` > Step 2. ่ฐƒ็”จไผ˜ๅŒ–ๅ™จ็š„`ask`ๅ‡ฝๆ•ฐ่Žทๅ–ไธ€ไธชๅ…ถๆŽจ่็š„้…็ฝฎ๏ผš ``` recommend_config, config_info = optimizer.ask() recommend_config config_info ``` > Step 3. ็”จ่ฏ„ไผฐๅ™จ๏ผŒๅœจ่ฟ™ๆ˜ฏ`evaluate`ๅ‡ฝๆ•ฐๆฅ่ฏ„ไปท้…็ฝฎ็š„ๅฅฝๅ ``` loss = evaluate(recommend_config) loss ``` > Step 4. ้€š่ฟ‡tellๅ‡ฝๆ•ฐๅฐ†่ง‚ๆต‹็ป“ๆžœ `config, loss` ไผ ้€’็ป™ไผ˜ๅŒ–ๅ™จ ``` optimizer.tell(recommend_config, loss) ``` #### ๅฐ†ไธŠ่ฟฐ่ฟ‡็จ‹ๆ•ด็†ไธบไธ€ไธชforๅพช็Žฏ ``` optimizer = ETPEOptimizer() optimizer.initialize(config_space) losses = [] best_losses = [] for _ in range(100): config, _ = optimizer.ask() loss = evaluate(config) optimizer.tell(config, loss) losses.append(loss) best_losses.append(min(losses)) import pylab as plt plt.grid(0.2) plt.xlabel("Iteration") plt.ylabel("Loss") plt.plot(range(20, 100), best_losses[20:]); ``` ๆ‚จๅฏ่ƒฝไผšๆœ‰็–‘้—ฎ๏ผŒไธ€ๆฌกๅช่ƒฝ`ask`ไธ€ไธชๆŽจ่้…็ฝฎๅ—๏ผŒ่ƒฝไธ่ƒฝ`ask`ๅคšไธชๅ‘ข๏ผŸ็ญ”ๆกˆๆ˜ฏๅฏไปฅ็š„ #### MapReduce่ฎก็ฎ—็ญ–็•ฅ๏ผšaskๅคšไธช้…็ฝฎ + ๅนถ่กŒ่ฐƒ็”จ่ฏ„ไปทๅ‡ฝๆ•ฐ ``` from joblib import Parallel, delayed n_parallels = 3 optimizer = ETPEOptimizer() optimizer.initialize(config_space) for _ in range(100 // n_parallels): config_info_pairs = optimizer.ask(n_points=n_parallels) losses = Parallel(n_jobs=n_parallels)( delayed(evaluate)(config) for config, _ in config_info_pairs ) loss = evaluate(config) for j, (loss, (config, _)) in enumerate(zip(losses, config_info_pairs)): optimizer.tell(config, loss, update_model=(j == n_parallels - 1)) # ไผ ๅ…ฅ่ฟ™ๆ‰น่ง‚ๆต‹็š„ๆœ€ๅŽไธ€ไธช่ง‚ๆต‹ๆ—ถ๏ผŒๆ›ดๆ–ฐๆจกๅž‹ ``` **ๅ‚่€ƒๆ–‡็Œฎ** <div id="refer-anchor-1"></div> - [1] https://github.com/scikit-optimize/scikit-optimize <div id="refer-anchor-2"></div> - [2] [Hutter, F. et al. โ€œAlgorithm runtime prediction: Methods & evaluation.โ€ Artif. Intell. 
206 (2014): 79-111.](https://arxiv.org/abs/1211.0906) <div id="refer-anchor-3"></div> - [3] [Hutter F., Hoos H.H., Leyton-Brown K. (2011) Sequential Model-Based Optimization for General Algorithm Configuration. In: Coello C.A.C. (eds) Learning and Intelligent Optimization. LION 2011. Lecture Notes in Computer Science, vol 6683. Springer, Berlin, Heidelberg.](https://link.springer.com/chapter/10.1007/978-3-642-25566-3_40) <div id="refer-anchor-4"></div> - [4] [James Bergstra, Rรฉmi Bardenet, Yoshua Bengio, and Balรกzs Kรฉgl. 2011. Algorithms for hyper-parameter optimization. In Proceedings of the 24th International Conference on Neural Information Processing Systems (NIPS'11). Curran Associates Inc., Red Hook, NY, USA, 2546โ€“2554.](https://dl.acm.org/doi/10.5555/2986459.2986743)
github_jupyter
``` from quchem.Hamiltonian_Generator_Functions import * from quchem.Graph import * ### HAMILTONIAN start Molecule = 'H2' geometry = [('H', (0., 0., 0.)), ('H', (0., 0., 0.74))] basis = 'sto-3g' ### Get Hamiltonian Hamilt = Hamiltonian_PySCF(Molecule, run_scf=1, run_mp2=1, run_cisd=1, run_ccsd=1, run_fci=1, basis=basis, multiplicity=1, geometry=geometry) # normally None! QubitHamiltonian = Hamilt.Get_Qubit_Hamiltonian(threshold=None, transformation='JW') ### HAMILTONIAN end ##################################### print(QubitHamiltonian) from quchem.Ansatz_Generator_Functions import * n_electrons=Hamilt.molecule.n_electrons n_qubits=Hamilt.molecule.n_qubits ansatz_obj = Ansatz(n_electrons,n_qubits) print('JW ground state = ', ansatz_obj.Get_JW_HF_state_in_OCC_basis()) print('BK ground state = ', ansatz_obj.Get_BK_HF_state_in_OCC_basis()) ansatz_obj.Get_ia_and_ijab_terms() print('ia') print(ansatz_obj.Sec_Quant_CC_ia_Fermi_ops) print(ansatz_obj.theta_ia) print('######') print('ijab') print(ansatz_obj.Sec_Quant_CC_ijab_Fermi_ops) print(ansatz_obj.theta_ijab) transformation='JW' ansatz_obj.UCCSD_single_trotter_step(transformation, List_FermiOps_ia=ansatz_obj.Sec_Quant_CC_ia_Fermi_ops, List_FermiOps_ijab=ansatz_obj.Sec_Quant_CC_ijab_Fermi_ops) ansatz_obj.Second_Quant_CC_single_Trot_list_ia ansatz_obj.Second_Quant_CC_single_Trot_list_ijab # None Simplified print('input state', ansatz_obj.Get_JW_HF_state_in_OCC_basis()) UCCSD_ansatz_Q_Circ_obj = Ansatz_Circuit(ansatz_obj.Get_JW_HF_state_in_OCC_basis(), ansatz_obj.Second_Quant_CC_single_Trot_list_ia, ansatz_obj.Second_Quant_CC_single_Trot_list_ijab) UCCSD_ansatz_Q_Circ =UCCSD_ansatz_Q_Circ_obj.Get_Full_HF_UCCSD_QC(Theta_param_list_ia=ansatz_obj.theta_ia, Theta_param_list_ijab=ansatz_obj.theta_ijab, ia_first=True) UCCSD_ansatz_Q_Circ ``` From Helgaker, T., P. Jorgensen,and J. 
Olsen (2014),Molecularelectronic-structure theory(John Wiley & Sons) we known $$H_{2}^{ground} = 0.9939| 1100\rangle - 0.1106| 0011\rangle$$ LOOK at: PHYS. REV. X, **8**, 031022 (2018) ``` from quchem.Unitary_partitioning_LCU_method import * Hamiltonian_graph_obj = Openfermion_Hamiltonian_Graph(QubitHamiltonian) commutativity_flag = 'AC' ## <- defines relationship between sets!!! plot_graph = False Graph_colouring_strategy='largest_first' anti_commuting_sets = Hamiltonian_graph_obj.Get_Clique_Cover_as_QubitOp(commutativity_flag, Graph_colouring_strategy=Graph_colouring_strategy, plot_graph=plot_graph) anti_commuting_sets import random theta_ia_random_input = [random.uniform(0, 2*np.pi) for _ in range(len(ansatz_obj.Second_Quant_CC_single_Trot_list_ia))] theta_ijab_random_input = [random.uniform(0, 2*np.pi) for _ in range(len(ansatz_obj.Second_Quant_CC_single_Trot_list_ijab))] input_state = ansatz_obj.Get_JW_HF_state_in_OCC_basis() print(input_state) UCCSD_ansatz_Q_Circ_obj = Ansatz_Circuit(ansatz_obj.Get_JW_HF_state_in_OCC_basis(), ansatz_obj.Second_Quant_CC_single_Trot_list_ia, ansatz_obj.Second_Quant_CC_single_Trot_list_ijab) UCCSD_ansatz_Q_Circ =UCCSD_ansatz_Q_Circ_obj.Get_Full_HF_UCCSD_QC( Theta_param_list_ia=theta_ia_random_input, Theta_param_list_ijab=theta_ijab_random_input, ia_first=True) UCCSD_ansatz_Q_Circ SET_index = 7 N_index = 1 R_uncorrected, Pn, gamma_l = Get_R_op_list(anti_commuting_sets[SET_index], N_index) R_corrected_Op_list, R_corr_list, ancilla_amplitudes, l1 = absorb_complex_phases(R_uncorrected) ### full_Q_circuit = Full_Ansatz_and_Quantum_R_circuit(Pn, R_corrected_Op_list, R_corr_list, ancilla_amplitudes, Hamilt.molecule.n_qubits , UCCSD_ansatz_Q_Circ) full_Q_circuit N_QUBITS = Hamilt.molecule.n_qubits def GIVE_ENERGY_lin_alg(theta_ia_ijab): theta_ia=theta_ia_ijab[:len(ansatz_obj.Second_Quant_CC_single_Trot_list_ia)] theta_ijab=theta_ia_ijab[len(ansatz_obj.Second_Quant_CC_single_Trot_list_ia):] ansatz_cirq_circuit = 
UCCSD_ansatz_Q_Circ_obj.Get_Full_HF_UCCSD_QC( Theta_param_list_ia=theta_ia, Theta_param_list_ijab=theta_ijab, ia_first=True) VQE_exp_LCU_lin_alg = VQE_Experiment_LCU_UP_lin_alg(anti_commuting_sets, ansatz_cirq_circuit, N_QUBITS, # <--- NOTE THIS N_indices_dict={7:0, 8:1, 9:0, 10:1}) energy = VQE_exp_LCU_lin_alg.Calc_Energy() return np.array(energy).real theta_ia_random_input = [random.uniform(0, 2*np.pi) for _ in range(len(ansatz_obj.Second_Quant_CC_single_Trot_list_ia))] theta_ijab_random_input = [random.uniform(0, 2*np.pi) for _ in range(len(ansatz_obj.Second_Quant_CC_single_Trot_list_ijab))] combined_ia_ijab_random_input=[*theta_ia_random_input, *theta_ijab_random_input] GIVE_ENERGY_lin_alg(combined_ia_ijab_random_input) ``` ## Optimizing ``` from quchem.Scipy_Optimizer import * GG = Optimizer(GIVE_ENERGY_lin_alg, combined_ia_ijab_random_input, args=(), method='Nelder-Mead', jac=None, hess=None, hessp=None, bounds=None, constraints=None, tol=1e-8, display_convergence_message=True, display_steps=True) GG.get_env(50) GG.plot_convergence() plt.show() Hamilt.molecule.fci_energy ### optimizer def calc_gradient_ADAM(theta_ijab_list): grad_list=[] for index, theta in enumerate(theta_ijab_list): new_theta_list = theta_ijab_list.copy() new_theta_list[index] = theta + np.pi/4 Obs_PLUS = GIVE_ENERGY_lin_alg(new_theta_list) new_theta_list[index] = theta - np.pi/4 Obs_MINUS = GIVE_ENERGY_lin_alg(new_theta_list) gradient = Obs_PLUS - Obs_MINUS grad_list.append(gradient) return np.array(grad_list) custom_optimizer_DICT = {'learning_rate': 0.1, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-8, 'delta': 1e-8, 'maxfev': 15000} GG = Optimizer(GIVE_ENERGY_lin_alg, combined_ia_ijab_random_input, args=(), method=_minimize_Adam, jac=calc_gradient_ADAM, hess=None, hessp=None, bounds=None, constraints=None, tol=1e-20, display_convergence_message=True, display_steps=True, custom_optimizer_DICT=custom_optimizer_DICT) GG.get_env(50) GG.plot_convergence() plt.show() ``` # Reduced Ansatz From 
PHYS. REV. X, **8**, 031022 (2018): $$U = e^{-i \theta Y_{0} X_{1} X_{2} X_{3}}$$ - when acting on $| 1100 \rangle_{HF-JW-STATE}$ $$U | \psi_{HF}\rangle = | \psi_{UCCSD}\rangle$$ to do this in Q.C the following circuit is employed: ``` ######### Ansatz circuit from quchem.Simulating_Quantum_Circuit import * from quchem.Ansatz_Generator_Functions import * from openfermion.ops import QubitOperator def H2_ansatz(theta): HF_circ = [cirq.X.on(cirq.LineQubit(0)), cirq.X.on(cirq.LineQubit(1))] full_exp_circ_obj = full_exponentiated_PauliWord_circuit(QubitOperator('Y0 X1 X2 X3', -1j), theta) UCCSD_circ = cirq.Circuit(cirq.decompose_once((full_exp_circ_obj(*cirq.LineQubit.range(full_exp_circ_obj.num_qubits()))))) full_circuit = cirq.Circuit([*HF_circ, *UCCSD_circ.all_operations()]) return full_circuit H2_ansatz(np.pi) ciruict = H2_ansatz(np.pi) len(list(ciruict.all_operations())) SET_index = 7 N_index = 1 ansatz = H2_ansatz(np.pi) R_uncorrected, Pn, gamma_l = Get_R_op_list(anti_commuting_sets[SET_index], N_index) R_corrected_Op_list, R_corr_list, ancilla_amplitudes, l1 = absorb_complex_phases(R_uncorrected) ### Q_circuit = Full_Q_Circuit(Pn, R_corrected_Op_list, R_corr_list, ancilla_amplitudes, Hamilt.molecule.n_qubits , ansatz) Q_circuit # print(Q_circuit.to_qasm())/ list(Q_circuit.all_operations()) list(Q_circuit.all_operations())[-3].qubits Q_circuit.final_wavefunction().shape ######### Ansatz circuit from quchem.Simulating_Quantum_Circuit import * from quchem.Ansatz_Generator_Functions import * def H2_ansatz(theta): HF_circ = [cirq.X.on(cirq.LineQubit(0)), cirq.X.on(cirq.LineQubit(1))] full_exp_circ_obj = full_exponentiated_PauliWord_circuit(QubitOperator('Y0 X1 X2 X3', -1j), theta) UCCSD_circ = cirq.Circuit(cirq.decompose_once((full_exp_circ_obj(*cirq.LineQubit.range(full_exp_circ_obj.num_qubits()))))) full_circuit = cirq.Circuit([*HF_circ, *UCCSD_circ.all_operations()]) return full_circuit cirq_circuit = H2_ansatz(0.12) print(cirq_circuit) def Calc_E(H, 
ansatz_circuit): H_mat = get_sparse_operator(H).todense() input_state = reduce(np.kron, [np.array([[1],[0]]) for _ in range(4)]) ansatz_unitary = ansatz_circuit.unitary() ansatz_ket = ansatz_unitary.dot(input_state) ansatz_bra = ansatz_ket.conj().T return np.dot(ansatz_bra, H_mat.dot(ansatz_ket)).item(0) E_list=[] theta_list = np.arange(0,np.pi*2, 0.1) for theta in theta_list: Energy = Calc_E(QubitHamiltonian, H2_ansatz(theta)) E_list.append(Energy) import matplotlib.pyplot as plt %matplotlib notebook fig, ax = plt.subplots() ax.plot(theta_list, E_list, color='k', label='standard VQE', linestyle='-') ax.set(xlabel='theta', ylabel='E| / Ha') # ,title='Scaling of methods') ax.grid() plt.legend() plt.show() ```
github_jupyter
# Model Training (Elasticsearch LTR) We train a LambdaMart model using [RankLib](https://sourceforge.net/p/lemur/wiki/RankLib%20How%20to%20use/) and upload the trained model to Elasticsearch. ``` import json import os import requests DATA_DIR = "../../data" MODEL_FILE = os.path.join(DATA_DIR, "es_lambdamart_model.txt") ES_URL = "http://localhost:9200/" ``` ## Train Model with RankLib Command is as follows: java -jar RankLib-2.1-patched.jar \ -train ../data/es_features_train.txt \ -test ../data/es_features_test.txt \ -validate ../data/es_features_val.txt \ -ranker 6 \ -metric2t NDCG@10 \ -metric2T ERR@10 \ -save ../data/solr_lambdamart_model.txt Console output is shown below: [+] General Parameters: Training data: ../data/es_features_train.txt Test data: ../data/es_features_test.txt Validation data: ../data/es_features_val.txt Feature vector representation: Dense. Ranking method: LambdaMART Feature description file: Unspecified. All features will be used. Train metric: NDCG@10 Test metric: ERR@10 Highest relevance label (to compute ERR): 4 Feature normalization: No Model file: ../data/es_lambdamart_model.txt [+] LambdaMART's Parameters: No. of trees: 1000 No. of leaves: 10 No. of threshold candidates: 256 Learning rate: 0.1 Stop early: 100 rounds without performance gain on validation data Reading feature file [../data/es_features_train.txt]... [Done.] (12 ranked lists, 1200 entries read) Reading feature file [../data/es_features_val.txt]... [Done.] (3 ranked lists, 300 entries read) Reading feature file [../data/es_features_test.txt]... [Done.] (5 ranked lists, 480 entries read) Initializing... [Done] --------------------------------- Training starts... 
--------------------------------- #iter | NDCG@10-T | NDCG@10-V | --------------------------------- 1 | 0.844 | 0.844 | 2 | 0.8652 | 0.8652 | 3 | 0.8652 | 0.8652 | 4 | 0.8652 | 0.8652 | 5 | 0.8652 | 0.8652 | 6 | 0.8652 | 0.8652 | 7 | 0.8652 | 0.8652 | 8 | 0.8652 | 0.8652 | 9 | 0.8652 | 0.8652 | 10 | 0.8652 | 0.8652 | 11 | 0.8652 | 0.8652 | 12 | 0.8652 | 0.8652 | 13 | 0.8997 | 0.8997 | 14 | 0.8997 | 0.8997 | 15 | 0.9011 | 0.9011 | 16 | 0.9011 | 0.9011 | 17 | 0.9028 | 0.9028 | 18 | 0.9028 | 0.9028 | 19 | 0.9373 | 0.9373 | 20 | 0.9373 | 0.9373 | 21 | 0.9373 | 0.9373 | 22 | 0.9435 | 0.9435 | 23 | 0.9607 | 0.9607 | 24 | 0.9607 | 0.9607 | 25 | 0.978 | 0.978 | 26 | 0.9801 | 0.9801 | 27 | 0.9865 | 0.9865 | 28 | 0.9917 | 0.9917 | 29 | 0.9917 | 0.9917 | 30 | 0.9917 | 0.9917 | 31 | 0.9917 | 0.9917 | 32 | 0.9917 | 0.9917 | 33 | 1.0 | 1.0 | 34 | 1.0 | 1.0 | 35 | 1.0 | 1.0 | 36 | 1.0 | 1.0 | 37 | 1.0 | 1.0 | 38 | 1.0 | 1.0 | 39 | 1.0 | 1.0 | 40 | 1.0 | 1.0 | 41 | 1.0 | 1.0 | 42 | 1.0 | 1.0 | 43 | 1.0 | 1.0 | 44 | 1.0 | 1.0 | 45 | 1.0 | 1.0 | 46 | 1.0 | 1.0 | 47 | 1.0 | 1.0 | 48 | 1.0 | 1.0 | 49 | 1.0 | 1.0 | 50 | 1.0 | 1.0 | 51 | 1.0 | 1.0 | 52 | 1.0 | 1.0 | 53 | 1.0 | 1.0 | 54 | 1.0 | 1.0 | 55 | 1.0 | 1.0 | 56 | 1.0 | 1.0 | 57 | 1.0 | 1.0 | 58 | 1.0 | 1.0 | 59 | 1.0 | 1.0 | 60 | 1.0 | 1.0 | 61 | 1.0 | 1.0 | 62 | 1.0 | 1.0 | 63 | 1.0 | 1.0 | 64 | 1.0 | 1.0 | 65 | 1.0 | 1.0 | 66 | 1.0 | 1.0 | 67 | 1.0 | 1.0 | 68 | 1.0 | 1.0 | 69 | 1.0 | 1.0 | 70 | 1.0 | 1.0 | 71 | 1.0 | 1.0 | 72 | 1.0 | 1.0 | 73 | 1.0 | 1.0 | 74 | 1.0 | 1.0 | 75 | 1.0 | 1.0 | 76 | 1.0 | 1.0 | 77 | 1.0 | 1.0 | 78 | 1.0 | 1.0 | 79 | 1.0 | 1.0 | 80 | 1.0 | 1.0 | 81 | 1.0 | 1.0 | 82 | 1.0 | 1.0 | 83 | 1.0 | 1.0 | 84 | 1.0 | 1.0 | 85 | 1.0 | 1.0 | 86 | 1.0 | 1.0 | 87 | 1.0 | 1.0 | 88 | 1.0 | 1.0 | 89 | 1.0 | 1.0 | 90 | 1.0 | 1.0 | 91 | 1.0 | 1.0 | 92 | 1.0 | 1.0 | 93 | 1.0 | 1.0 | 94 | 1.0 | 1.0 | 95 | 1.0 | 1.0 | 96 | 1.0 | 1.0 | 97 | 1.0 | 1.0 | 98 | 1.0 | 1.0 | 99 | 1.0 | 1.0 | 100 | 1.0 | 1.0 | 101 | 1.0 | 1.0 | 
102 | 1.0 | 1.0 | 103 | 1.0 | 1.0 | 104 | 1.0 | 1.0 | 105 | 1.0 | 1.0 | 106 | 1.0 | 1.0 | 107 | 1.0 | 1.0 | 108 | 1.0 | 1.0 | 109 | 1.0 | 1.0 | 110 | 1.0 | 1.0 | 111 | 1.0 | 1.0 | 112 | 1.0 | 1.0 | 113 | 1.0 | 1.0 | 114 | 1.0 | 1.0 | 115 | 1.0 | 1.0 | 116 | 1.0 | 1.0 | 117 | 1.0 | 1.0 | 118 | 1.0 | 1.0 | 119 | 1.0 | 1.0 | 120 | 1.0 | 1.0 | 121 | 1.0 | 1.0 | 122 | 1.0 | 1.0 | 123 | 1.0 | 1.0 | 124 | 1.0 | 1.0 | 125 | 1.0 | 1.0 | 126 | 1.0 | 1.0 | 127 | 1.0 | 1.0 | 128 | 1.0 | 1.0 | 129 | 1.0 | 1.0 | 130 | 1.0 | 1.0 | 131 | 1.0 | 1.0 | 132 | 1.0 | 1.0 | 133 | 1.0 | 1.0 | 134 | 1.0 | 1.0 | --------------------------------- Finished sucessfully. NDCG@10 on training data: 1.0 NDCG@10 on validation data: 1.0 --------------------------------- ERR@10 on test data: 2.1856 Model saved to: ../data/es_lambdamart_model.txt ## Upload Trained Model ``` model_def = None with open(MODEL_FILE, "r") as model_file: model_def = model_file.read() data = { "model": { "name": "es_lambdamart_model", "model": { "type": "model/ranklib", "definition": model_def } } } headers = { "Content-Type": "application/json" } resp = requests.post(ES_URL + "_ltr/_featureset/myFeatures/_createmodel", headers=headers, data=json.dumps(data)) print(resp.text) ```
github_jupyter
<a href="https://colab.research.google.com/github/BRIJNANDA1979/CNN-Sentinel/blob/master/Understand_band_data_info_using_histogram_and_classifying_pixel_values.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #https://www.earthdatascience.org/courses/use-data-open-source-python/multispectral-remote-sensing/vegetation-indices-in-python/calculate-NDVI-python/ #Sentinel 2 Use Handbook. https://sentinels.copernicus.eu/documents/247904/685211/Sentinel-2_User_Handbook !pip install rioxarray !pip install geopandas import os import matplotlib.pyplot as plt import numpy as np import rioxarray as rxr import geopandas as gpd path = '/content/drive/MyDrive/Big/S2A_MSIL2A_20170613T101031_0_55/S2A_MSIL2A_20170613T101031_0_55_B01.tif' #os.chdir(path) #data_path = os.path.join("/content/drive/MyDrive/Big/S2A_MSIL2A_20170613T101031_0_55/S2A_MSIL2A_20170613T101031_0_55_B01.tif") data = rxr.open_rasterio(path) data.shape !pip install earthpy import earthpy as et import earthpy.spatial as es import earthpy.plot as ep ep.plot_bands(data, title="Bigearthnet Band 1 Raster") plt.show() #https://earthpy.readthedocs.io/en/latest/gallery_vignettes/plot_bands_functionality.html #Stack all bands of BigEarthNet Data sample one band tiff images import glob files = glob.glob(os.path.join('/content/drive/MyDrive/Big/S2A_MSIL2A_20170613T101031_0_55/S2A_MSIL2A_20170613T101031_0_55_B*.tif')) files.sort() print("Number of Bands",len(files)) print(files) print(files[0]) # Band1 print(files[1]) # Band2 print(files[10]) # Band12 #array_stack, meta_data = es.stack(path, nodata=-9999) ``` # New Section ``` print(files[0]) band1= rxr.open_rasterio(files[0]) ep.plot_bands(band1, title="Bigearthnet Band 1 Raster") plt.show() print("The CRS of this data is:", band1.rio.crs) #Converting EPSG to Proj4 in Python # Convert to project string using earthpy proj4 = et.epsg['32634'] print(proj4) #Spatial Extent #You can access the spatial extent 
using the .bounds() attribute in rasterio. print(band1.rio.bounds()) #Raster Resolution: area covered by 1 pixel on ground e.g 60m * 60m # What is the x and y resolution for your raster data? print(band1.rio.resolution()) print("The nodatavalue of your data is:", band1.rio.nodata) # How many bands / layers does the object have? print("Number of bands", band1.rio.count) print("The shape of your data is:", band1.shape) print('min value:', np.nanmin(band1)) print('max value:', np.nanmax(band1)) import matplotlib.pyplot as plt f, ax = plt.subplots() band1.plot.hist(color="purple") ax.set(title="Distribution of Raster Cell Values Band 1 Data", xlabel="", ylabel="Number of Pixels") plt.show() print(files[1]) band2= rxr.open_rasterio(files[1]) ep.plot_bands(band2, title="Bigearthnet Band 2 Raster") plt.show() print("The CRS of this data is:", band2.rio.crs) #Converting EPSG to Proj4 in Python # Convert to project string using earthpy proj4 = et.epsg['32634'] print(proj4) #Spatial Extent #You can access the spatial extent using the .bounds() attribute in rasterio. print(band2.rio.bounds()) #Raster Resolution: area covered by 1 pixel on ground e.g 60m * 60m # What is the x and y resolution for your raster data? print(band2.rio.resolution()) print("The nodatavalue of your data is:", band2.rio.nodata) # How many bands / layers does the object have? 
print("Number of bands", band2.rio.count) print("The shape of your data is:", band2.shape) print('min value:', np.nanmin(band2)) print('max value:', np.nanmax(band2)) import matplotlib.pyplot as plt f, ax = plt.subplots() band1.plot.hist(color="purple") ax.set(title="Distribution of Raster Cell Values Band 2 Data", xlabel="", ylabel="Number of Pixels") plt.show() #https://rasterio.readthedocs.io/en/latest/api/rasterio.plot.html #rasterio.plot.reshape_as_image(arr) #Returns the source array reshaped into the order expected by image processing and visualization software (matplotlib, scikit-image, etc) by swapping the axes order from (bands, rows, columns) to (rows, columns, bands) print('min value:', np.nanmin(data)) print('max value:', np.nanmax(data)) #https://www.earthdatascience.org/courses/use-data-open-source-python/intro-raster-data-python/raster-data-processing/classify-plot-raster-data-in-python/ import matplotlib.pyplot as plt f, ax = plt.subplots() data.plot.hist(color="purple") ax.set(title="Distribution of Raster Cell Values Data", xlabel="", ylabel="Number of Pixels") plt.show() bins=[0, 100, 200, 250, 275, 300,350] f, ax = plt.subplots() data.plot.hist(color="purple",bins=[0, 100, 200, 250, 275, 300,350]) ax.set(title="Distribution of Raster Cell Values Data", xlabel="", ylabel="Number of Pixels") plt.show() class_bins = [-np.inf,250,275,300,350,+np.inf] import xarray as xr data_class = xr.apply_ufunc(np.digitize, data, class_bins) print(data_class.shape) #data_class = np.array(data_class[0]) import matplotlib.pyplot as plt f, ax = plt.subplots() data_class.plot.hist(color="purple") ax.set(title="Distribution of Raster Cell Values Data", xlabel="", ylabel="Number of Pixels") plt.show() #https://www.spatialreference.org/ref/epsg/32634/ #/*EPSG:32634 #WGS 84 / UTM zone 34N (Google it) #WGS84 Bounds: 18.0000, 0.0000, 24.0000, 84.0000 #Projected Bounds: 166021.4431, 0.0000, 833978.5569, 9329005.1825 #Scope: Large and medium scale topographic mapping and 
engineering survey. #Last Revised: June 2, 1995 #Area: World - N hemisphere - 18ยฐE to 24ยฐE - by country*/ #Proj4js.defs["EPSG:32634"] = "+proj=utm +zone=34 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"; print(files[10]) band12= rxr.open_rasterio(files[10]) ep.plot_bands(band12, title="Bigearthnet Band 12 Raster") plt.show() print("The CRS of this data is:", band12.rio.crs) #Converting EPSG to Proj4 in Python # Convert to project string using earthpy proj4 = et.epsg['32634'] print(proj4) #Spatial Extent #You can access the spatial extent using the .bounds() attribute in rasterio. print(band12.rio.bounds()) #Raster Resolution: area covered by 1 pixel on ground e.g 60m * 60m # What is the x and y resolution for your raster data? print(band12.rio.resolution()) print("The nodatavalue of your data is:", band12.rio.nodata) # How many bands / layers does the object have? print("Number of bands", band12.rio.count) print("The shape of your data is:", band12.shape) print('min value:', np.nanmin(band12)) print('max value:', np.nanmax(band12)) import matplotlib.pyplot as plt f, ax = plt.subplots() band1.plot.hist(color="purple") ax.set(title="Distribution of Raster Cell Values Band 12 Data", xlabel="", ylabel="Number of Pixels") plt.show() ``` ## New Section : Making Dataframe for min/max values of each bands of 1,2 and 12 ``` import pandas as pd df = pd.DataFrame(columns= ['filename','min','max']) df.head() import glob import os files_batch=[] #batch of same bands min=[] max=[] mean_min =[] mean_max =[] path = '/content/drive/MyDrive/Big' os.chdir(path) dirs = os.listdir() dirs.sort() print(dirs) print(len(dirs)) #remove last element of list del dirs[0] print(dirs) print(len(dirs)) step_size = len(dirs) # Add batch of band1 tif files to files list path = '/content/drive/MyDrive/Big' for i in dirs: s = "" s = s + path + '/' + str(i) + '/' +'*01.tif' print(s) temp = (glob.glob(os.path.join(s))) files_batch.append(temp[0]) # Fetch Filenames of band 1 
print(files_batch,files_batch[0],len(files_batch)) #Batch of Band 1 files # Add min/max values of band 1 to min/max list for i in range(0,step_size): band1= rxr.open_rasterio(files_batch[i]) min.append(np.nanmin(band1)) max.append(np.nanmax(band1)) print(min) print(max) mean_min.append(np.mean(min)) mean_max.append(np.mean(max)) #df['B1_min'] = min #df['B1_max'] = max #print(df) # Add batch of band2 tif files to files list path = '/content/drive/MyDrive/Big' for i in dirs: s = "" s = s + path + '/' + str(i) + '/' +'*02.tif' print(s) temp = (glob.glob(os.path.join(s))) files_batch.append(temp[0]) print(files_batch) print(files_batch[len(files_batch)-1], len(files_batch)) # Add min/max values of band 2 to min/max list for i in range(step_size,2*step_size): band2= rxr.open_rasterio(files_batch[i]) min.append(np.nanmin(band2)) max.append(np.nanmax(band2)) print(min) print(max) mean_min.append(np.mean(min)) mean_max.append(np.mean(max)) # Add batch of band 12 tif files to files list path = '/content/drive/MyDrive/Big' for i in dirs: s = "" s = s + path + '/' + str(i) + '/' +'*12.tif' print(s) temp = (glob.glob(os.path.join(s))) files_batch.append(temp[0]) print(files_batch) print(files_batch[len(files_batch)-1], len(files_batch)) # Add min/max values of band 12 to min/max list for i in range(2*step_size,3*step_size): band2= rxr.open_rasterio(files_batch[i]) min.append(np.nanmin(band2)) max.append(np.nanmax(band2)) print(min) print(max) mean_min.append(np.mean(min)) mean_max.append(np.mean(max)) ``` # Add files and min/max lists to dataframe ``` print(files_batch) df['filename'] = files_batch df['min'] = min df['max'] = max df.head() #print means of min and max values for each band 1 2 and 12 print(mean_min) print(mean_max) # Plot histogram import matplotlib.pyplot as plt x=np.array(min) y=np.array(max) plt.bar(x,y,align='center') # A bar chart plt.xlabel('Min') plt.ylabel('Max') plt.show() # Plot histogram for mean min and mean max import matplotlib.pyplot as plt 
x=np.array(mean_min) y=np.array(mean_max) plt.bar(x,y,align='center') # A bar chart plt.xlabel('Mean_Min') plt.ylabel('Mean_Max') plt.show() ``` ### **USE RASTERIO module to open Raster images and read it to Array** ``` band1 = np.array(band1) band1.shape print(files) band2= rxr.open_rasterio(files[1]) band2 = np.array(band2) band2.shape band12 = np.array(band12) band12.shape print(df['filename']) files_bands = [] files_bands = df['filename'] print(files_bands[0:6]) # Reading raster geotif files #https://automating-gis-processes.github.io/CSC18/lessons/L6/reading-raster.html import rasterio band1_batch = files_bands[0:6] print(band1_batch[0]) band1_raster = rasterio.open(band1_batch[0]) print(type(band1_raster)) #Projection print(band1_raster.crs) #Affine transform (how raster is scaled, rotated, skewed, and/or translated band1_raster.transform band1_raster.meta #reading raster to array band1_array = band1_raster.read() print(band1_array) stats = [] for band in band1_array: stats.append({ 'mean' : band.mean(), 'min' : band.min(), 'max' : band.max(), 'median': np.median(band) }) print(stats) ``` # Read all Band1 files and find mean of all 6 Forest class Band1 data ``` print(df['filename']) files_bands = [] files_bands = df['filename'] print(files_bands[0:6]) # Reading raster geotif files using Rasterio #https://automating-gis-processes.github.io/CSC18/lessons/L6/reading-raster.html import rasterio band1_batch = files_bands[0:6] print(band1_batch[0]) band1_array=[] for i in band1_batch: band1_raster = rasterio.open(i) band1_array.append(band1_raster.read()) band1_mean=[] band1_min = [] band1_max = [] print(len(band1_array)) for i in band1_array: for band in i: band1_mean.append(band.mean()) band1_min.append(band.min()) band1_max.append(band.max()) print("Band 1 stat for 6 images is :------>") print(band1_mean) print(band1_min) print(band1_max) # Stat for band 2 images band2_batch = files_bands[6:12] print(band2_batch) band2_array=[] for i in band2_batch: band2_raster 
= rasterio.open(i) band2_array.append(band2_raster.read()) band2_mean=[] band2_min = [] band2_max = [] print(len(band2_array)) for i in band2_array: for band in i: band2_mean.append(band.mean()) band2_min.append(band.min()) band2_max.append(band.max()) print("Band 2 stat for 6 images is :------>") print(band2_mean) print(band2_min) print(band2_max) # Stat for band 12 images band12_batch = files_bands[12:18] print(band12_batch) band12_array=[] for i in band12_batch: band12_raster = rasterio.open(i) band12_array.append(band12_raster.read()) band12_mean=[] band12_min = [] band12_max = [] print(len(band12_array)) for i in band12_array: for band in i: band12_mean.append(band.mean()) band12_min.append(band.min()) band12_max.append(band.max()) print("Band 12 stat for 6 images is :------>") print(band12_mean) print(band12_min) print(band12_max) y=np.array(band1_mean) x=(1,2,3,4,5,6) plt.bar(x,y,align='center') plt.axis([0, 6, 100, 600]) plt.xlabel('Bands') plt.ylabel('Commulative Mean') plt.show() y=np.array(band2_mean) x=(1,2,3,4,5,6) plt.bar(x,y,align='center') plt.axis([0, 6, 100, 600]) plt.xlabel('Bands') plt.ylabel('Commulative Mean') plt.show() y=np.array(band12_mean) x=(1,2,3,4,5,6) plt.bar(x,y,align='center') plt.axis([0, 6, 100, 600]) plt.xlabel('Bands') plt.ylabel('Commulative Mean') plt.show() df = pd.DataFrame(columns = ['mean_band1','mean_band2','mean_band12']) df['mean_band1'] = np.array(band1_mean) df['mean_band2'] = np.array(band2_mean) df['mean_band12'] = np.array(band12_mean) df df.plot() ```
github_jupyter
# Rank Classification using BERT on Amazon Review dataset ## Introduction In this tutorial, you learn how to train a rank classification model using [Transfer Learning](https://en.wikipedia.org/wiki/Transfer_learning). We will use a pretrained DistilBert model to train on the Amazon review dataset. ## About the dataset and model [Amazon Customer Review dataset](https://s3.amazonaws.com/amazon-reviews-pds/readme.html) consists of all different valid reviews from amazon.com. We will use the "Digital_software" category that consists of 102k valid reviews. As for the pre-trained model, use the DistilBERT[[1]](https://arxiv.org/abs/1910.01108) model. It's a light-weight BERT model already trained on [Wikipedia text corpora](https://en.wikipedia.org/wiki/List_of_text_corpora), a much larger dataset consisting of over millions text. The DistilBERT served as a base layer and we will add some more classification layers to output as rankings (1 - 5). <img src="https://djl-ai.s3.amazonaws.com/resources/images/amazon_review.png" width="500"> <center>Amazon Review example</center> We will use review body as our data input and ranking as label. ## Pre-requisites This tutorial assumes you have the following knowledge. Follow the READMEs and tutorials if you are not familiar with: 1. How to setup and run [Java Kernel in Jupyter Notebook](https://github.com/awslabs/djl/blob/master/jupyter/README.md) 2. Basic components of Deep Java Library, and how to [train your first model](https://github.com/awslabs/djl/blob/master/jupyter/tutorial/02_train_your_first_model.ipynb). 
## Getting started Load the Deep Java Libarary and its dependencies from Maven: ``` %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/ %maven ai.djl:api:0.9.0-SNAPSHOT %maven ai.djl:basicdataset:0.9.0-SNAPSHOT %maven ai.djl.mxnet:mxnet-model-zoo:0.9.0-SNAPSHOT %maven org.slf4j:slf4j-api:1.7.26 %maven org.slf4j:slf4j-simple:1.7.26 %maven net.java.dev.jna:jna:5.3.0 // See https://github.com/awslabs/djl/blob/master/mxnet/mxnet-engine/README.md // for more MXNet library selection options %maven ai.djl.mxnet:mxnet-native-auto:1.7.0-backport ``` Now let's import the necessary modules: ``` import ai.djl.Application; import ai.djl.Device; import ai.djl.MalformedModelException; import ai.djl.Model; import ai.djl.basicdataset.CsvDataset; import ai.djl.basicdataset.utils.DynamicBuffer; import ai.djl.inference.Predictor; import ai.djl.metric.Metrics; import ai.djl.modality.Classifications; import ai.djl.modality.nlp.SimpleVocabulary; import ai.djl.modality.nlp.bert.BertFullTokenizer; import ai.djl.ndarray.NDArray; import ai.djl.ndarray.NDList; import ai.djl.ndarray.types.Shape; import ai.djl.nn.Activation; import ai.djl.nn.Block; import ai.djl.nn.SequentialBlock; import ai.djl.nn.core.Linear; import ai.djl.nn.norm.Dropout; import ai.djl.repository.zoo.*; import ai.djl.training.*; import ai.djl.training.dataset.Batch; import ai.djl.training.dataset.RandomAccessDataset; import ai.djl.training.evaluator.Accuracy; import ai.djl.training.listener.CheckpointsTrainingListener; import ai.djl.training.listener.TrainingListener; import ai.djl.training.loss.Loss; import ai.djl.training.util.ProgressBar; import ai.djl.translate.*; import java.io.IOException; import java.nio.file.Paths; import java.util.List; import org.apache.commons.csv.CSVFormat; ``` ## Prepare Dataset First step is to prepare the dataset for training. Since the original data was in TSV format, we can use CSVDataset to be the dataset container. 
We will also need to specify how do we want to preprocess the raw data. For BERT model, the input data are required to be tokenized and mapped into indices based on the inputs. In DJL, we defined an interface called Fearurizer, it is designed to allow user customize operation on each selected row/column of a dataset. In our case, we would like to clean and tokenize our sentencies. So let's try to implement it to deal with customer review sentencies. ``` final class BertFeaturizer implements CsvDataset.Featurizer { private final BertFullTokenizer tokenizer; private final int maxLength; // the cut-off length public BertFeaturizer(BertFullTokenizer tokenizer, int maxLength) { this.tokenizer = tokenizer; this.maxLength = maxLength; } /** {@inheritDoc} */ @Override public void featurize(DynamicBuffer buf, String input) { SimpleVocabulary vocab = tokenizer.getVocabulary(); // convert sentence to tokens List<String> tokens = tokenizer.tokenize(input); // trim the tokens to maxLength tokens = tokens.size() > maxLength ? tokens.subList(0, maxLength) : tokens; // BERT embedding convention "[CLS] Your Sentence [SEP]" buf.put(vocab.getIndex("[CLS]")); tokens.forEach(token -> buf.put(vocab.getIndex(token))); buf.put(vocab.getIndex("[SEP]")); } } ``` Once we got this part done, we can apply the `BertFeaturizer` into our Dataset. We take `review_body` column and apply the Featurizer. We also pick `star_rating` as our label set. Since we go for batch input, we need to tell the dataset to pad our data if it is less than the `maxLength` we defined. `PaddingStackBatchifier` will do the work for you. 
``` CsvDataset getDataset(int batchSize, BertFullTokenizer tokenizer, int maxLength) { String amazonReview = "https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Digital_Software_v1_00.tsv.gz"; float paddingToken = tokenizer.getVocabulary().getIndex("[PAD]"); return CsvDataset.builder() .optCsvUrl(amazonReview) // load from Url .setCsvFormat(CSVFormat.TDF.withQuote(null).withHeader()) // Setting TSV loading format .setSampling(batchSize, true) // make sample size and random access .addFeature( new CsvDataset.Feature( "review_body", new BertFeaturizer(tokenizer, maxLength))) .addNumericLabel("star_rating") // set label .optDataBatchifier( PaddingStackBatchifier.builder() .optIncludeValidLengths(false) .addPad(0, 0, (m) -> m.ones(new Shape(1)).mul(paddingToken)) .build()) // define how to pad dataset to a fix length .build(); } ``` ## Construct your model We will load our pretrained model and prepare the classification. First construct the `criteria` to specify where to load the embedding (DistiledBERT), then call `loadModel` to download that embedding with pre-trained weights. Since this model is built without classification layer, we need to add a classification layer to the end of the model and train it. After you are done modifying the block, set it back to model using `setBlock`. ### Load the word embedding We will download our word embedding and load it to memory (this may take a while) ``` Criteria<NDList, NDList> criteria = Criteria.builder() .optApplication(Application.NLP.WORD_EMBEDDING) .setTypes(NDList.class, NDList.class) .optModelUrls("https://resources.djl.ai/test-models/distilbert.zip") .optProgress(new ProgressBar()) .build(); ZooModel<NDList, NDList> embedding = ModelZoo.loadModel(criteria); ``` ### Create classification layers Then let's build a simple MLP layer to classify the ranks. We set the output of last FullyConnected (Linear) layer to 5 to get the predictions for star 1 to 5. 
Then all we need to do is to load the block into the model. Before applying the classification layer, we also need to add text embedding to the front. In our case, we just create a Lambda function that do the followings: 1. batch_data (batch size, token indices) -> batch_data + max_length (size of the token indices) 2. generate embedding ``` Predictor<NDList, NDList> embedder = embedding.newPredictor(); Block classifier = new SequentialBlock() // text embedding layer .add( ndList -> { NDArray data = ndList.singletonOrThrow(); long batchSize = data.getShape().get(0); float maxLength = data.getShape().get(1); try { return embedder.predict( new NDList(data, data.getManager() .full(new Shape(batchSize), maxLength))); } catch (TranslateException e) { throw new IllegalArgumentException("embedding error", e); } }) // classification layer .add(Linear.builder().setUnits(768).build()) // pre classifier .add(Activation::relu) .add(Dropout.builder().optRate(0.2f).build()) .add(Linear.builder().setUnits(5).build()) // 5 star rating .addSingleton(nd -> nd.get(":,0")); // Take [CLS] as the head Model model = Model.newInstance("AmazonReviewRatingClassification"); model.setBlock(classifier); ``` ## Start Training Finally, we can start building our training pipeline to train the model. ### Creating Training and Testing dataset Firstly, we need to create a voabulary that is used to map token to index such as "hello" to 1121 (1121 is the index of "hello" in dictionary). Then we simply feed the vocabulary to the tokenizer that used to tokenize the sentence. Finally, we just need to split the dataset based on the ratio. Note: we set the cut-off length to 64 which means only the first 64 tokens from the review will be used. You can increase this value to achieve better accuracy. 
``` // Prepare the vocabulary SimpleVocabulary vocabulary = SimpleVocabulary.builder() .optMinFrequency(1) .addFromTextFile(embedding.getArtifact("vocab.txt").getPath()) .optUnknownToken("[UNK]") .build(); // Prepare dataset int maxTokenLength = 64; // cutoff tokens length int batchSize = 8; BertFullTokenizer tokenizer = new BertFullTokenizer(vocabulary, true); CsvDataset amazonReviewDataset = getDataset(batchSize, tokenizer, maxTokenLength); // split data with 7:3 train:valid ratio RandomAccessDataset[] datasets = amazonReviewDataset.randomSplit(7, 3); RandomAccessDataset trainingSet = datasets[0]; RandomAccessDataset validationSet = datasets[1]; ``` ### Setup Trainer and training config Then, we need to setup our trainer. We set up the accuracy and loss function. The model training logs will be saved to `build/modlel`. ``` CheckpointsTrainingListener listener = new CheckpointsTrainingListener("build/model"); listener.setSaveModelCallback( trainer -> { TrainingResult result = trainer.getTrainingResult(); Model model = trainer.getModel(); // track for accuracy and loss float accuracy = result.getValidateEvaluation("Accuracy"); model.setProperty("Accuracy", String.format("%.5f", accuracy)); model.setProperty("Loss", String.format("%.5f", result.getValidateLoss())); }); DefaultTrainingConfig config = new DefaultTrainingConfig(Loss.softmaxCrossEntropyLoss()) // loss type .addEvaluator(new Accuracy()) .optDevices(Device.getDevices(1)) // train using single GPU .addTrainingListeners(TrainingListener.Defaults.logging("build/model")) .addTrainingListeners(listener); ``` ### Start training We will start our training process. Training on GPU will takes approximately 10 mins. For CPU, it will take more than 2 hours to finish. 
``` int epoch = 2; Trainer trainer = model.newTrainer(config); trainer.setMetrics(new Metrics()); Shape encoderInputShape = new Shape(batchSize, maxTokenLength); // initialize trainer with proper input shape trainer.initialize(encoderInputShape); EasyTrain.fit(trainer, epoch, trainingSet, validationSet); System.out.println(trainer.getTrainingResult()); ``` ### Save the model ``` model.save(Paths.get("build/model"), "amazon-review.param"); ``` ## Verify the model We can create a predictor from the model to run inference on our customized dataset. Firstly, we can create a `Translator` for the model to do preprocessing and post processing. Similar to what we have done before, we need to tokenize the input sentence and get the output ranking. ``` class MyTranslator implements Translator<String, Classifications> { private BertFullTokenizer tokenizer; private SimpleVocabulary vocab; private List<String> ranks; public MyTranslator(BertFullTokenizer tokenizer) { this.tokenizer = tokenizer; vocab = tokenizer.getVocabulary(); ranks = Arrays.asList("1", "2", "3", "4", "5"); } @Override public Batchifier getBatchifier() { return new StackBatchifier(); } @Override public NDList processInput(TranslatorContext ctx, String input) { List<String> tokens = tokenizer.tokenize(input); float[] indices = new float[tokens.size() + 2]; indices[0] = vocab.getIndex("[CLS]"); for (int i = 0; i < tokens.size(); i++) { indices[i+1] = vocab.getIndex(tokens.get(i)); } indices[indices.length - 1] = vocab.getIndex("[SEP]"); return new NDList(ctx.getNDManager().create(indices)); } @Override public Classifications processOutput(TranslatorContext ctx, NDList list) { return new Classifications(ranks, list.singletonOrThrow().softmax(0)); } } ``` Finally, we can create a `Predictor` to run the inference. 
Let's try with a random customer review: ``` String review = "It works great, but it takes too long to update itself and slows the system"; Predictor<String, Classifications> predictor = model.newPredictor(new MyTranslator(tokenizer)); System.out.println(predictor.predict(review)); ```
github_jupyter
##### Copyright 2018 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` # How to build a simple text classifier with TF-Hub <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/docs/tutorials/text_classification_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/docs/tutorials/text_classification_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/hub/docs/tutorials/text_classification_with_tf_hub.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> TF-Hub is a platform to share machine learning expertise packaged in reusable resources, notably pre-trained **modules**. This tutorial is organized into two main parts. 
** *Introduction:* Training a text classifier with TF-Hub** We will use a TF-Hub text embedding module to train a simple sentiment classifier with a reasonable baseline accuracy. We will then analyze the predictions to make sure our model is reasonable and propose improvements to increase the accuracy. ** *Advanced:* Transfer learning analysis ** In this section, we will use various TF-Hub modules to compare their effect on the accuracy of the estimator and demonstrate advantages and pitfalls of transfer learning. ## Optional prerequisites * Basic understanding of Tensorflow [premade estimator framework](https://www.tensorflow.org/get_started/premade_estimators). * Familiarity with [Pandas](https://pandas.pydata.org/) library. ## Setup ``` # Install TF-Hub. !pip install seaborn ``` More detailed information about installing Tensorflow can be found at [https://www.tensorflow.org/install/](https://www.tensorflow.org/install/). ``` from absl import logging import tensorflow as tf import tensorflow_hub as hub import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import re import seaborn as sns ``` # Getting started ## Data We will try to solve the [Large Movie Review Dataset v1.0](http://ai.stanford.edu/~amaas/data/sentiment/) task [(Mass et al., 2011)](http://ai.stanford.edu/~amaas/papers/wvSent_acl2011.pdf). The dataset consists of IMDB movie reviews labeled by positivity from 1 to 10. The task is to label the reviews as **negative** or **positive**. ``` # Load all files from a directory in a DataFrame. def load_directory_data(directory): data = {} data["sentence"] = [] data["sentiment"] = [] for file_path in os.listdir(directory): with tf.io.gfile.GFile(os.path.join(directory, file_path), "r") as f: data["sentence"].append(f.read()) data["sentiment"].append(re.match("\d+_(\d+)\.txt", file_path).group(1)) return pd.DataFrame.from_dict(data) # Merge positive and negative examples, add a polarity column and shuffle. 
# Merge positive and negative examples, add a polarity column and shuffle.
def load_dataset(directory):
    """Load pos/neg reviews under `directory`, label them, and shuffle rows."""
    frames = []
    for sub_dir, polarity in (("pos", 1), ("neg", 0)):
        frame = load_directory_data(os.path.join(directory, sub_dir))
        frame["polarity"] = polarity
        frames.append(frame)
    return pd.concat(frames).sample(frac=1).reset_index(drop=True)


# Download and process the dataset files.
def download_and_load_datasets(force_download=False):
    """Fetch the cached aclImdb archive and return (train_df, test_df).

    Note: `force_download` is accepted for interface compatibility but is
    not used; tf.keras.utils.get_file handles caching itself.
    """
    dataset = tf.keras.utils.get_file(
        fname="aclImdb.tar.gz",
        origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
        extract=True)
    base_dir = os.path.join(os.path.dirname(dataset), "aclImdb")
    return (load_dataset(os.path.join(base_dir, "train")),
            load_dataset(os.path.join(base_dir, "test")))


# Reduce logging output.
logging.set_verbosity(logging.ERROR)

train_df, test_df = download_and_load_datasets()
train_df.head()

# Training input on the whole training set with no limit on training epochs.
train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    train_df, train_df["polarity"], num_epochs=None, shuffle=True)

# Prediction on the whole training set.
predict_train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    train_df, train_df["polarity"], shuffle=False)

# Prediction on the test set.
predict_test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    test_df, test_df["polarity"], shuffle=False)
# TF-Hub feature column: embeds the raw "sentence" strings with nnlm-en-dim128.
embedded_text_feature_column = hub.text_embedding_column(
    key="sentence",
    module_spec="https://tfhub.dev/google/nnlm-en-dim128/1")

# DNN classifier on top of the sentence embeddings.
# FIX: `learning_rate` replaces the deprecated `lr` keyword (removed from
# tf.keras optimizers). This also matches the optimizer construction in
# train_and_evaluate_with_module later in this notebook.
estimator = tf.estimator.DNNClassifier(
    hidden_units=[500, 100],
    feature_columns=[embedded_text_feature_column],
    n_classes=2,
    optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003))

# Training for 5,000 steps means 640,000 training examples with the default
# batch size. This is roughly equivalent to 25 epochs since the training
# dataset contains 25,000 examples.
estimator.train(input_fn=train_input_fn, steps=5000)

# Run predictions for both training and test set.
train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn)
test_eval_result = estimator.evaluate(input_fn=predict_test_input_fn)

print("Training set accuracy: {accuracy}".format(**train_eval_result))
print("Test set accuracy: {accuracy}".format(**test_eval_result))


def get_predictions(estimator, input_fn):
    """Return the predicted class id for every example yielded by `input_fn`."""
    return [x["class_ids"][0] for x in estimator.predict(input_fn=input_fn)]


LABELS = [
    "negative", "positive"
]

# Create a confusion matrix on training data.
cm = tf.math.confusion_matrix(train_df["polarity"], get_predictions(estimator, predict_train_input_fn)) # Normalize the confusion matrix so that each row sums to 1. cm = tf.cast(cm, dtype=tf.float32) cm = cm / tf.math.reduce_sum(cm, axis=1)[:, np.newaxis] sns.heatmap(cm, annot=True, xticklabels=LABELS, yticklabels=LABELS); plt.xlabel("Predicted"); plt.ylabel("True"); ``` # Further improvements 1. **Regression on sentiment**: we used a classifier to assign each example into a polarity class. But we actually have another categorical feature at our disposal - sentiment. Here classes actually represent a scale and the underlying value (positive/negative) could be well mapped into a continuous range. We could make use of this property by computing a regression ([DNN Regressor](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNRegressor)) instead of a classification ([DNN Classifier](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/DNNClassifier)). 2. **Larger module**: for the purposes of this tutorial we used a small module to restrict the memory use. There are modules with larger vocabularies and larger embedding space that could give additional accuracy points. 3. **Parameter tuning**: we can improve the accuracy by tuning the meta-parameters like the learning rate or the number of steps, especially if we use a different module. A validation set is very important if we want to get any reasonable results, because it is very easy to set-up a model that learns to predict the training data without generalizing well to the test set. 4. **More complex model**: we used a module that computes a sentence embedding by embedding each individual word and then combining them with average. One could also use a sequential module (e.g. [Universal Sentence Encoder](https://tfhub.dev/google/universal-sentence-encoder/2) module) to better capture the nature of sentences. Or an ensemble of two or more TF-Hub modules. 5. 
**Regularization**: to prevent overfitting, we could try to use an optimizer that does some sort of regularization, for example [Proximal Adagrad Optimizer](https://www.tensorflow.org/api_docs/python/tf/train/ProximalAdagradOptimizer). # Advanced: Transfer learning analysis Transfer learning makes it possible to **save training resources** and to achieve good model generalization even when **training on a small dataset**. In this part, we will demonstrate this by training with two different TF-Hub modules: * **[nnlm-en-dim128](https://tfhub.dev/google/nnlm-en-dim128/1)** - pretrained text embedding module, * **[random-nnlm-en-dim128](https://tfhub.dev/google/random-nnlm-en-dim128/1)** - text embedding module that has same vocabulary and network as **nnlm-en-dim128**, but the weights were just randomly initialized and never trained on real data. And by training in two modes: * training **only the classifier** (i.e. freezing the module), and * training the **classifier together with the module**. Let's run a couple of trainings and evaluations to see how using a various modules can affect the accuracy. 
def train_and_evaluate_with_module(hub_module, train_module=False):
    """Train a DNNClassifier on top of `hub_module` and report accuracies.

    Args:
        hub_module: URL/path of the TF-Hub text embedding module.
        train_module: If True, the module weights are fine-tuned together
            with the classifier; otherwise the module stays frozen.

    Returns:
        dict with "Training accuracy" and "Test accuracy" entries.
    """
    text_column = hub.text_embedding_column(
        key="sentence", module_spec=hub_module, trainable=train_module)

    classifier = tf.estimator.DNNClassifier(
        hidden_units=[500, 100],
        feature_columns=[text_column],
        n_classes=2,
        optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.003))
    classifier.train(input_fn=train_input_fn, steps=1000)

    train_acc = classifier.evaluate(input_fn=predict_train_input_fn)["accuracy"]
    test_acc = classifier.evaluate(input_fn=predict_test_input_fn)["accuracy"]
    return {
        "Training accuracy": train_acc,
        "Test accuracy": test_acc
    }


results = {}
results["nnlm-en-dim128"] = train_and_evaluate_with_module(
    "https://tfhub.dev/google/nnlm-en-dim128/1")
results["nnlm-en-dim128-with-module-training"] = train_and_evaluate_with_module(
    "https://tfhub.dev/google/nnlm-en-dim128/1", True)
results["random-nnlm-en-dim128"] = train_and_evaluate_with_module(
    "https://tfhub.dev/google/random-nnlm-en-dim128/1")
results["random-nnlm-en-dim128-with-module-training"] = train_and_evaluate_with_module(
    "https://tfhub.dev/google/random-nnlm-en-dim128/1", True)

pd.DataFrame.from_dict(results, orient="index")

# Baseline accuracy of the test set: the lower bound achievable by always
# outputting the most represented class.
estimator.evaluate(input_fn=predict_test_input_fn)["accuracy_baseline"]
The reason is that even if every word in the dictionary is mapped to a random vector, the estimator can separate the space purely using its fully connected layers. 2. Allowing training of the module with **random embeddings** increases both training and test accuracy as opposed to training just the classifier. 3. Training of the module with **pre-trained embeddings** also increases both accuracies. Note, however, the overfitting on the training set. Training a pre-trained module can be dangerous even with regularization, in the sense that the embedding weights no longer represent the language model trained on diverse data; instead, they converge to the ideal representation of the new dataset.
github_jupyter
# Graph Neural Network (GCN)-based Synthetic Binding Logic Classification with Graph-SafeML

The existing example of GCN-based Synthetic Binding Logic Classification from the Google Research team is used to test the idea of SafeML for graph-based classifiers. You can find the source code [here](https://github.com/google-research/graph-attribution) and the related paper for the code is available [here](https://papers.nips.cc/paper/2020/file/417fbbf2e9d5a28a855a11894b2e795a-Paper.pdf) [[1]](https://papers.nips.cc/paper/2020/file/417fbbf2e9d5a28a855a11894b2e795a-Paper.pdf). Regarding the graph-based distance measure, the theory of "Graph distance for complex networks" provided by Yutaka Shimada et al. is used [[2]](https://www.nature.com/articles/srep34944). The code related to this paper is available [here](https://github.com/msarrias/graph-distance-for-complex-networks). You can read more about the idea of SafeML in [[3]](https://github.com/ISorokos/SafeML). To read more about "Synthetic Binding Logic Classification" and the related dataset that is used in this notebook, please check [[4]](https://www.pnas.org/content/pnas/116/24/11624.full.pdf).

![SafeML logo from: https://github.com/ISorokos/SafeML](https://miro.medium.com/max/700/1*H0lN2Q9lmSRgfaGj9VqqGA.png)

The SafeML project takes place at the University of Hull in collaboration with Fraunhofer IESE and the Nuremberg Institute of Technology.

## Table of Contents
* [Initialization and Installations](#init)
* [Importing Required Libraries](#lib)
* [Graph Attribution Specific Imports](#glib)
* [Load Experiment Data, Task and Attribution Techniques](#load)
* [Creating a GNN Model](#model)
* [Graph Vizualization](#gviz)
* [Graph Distance Measures and SafeML Idea](#SafeML)
* [Discussion](#dis)

### References:
[[1]. Wiltschko, A. B., Sanchez-Lengeling, B., Lee, B., Reif, E., Wei, J., McCloskey, K. J., & Wang, Y. (2020).
Evaluating Attribution for Graph Neural Networks.](https://papers.nips.cc/paper/2020/file/417fbbf2e9d5a28a855a11894b2e795a-Paper.pdf) [[2]. Shimada, Y., Hirata, Y., Ikeguchi, T., & Aihara, K. (2016). Graph distance for complex networks. Scientific reports, 6(1), 1-6.](https://www.nature.com/articles/srep34944) [[3]. Aslansefat, K., Sorokos, I., Whiting, D., Kolagari, R. T., & Papadopoulos, Y. (2020, September). SafeML: Safety Monitoring of Machine Learning Classifiers Through Statistical Difference Measures. In International Symposium on Model-Based Safety and Assessment (pp. 197-211). Springer, Cham.](https://arxiv.org/pdf/2005.13166.pdf) [[4]. McCloskey, K., Taly, A., Monti, F., Brenner, M. P., & Colwell, L. J. (2019). Using attribution to decode binding mechanism in neural network models for chemistry. Proceedings of the National Academy of Sciences, 116(24), 11624-11629.](https://www.pnas.org/content/pnas/116/24/11624.full.pdf) <a id = "init"></a> ## Initialization and Installations ``` import warnings warnings.filterwarnings('ignore') %load_ext autoreload %autoreload 2 import sys sys.path.append('..') import sys IN_COLAB = 'google.colab' in sys.modules REPO_DIR = '..' if IN_COLAB else '..' 
!git clone https://github.com/google-research/graph-attribution.git --quiet import sys sys.path.insert(1, '/kaggle/working/graph-attribution') !pip install tensorflow tensorflow-probability -q !pip install dm-sonnet -q !pip install graph_nets "tensorflow>=2.1.0-rc1" "dm-sonnet>=2.0.0b0" tensorflow_probability !pip install git+https://github.com/google-research/graph-attribution -quiet !pip install git+https://github.com/google-research/graph-attribution ``` <a id = "lib"></a> ## Importing Required Libraries ``` import os import itertools import collections import tqdm.auto as tqdm from IPython.display import display import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import tensorflow as tf import sonnet as snt import graph_nets from graph_nets.graphs import GraphsTuple import graph_attribution as gatt from tqdm import tqdm import time import networkx as nx # Ignore tf/graph_nets UserWarning: # Converting sparse IndexedSlices to a dense Tensor of unknown shape import warnings warnings.simplefilter("ignore", UserWarning) for mod in [tf, snt, gatt]: print(f'{mod.__name__:20s} = {mod.__version__}') ``` <a id = "glib"></a> ## Graph Attribution specific imports ``` from graph_attribution import tasks from graph_attribution import graphnet_models as gnn_models from graph_attribution import graphnet_techniques as techniques from graph_attribution import datasets from graph_attribution import experiments from graph_attribution import templates from graph_attribution import graphs as graph_utils #datasets.DATA_DIR = os.path.join(REPO_DIR, 'data') #print(f'Reading data from: {datasets.DATA_DIR}') datasets.DATA_DIR = './graph-attribution/data' ``` <a id = "load"></a> # Load Experiment Data, Task and Attribution Techniques ``` print(f'Available tasks: {[t.name for t in tasks.Task]}') print(f'Available model types: {[m.name for m in gnn_models.BlockType]}') print(f'Available ATT techniques: 
{list(techniques.get_techniques_dict(None,None).keys())}') task_type = 'logic7' block_type = 'gcn' #task_dir = datasets.get_task_dir(task_type) task_dir = './graph-attribution/data/logic7' exp, task, methods = experiments.get_experiment_setup(task_type, block_type) task_act, task_loss = task.get_nn_activation_fn(), task.get_nn_loss_fn() graph_utils.print_graphs_tuple(exp.x_train) print(f'Experiment data fields:{list(exp.__dict__.keys())}') ``` <a id = "model"></a> ## Creating a GNN Model ### Defining Hyperparams of the Experiment ``` hp = gatt.hparams.get_hparams({'block_type':block_type, 'task_type':task_type}) hp ``` ### Instantiate model ``` model = experiments.GNN(node_size = hp.node_size, edge_size = hp.edge_size, global_size = hp.global_size, y_output_size = task.n_outputs, block_type = gnn_models.BlockType(hp.block_type), activation = task_act, target_type = task.target_type, n_layers = hp.n_layers) model(exp.x_train) gnn_models.print_model(model) ``` <a id ="train"></a> ## Training the GNN Model ``` optimizer = snt.optimizers.Adam(hp.learning_rate) opt_one_epoch = gatt.training.make_tf_opt_epoch_fn(exp.x_train, exp.y_train, hp.batch_size, model, optimizer, task_loss) pbar = tqdm(range(hp.epochs)) losses = collections.defaultdict(list) start_time = time.time() for _ in pbar: train_loss = opt_one_epoch(exp.x_train, exp.y_train).numpy() losses['train'].append(train_loss) losses['test'].append(task_loss(exp.y_test, model(exp.x_test)).numpy()) #pbar.set_postfix({key: values[-1] for key, values in losses.items()}) losses = {key: np.array(values) for key, values in losses.items()} # Plot losses for key, values in losses.items(): plt.plot(values, label=key) plt.ylabel('loss') plt.xlabel('epochs') plt.legend() plt.show() y_pred = model(exp.x_test).numpy() y_pred[y_pred > 0.5] = 1 y_pred[y_pred <= 0.5] = 0 #y_pred from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report 
print(accuracy_score(exp.y_test, y_pred))
print(confusion_matrix(exp.y_test, y_pred))
print(classification_report(exp.y_test, y_pred))

# Evaluate predictions and attributions with every attribution technique.
results = []
for method in tqdm(methods.values(), total=len(methods)):
    results.append(experiments.generate_result(
        model, method, task, exp.x_test, exp.y_test, exp.att_test))
pd.DataFrame(results)

# Source: https://notebook.community/deepmind/graph_nets/graph_nets/demos/graph_nets_basics
graphs_nx = graph_nets.utils_np.graphs_tuple_to_networkxs(exp.x_test)


def nx_g_plotter(graphs_nx, ColNum=8, node_clr='#ff8080'):
    """Draw the first `ColNum` graphs side by side in a single figure row."""
    _, axs = plt.subplots(ncols=ColNum, nrows=1, figsize=(30, 5))
    # zip() stops at the shorter of (graphs, axes), so fewer graphs than
    # columns simply leaves trailing axes empty.
    for iax, (graph_nx2, ax) in enumerate(zip(graphs_nx, axs)):
        nx.draw(graph_nx2, ax=ax, node_color=node_clr)
        ax.set_title("Graph {}".format(iax))


# Split the test graphs by their true label.
graphs_nx_1 = []
graphs_nx_0 = []
for ii, g_net_ii in enumerate(graphs_nx):
    if exp.y_test[ii] == 1:
        graphs_nx_1.append(g_net_ii)
    else:
        graphs_nx_0.append(g_net_ii)

nx_g_plotter(graphs_nx_1, ColNum=8, node_clr='#ff8080')
nx_g_plotter(graphs_nx_0, ColNum=8, node_clr='#00bfff')

# FIX: a stray expression `y_wrong1[1] - y_wrong1[0]` appeared here, before
# y_wrong1 is defined below; it raised a NameError on a fresh linear run and
# has been removed.

# Split test graphs into wrong/correct predictions per true class, keeping
# the raw model scores alongside each group.
graphs_nx_wrong0 = []
graphs_nx_wrong1 = []
graphs_nx_correct0 = []
graphs_nx_correct1 = []
y_pred2 = model(exp.x_test).numpy()
y_wrong0 = []
y_wrong1 = []
y_correct0 = []
y_correct1 = []
for ii, g_net_ii in enumerate(graphs_nx):
    if exp.y_test[ii] != y_pred[ii] and exp.y_test[ii] == 0:
        graphs_nx_wrong0.append(g_net_ii)
        y_wrong0.append(y_pred2[ii])
    elif exp.y_test[ii] != y_pred[ii] and exp.y_test[ii] == 1:
        graphs_nx_wrong1.append(g_net_ii)
        y_wrong1.append(y_pred2[ii])
    elif exp.y_test[ii] == y_pred[ii] and exp.y_test[ii] == 0:
        graphs_nx_correct0.append(g_net_ii)
        y_correct0.append(y_pred2[ii])
    elif exp.y_test[ii] == y_pred[ii] and exp.y_test[ii] == 1:
        graphs_nx_correct1.append(g_net_ii)
        y_correct1.append(y_pred2[ii])

print(len(graphs_nx_wrong0), len(graphs_nx_wrong1),
      len(graphs_nx_correct0), len(graphs_nx_correct1))
def Wasserstein_Dist(cdfX, cdfY):
    """First-order Wasserstein-style distance between two discretised CDFs.

    Integrates |cdfX - cdfY| using the increments of cdfX as the measure.
    cdfX and cdfY must have equal length (values on a common grid).
    """
    power = 1
    n = len(cdfX)
    Res = 0
    # n grid points define n - 1 consecutive intervals; the original loop
    # used range(0, n - 2) and silently dropped the final interval
    # (off-by-one).
    for ii in range(n - 1):
        height = abs(cdfX[ii] - cdfY[ii])
        width = cdfX[ii + 1] - cdfX[ii]
        Res += (height ** power) * width
    return Res


def r_eigenv(G_i, G_j, r=4):
    """Spectral distance between two graphs via the r-th Laplacian eigenvector.

    Follows Shimada et al. (2016): both signs of each eigenvector are tried,
    because eigenvectors are only defined up to sign.

    Args:
        G_i, G_j: networkx graphs.
        r: index of the sorted Laplacian eigen-pair to compare (generalised
           from the previously hard-coded value 4; default preserves the old
           behaviour).

    Returns:
        (max cdf-distance over sign choices, max Wasserstein distance).
    """
    # Eigen-decomposition of G_i (Laplacian L = D - A).
    A_Gi = (nx.adjacency_matrix(G_i)).todense()
    D_i = np.diag(np.asarray(sum(A_Gi))[0])
    eigenvalues_Gi, eigenvectors_Gi = la.eig(D_i - A_Gi)
    r_eigenv_Gi = sorted(zip(eigenvalues_Gi.real, eigenvectors_Gi.T),
                         key=lambda x: x[0])

    # Eigen-decomposition of G_j.
    A_Gj = (nx.adjacency_matrix(G_j)).todense()
    D_j = np.diag(np.asarray(sum(A_Gj))[0])
    eigenvalues_Gj, eigenvectors_Gj = la.eig(D_j - A_Gj)
    r_eigenv_Gj = sorted(zip(eigenvalues_Gj.real, eigenvectors_Gj.T),
                         key=lambda x: x[0])

    signs = [-1, 1]
    temp = []
    for sign_s in signs:
        for sign_l in signs:
            vri = sorted(f.normalize_eigenv(sign_s * r_eigenv_Gi[r][1]))
            vrj = sorted(f.normalize_eigenv(sign_l * r_eigenv_Gj[r][1]))
            cdf_dist = f.cdf_dist(vri, vrj)
            temp.append(cdf_dist)

    # Compute empirical CDFs on a fixed grid, for both eigenvector signs.
    step = 0.005
    x = np.arange(0, 1, step)
    cdf_grid_Gip = f.cdf(len(r_eigenv_Gi[r][1]), x,
                         f.normalize_eigenv(sorted(r_eigenv_Gi[r][1], key=lambda x: x)))
    cdf_grid_Gin = f.cdf(len(r_eigenv_Gi[r][1]), x,
                         f.normalize_eigenv(sorted(-r_eigenv_Gi[r][1], key=lambda x: x)))
    cdf_grid_Gjp = f.cdf(len(r_eigenv_Gj[r][1]), x,
                         f.normalize_eigenv(sorted(r_eigenv_Gj[r][1], key=lambda x: x)))
    cdf_grid_Gjn = f.cdf(len(r_eigenv_Gj[r][1]), x,
                         f.normalize_eigenv(sorted(-r_eigenv_Gj[r][1], key=lambda x: x)))

    WD = [Wasserstein_Dist(cdf_grid_Gip, cdf_grid_Gjp),
          Wasserstein_Dist(cdf_grid_Gip, cdf_grid_Gjn),
          Wasserstein_Dist(cdf_grid_Gin, cdf_grid_Gjp),
          Wasserstein_Dist(cdf_grid_Gin, cdf_grid_Gjn)]
    return max(temp), max(WD)
np.zeros((len(graphs_nx_wrong1),len(graphs_nx_train_1))) for ii, g_net_ii in enumerate(graphs_nx_wrong1): for jj, g_net_jj in enumerate(graphs_nx_train_1): distt_wrong1_train1[ii,jj], WDist_wrong1_train1[ii,jj] = r_eigenv(g_net_ii, g_net_jj) distt_wrong1_train0 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_train_0))) WDist_wrong1_train0 = np.zeros((len(graphs_nx_wrong1),len(graphs_nx_train_0))) for ii, g_net_ii in enumerate(graphs_nx_wrong1): for jj, g_net_jj in enumerate(graphs_nx_train_0): distt_wrong1_train0[ii,jj], WDist_wrong1_train0[ii,jj] = r_eigenv(g_net_ii, g_net_jj) #ax = sns.displot(distt_wrong1_train1.flatten()) ax2 = sns.displot(WDist_wrong1_correct1.flatten(), kind = 'kde') ax2 = sns.displot(WDist_wrong1_train1.flatten(), kind = 'kde') ax2 = sns.displot(WDist_wrong1_train0.flatten(), kind = 'kde') distt_wrong0_correct0 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_correct0))) WDist_wrong0_correct0 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_correct0))) for ii, g_net_ii in enumerate(graphs_nx_wrong0): for jj, g_net_jj in enumerate(graphs_nx_correct0): distt_wrong0_correct0[ii,jj], WDist_wrong0_correct0[ii,jj] = r_eigenv(g_net_ii, g_net_jj) distt_wrong0_train0 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_train_0))) WDist_wrong0_train0 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_train_0))) for ii, g_net_ii in enumerate(graphs_nx_wrong0): for jj, g_net_jj in enumerate(graphs_nx_train_0): distt_wrong0_train0[ii,jj], WDist_wrong0_train0[ii,jj] = r_eigenv(g_net_ii, g_net_jj) distt_wrong0_train1 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_train_1))) WDist_wrong0_train1 = np.zeros((len(graphs_nx_wrong0),len(graphs_nx_train_1))) for ii, g_net_ii in enumerate(graphs_nx_wrong0): for jj, g_net_jj in enumerate(graphs_nx_train_1): distt_wrong0_train1[ii,jj], WDist_wrong0_train1[ii,jj] = r_eigenv(g_net_ii, g_net_jj) ax2 = sns.displot(WDist_wrong0_correct0.flatten(), kind = 'kde') ax2 = sns.displot(WDist_wrong0_train0.flatten(), kind = 'kde') ax2 = 
def Wasserstein_Dist(XX, YY):
    """Empirical first-order Wasserstein (earth-mover's) distance.

    Builds the pooled empirical CDFs of the two samples and integrates
    |F(x) - E(x)| over the pooled support.
    """
    import numpy as np
    nx = len(XX)
    ny = len(YY)
    n = nx + ny
    XY = np.concatenate([XX, YY])
    # Per-point CDF increments: 1/nx for XX's points, 1/ny for YY's.
    X2 = np.concatenate([np.repeat(1 / nx, nx), np.repeat(0, ny)])
    Y2 = np.concatenate([np.repeat(0, nx), np.repeat(1 / ny, ny)])
    S_Ind = np.argsort(XY)
    XY_Sorted = XY[S_Ind]
    X2_Sorted = X2[S_Ind]
    Y2_Sorted = Y2[S_Ind]
    Res = 0
    E_CDF = 0
    F_CDF = 0
    power = 1
    # n pooled points define n - 1 gaps; the original range(0, n - 2)
    # dropped the last gap (off-by-one) and under-estimated the distance.
    for ii in range(n - 1):
        E_CDF += X2_Sorted[ii]
        F_CDF += Y2_Sorted[ii]
        height = abs(F_CDF - E_CDF)
        width = XY_Sorted[ii + 1] - XY_Sorted[ii]
        Res += (height ** power) * width
    return Res


def Wasserstein_Dist_PVal(XX, YY):
    """Permutation-style p-value for the distance between samples XX and YY.

    Background on resampling:
    https://towardsdatascience.com/an-introduction-to-the-bootstrap-method-58bcb51b4d60

    Returns:
        (pVal, WD): fraction of resampled distances exceeding the observed
        distance, and the observed distance itself.
    """
    import random
    nboots = 1000
    WD = Wasserstein_Dist(XX, YY)
    na = len(XX)
    nb = len(YY)
    n = na + nb
    comb = np.concatenate([XX, YY])
    bigger = 0
    # range(nboots): the original range(1, nboots) ran only nboots - 1
    # replicates while still dividing by nboots.
    for _ in range(nboots):
        e = random.sample(range(n), na)
        f = random.sample(range(n), nb)
        boost_WD = Wasserstein_Dist(comb[e], comb[f])
        if boost_WD > WD:
            bigger += 1
    pVal = bigger / nboots
    return pVal, WD
github_jupyter
[@LorenaABarba](https://twitter.com/LorenaABarba) 12 steps to Navierโ€“Stokes ===== *** For a moment, recall the Navierโ€“Stokes equations for an incompressible fluid, where $\vec{v}$ represents the velocity field: $$ \begin{eqnarray*} \nabla \cdot\vec{v} &=& 0 \\ \frac{\partial \vec{v}}{\partial t}+(\vec{v}\cdot\nabla)\vec{v} &=& -\frac{1}{\rho}\nabla p + \nu \nabla^2\vec{v} \end{eqnarray*} $$ The first equation represents mass conservation at constant density. The second equation is the conservation of momentum. But a problem appears: the continuity equation for incompressble flow does not have a dominant variable and there is no obvious way to couple the velocity and the pressure. In the case of compressible flow, in contrast, mass continuity would provide an evolution equation for the density $\rho$, which is coupled with an equation of state relating $\rho$ and $p$. In incompressible flow, the continuity equation $\nabla \cdot\vec{v}=0$ provides a *kinematic constraint* that requires the pressure field to evolve so that the rate of expansion $\nabla \cdot\vec{v}$ should vanish everywhere. A way out of this difficulty is to *construct* a pressure field that guarantees continuity is satisfied; such a relation can be obtained by taking the divergence of the momentum equation. In that process, a Poisson equation for the pressure shows up! Step 10: 2D Poisson Equation ---- *** Poisson's equation is obtained from adding a source term to the right-hand-side of Laplace's equation: $$\frac{\partial ^2 p}{\partial x^2} + \frac{\partial ^2 p}{\partial y^2} = b$$ So, unlinke the Laplace equation, there is some finite value inside the field that affects the solution. Poisson's equation acts to "relax" the initial sources in the field. 
In discretized form, this looks almost the same as [Step 9](./12_Step_9.ipynb), except for the source term: $$\frac{p_{i+1,j}^{n}-2p_{i,j}^{n}+p_{i-1,j}^{n}}{\Delta x^2}+\frac{p_{i,j+1}^{n}-2 p_{i,j}^{n}+p_{i,j-1}^{n}}{\Delta y^2}=b_{i,j}^{n}$$ As before, we rearrange this so that we obtain an equation for $p$ at point $i,j$. Thus, we obtain: $$p_{i,j}^{n}=\frac{(p_{i+1,j}^{n}+p_{i-1,j}^{n})\Delta y^2+(p_{i,j+1}^{n}+p_{i,j-1}^{n})\Delta x^2-b_{i,j}^{n}\Delta x^2\Delta y^2}{2(\Delta x^2+\Delta y^2)}$$ We will solve this equation by assuming an initial state of $p=0$ everywhere, and applying boundary conditions as follows: $p=0$ at $x=0, \ 2$ and $y=0, \ 1$ and the source term consists of two initial spikes inside the domain, as follows: $b_{i,j}=100$ at $i=\frac{1}{4}nx, j=\frac{1}{4}ny$ $b_{i,j}=-100$ at $i=\frac{3}{4}nx, j=\frac{3}{4}ny$ $b_{i,j}=0$ everywhere else. The iterations will advance in pseudo-time to relax the initial spikes. The relaxation under Poisson's equation gets slower and slower as they progress. *Why?* Let's look at one possible way to write the code for Poisson's equation. As always, we load our favorite Python libraries. We also want to make some lovely plots in 3D. Let's get our parameters defined and the initialization out of the way. What do you notice of the approach below? ``` import numpy from matplotlib import pyplot, cm from mpl_toolkits.mplot3d import Axes3D %matplotlib inline # Parameters nx = 50 ny = 50 nt = 100 xmin = 0 xmax = 2 ymin = 0 ymax = 1 dx = (xmax - xmin) / (nx - 1) dy = (ymax - ymin) / (ny - 1) # Initialization p = numpy.zeros((ny, nx)) pd = numpy.zeros((ny, nx)) b = numpy.zeros((ny, nx)) x = numpy.linspace(xmin, xmax, nx) y = numpy.linspace(xmin, xmax, ny) # Source b[int(ny / 4), int(nx / 4)] = 100 b[int(3 * ny / 4), int(3 * nx / 4)] = -100 ``` With that, we are ready to advance the initial guess in pseudo-time. 
# Advance the initial guess in pseudo-time (Jacobi iteration for Poisson).
for it in range(nt):
    pd = p.copy()
    p[1:-1, 1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +
                      (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -
                      b[1:-1, 1:-1] * dx**2 * dy**2) /
                     (2 * (dx**2 + dy**2)))

    # Dirichlet boundary conditions: p = 0 on all four edges.
    p[0, :] = 0
    p[ny - 1, :] = 0
    p[:, 0] = 0
    p[:, nx - 1] = 0


def plot2D(x, y, p):
    """Plot the 2-D scalar field `p` over grid vectors `x`, `y` as a surface."""
    fig = pyplot.figure(figsize=(11, 7), dpi=100)
    # FIX: Figure.gca(projection='3d') was removed in Matplotlib 3.6;
    # add_subplot is the supported way to request 3-D axes.
    ax = fig.add_subplot(projection='3d')
    X, Y = numpy.meshgrid(x, y)
    surf = ax.plot_surface(X, Y, p[:], rstride=1, cstride=1, cmap=cm.viridis,
                           linewidth=0, antialiased=False)
    ax.view_init(30, 225)
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')


plot2D(x, y, p)

from IPython.display import YouTubeVideo
YouTubeVideo('ZjfxA3qq2Lg')

from IPython.core.display import HTML
def css_styling():
    styles = open("../styles/custom.css", "r").read()
    return HTML(styles)
css_styling()
github_jupyter
# Fibonacci Series Classifier *Author: Brianna Gopaul* The Fibonacci series is a sequence of numbers in which each term is the sum of the two preceding terms. For example, 1, 1, 2, 3 are numbers within the Fibonacci series because 1 + 1 = 2 and 2 + 1 = 3. Below we create a supervised model that classifies fibonacci sequences from non-fibonacci sequences in Strawberry Fields using the [Quantum Machine Learning Toolbox](https://github.com/XanaduAI/qmlt). ![](fibonacci.png) ## Supervised Model Tutorial ``` import tensorflow as tf import strawberryfields as sf from strawberryfields.ops import * from qmlt.tf.helpers import make_param from qmlt.tf import CircuitLearner ``` Here we define the number of iterations we want our model to run through. ``` steps = 100 ``` Now we create a circuit that contains trainable parameters. The line following it takes the shape of the input and runs the circuit. The tensorflow backend 'tf' is used and arguments eval, cutoff_dim and batch_size are defined. Different arguments will be required depending on the backend used. The fock backend can alternatively be used. The output of the circuit is measured using photon counting.
If we measure zero photons in the first mode and two photons in the second mode, this output is defined as p0 ``` def circuit(X): kappa = make_param('kappa', constant=0.9) theta = make_param('theta', constant=2.25) eng, q = sf.Engine(2) with eng: Dgate(X[:, 0], X[:, 1]) | q[0] BSgate(theta=theta) | (q[0], q[1]) Sgate(X[:, 0], X[:, 1]) | q[0] Sgate(X[:, 0], X[:, 1]) | q[1] BSgate(theta=theta) | (q[0], q[1]) Dgate(X[:, 0], X[:, 1]) | q[0] Kgate(kappa=kappa) | q[0] Kgate(kappa=kappa) | q[1] num_inputs = X.get_shape().as_list()[0] state = eng.run('tf', cutoff_dim=10, eval=False, batch_size=num_inputs) p0 = state.fock_prob([0, 2]) p1 = state.fock_prob([2, 0]) normalisation = p0 + p1 + 1e-10 circuit_output = p1/normalisation return circuit_output ``` In machine learning, the loss function tells us how much error there is between the correct value and the output value. Mean Squared Error (MSE) minimizes the summation of all errors squared. ``` def myloss(circuit_output, targets): return tf.losses.mean_squared_error(labels=circuit_output, predictions=targets) def outputs_to_predictions(circuit_output): return tf.round(circuit_output) #training and testing data X_train = [[0.1, 0.1, 0.2, 0.3],[0.3, 0.4, 0.5, 0.8], [0.3,0.6,0.9,0.13], [0.5, 0.8, 0.14, 0.21],[0.3, 0.5, 0.8, 0.13],[0.08, 0.13, 0.21, 0.34],[0.21, 0.36, 0.59, 0.99], [1, 1, 2, 3], [0.3, 0.5, 0.8, 0.13],[0.13, 0.21, 0.34, 0.55], [0.10, 0.777, 0.13434334, 0.88809], [0.1, 0.9, 0.13, 0.17],[0.43, 0.675, 0.2, 0.9], [0.98, 0.32, 0.1, 0.3], [0.15, 0.21, 0.34, 0.56], [0.1, 0.1, 0.2, 0.3], [0.1, 0.15, 0.3, 0.5],[0.1, 0.2, 0.4, 0.5],[0.3, 0.4, 0.5, 0.8],[0.3,0.6,0.9,0.13],[0.15, 0.15, 0.25, 0.35],[0.15, 0.25, 0.35, 0.45],[0.46, 0.29, 0.7, 0.57],[0.55,0.89,1.44,2.33],[0.233, 0.377, 0.61, 0.987], [0.987, 1.597, 2.584, 4.181],[0.6, 0.7, 0.13, 0.20],[0.233, 0.377, 0.61, 0.987],[0.0008, 0.013, 0.0021, 0.0034], [0.5, 0.6, 0.11, 0.17], [0.4, 0.5, 0.9, 0.13], [0.3, 0.5, 0.8, 0.18],[0.1, 0.1, 0.2, 0.6], [0.4, 0.5, 0.10, 0.15], 
[0.2, 0.3, 0.5, 0.10], [0.2, 0.3, 0.6, 0.43], [0.1, 0.3, 0.4, 0.2], [0.3, 0.5, 0.8, 0.787687], [0.5, 0.8, 1.3, 1], [0.08, 0.13, 0.21, 0.4]] Y_train = [1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] X_test = [[0.5, 0.8, 0.13, 0.21], [0.21, 0.34, 0.55, 0.89], [0.7, 0.1, 0.879, 0.444], [0.20, 0.56, 0.909, 0.11], [0.2, 0.4, 0.6, 0.99],[0.53, 0.66, 0.06, 0.31], [0.24, 0.79, 0.25, 0.69], [0.008, 0.013, 0.021, 0.034], [0.144, 0.233, 0.377, 0.61], [0.61, 0.987, 1.597, 2.584], [0.34, 0.55, 0.89, 1.44], [0.034, 0.055, 0.089, 0.144],[0.2, 0.3, 0.5, 0.8], [0.5, 0.8, 1.3, 2.1], [0.413, 0.875, 0.066, 0.63], [0.3, 0.5, 0.7, 0.9], [0.2, 0.5, 0.14, 0.12], [0.5, 0.6, 0.7, 0.8],[0.5, 0.6, 0.9, 0.7],[0.5, 0.2, 0.9, 0.7],[0.4, 0.6, 0.4, 0.3],[0.9, 0.6, 0.4, 0.9],[0.9, 0.1, 0.6, 0.9],[0.8, 0.8, 0.6, 0.5]] Y_test = [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] X_pred = [[0.233, 0.377, 0.61, 0.987], [0.55, 0.89, 1.44, 2.33], [0.0013, 0.0021, 0.0034, 0.0055], [0.5, 0.8, 1.3, 2.1], [0.89, 1.44, 2.33, 3.77], [0.03, 0.05, 0.3, 0.13], [0.40, 0.34, 0.55, 0.89], [0.2, 0.45, 0.5, 0.8], [0.08, 0.13, 0.30, 0.34], [0.13, 0.21, 0.34, 0.80]] ``` Hyperparameters that define the task, optimizer and various other parameters listed in the QMLT docs are defined below. A learner is then fed the hyperparameters and data. 
``` hyperparams= {'circuit': circuit, 'task': 'supervised', 'loss': myloss, 'optimizer': 'SGD', 'init_learning_rate': 0.1, 'print_log': True} learner = CircuitLearner(hyperparams=hyperparams) learner.train_circuit(X=X_train, Y=Y_train, steps=steps) test_score = learner.score_circuit(X=X_test, Y=Y_test,outputs_to_predictions=outputs_to_predictions) print("Accuracy on test set: ", test_score['accuracy']) outcomes = learner.run_circuit(X=X_pred, outputs_to_predictions=outputs_to_predictions) print("Predictions for new inputs: {}".format(outcomes['predictions'])) ``` ## Observations ### Small Dataset vs Large Dataset Here we fix the value of x_pred in each test and feed the model two different datasets in order to see the success rate of using each model. The difficulty of x_pred will vary depending on the model's success rate. ``` X_pred_level1 = [[0.08, 0.13, 0.21, 0.34], [0.2, 0.3, 0.5, 0.8],[0.01, 0.01, 0.02, 0.03],[0.008, 0.013, 0.021, 0.034], [0.3, 0.5, 0.8, 0.13], [0.55, 0.64, 0.77, 0.21], [0.62, 0.93, 0.38, 0.23],[0.9, 0.8, 0.7, 0.6], [0.4, 0.6, 0.78, 0.77],[0.44, 0.96, 0.28, 0.33]] X_pred_level2 = [[0.34, 0.55, 0.89, 1.44], [0.003, 0.005, 0.008, 0.013], [0.3, 0.5, 0.8, 1.3], [0.08, 0.13, 0.21, 0.34], [0.5, 0.8, 1.3, 2.1], [0.413, 0.875, 0.066, 0.63], [0.3, 0.5, 0.7, 0.4], [0.3, 0.8, 0.12, 0.2], [0.4, 0.5, 0.7, 0.7], [0.7, 0.0, 0.6, 0.5]] X_pred_level3 = [[0.233, 0.377, 0.61, 0.987], [0.55, 0.89, 1.44, 2.33], [0.0013, 0.0021, 0.0034, 0.0055], [0.5, 0.8, 1.3, 2.1], [0.89, 1.44, 2.33, 3.77], [0.03, 0.05, 0.3, 0.13], [0.40, 0.34, 0.55, 0.89], [0.2, 0.45, 0.5, 0.8], [0.08, 0.13, 0.30, 0.34], [0.13, 0.21, 0.34, 0.80]] ``` ### Sparse Dataset ``` X_train = [[0.1, 0.1, 0.2, 0.3],[0.5, 0.8, 0.14, 0.21],[0.3, 0.4, 0.5, 0.8], [0.3, 0.6, 0.9, 0.13]] Y_train = [1, 1, 0, 0] X_test = [[0.5, 0.8, 0.13, 0.21], [0.21, 0.34, 0.55, 0.89], [0.7, 0.1, 0.879, 0.444], [0.20, 0.56, 0.909, 0.11]] Y_test = [1, 1, 0, 0] ``` ### Large Dataset ``` X_train = [[0.1, 0.1, 0.2, 0.3],[0.3, 0.4, 
0.5, 0.8], [0.3,0.6,0.9,0.13], [0.5, 0.8, 0.14, 0.21],[0.3, 0.5, 0.8, 0.13],[0.08, 0.13, 0.21, 0.34],[0.21, 0.36, 0.59, 0.99], [1, 1, 2, 3], [0.3, 0.5, 0.8, 0.13],[0.13, 0.21, 0.34, 0.55], [0.10, 0.777, 0.13434334, 0.88809], [0.1, 0.9, 0.13, 0.17],[0.43, 0.675, 0.2, 0.9], [0.98, 0.32, 0.1, 0.3], [0.15, 0.21, 0.34, 0.56], [0.1, 0.1, 0.2, 0.3], [0.1, 0.15, 0.3, 0.5],[0.1, 0.2, 0.4, 0.5],[0.3, 0.4, 0.5, 0.8],[0.3,0.6,0.9,0.13],[0.15, 0.15, 0.25, 0.35],[0.15, 0.25, 0.35, 0.45],[0.46, 0.29, 0.7, 0.57],[0.55,0.89,1.44,2.33],[0.233, 0.377, 0.61, 0.987], [0.987, 1.597, 2.584, 4.181],[0.6, 0.7, 0.13, 0.20],[0.233, 0.377, 0.61, 0.987],[0.0008, 0.013, 0.0021, 0.0034], [0.5, 0.6, 0.11, 0.17], [0.4, 0.5, 0.9, 0.13], [0.3, 0.5, 0.8, 0.18],[0.1, 0.1, 0.2, 0.6], [0.4, 0.5, 0.10, 0.15], [0.2, 0.3, 0.5, 0.10], [0.2, 0.3, 0.6, 0.43], [0.1, 0.3, 0.4, 0.2], [0.3, 0.5, 0.8, 0.787687], [0.5, 0.8, 1.3, 1], [0.08, 0.13, 0.21, 0.4]] Y_train = [1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] X_test = [[0.5, 0.8, 0.13, 0.21], [0.21, 0.34, 0.55, 0.89], [0.7, 0.1, 0.879, 0.444], [0.20, 0.56, 0.909, 0.11], [0.2, 0.4, 0.6, 0.99],[0.53, 0.66, 0.06, 0.31], [0.24, 0.79, 0.25, 0.69], [0.008, 0.013, 0.021, 0.034], [0.144, 0.233, 0.377, 0.61], [0.61, 0.987, 1.597, 2.584], [0.34, 0.55, 0.89, 1.44], [0.034, 0.055, 0.089, 0.144],[0.2, 0.3, 0.5, 0.8], [0.5, 0.8, 1.3, 2.1], [0.413, 0.875, 0.066, 0.63], [0.3, 0.5, 0.7, 0.9], [0.2, 0.5, 0.14, 0.12], [0.5, 0.6, 0.7, 0.8],[0.5, 0.6, 0.9, 0.7],[0.5, 0.2, 0.9, 0.7],[0.4, 0.6, 0.4, 0.3],[0.9, 0.6, 0.4, 0.9],[0.9, 0.1, 0.6, 0.9],[0.8, 0.8, 0.6, 0.5]] Y_test = [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ``` ### Data Generation ``` import random import numpy as np x=np.random.random(4) np.set_printoptions(precision=2) print(x) a = [1,1] idx = 0 for i in range(30): a.append(a[idx] + a[idx+1]) idx +=1 print(a[-1]/100) ``` ## Results ### Level 1 ``` X_pred_level1 = 
[[0.08, 0.13, 0.21, 0.34], [0.2, 0.3, 0.5, 0.8],[0.01, 0.01, 0.02, 0.03],[0.008, 0.013, 0.021, 0.034], [0.3, 0.5, 0.8, 0.13], [0.55, 0.64, 0.77, 0.21], [0.62, 0.93, 0.38, 0.23],[0.9, 0.8, 0.7, 0.6], [0.4, 0.6, 0.78, 0.77],[0.44, 0.96, 0.28, 0.33]] ``` Level 1 is the easiest classification task. To challenge the model, the non-fibonacci sequences are close in value to each other. __Tasks Classified Correctly:__ * Large Dataset: 20% * Small Dataset: 80% ### Level 2 ``` X_pred_level2 = [[0.34, 0.55, 0.89, 1.44], [0.003, 0.005, 0.008, 0.013], [0.3, 0.5, 0.8, 1.3], [0.08, 0.13, 0.21, 0.34], [0.5, 0.8, 1.3, 2.1], [0.413, 0.875, 0.066, 0.63], [0.3, 0.5, 0.7, 0.4], [0.3, 0.8, 0.12, 0.2], [0.4, 0.5, 0.7, 0.7], [0.7, 0.0, 0.6, 0.5]] ``` Level 2 challenges the model by testing it against unfamiliar fibonacci sequences. The non-fibonacci numbers also become closer in value. __Tasks Classified Correctly:__ * Large Dataset: 40% * Small Dataset: 70% ### Level 3 ``` X_pred_level3 = [[0.233, 0.377, 0.61, 0.987], [0.55, 0.89, 1.44, 2.33], [0.0013, 0.0021, 0.0034, 0.0055], [0.5, 0.8, 1.3, 2.1], [0.89, 1.44, 2.33, 3.77], [0.03, 0.05, 0.3, 0.13], [0.40, 0.34, 0.55, 0.89], [0.2, 0.45, 0.5, 0.8], [0.08, 0.13, 0.30, 0.34], [0.13, 0.21, 0.34, 0.80]] ``` Level 3 is the most difficult test set. <br> It contains number sequences that appear to follow the fibonacci pattern but are off by a small value. e.g. 0.13, 0.21, 0.34, 0.80 **Tasks Classified Correctly:** * Large Dataset: 70% * Small Dataset: 30% The graph below illustrates the performance of the small dataset model and the large dataset model on each test set. 
``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline N = 3 B = (20, 40, 70) A = (80, 70, 30) BB = (1, 1, 1) AA = (1, 1, 1) ind = np.arange(N) # the x locations for the groups width = 0.35 # the width of the bars: can also be len(x) sequence p1 = plt.bar(ind, B, width, yerr=BB) p2 = plt.bar(ind, A, width, bottom=B, yerr=AA) plt.ylabel('Correct Classifications(%)') plt.title('Large Dataset vs Small Dataset Performance') plt.xticks(ind, ('L1', 'L2', 'L3')) plt.yticks(np.arange(0, 81, 10)) plt.legend((p1[0], p2[0]), ('Large Dataset', 'Small Dataset')) plt.show() ```
github_jupyter
# Predicting Concrete Compressive Strength - Comparison with Linear Models In this code notebook, we will analyze the statistics pertaining to the various models presented in this project. In the Exploratory Data Analysis notebook, we explored the various relationships that each constituent of concrete has with the cured compressive strength. The materials that held the strongest relationships, regardless of curing time, were cement, cementitious ratio, superplasticizer ratio, and fly ash ratio. We will examine each of the linear ratios independent of age, as well as at the industry-standard 28 day cure time mark. ## Dataset Citation This dataset was retrieved from the UC Irvine Machine Learning Repository from the following URL: <https://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength>. The dataset was donated to the UCI Repository by Prof. I-Cheng Yeh of Chung-Huah University, who retains copyright for the following published paper: I-Cheng Yeh, "Modeling of strength of high performance concrete using artificial neural networks," Cement and Concrete Research, Vol. 28, No. 12, pp. 1797-1808 (1998). Additional papers citing this dataset are listed at the reference link above.
## Import the Relevant Libraries ``` # Data Manipulation import numpy as np import pandas as pd # Data Visualization import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline sns.set() # Data Preprocessing from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler # Linear Regresssion Model from sklearn.linear_model import LinearRegression # Model Evaluation from sklearn.metrics import mean_squared_error,mean_absolute_error,explained_variance_score ``` ## Import & Check the Data ``` df1 = pd.read_csv('2020_1124_Modeling_Data.csv') df2 = pd.read_csv('2020_1123_Concrete_Data_Loaded_Transformed.csv') original_data = df1.copy() transformed_data = df2.copy() # The original data contains kg/m^3 values original_data.head() # Original data original_data.describe() # The transformed data contains ratios to total mass of the concrete mix transformed_data.head() # Transformed data transformed_data.describe() ``` ## Cement Modeling - Including All Cure Times We understand that the ratio of cement to compressive strength is linear. We will model this relationship in Python and evaluate its performance compared to our ANN model. 
### Visualization ``` # We will visualize the linear relationship between quantity of cement and compressive strength cement = original_data['Cement'] strength = original_data['Compressive_Strength'] plt.scatter(cement,strength) ``` ### Train the Linear Model ``` # Reshape the data so it complies with the linear model requirements X = np.array(cement).reshape(1030,1) y = np.array(strength).reshape(1030,1) # Perform a train-test split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42) # Train the linear model lm = LinearRegression() lm.fit(X_train,y_train) ``` ### Test the Linear Model ``` y_pred = lm.predict(X_test) ``` ### Linear Equation ``` # print the intercept print(lm.intercept_) coeff = pd.DataFrame(lm.coef_,columns=['Coefficient']) coeff ``` ### Model Evaluation ``` # Plot the linear model preditions as a line superimposed on a scatter plot of the testing data plt.scatter(X_test,y_test) plt.plot(X_test,y_pred,'r') # Evaluation Metrics MAE_cement = mean_absolute_error(y_test, y_pred) MSE_cement = mean_squared_error(y_test, y_pred) RMSE_cement = np.sqrt(mean_squared_error(y_test, y_pred)) cement_stats = [MAE_cement,MSE_cement,RMSE_cement] # storing for model comparison at the end of this notebook # Print the metrics print(f"EVALUATION METRICS, LINEAR MODEL FOR CEMENT VS. COMPRESSIVE STRENGTH") print('-----------------------------') print(f"Mean Absolute Error (MAE):\t\t{MAE_cement}\nMean Squared Error:\t\t\t{MSE_cement}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_cement}") print('-----------------------------\n\n') ``` ## Cement Modeling - 28 Day Cure Time We will model the cement vs compressive strength relationship for a constant cure time (28 days). 
### Visualization ``` # We will visualize the linear relationship between quantity of cement and compressive strength at 28 days cement = original_data[original_data['Age']==28]['Cement'] strength = original_data[original_data['Age']==28]['Compressive_Strength'] plt.scatter(cement,strength) ``` ### Train the Linear Model ``` # Reshape the data so it complies with the linear model requirements X = np.array(cement).reshape(425,1) y = np.array(strength).reshape(425,1) # Perform a train-test split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42) # Train the linear model lm = LinearRegression() lm.fit(X_train,y_train) ``` ### Test the Linear Model ``` y_pred = lm.predict(X_test) ``` ### Linear Equation ``` # print the intercept print(lm.intercept_) coeff = pd.DataFrame(lm.coef_,columns=['Coefficient']) coeff ``` ### Model Evaluation ``` # Plot the linear model preditions as a line superimposed on a scatter plot of the testing data plt.scatter(X_test,y_test) plt.plot(X_test,y_pred,'r') # Evaluation Metrics MAE_cement_28 = mean_absolute_error(y_test, y_pred) MSE_cement_28 = mean_squared_error(y_test, y_pred) RMSE_cement_28 = np.sqrt(mean_squared_error(y_test, y_pred)) cement_28_stats = [MAE_cement_28,MSE_cement_28,RMSE_cement_28] # storing for model comparison at the end of this notebook # Print the metrics print(f"EVALUATION METRICS, LINEAR MODEL FOR CEMENT VS. COMPRESSIVE STRENGTH") print('-----------------------------') print(f"Mean Absolute Error (MAE):\t\t{MAE_cement_28}\nMean Squared Error:\t\t\t{MSE_cement_28}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_cement_28}") print('-----------------------------\n\n') ``` ## Cementitious Ratio Modeling - Including All Cure Times We know that the ratio of cementitious materials to the total mass is (cement + fly ash)/(total mass) to compressive strength is linear. We will model this relationship in Python and evaluate its performance. 
### Visualization ``` # We will visualize the linear relationship between quantity of cementitious materials and compressive strength cementitious = transformed_data['Cementitious_Ratio'] strength = transformed_data['Compressive_Strength'] plt.scatter(cementitious,strength) ``` ### Train the Linear Model ``` # Reshape the data so it complies with the linear model requirements X = np.array(cementitious).reshape(1030,1) y = np.array(strength).reshape(1030,1) # Perform a train-test split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42) # Train the linear model lm = LinearRegression() lm.fit(X_train,y_train) ``` ### Test the Linear Model ``` y_pred = lm.predict(X_test) ``` ### Linear Equation ``` # print the intercept print(lm.intercept_) coeff = pd.DataFrame(lm.coef_,columns=['Coefficient']) coeff ``` ### Model Evaluation ``` # Plot the linear model preditions as a line superimposed on a scatter plot of the testing data plt.scatter(X_test,y_test) plt.plot(X_test,y_pred,'r') # Evaluation Metrics MAE_cementitious = mean_absolute_error(y_test, y_pred) MSE_cementitious = mean_squared_error(y_test, y_pred) RMSE_cementitious = np.sqrt(mean_squared_error(y_test, y_pred)) cementitious_stats = [MAE_cementitious,MSE_cementitious,RMSE_cementitious] # storing for model comparison at the end of this notebook # Print the metrics print(f"EVALUATION METRICS, LINEAR MODEL FOR CEMENTITIOUS RATIO VS. 
COMPRESSIVE STRENGTH") print('-----------------------------') print(f"Mean Absolute Error (MAE):\t\t{MAE_cementitious}\nMean Squared Error:\t\t\t{MSE_cementitious}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_cementitious}") print('-----------------------------\n\n') ``` ## Cementitious Ratio Modeling - 28 Day Cure Time ### Visualization ``` # We will visualize the linear relationship between quantity of cementitious materials and compressive strength at 28 days cementitious = transformed_data[original_data['Age']==28]['Cementitious_Ratio'] strength = transformed_data[original_data['Age']==28]['Compressive_Strength'] plt.scatter(cementitious,strength) ``` ### Train the Linear Model ``` # Reshape the data so it complies with the linear model requirements X = np.array(cementitious).reshape(425,1) y = np.array(strength).reshape(425,1) # Perform a train-test split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42) # Train the linear model lm = LinearRegression() lm.fit(X_train,y_train) ``` ### Test the Linear Model ``` y_pred = lm.predict(X_test) ``` ### Linear Equation ``` # print the intercept print(lm.intercept_) coeff = pd.DataFrame(lm.coef_,columns=['Coefficient']) coeff ``` ### Model Evaluation ``` # Plot the linear model preditions as a line superimposed on a scatter plot of the testing data plt.scatter(X_test,y_test) plt.plot(X_test,y_pred,'r') # Evaluation Metrics MAE_cementitious_28 = mean_absolute_error(y_test, y_pred) MSE_cementitious_28 = mean_squared_error(y_test, y_pred) RMSE_cementitious_28 = np.sqrt(mean_squared_error(y_test, y_pred)) cementitious_28_stats = [MAE_cementitious_28,MSE_cementitious_28,RMSE_cementitious_28] # storing for model comparison at the end of this notebook # Print the metrics print(f"EVALUATION METRICS, LINEAR MODEL FOR CEMENTITIOUS RATIO VS. 
COMPRESSIVE STRENGTH AT 28 DAYS") print('-----------------------------') print(f"Mean Absolute Error (MAE):\t\t{MAE_cementitious_28}\nMean Squared Error:\t\t\t{MSE_cementitious_28}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_cementitious_28}") print('-----------------------------\n\n') ``` ## Fly Ash Ratio Modeling - Including All Cure Times The fly ash ratio is interpreted as the percentage of fly ash within the cementitious materials mix, that is, Fly_Ash_Ratio = (fly ash + cement)/(total mass). ### Visualization ``` # We will visualize the linear relationship between fly ash ratio and compressive strength fly = transformed_data['Fly_Ash_Ratio'] strength = transformed_data['Compressive_Strength'] plt.scatter(fly,strength) ``` ### Data Preprocessing We see from the graph above that there are many instances where there is no fly ash in the mix design. Let us use only nonzero entries for our analysis. ``` fly = transformed_data[transformed_data['Fly_Ash_Ratio']!=0]['Fly_Ash_Ratio'] strength = transformed_data[transformed_data['Fly_Ash_Ratio']!=0]['Compressive_Strength'] plt.scatter(fly,strength) ``` ### Train the Linear Model ``` # Reshape the data so it complies with the linear model requirements X = np.array(fly).reshape(464,1) y = np.array(strength).reshape(464,1) # Perform a train-test split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42) # Train the linear model lm = LinearRegression() lm.fit(X_train,y_train) ``` ### Test the Linear Model ``` y_pred = lm.predict(X_test) ``` ### Linear Equation ``` # print the intercept print(lm.intercept_) coeff = pd.DataFrame(lm.coef_,columns=['Coefficient']) coeff ``` ### Model Evaluation ``` # Plot the linear model preditions as a line superimposed on a scatter plot of the testing data plt.scatter(X_test,y_test) plt.plot(X_test,y_pred,'r') # Evaluation Metrics MAE_fly = mean_absolute_error(y_test, y_pred) MSE_fly = mean_squared_error(y_test, y_pred) RMSE_fly = 
np.sqrt(mean_squared_error(y_test, y_pred)) fly_stats = [MAE_fly,MSE_fly,RMSE_fly] # storing for model comparison at the end of this notebook # Print the metrics print(f"EVALUATION METRICS, LINEAR MODEL FOR FLY ASH RATIO VS. COMPRESSIVE STRENGTH") print('-----------------------------') print(f"Mean Absolute Error (MAE):\t\t{MAE_fly}\nMean Squared Error:\t\t\t{MSE_fly}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_fly}") print('-----------------------------\n\n') ``` ## Fly Ash Ratio Modeling - 28 Day Cure Time The fly ash ratio is interpreted as the percentage of fly ash within the cementitious materials mix, that is, Fly_Ash_Ratio = (fly ash + cement)/(total mass). ``` fly = transformed_data[((transformed_data['Fly_Ash_Ratio']!=0)&(transformed_data['Age']==28))]['Fly_Ash_Ratio'] strength = transformed_data[((transformed_data['Fly_Ash_Ratio']!=0)&(transformed_data['Age']==28))]['Compressive_Strength'] plt.scatter(fly,strength) ``` ### Train the Linear Model ``` # Reshape the data so it complies with the linear model requirements X = np.array(fly).reshape(217,1) y = np.array(strength).reshape(217,1) # Perform a train-test split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42) # Train the linear model lm = LinearRegression() lm.fit(X_train,y_train) ``` ### Test the Linear Model ``` y_pred = lm.predict(X_test) ``` ### Linear Equation ``` # print the intercept print(lm.intercept_) coeff = pd.DataFrame(lm.coef_,columns=['Coefficient']) coeff ``` ### Model Evaluation ``` # Plot the linear model preditions as a line superimposed on a scatter plot of the testing data plt.scatter(X_test,y_test) plt.plot(X_test,y_pred,'r') # Evaluation Metrics MAE_fly_28 = mean_absolute_error(y_test, y_pred) MSE_fly_28 = mean_squared_error(y_test, y_pred) RMSE_fly_28 = np.sqrt(mean_squared_error(y_test, y_pred)) fly_28_stats = [MAE_fly_28,MSE_fly_28,RMSE_fly_28] # storing for model comparison at the end of this notebook # Print the metrics print(f"EVALUATION 
METRICS, LINEAR MODEL FOR FLY ASH RATIO VS. COMPRESSIVE STRENGTH AT 28 DAYS") print('-----------------------------') print(f"Mean Absolute Error (MAE):\t\t{MAE_fly_28}\nMean Squared Error:\t\t\t{MSE_fly_28}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_fly_28}") print('-----------------------------\n\n') ``` ## Superplasticizer Ratio Modeling - Including All Cure Times The superplasticizer ratio is the ratio of superplasticizer contained within the total mix design, by weight. ### Visualization ``` # We will visualize the linear relationship between superplasticizer ratio and compressive strength superplasticizer = transformed_data['Superplasticizer_Ratio'] strength = transformed_data['Compressive_Strength'] plt.scatter(superplasticizer,strength) ``` ### Data Preprocessing Once again, we see from the graph above that there are many instances where there is no superplasticizer in the mix design. Let us use only nonzero entries for our analysis. ``` superplasticizer = transformed_data[transformed_data['Superplasticizer_Ratio']!=0]['Superplasticizer_Ratio'] strength = transformed_data[transformed_data['Superplasticizer_Ratio']!=0]['Compressive_Strength'] plt.scatter(superplasticizer,strength) ``` This is better, but we see a large spread in the data. Let's remove any outliers first, before training our model. ``` superplasticizer.describe() mean = 0.004146 three_sigma = 3*0.001875 upper = mean + three_sigma lower = mean - three_sigma print(f"The lower bound is:\t{lower}\nThe upper bound is:\t{upper}") ``` Since there are no negative ratios, we only need to remove data points where the superplasticizer ratio is greater than 0.009771.
``` superplasticizer = transformed_data[transformed_data['Superplasticizer_Ratio']!=0][transformed_data['Superplasticizer_Ratio'] < upper]['Superplasticizer_Ratio'] strength = transformed_data[transformed_data['Superplasticizer_Ratio']!=0][transformed_data['Superplasticizer_Ratio'] < upper]['Compressive_Strength'] plt.scatter(superplasticizer,strength) ``` ### Train the Linear Model ``` # We will train and test our model only on the data above, that does not contain outliers # Reshape the data so it complies with the linear model requirements X = np.array(superplasticizer).reshape(641,1) y = np.array(strength).reshape(641,1) # Perform a train-test split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42) # Train the linear model lm = LinearRegression() lm.fit(X_train,y_train) ``` ### Test the Linear Model ``` y_pred = lm.predict(X_test) ``` ### Linear Equation ``` # print the intercept print(lm.intercept_) coeff = pd.DataFrame(lm.coef_,columns=['Coefficient']) coeff ``` ### Model Evaluation ``` # Plot the linear model preditions as a line superimposed on a scatter plot of the testing data plt.scatter(X_test,y_test) plt.plot(X_test,y_pred,'r') # Evaluation Metrics MAE_super = mean_absolute_error(y_test, y_pred) MSE_super = mean_squared_error(y_test, y_pred) RMSE_super = np.sqrt(mean_squared_error(y_test, y_pred)) super_stats = [MAE_super,MSE_super,RMSE_super] # storing for model comparison at the end of this notebook # Print the metrics print(f"EVALUATION METRICS, LINEAR MODEL FOR SUPERPLASTICIZER RATIO VS. COMPRESSIVE STRENGTH") print('-----------------------------') print(f"Mean Absolute Error (MAE):\t\t{MAE_super}\nMean Squared Error:\t\t\t{MSE_super}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_super}") print('-----------------------------\n\n') ``` ## Superplasticizer Ratio Modeling - 28 Day Cure Time The superplasticizer ratio is the ratio of superplasticizer contained within the total mix design, by weight. 
### Visualization ``` superplasticizer = transformed_data[((transformed_data['Superplasticizer_Ratio']!=0)&(transformed_data['Age']==28))]['Superplasticizer_Ratio'] strength = transformed_data[((transformed_data['Superplasticizer_Ratio']!=0)&(transformed_data['Age']==28))]['Compressive_Strength'] plt.scatter(superplasticizer,strength) ``` This is better, but we see a large spread in the data. Let's remove any outliers first, before training our model. ``` superplasticizer.describe() mean = 0.004146 three_sigma = 3*0.001875 upper = mean + three_sigma lower = mean - three_sigma print(f"The lower bound is:\t{lower}\nThe upper bound is:\t{upper}") ``` Since there are no negative ratios, we only need to remove data points where the superplasticizer ratio is greater than 0.009771. ``` superplasticizer = transformed_data[((transformed_data['Superplasticizer_Ratio']!=0)&(transformed_data['Age']==28)&(transformed_data['Superplasticizer_Ratio']<upper))]['Superplasticizer_Ratio'] strength = transformed_data[((transformed_data['Superplasticizer_Ratio']!=0)&(transformed_data['Age']==28)&(transformed_data['Superplasticizer_Ratio']<upper))]['Compressive_Strength'] plt.scatter(superplasticizer,strength) ``` ### Train the Linear Model ``` # We will train and test our model only on the data above, that does not contain outliers # Reshape the data so it complies with the linear model requirements X = np.array(superplasticizer).reshape(315,1) y = np.array(strength).reshape(315,1) # Perform a train-test split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42) # Train the linear model lm = LinearRegression() lm.fit(X_train,y_train) ``` ### Test the Linear Model ``` y_pred = lm.predict(X_test) ``` ### Linear Equation ``` # print the intercept print(lm.intercept_) coeff = pd.DataFrame(lm.coef_,columns=['Coefficient']) coeff ``` ### Model Evaluation ``` # Plot the linear model preditions as a line superimposed on a scatter plot of the testing data 
plt.scatter(X_test,y_test) plt.plot(X_test,y_pred,'r') # Evaluation Metrics MAE_super_28 = mean_absolute_error(y_test, y_pred) MSE_super_28 = mean_squared_error(y_test, y_pred) RMSE_super_28 = np.sqrt(mean_squared_error(y_test, y_pred)) super_stats_28 = [MAE_super_28,MSE_super_28,RMSE_super_28] # storing for model comparison at the end of this notebook # Print the metrics print(f"EVALUATION METRICS, LINEAR MODEL FOR SUPERPLASTICIZER RATIO VS. COMPRESSIVE STRENGTH AT 28 DAYS") print('-----------------------------') print(f"Mean Absolute Error (MAE):\t\t{MAE_super_28}\nMean Squared Error:\t\t\t{MSE_super_28}\nRoot Mean Squared Error (RMSE):\t\t{RMSE_super_28}") print('-----------------------------\n\n') ``` ## Model Comparisons Analysis Neither superplasticizer linear model appeared to represent the data well from a visual perspective. The cement, cementitious ratio, and fly ash ratio linear models, however, did. We can display all of the evaluation metrics below and compare them to the artificial neural network's (ANN) performance. 
``` ANN_metrics = [5.083552,6.466492**2,6.466492] metrics = [cement_stats, cementitious_stats, fly_stats, super_stats, ANN_metrics] metrics_28 = [cement_28_stats, cementitious_28_stats, fly_28_stats, super_stats_28, ANN_metrics] metrics_df = pd.DataFrame(data=metrics, index=['Cement (Ignoring Cure Time)','Cementitious_Ratio (Ignoring Cure Time)','Fly_Ash_Ratio (Ignoring Cure Time)','Superplasticizer_Ratio (Ignoring Cure Time)','ANN (Function of Time)'], columns=['MAE','MSE','RMSE']) metrics_28_df = pd.DataFrame(data=metrics_28, index=['Cement (Cure Time = 28 Days)','Cementitious_Ratio (Cure Time = 28 Days)','Fly_Ash_Ratio (Cure Time = 28 Days)','Superplasticizer_Ratio (Cure Time = 28 Days)','ANN (Function of Time)'], columns=['MAE','MSE','RMSE']) metrics_df metrics_28_df ``` ## Conclusions & Recommendations By comparing the evaluation metrics for all models, we conclude that the ANN model performed significantly better than all of the linear models. It outperformed the best linear model's RMSE (for Fly_Ash_Ratio at 28 Days) by over 30%! An important note is that the linear models were not scaled, and the ANN model was. We kept the linear models biased in order to maintain coefficient interpretability, whereas that was not relevant to the ANN model. What is surprising is that the ANN model still outperformed the linear models, even when controlling for cure time at 28 days. Perhaps the most startling insight is that the fly ash ratio was even more accurate at predicting concrete compressive strength than the cement quantity, to the point that it had the lowest errors of all of the linear models. We therefore recommend that engineers give very conservative fly ash ratio specifications when allowing substitutions for Portland cement.
github_jupyter
``` # default_exp vr_parser #hide_input import pivotpy as pp pp.nav_links(1) ``` # Xml Parser > This parser contains functions to extract data from vasprun.xml. All functions in xml parser can work without arguments if working directory contains `vasprun.xml`. - Almost every object in this module returns a `Dict2Data` object with attributes accessible via dot notation. This object can by transformed to a dictionary by `to_dict()` method on the object. ``` #export import re import os import json import pickle from itertools import islice, chain, product from collections import namedtuple import numpy as np from importlib.machinery import SourceFileLoader import textwrap import xml.etree.ElementTree as ET # Inside packages import to work both with package and jupyter notebook. try: from pivotpy import g_utils as gu from pivotpy.sio import read_ticks except: import pivotpy.g_utils as gu import pivotpy.sio.read_ticks as read_ticks #hide_input # To run notebook smoothly, not for module export from nbdev.showdoc import show_doc from pivotpy.vr_parser import dump_dict,load_export #export def dict2tuple(name,d): """Converts a dictionary (nested as well) to namedtuple, accessible via index and dot notation as well as by unpacking. - **Parameters** - name: Name of the tuple. - d : Dictionary, nested works as well. """ return namedtuple(name,d.keys())( *(dict2tuple(k.upper(),v) if isinstance(v,dict) else v for k,v in d.items()) ) #export class Dict2Data: """ - Returns a Data object with dictionary keys as attributes of Data accessible by dot notation or by key. Once an attribute is created, it can not be changed from outside. - **Parmeters** - dict : Python dictionary (nested as well) containing any python data types. - **Methods** - to_dict : Converts a Data object to dictionary if it could be made a dictionary, otherwise throws relevant error. - to_json : Converts to json str or save to file if `outfil` given. Accepts `indent` as parameter. 
- to_pickle: Converts to bytes str or save to file if `outfile` given. - to_tuple : Converts to a named tuple. - **Example** > x = Dict2Data({'A':1,'B':{'C':2}}) > x > Data( > A = 1 > B = Data( > C = 2 > ) > ) > x.B.to_dict() > {'C': 2} """ def __init__(self,d): if isinstance(d,Dict2Data): d = d.to_dict() # if nested Dict2Dataects, must expand here. for a,b in d.items(): if isinstance(b,Dict2Data): b = b.to_dict() # expands self instance !must here. if isinstance(b,(list,tuple)): setattr(self,a,[Dict2Data(x) if isinstance(x,dict) else x for x in b]) else: setattr(self,a,Dict2Data(b) if isinstance(b,dict) else b) def to_dict(self): """Converts a `Dict2Data` object (root or nested level) to a dictionary. """ result = {} for k,v in self.__dict__.items(): if isinstance(v,Dict2Data): result.update({k:Dict2Data.to_dict(v)}) else: result.update({k:v}) return result def to_json(self,outfile=None,indent=1): """Dumps a `Dict2Data` object (root or nested level) to json. - **Parameters** - outfile : Default is None and returns string. If given, writes to file. - indent : Json indent. Default is 1. """ return dump_dict(self,dump_to='json',outfile=outfile,indent=indent) def to_pickle(self,outfile=None): """Dumps a `Dict2Data` object (root or nested level) to pickle. - **Parameters** - outfile : Default is None and returns string. If given, writes to file. 
""" return dump_dict(self,dump_to='pickle',outfile=outfile) def to_tuple(self): """Creates a namedtuple.""" return dict2tuple('Data',self.to_dict()) def __repr__(self): items= [] for k,v in self.__dict__.items(): if type(v) not in (str,float,int,range) and not isinstance(v,Dict2Data): if isinstance(v,np.ndarray): v = "<{}:shape={}>".format(v.__class__.__name__,np.shape(v)) elif type(v) in (list,tuple): v = ("<{}:len={}>".format(v.__class__.__name__,len(v)) if len(v) > 10 else v) else: v = v.__class__ if isinstance(v,Dict2Data): v = repr(v).replace("\n","\n ") items.append(f" {k} = {v}") return "Data(\n{}\n)".format('\n'.join(items)) def __getstate__(self): pass #This is for pickling def __setattr__(self, name, value): if name in self.__dict__: raise AttributeError(f"Outside assignment is restricted for already present attribute.") else: self.__dict__[name] = value # Dictionary-wise access def keys(self): return self.__dict__.keys() def __getitem__(self,key): return self.__dict__[key] def items(self): return self.__dict__.items() show_doc(Dict2Data.to_dict) show_doc(Dict2Data.to_json) show_doc(Dict2Data.to_pickle) show_doc(Dict2Data.to_tuple) x = Dict2Data({'A':1,'B':2}) print('Dict: ',x.to_dict()) print('JSON: ',x.to_json()) print('Pickle: ',x.to_pickle()) print('Tuple: ',x.to_tuple()) x['A'] ``` ## Parser Functions ``` #export def read_asxml(path=None): """ - Reads a big vasprun.xml file into memory once and then apply commands. If current folder contains `vasprun.xml` file, it automatically picks it. - **Parameters** - path : Path/To/vasprun.xml - **Returns** - xml_data : Xml object to use in other functions """ if(path==None): path='./vasprun.xml' if not os.path.isfile(path): print("File: '{}'' does not exist!".format(path)) return # This is important to stop further errors. elif 'vasprun.xml' not in path: print("File name should be '*vasprun.xml'.") return # This is important to stop further errors. 
else: fsize = gu.get_file_size(path) value = float(fsize.split()[0]) print_str = """ Memory Consumption Warning! --------------------------- File: {} is large ({}). It may consume a lot of memory (generally 3 times the file size). An alternative way is to parse vasprun.xml is by using `Vasp2Visual` module in Powershell by command `pivotpy.load_export('path/to/vasprun.xml'), which runs underlying powershell functions to load data whith efficient memory managment. It works on Windows/Linux/MacOS if you have powershell core and Vasp2Visual installed on it. """.format(path,fsize) if 'MB' in fsize and value > 200: print(gu.color.y(textwrap.dedent(print_str))) elif 'GB' in fsize and value > 1: print(gu.color.y(textwrap.dedent(print_str))) tree = ET.parse(path) xml_data = tree.getroot() return xml_data #export def xml2dict(xmlnode_or_filepath): """Convert xml node or xml file content to dictionary. All output text is in string format, so further processing is required to convert into data types/split etc. - The only paramenter `xmlnode_or_filepath` is either a path to an xml file or an `xml.etree.ElementTree.Element` object. - Each node has `tag,text,attr,nodes` attributes. Every text element can be accessed via `xml2dict()['nodes'][index]['nodes'][index]...` tree which makes it simple. """ if isinstance(xmlnode_or_filepath,str): node = read_asxml(xmlnode_or_filepath) else: node = xmlnode_or_filepath text = node.text.strip() if node.text else '' nodes = [xml2dict(child) for child in list(node)] return {'tag': node.tag,'text': text, 'attr':node.attrib, 'nodes': nodes} #export def exclude_kpts(xml_data=None): """ - Returns number of kpoints to exclude used from IBZKPT. - **Parameters** - xml_data : From `read_asxml` function - **Returns** - int : Number of kpoints to exclude. 
""" if(xml_data==None): xml_data=read_asxml() if not xml_data: return for kpts in xml_data.iter('varray'): if(kpts.attrib=={'name': 'weights'}): weights=[float(arr.text.strip()) for arr in kpts.iter('v')] exclude=[] [exclude.append(item) for item in weights if item!=weights[-1]]; skipk=len(exclude) #that much to skip return skipk #export def get_ispin(xml_data=None): """ - Returns value of ISPIN. - **Parameters** - xml_data : From `read_asxml` function - **Returns** - int : Value of ISPIN. """ if(xml_data==None): xml_data=read_asxml() if not xml_data: return for item in xml_data.iter('i'): if(item.attrib=={'type': 'int', 'name': 'ISPIN'}): return int(item.text) #export def get_summary(xml_data=None): """ - Returns overview of system parameters. - **Parameters** - xml_data : From `read_asxml` function - **Returns** - Data : pivotpy.Dict2Data with attibutes accessible via dot notation. """ if(xml_data==None): xml_data=read_asxml() if not xml_data: return for i_car in xml_data.iter('incar'): incar={car.attrib['name']:car.text.strip() for car in i_car} n_ions=[int(atom.text) for atom in xml_data.iter('atoms')][0] type_ions=[int(atom_types.text) for atom_types in xml_data.iter('types')][0] elem=[info[0].text.strip() for info in xml_data.iter('rc')] elem_name=[]; #collect IONS names [elem_name.append(item) for item in elem[:-type_ions] if item not in elem_name] elem_index=[0]; #start index [elem_index.append((int(entry)+elem_index[-1])) for entry in elem[-type_ions:]]; ISPIN=get_ispin(xml_data=xml_data) NELECT = int([i.text.strip().split('.')[0] for i in xml_data.iter('i') if i.attrib['name']=='NELECT'][0]) # Fields try: for pro in xml_data.iter('partial'): dos_fields=[field.text.strip() for field in pro.iter('field')] dos_fields = [field for field in dos_fields if 'energy' not in field] except: dos_fields = [] for i in xml_data.iter('i'): #efermi for condition required. 
if(i.attrib=={'name': 'efermi'}): efermi=float(i.text) #Writing information to a dictionary info_dic={'SYSTEM':incar['SYSTEM'],'NION':n_ions,'NELECT':NELECT,'TypeION':type_ions, 'ElemName':elem_name,'ElemIndex':elem_index,'E_Fermi': efermi,'ISPIN':ISPIN, 'fields':dos_fields,'incar':incar} return Dict2Data(info_dic) import pivotpy.vr_parser as vp xml_data=vp.read_asxml(path= '../vasprun.xml') get_summary(xml_data=xml_data).to_tuple() #export def join_ksegments(kpath,kseg_inds=[]): """Joins a broken kpath's next segment to previous. `kseg_inds` should be list of first index of next segment""" path_list = np.array(kpath) if kseg_inds: for ind in kseg_inds: path_list[ind:] -= path_list[ind] - path_list[ind-1] return list(path_list) def get_kpts(xml_data=None,skipk=0,kseg_inds=[]): r"""Returns kpoints and calculated kpath. Parameters: xml_data From `read_asxml` function. skipk : int Number of initil kpoints to skip. kseg_inds : list List of indices of kpoints where path is broken. Returns: Data : pivotpy.Dict2Data with attibutes `kpath` and `kpoints`. """ if(xml_data==None): xml_data=read_asxml() if not xml_data: return for kpts in xml_data.iter('varray'): if(kpts.attrib=={'name': 'kpointlist'}): kpoints=[[float(item) for item in arr.text.split()] for arr in kpts.iter('v')] kpoints=np.array(kpoints[skipk:]) #KPath solved. kpath=[0];pts=kpoints [kpath.append(np.round(np.sqrt(np.sum((pt1-pt2)**2))+kpath[-1],6)) for pt1,pt2 in zip(pts[:-1],pts[1:])] # If broken path, then join points. kpath = join_ksegments(kpath,kseg_inds) return Dict2Data({'NKPTS':len(kpoints),'kpoints':kpoints,'kpath':kpath}) get_kpts(xml_data=xml_data,skipk=10) #export def get_tdos(xml_data=None,spin_set=1,elim=[]): """ - Returns total dos for a spin_set (default 1) and energy limit. If spin-polarized calculations, gives SpinUp and SpinDown keys as well. - **Parameters** - xml_data : From `read_asxml` function - spin_set : int, default is 1.and - elim : List [min,max] of energy, default empty. 
- **Returns** - Data : pivotpy.Dict2Data with attibutes E_Fermi, ISPIN,tdos. """ if(xml_data==None): xml_data=read_asxml() if not xml_data: return tdos=[]; #assign for safely exit if wrong spin set entered. ISPIN = get_ispin(xml_data=xml_data) for neighbor in xml_data.iter('dos'): for item in neighbor[1].iter('set'): if(ISPIN==1 and spin_set==1): if(item.attrib=={'comment': 'spin 1'}): tdos=np.array([[float(entry) for entry in arr.text.split()] for arr in item]) if(ISPIN==2 and spin_set==1): if(item.attrib=={'comment': 'spin 1'}): tdos_1=np.array([[float(entry) for entry in arr.text.split()] for arr in item]) if(item.attrib=={'comment': 'spin 2'}): tdos_2=np.array([[float(entry) for entry in arr.text.split()] for arr in item]) tdos = {'SpinUp':tdos_1,'SpinDown':tdos_2} if(spin_set!=1): #can get any if(item.attrib=={'comment': 'spin {}'.format(spin_set)}): tdos=np.array([[float(entry) for entry in arr.text.split()] for arr in item]) for i in xml_data.iter('i'): #efermi for condition required. if(i.attrib=={'name': 'efermi'}): efermi=float(i.text) dos_dic= {'E_Fermi':efermi,'ISPIN':ISPIN,'tdos':tdos} #Filtering in energy range. 
if elim: #check if elim not empty if(ISPIN==1 and spin_set==1): up_ind=np.max(np.where(tdos[:,0]-efermi<=np.max(elim)))+1 lo_ind=np.min(np.where(tdos[:,0]-efermi>=np.min(elim))) tdos=tdos[lo_ind:up_ind,:] if(ISPIN==2 and spin_set==1): up_ind=np.max(np.where(tdos['SpinUp'][:,0]-efermi<=np.max(elim)))+1 lo_ind=np.min(np.where(tdos['SpinUp'][:,0]-efermi>=np.min(elim))) tdos = {'SpinUp':tdos_1[lo_ind:up_ind,:],'SpinDown':tdos_2[lo_ind:up_ind,:]} if(spin_set!=1): up_ind=np.max(np.where(tdos[:,0]-efermi<=np.max(elim)))+1 lo_ind=np.min(np.where(tdos[:,0]-efermi>=np.min(elim))) tdos=tdos[lo_ind:up_ind,:] dos_dic= {'E_Fermi':efermi,'ISPIN':ISPIN,'grid_range':range(lo_ind,up_ind),'tdos':tdos} return Dict2Data(dos_dic) get_tdos(xml_data=xml_data,spin_set=1,elim=[]) #export def get_evals(xml_data=None,skipk=None,elim=[]): """ - Returns eigenvalues as numpy array. If spin-polarized calculations, gives SpinUp and SpinDown keys as well. - **Parameters** - xml_data : From `read_asxml` function - skipk : Number of initil kpoints to skip. - elim : List [min,max] of energy, default empty. - **Returns** - Data : pivotpy.Dict2Data with attibutes evals and related parameters. """ if(xml_data==None): xml_data=read_asxml() if not xml_data: return evals=[]; #assign for safely exit if wrong spin set entered. 
ISPIN=get_ispin(xml_data=xml_data) if skipk!=None: skipk=skipk else: skipk=exclude_kpts(xml_data=xml_data) #that much to skip by default for neighbor in xml_data.iter('eigenvalues'): for item in neighbor[0].iter('set'): if(ISPIN==1): if(item.attrib=={'comment': 'spin 1'}): evals=np.array([[float(th.text.split()[0]) for th in thing] for thing in item])[skipk:] NBANDS=len(evals[0]) if(ISPIN==2): if(item.attrib=={'comment': 'spin 1'}): eval_1=np.array([[float(th.text.split()[0]) for th in thing] for thing in item])[skipk:] if(item.attrib=={'comment': 'spin 2'}): eval_2=np.array([[float(th.text.split()[0]) for th in thing] for thing in item])[skipk:] evals={'SpinUp':eval_1,'SpinDown':eval_2} NBANDS=len(eval_1[0]) for i in xml_data.iter('i'): #efermi for condition required. if(i.attrib=={'name': 'efermi'}): efermi=float(i.text) evals_dic={'E_Fermi':efermi,'ISPIN':ISPIN,'NBANDS':NBANDS,'evals':evals,'indices': range(NBANDS)} if elim: #check if elim not empty if(ISPIN==1): up_ind=np.max(np.where(evals[:,:]-efermi<=np.max(elim))[1])+1 lo_ind=np.min(np.where(evals[:,:]-efermi>=np.min(elim))[1]) evals=evals[:,lo_ind:up_ind] if(ISPIN==2): up_ind=np.max(np.where(eval_1[:,:]-efermi<=np.max(elim))[1])+1 lo_ind=np.min(np.where(eval_1[:,:]-efermi>=np.min(elim))[1]) evals={'SpinUp':eval_1[:,lo_ind:up_ind],'SpinDown':eval_2[:,lo_ind:up_ind]} NBANDS = int(up_ind - lo_ind) #update Bands evals_dic['NBANDS'] = NBANDS evals_dic['indices'] = range(lo_ind,up_ind) evals_dic['evals'] = evals return Dict2Data(evals_dic) get_evals(xml_data=xml_data,skipk=10,elim=[-5,5]) #export def get_bands_pro_set(xml_data=None, spin_set=1, skipk=0, bands_range=None, set_path=None): """ - Returns bands projection of a spin_set(default 1). If spin-polarized calculations, gives SpinUp and SpinDown keys as well. - **Parameters** - xml_data : From `read_asxml` function - skipk : Number of initil kpoints to skip (Default 0). - spin_set : Spin set to get, default is 1. 
- bands_range : If elim used in `get_evals`,that will return bands_range to use here. Note that range(0,2) will give 2 bands 0,1 but tuple (0,2) will give 3 bands 0,1,2. - set_path : path/to/_set[1,2,3,4].txt, works if `split_vasprun` is used before. - **Returns** - Data : pivotpy.Dict2Data with attibutes of bands projections and related parameters. """ if(bands_range!=None): check_list = list(bands_range) if check_list==[]: return print(gu.color.r("No bands prjections found in given energy range.")) # Try to read _set.txt first. instance check is important. if isinstance(set_path,str) and os.path.isfile(set_path): _header = islice2array(set_path,nlines=1,raw=True,exclude=None) _shape = [int(v) for v in _header.split('=')[1].strip().split(',')] NKPTS, NBANDS, NIONS, NORBS = _shape if NORBS == 3: fields = ['s','p','d'] elif NORBS == 9: fields = ['s','py','pz','px','dxy','dyz','dz2','dxz','x2-y2'] else: fields = [str(i) for i in range(NORBS)] #s,p,d in indices. COUNT = NIONS*NBANDS*(NKPTS-skipk)*NORBS start = NBANDS*NIONS*skipk nlines = None # Read till end. if bands_range: _b_r = list(bands_range) # First line is comment but it is taken out by exclude in islice2array. start = [[NIONS*NBANDS*k + NIONS*b for b in _b_r] for k in range(skipk,NKPTS)] start = [s for ss in start for s in ss] #flatten nlines = NIONS # 1 band has nions NBANDS = _b_r[-1]-_b_r[0]+1 # upadte after start NKPTS = NKPTS-skipk # Update after start, and bands_range. COUNT = NIONS*NBANDS*NKPTS*NORBS data = islice2array(set_path,start=start,nlines=nlines,count=COUNT) data = data.reshape((NKPTS,NBANDS,NIONS,NORBS)).transpose([2,0,1,3]) return Dict2Data({'labels':fields,'pros':data}) # if above not worked, read from main vasprun.xml file. 
if(xml_data==None): xml_data=read_asxml() if not xml_data: return #Collect Projection fields fields=[]; for pro in xml_data.iter('projected'): for arr in pro.iter('field'): if('eig' not in arr.text and 'occ' not in arr.text): fields.append(arr.text.strip()) NORBS = len(fields) #Get NIONS for reshaping data NIONS=[int(atom.text) for atom in xml_data.iter('atoms')][0] for spin in xml_data.iter('set'): if spin.attrib=={'comment': 'spin{}'.format(spin_set)}: k_sets = [kp for kp in spin.iter('set') if 'kpoint' in kp.attrib['comment']] k_sets = k_sets[skipk:] NKPTS = len(k_sets) band_sets = [] for k_s in k_sets: b_set = [b for b in k_s.iter('set') if 'band' in b.attrib['comment']] if bands_range == None: band_sets.extend(b_set) else: b_r = list(bands_range) band_sets.extend(b_set[b_r[0]:b_r[-1]+1]) NBANDS = int(len(band_sets)/len(k_sets)) try: # Error prone solution but 5 times fater than list comprehension. bands_pro = (float(t) for band in band_sets for l in band.iter('r') for t in l.text.split()) COUNT = NKPTS*NBANDS*NORBS*NIONS # Must be counted for performance. data = np.fromiter(bands_pro,dtype=float,count=COUNT) except: # Alternate slow solution print("Error using `np.fromiter`.\nFalling back to (slow) list comprehension...",end=' ') bands_pro = (l.text for band in band_sets for l in band.iter('r')) bands_pro = [[float(t) for t in text.split()] for text in bands_pro] data = np.array(bands_pro) del bands_pro # Release memory print("Done.") data = data.reshape((NKPTS,NBANDS,NIONS,NORBS)).transpose((2,0,1,3)) return Dict2Data({'labels':fields,'pros':data}) get_bands_pro_set(xml_data,skipk=0,spin_set=1,bands_range=range(0, 1)) #export def get_dos_pro_set(xml_data=None,spin_set=1,dos_range=None): """ - Returns dos projection of a spin_set(default 1) as numpy array. If spin-polarized calculations, gives SpinUp and SpinDown keys as well. - **Parameters** - xml_data : From `read_asxml` function - spin_set : Spin set to get, default 1. 
- dos_range : If elim used in `get_tdos`,that will return dos_range to use here.. - **Returns** - Data : pivotpy.Dict2Data with attibutes of dos projections and related parameters. """ if(dos_range!=None): check_list=list(dos_range) if(check_list==[]): return print(gu.color.r("No DOS prjections found in given energy range.")) if(xml_data==None): xml_data=read_asxml() if not xml_data: return n_ions=get_summary(xml_data=xml_data).NION for pro in xml_data.iter('partial'): dos_fields=[field.text.strip()for field in pro.iter('field')] #Collecting projections. dos_pro=[]; set_pro=[]; #set_pro=[] in case spin set does not exists for ion in range(n_ions): for node in pro.iter('set'): if(node.attrib=={'comment': 'ion {}'.format(ion+1)}): for spin in node.iter('set'): if(spin.attrib=={'comment': 'spin {}'.format(spin_set)}): set_pro=[[float(entry) for entry in r.text.split()] for r in spin.iter('r')] dos_pro.append(set_pro) if dos_range==None: #full grid computed. dos_pro=np.array(dos_pro) #shape(NION,e_grid,pro_fields) else: dos_range=list(dos_range) min_ind=dos_range[0] max_ind=dos_range[-1]+1 dos_pro=np.array(dos_pro)[:,min_ind:max_ind,:] final_data=np.array(dos_pro) #shape(NION,e_grid,pro_fields) return Dict2Data({'labels':dos_fields,'pros':final_data}) #export def get_structure(xml_data=None): """ - Returns structure's volume,basis,positions and rec-basis. - **Parameters** - xml_data : From `read_asxml` function. - **Returns** - Data : pivotpy.Dict2Data with attibutes volume,basis,positions rec_basis and labels. 
""" if(xml_data==None): xml_data=read_asxml() if not xml_data: return SYSTEM = [i.text for i in xml_data.iter('i') if i.attrib['name'] == 'SYSTEM'][0] for final in xml_data.iter('structure'): if(final.attrib=={'name': 'finalpos'}): for i in final.iter('i'): volume=float(i.text) for arr in final.iter('varray'): if(arr.attrib=={'name': 'basis'}): basis=[[float(a) for a in v.text.split()] for v in arr.iter('v')] if(arr.attrib=={'name': 'rec_basis'}): rec_basis=[[float(a) for a in v.text.split()] for v in arr.iter('v')] if(arr.attrib=={'name': 'positions'}): positions=[[float(a) for a in v.text.split()] for v in arr.iter('v')] # element labels types = [int(_type.text) for _type in xml_data.iter('types')][0] elems = [info[0].text.strip() for info in xml_data.iter('rc')] _inds = np.array([int(a) for a in elems[-types:]]) _nums = [k + 1 for i in _inds for k in range(i)] labels = [f"{e} {i}" for i, e in zip(_nums,elems)] INDS = np.cumsum([0,*_inds]).astype(int) Names = list(np.unique(elems[:-types])) unique_d = {e:range(INDS[i],INDS[i+1]) for i,e in enumerate(Names)} st_dic={'SYSTEM':SYSTEM,'volume': volume,'basis': np.array(basis),'rec_basis': np.array(rec_basis),'positions': np.array(positions),'labels':labels,'unique': unique_d} return Dict2Data(st_dic) get_structure(xml_data=xml_data) ``` ## Quick Export for Bandstructure A fully comprehensive command that uses all functions and returns data for spin set 1 (set 1 and 2 if spin-polarized calculations) could be constructed for immediate usage. It is `export_vasrun()`. ``` #export def export_vasprun(path=None, skipk=None, elim=[], kseg_inds=[], shift_kpath=0, try_pwsh = True ): """ - Returns a full dictionary of all objects from `vasprun.xml` file. It first try to load the data exported by powershell's `Export-VR(Vasprun)`, which is very fast for large files. It is recommended to export large files in powershell first. - **Parameters** - path : Path to `vasprun.xml` file. Default is `'./vasprun.xml'`. 
- skipk : Default is None. Automatically detects kpoints to skip. - elim : List [min,max] of energy interval. Default is [], covers all bands. - kseg_inds : List of indices of kpoints where path is broken. - shift_kpath: Default 0. Can be used to merge multiple calculations on single axes side by side. - try_pwsh : Default is True and tries to load data exported by `Vasp2Visual` in Powershell. - **Returns** - Data : Data accessible via dot notation containing nested Data objects: - sys_info : System Information - dim_info : Contains information about dimensions of returned objects. - kpoints : numpy array of kpoints with excluded IBZKPT points - kpath : 1D numpy array directly accessible for plot. - bands : Data containing bands. - tdos : Data containing total dos. - pro_bands : Data containing bands projections. - pro_dos : Data containing dos projections. - poscar : Data containing basis,positions, rec_basis and volume. """ # Try to get files if exported data in PowerShell. if try_pwsh: req_files = ['Bands.txt','tDOS.txt','pDOS.txt','Projection.txt','SysInfo.py'] if path and os.path.isfile(path): req_files = [os.path.join( os.path.dirname(os.path.abspath(path)),f) for f in req_files] logic = [os.path.isfile(f) for f in req_files] if not False in logic: print('Loading from PowerShell Exported Data...') return load_export(path=(path if path else './vasprun.xml')) # Proceed if not files from PWSH if path==None: path='./vasprun.xml' try: xml_data = read_asxml(path=path) except: return base_dir = os.path.split(os.path.abspath(path))[0] set_paths = [os.path.join(base_dir,"_set{}.txt".format(i)) for i in (1,2)] #First exclude unnecessary kpoints. Includes only same weight points if skipk!=None: skipk=skipk else: skipk = exclude_kpts(xml_data=xml_data) #that much to skip by default info_dic = get_summary(xml_data=xml_data) #Reads important information of system. 
#KPOINTS kpts = get_kpts(xml_data=xml_data,skipk=skipk,kseg_inds=kseg_inds) #EIGENVALS eigenvals = get_evals(xml_data=xml_data,skipk=skipk,elim=elim) #TDOS tot_dos = get_tdos(xml_data=xml_data,spin_set=1,elim=elim) #Bands and DOS Projection if elim: bands_range = eigenvals.indices #indices in range form. grid_range=tot_dos.grid_range else: bands_range=None #projection function will read itself. grid_range=None if(info_dic.ISPIN==1): pro_bands = get_bands_pro_set(xml_data=xml_data,spin_set=1,skipk=skipk,bands_range=bands_range,set_path=set_paths[0]) pro_dos = get_dos_pro_set(xml_data=xml_data,spin_set=1,dos_range=grid_range) if(info_dic.ISPIN==2): pro_1 = get_bands_pro_set(xml_data=xml_data,spin_set=1,skipk=skipk,bands_range=bands_range,set_path=set_paths[0]) pro_2 = get_bands_pro_set(xml_data=xml_data,spin_set=2,skipk=skipk,bands_range=bands_range,set_path=set_paths[1]) pros={'SpinUp': pro_1.pros,'SpinDown': pro_2.pros}#accessing spins in dictionary after .pro. pro_bands={'labels':pro_1.labels,'pros': pros} pdos_1 = get_dos_pro_set(xml_data=xml_data,spin_set=1,dos_range=grid_range) pdos_2 = get_dos_pro_set(xml_data=xml_data,spin_set=1,dos_range=grid_range) pdos={'SpinUp': pdos_1.pros,'SpinDown': pdos_2.pros}#accessing spins in dictionary after .pro. pro_dos={'labels':pdos_1.labels,'pros': pdos} #Structure poscar = get_structure(xml_data=xml_data) poscar = {'SYSTEM':info_dic.SYSTEM,**poscar.to_dict()} #Dimensions dictionary. dim_dic={'kpoints':'(NKPTS,3)','kpath':'(NKPTS,1)','bands':'โ‡…(NKPTS,NBANDS)','dos':'โ‡…(grid_size,3)','pro_dos':'โ‡…(NION,grid_size,en+pro_fields)','pro_bands':'โ‡…(NION,NKPTS,NBANDS,pro_fields)'} #Writing everything to be accessible via dot notation kpath=[k+shift_kpath for k in kpts.kpath] # shift kpath for side by side calculations. 
full_dic={'sys_info':info_dic,'dim_info':dim_dic,'kpoints':kpts.kpoints,'kpath':kpath,'bands':eigenvals, 'tdos':tot_dos,'pro_bands':pro_bands,'pro_dos':pro_dos,'poscar': poscar} return Dict2Data(full_dic) export_vasprun(path='E:/Research/graphene_example/ISPIN_1/bands/vasprun.xml',elim=[-1,0],try_pwsh=True) #export def _validate_evr(path_evr=None,**kwargs): "Validates data given for plotting functions. Returns a tuple of (Boolean,data)." if type(path_evr) == Dict2Data: vr = path_evr elif path_evr is None: path_evr = './vasprun.xml' if isinstance(path_evr,str) and os.path.isfile(path_evr): # kwargs -> skipk=skipk,elim=elim,kseg_inds=kseg_inds vr = export_vasprun(path=path_evr,**kwargs) # Apply a robust final check. try: vr.bands;vr.kpath return (True,vr) except: return (False,path_evr) ``` ## Joining Multiple Calculations - Sometimes one may need to compare two or more bandstructures in same figure, for that reason, it is easy to export two calculations and plot on same axis. - There is another situation, if you have a large supercell and split calculations into multiple ones, joining that calculations works same way, you will add the last value of first kpath into all values of next kpath and next last to next and so on, by just using `shift_kpath` in `export_vasprun` and plot each export on same axis, this will align bandstructures side by side on same axis. ## Load Exported Vasprun from PowerShell On Windows, it will work automatically. On Linux/Mac it may require path to powershell executable. ``` #export def load_export(path= './vasprun.xml', kseg_inds =[], shift_kpath = 0, path_to_ps='pwsh', skipk = None, max_filled = 10, max_empty = 10, keep_files = True ): """ - Returns a full dictionary of all objects from `vasprun.xml` file exported using powershell. - **Parameters** - path : Path to `vasprun.xml` file. Default is `'./vasprun.xml'`. - skipk : Default is None. Automatically detects kpoints to skip. - path_to_ps : Path to `powershell.exe`. 
Automatically picks on Windows and Linux if added to PATH. - kseg_inds : List of indices of kpoints where path is broken. - shift_kpath: Default 0. Can be used to merge multiple calculations side by side. - keep_files : Could be use to clean exported text files. Default is True. - max_filled : Number of filled bands below and including VBM. Default is 10. - max_empty : Number of empty bands above VBM. Default is 10. - **Returns** - Data : Data accessible via dot notation containing nested Data objects: - sys_info : System Information - dim_info : Contains information about dimensions of returned objects. - kpoints : numpy array of kpoints with excluded IBZKPT points - kpath : 1D numpy array directly accessible for plot. - bands : Data containing bands. - tdos : Data containing total dos. - pro_bands : Data containing bands projections. - pro_dos : Data containing dos projections. - poscar : Data containing basis,positions, rec_basis and volume. """ that_loc, file_name = os.path.split(os.path.abspath(path)) # abspath is important to split. with gu.set_dir(that_loc): # Goes there and work i = 0 required_files = ['Bands.txt','tDOS.txt','pDOS.txt','Projection.txt','SysInfo.py'] for _file in required_files: if os.path.isfile(_file): i = i + 1 if i < 5: if skipk != None: gu.ps2std(path_to_ps=path_to_ps,ps_command='Import-Module Vasp2Visual; Export-VR -InputFile {} -MaxFilled {} -MaxEmpty {} -SkipK {}'.format(path,max_filled,max_empty,skipk)) else: gu.ps2std(path_to_ps=path_to_ps,ps_command='Import-Module Vasp2Visual; Export-VR -InputFile {} -MaxFilled {} -MaxEmpty {}'.format(path,max_filled,max_empty)) # Enable loading SysInfo.py file as source. 
_vars = SourceFileLoader("SysInfo", "./SysInfo.py").load_module() SYSTEM = _vars.SYSTEM NKPTS = _vars.NKPTS NBANDS = _vars.NBANDS NFILLED = _vars.NFILLED TypeION = _vars.TypeION NION = _vars.NION NELECT = _vars.NELECT nField_Projection = _vars.nField_Projection E_Fermi = _vars.E_Fermi ISPIN = _vars.ISPIN ElemIndex = _vars.ElemIndex ElemName = _vars.ElemName poscar = {'SYSTEM': SYSTEM, 'volume':_vars.volume, 'basis' : np.array(_vars.basis), 'rec_basis': np.array(_vars.rec_basis), 'positions': np.array(_vars.positions) } fields = _vars.fields incar = _vars.INCAR # Elements Labels elem_labels = [] for i, name in enumerate(ElemName): for ind in range(ElemIndex[i],ElemIndex[i+1]): elem_labels.append(f"{name} {str(ind - ElemIndex[i] + 1)}") poscar.update({'labels': elem_labels}) # Unique Elements Ranges unique_d = {} for i,e in enumerate(ElemName): unique_d.update({e:range(ElemIndex[i],ElemIndex[i+1])}) poscar.update({'unique': unique_d}) # Load Data bands= np.loadtxt('Bands.txt').reshape((-1,NBANDS+4)) #Must be read in 2D even if one row only. start = int(open('Bands.txt').readline().split()[4][1:]) pro_bands= np.loadtxt('Projection.txt').reshape((-1,NBANDS*nField_Projection)) pro_dos = np.loadtxt('pDOS.txt') dos= np.loadtxt('tDOS.txt') # Keep or delete only if python generates files (i < 5 case.) if(keep_files==False and i==5): for file in required_files: os.remove(file) # Returns back # Work now! 
sys_info = {'SYSTEM': SYSTEM,'NION': NION,'NELECT':NELECT,'TypeION': TypeION,'ElemName': ElemName, 'E_Fermi': E_Fermi,'fields':fields, 'incar': incar,'ElemIndex': ElemIndex,'ISPIN': ISPIN} dim_info = {'kpoints': '(NKPTS,3)','kpath': '(NKPTS,1)','bands': 'โ‡…(NKPTS,NBANDS)','dos': 'โ‡…(grid_size,3)', 'pro_dos': 'โ‡…(NION,grid_size,en+pro_fields)','pro_bands': 'โ‡…(NION,NKPTS,NBANDS,pro_fields)'} bands_dic,tdos_dic,pdos_dic,pro_dic,kpath={},{},{},{},[] if(ISPIN==1): kpath = bands[:,3] kpoints = bands[:,:3] evals = bands[:,4:] bands_dic = {'E_Fermi': E_Fermi, 'ISPIN': ISPIN, 'NBANDS': NBANDS, 'evals': evals, 'indices': range(start,start+NBANDS)} tdos_dic = {'E_Fermi': E_Fermi, 'ISPIN': ISPIN,'tdos': dos} pdos = pro_dos.reshape(NION,-1,nField_Projection+1) pdos_dic = {'labels': fields,'pros': pdos} pros = pro_bands.reshape(NION,NKPTS,NBANDS,-1) pro_dic = {'labels': fields,'pros': pros} if(ISPIN==2): # Bands kpath = bands[:NKPTS,3] kpoints = bands[:NKPTS,:3] SpinUp = bands[:NKPTS,4:] SpinDown= bands[NKPTS:,4:] evals = {'SpinUp':SpinUp,'SpinDown': SpinDown} bands_dic = {'E_Fermi': E_Fermi, 'ISPIN': ISPIN, 'NBANDS': NBANDS, 'evals': evals,'indices': range(start,start+NBANDS)} # tDOS dlen = int(np.shape(dos)[0]/2) SpinUp = dos[:dlen,:] SpinDown= dos[dlen:,:] tdos = {'SpinUp':SpinUp,'SpinDown': SpinDown} tdos_dic= {'E_Fermi': E_Fermi, 'ISPIN': ISPIN,'tdos': tdos} # pDOS plen = int(np.shape(pro_dos)[0]/2) SpinUp = pro_dos[:plen,:].reshape(NION,-1,nField_Projection+1) SpinDown= pro_dos[plen:,:].reshape(NION,-1,nField_Projection+1) pdos = {'SpinUp':SpinUp,'SpinDown': SpinDown} pdos_dic= {'labels': fields,'pros': pdos} # projections pblen = int(np.shape(pro_bands)[0]/2) SpinUp = pro_bands[:pblen,:].reshape(NION,NKPTS,NBANDS,-1) SpinDown= pro_bands[pblen:,:].reshape(NION,NKPTS,NBANDS,-1) pros = {'SpinUp': SpinUp,'SpinDown': SpinDown} pro_dic = {'labels': fields,'pros': pros} # If broken path, then join points. 
kpath = join_ksegments(kpath,kseg_inds) kpath=[k+shift_kpath for k in kpath.copy()] # Shift kpath full_dic = {'sys_info': sys_info,'dim_info': dim_info,'kpoints': kpoints,'kpath':kpath, 'bands':bands_dic,'tdos':tdos_dic,'pro_bands': pro_dic ,'pro_dos': pdos_dic, 'poscar':poscar} return Dict2Data(full_dic) ``` This back and forth data transport is required in [pivotpy-dash](https://github.com/massgh/pivotpy-dash) app where data is stored in browser in json format, but needs to by python objects for figures. ## Write Clean data to JSON or Pickle file Use `dump_vasprun` to write output of `export_vasprun` or `load_export` to pickle/json file. Pickle is useful for quick load in python while json is useful to transfer data into any language. ``` #export def dump_dict(dict_data = None, dump_to = 'pickle',outfile = None,indent=1): """ - Dump an `export_vasprun` or `load_export`'s `Data` object or any dictionary to json or pickle string/file. It convert `Dict2Data` to dictionary before serializing to json/pickle, so json/pickle.loads() of converted Data would be a simple dictionary, pass that to `Dict2Data` to again make accessible via dot notation. - **Parameters** - dict_data : Any dictionary/Dict2Data object containg numpy arrays, including `export_vasprun` or `load_export` output. - dump_to : Defualt is `pickle` or `json`. - outfile : Defualt is None and return string. File name does not require extension. - indent : Defualt is 1. Only works for json. 
""" if dump_to not in ['pickle','json']: return print("`dump_to` expects 'pickle' or 'json', got '{}'".format(dump_to)) try: dict_obj = dict_data.to_dict() # Change Data object to dictionary except: dict_obj = dict_data if dump_to == 'pickle': if outfile == None: return pickle.dumps(dict_obj) outfile = outfile.split('.')[0] + '.pickle' with open(outfile,'wb') as f: pickle.dump(dict_obj,f) if dump_to == 'json': if outfile == None: return json.dumps(dict_obj,cls = gu.EncodeFromNumpy,indent=indent) outfile = outfile.split('.')[0] + '.json' with open(outfile,'w') as f: json.dump(dict_obj,f,cls = gu.EncodeFromNumpy,indent=indent) return None #export def load_from_dump(file_or_str,keep_as_dict=False): """ - Loads a json/pickle dumped file or string by auto detecting it. - **Parameters** - file_or_str : Filename of pickl/json or their string. - keep_as_dict: Defualt is False and return `Data` object. If True, returns dictionary. """ out = {} if not isinstance(file_or_str,bytes): try: #must try, else fails due to path length issue if os.path.isfile(file_or_str): if '.pickle' in file_or_str: with open(file_or_str,'rb') as f: out = pickle.load(f) elif '.json' in file_or_str: with open(file_or_str,'r') as f: out = json.load(f,cls = gu.DecodeToNumpy) else: out = json.loads(file_or_str,cls = gu.DecodeToNumpy) # json.loads required in else and except both as long str > 260 causes issue in start of try block except: out = json.loads(file_or_str,cls = gu.DecodeToNumpy) elif isinstance(file_or_str,bytes): out = pickle.loads(file_or_str) if type(out) is dict and keep_as_dict == False: return Dict2Data(out) return out import pivotpy as pp evr = pp.export_vasprun('../vasprun.xml') s = dump_dict(evr.poscar,dump_to='pickle') #print(s) load_from_dump(s) ``` ## Parse Text Files with Flexibility - The function `islice2array` is used to read text files which have patterns of text and numbers inline, such as EIGENVAL and PROCAR. 
With all the options of this function, reading and parsing of such files should take a few lines of code only. It can be used to read txt,csv tsv as well with efficent speed. - It reads a file without fully loading into memory and you can still access slices of data in the file. That partial data fetching from file is very handy. ``` #export def islice2array(path_or_islice,dtype=float,delimiter='\s+', include=None,exclude='#',raw=False,fix_format = True, start=0,nlines=None,count=-1,cols=None,new_shape=None ): """ - Reads a sliced array from txt,csv type files and return to array. Also manages if columns lengths are not equal and return 1D array. It is faster than loading whole file into memory. This single function could be used to parse EIGENVAL, PROCAR, DOCAR and similar files with just a combination of `exclude, include,start,stop,step` arguments. - **Parameters** - path_or_islice: Path/to/file or `itertools.islice(file_object)`. islice is interesting when you want to read different slices of an opened file and do not want to open it again and again. For reference on how to use it just execute `pivotpy.export_potential??` in a notebook cell or ipython terminal to see how islice is used extensively. - dtype: float by default. Data type of output array, it is must have argument. - start,nlines: The indices of lines to start reading from and number of lines after start respectively. Only work if `path_or_islice` is a file path. both could be None or int, while start could be a list to read slices from file provided that nlines is int. The spacing between adjacent indices in start should be equal to or greater than nlines as pointer in file do not go back on its own. These parameters are in output of `slice_data` > Note: `start` should count comments if `exclude` is None. You can use `slice_data` function to get a dictionary of `start,nlines, count, cols, new_shape` and unpack in argument instead of thinking too much. 
- count: `np.size(output_array) = nrows x ncols`, if it is known before execution, performance is increased. This parameter is in output of `slice_data`. - delimiter: Default is `\s+`. Could be any kind of delimiter valid in numpy and in the file. - cols: List of indices of columns to pick. Useful when reading a file like PROCAR which e.g. has text and numbers inline. This parameter is in output of `slice_data`. - include: Default is None and includes everything. String of patterns separated by | to keep, could be a regular expression. - exclude: Default is '#' to remove comments. String of patterns separated by | to drop,could be a regular expression. - raw : Default is False, if True, returns list of raw strings. Useful to select `cols`. - fix_format: Default is True, it sepearates numbers with poor formatting like 1.000-2.000 to 1.000 2.000 which is useful in PROCAR. Keep it False if want to read string literally. - new_shape : Tuple of shape Default is None. Will try to reshape in this shape, if fails fallbacks to 2D or 1D. This parameter is in output of `slice_data`. - **Examples** > `islice2array('path/to/PROCAR',start=3,include='k-point',cols=[3,4,5])[:2]` > array([[ 0.125, 0.125, 0.125], > [ 0.375, 0.125, 0.125]]) > `islice2array('path/to/EIGENVAL',start=7,exclude='E',cols=[1,2])[:2]` > array([[-11.476913, 1. ], > [ 0.283532, 1. ]]) > Note: Slicing a dimension to 100% of its data is faster than let say 80% for inner dimensions, so if you have to slice more than 50% of an inner dimension, then just load full data and slice after it. """ if nlines is None and isinstance(start,(list,np.ndarray)): print("`nlines = None` with `start = array/list` is useless combination.") return np.array([]) # return empty array. 
def _fixing(_islice,include=include, exclude=exclude,fix_format=fix_format,nlines=nlines,start=start): if include: _islice = (l for l in _islice if re.search(include,l)) if exclude: _islice = (l for l in _islice if not re.search(exclude,l)) # Make slices here after comment excluding. if isinstance(nlines,int) and isinstance(start,(list,np.ndarray)): #As islice moves the pointer as it reads, start[1:]-nlines-1 # This confirms spacing between two indices in start >= nlines start = [start[0],*[s2-s1-nlines for s1,s2 in zip(start,start[1:])]] _islice = chain(*(islice(_islice,s,s+nlines) for s in start)) elif isinstance(nlines,int) and isinstance(start,int): _islice = islice(_islice,start,start+nlines) elif nlines is None and isinstance(start,int): _islice = islice(_islice,start,None) # Negative connected digits to avoid, especially in PROCAR if fix_format: _islice = (re.sub(r"(\d)-(\d)",r"\1 -\2",l) for l in _islice) return _islice def _gen(_islice,cols=cols): for line in _islice: line = line.strip().replace(delimiter,' ').split() if line and cols is not None: # if is must here. line = [line[i] for i in cols] for chars in line: yield dtype(chars) #Process Now if isinstance(path_or_islice,str) and os.path.isfile(path_or_islice): with open(path_or_islice,'r') as f: _islice = islice(f,0,None) # Read full, Will fix later. _islice = _fixing(_islice) if raw: return ''.join(_islice) # Must to consume islice when file is open data = np.fromiter(_gen(_islice),dtype=dtype,count=count) else: _islice = _fixing(path_or_islice) if raw: return ''.join(_islice) data = np.fromiter(_gen(_islice),dtype=dtype,count=count) if new_shape: try: data = data.reshape(new_shape) except: pass elif cols: #Otherwise single array. try: data = data.reshape((-1,len(cols))) except: pass return data #export def slice_data(dim_inds,old_shape): """ - Returns a dictionary that can be unpacked in arguments of isclice2array function. 
This function works only for regular txt/csv/tsv data files which have rectangular data written. - **Parameters** - dim_inds : List of indices array or range to pick from each dimension. Inner dimensions are more towards right. Last itmes in dim_inds is considered to be columns. If you want to include all values in a dimension, you can put -1 in that dimension. Note that negative indexing does not work in file readig, -1 is s special case to fetch all items. - old_shape: Shape of data set including the columns length in right most place. - **Example** - You have data as 3D arry where third dimension is along column. > 0 0 > 0 2 > 1 0 > 1 2 - To pick [[0,2], [1,2]], you need to give > slice_data(dim_inds = [[0,1],[1],-1], old_shape=(2,2,2)) > {'start': array([1, 3]), 'nlines': 1, 'count': 2} - Unpack above dictionary in `islice2array` and you will get output array. - Note that dimensions are packed from right to left, like 0,2 is repeating in 2nd column. """ # Columns are treated diffiernetly. if dim_inds[-1] == -1: cols = None else: cols = list(dim_inds[-1]) r_shape = old_shape[:-1] dim_inds = dim_inds[:-1] for i,ind in enumerate(dim_inds.copy()): if ind == -1: dim_inds[i] = range(r_shape[i]) nlines = 1 #start = [[NIONS*NBANDS*k + NIONS*b for b in _b_r] for k in range(skipk,NKPTS)] #kind of thing. _prod_ = product(*dim_inds) _mult_ = [np.product(r_shape[i+1:]) for i in range(len(r_shape))] _out_ = np.array([np.dot(p,_mult_) for p in _prod_]).astype(int) # check if innermost dimensions could be chunked. step = 1 for i in range(-1,-len(dim_inds),-1): _inds = np.array(dim_inds[i]) #innermost if np.max(_inds[1:] - _inds[:-1]) == 1: # consecutive step = len(_inds) _out_ = _out_[::step] # Pick first indices nlines = step*nlines # Now check if all indices picked then make chunks in outer dimensions too. if step != r_shape[i]: # Can't make chunk of outer dimension if inner is not 100% picked. 
break # Stop more chunking new_shape = [len(inds) for inds in dim_inds] #dim_inds are only in rows. new_shape.append(old_shape[-1]) return {'start':_out_,'nlines':nlines,'count': nlines*len(_out_),'cols':cols,'new_shape':tuple(new_shape)} slice_data([list(range(1,7)),-1,-1,range(2)],old_shape=[52,768,64,9]) ``` ## Process Largs `vasprun.xml` Files You can split a large vasprun.xml file in a small `_vasprun.xml` file which does not contain projected data, and `_set[1,2,3,4].txt` file(s) which contain projected data of each spin set. These spin set text files can be processed by `islice2array` function efficiently. ``` #export def split_vasprun(path=None): """ - Splits a given vasprun.xml file into a smaller _vasprun.xml file plus _set[1,2,3,4].txt files which contain projected data for each spin set. - **Parameters** - path: path/to/vasprun.xml file. - **Output** - _vasprun.xml file with projected data. - _set1.txt for projected data of colinear calculation. - _set1.txt for spin up data and _set2.txt for spin-polarized case. - _set[1,2,3,4].txt for each spin set of non-colinear calculations. """ if not path: path = './vasprun.xml' if not os.path.isfile(path): return print("{!r} does not exist!".format(path)) base_dir = os.path.split(os.path.abspath(path))[0] out_file = os.path.join(base_dir,'_vasprun.xml') out_sets = [os.path.join(base_dir,'_set{}.txt'.format(i)) for i in range(1,5)] # process with open(path,'r') as f: lines = islice(f,None) indices = [i for i,l in enumerate(lines) if re.search('projected|/eigenvalues',l)] f.seek(0) print("Writing {!r} ...".format(out_file),end=' ') with open(out_file,'w') as outf: outf.write(''.join(islice(f,0,indices[1]))) f.seek(0) outf.write(''.join(islice(f,indices[-1]+1,None))) print('Done') f.seek(0) middle = islice(f,indices[-2]+1,indices[-1]) #projected words excluded spin_inds = [i for i,l in enumerate(middle) if re.search('spin',l)][1:] #first useless. 
if len(spin_inds)>1: set_length = spin_inds[1]-spin_inds[0] # Must define else: set_length = indices[-1]-indices[-2] #It is technically more than set length, but fine for 1 set f.seek(0) # Must be at zero N_sets = len(spin_inds) # Let's read shape from out_file as well. xml_data = read_asxml(out_file) _summary = get_summary(xml_data) NIONS = _summary.NION NORBS = len(_summary.fields) NBANDS = get_evals(xml_data).NBANDS NKPTS = get_kpts(xml_data).NKPTS del xml_data # free meory now. for i in range(N_sets): #Reads every set print("Writing {!r} ...".format(out_sets[i]),end=' ') start = (indices[-2]+1+spin_inds[0] if i==0 else 0) # pointer is there next time. stop_ = start + set_length # Should move up to set length only. with open(out_sets[i],'w') as setf: setf.write(" # Set: {} Shape: (NKPTS[NBANDS[NIONS]],NORBS) = {},{},{},{}\n".format(i+1,NKPTS,NBANDS,NIONS,NORBS)) middle = islice(f,start,stop_) setf.write(''.join(l.lstrip().replace('/','').replace('<r>','') for l in middle if '</r>' in l)) print('Done') #hide_input import pivotpy as pp pp.nav_links(1) ```
github_jupyter
# Experiments with kernel machines

In this notebook we will use simple two-dimensional data sets to illustrate the behavior of the support vector machine and the Perceptron, when used with quadratic and RBF kernels.

## 1. Basic training procedure

```
%matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.svm import SVC
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
import pandas as pd
```

The directory containing this notebook should also contain two-dimensional data files, `data1.txt` through `data5.txt`. These files contain one data point per line, along with a label (either -1 or 1), like:
* `3 8 -1` (meaning that point `x=(3,8)` has label `y=-1`)

The next procedure, **learn_and_display_SVM**, loads one of these data sets, invokes `sklearn.SVC` to learn a classifier, and then displays the data as well as the boundary. It is invoked as follows:
* `learn_and_display_SVM(datafile, kernel_type, C_value, s_value)`

where
* `datafile` is one of `'data1.txt'` through `'data5.txt'` (or another file in the same format)
* `kernel_type` is either `'quadratic'` or `'rbf'`
* `C_value` is the setting of the soft-margin parameter `C` (default: 1.0)
* `s_value` (for the RBF kernel) is the scaling parameter `s` (default: 1.0)

```
np.zeros(3,dtype=bool)

def learn_and_display_SVM(datafile, kernel_type='rbf', C_value=1.0, s_value=1.0):
    """Load a 2-D labelled data set, fit an SVC, and plot the result.

    Plots the data points (support vectors enlarged) and a quantized
    rendering of the decision function (-1, -0.5, 0.5, 1 bands).

    Parameters:
    - datafile: text file with rows "x1 x2 label", label in {-1, 1}
    - kernel_type: 'rbf' or 'quadratic' (degree-2 polynomial with coef0=1);
      NOTE(review): any other value leaves `clf` unbound and the call to
      `clf.fit` raises NameError
    - C_value: soft-margin parameter C
    - s_value: RBF scale; gamma is set to 1/s^2 (ignored for 'quadratic')
    """
    data = np.loadtxt(datafile)
    n,d = data.shape
    # Create training set x and labels y
    x = data[:,0:2]
    y = data[:,2]
    # Now train a support vector machine and identify the support vectors
    if kernel_type == 'rbf':
        clf = SVC(kernel='rbf', C=C_value, gamma=1.0/(s_value*s_value))  # gamma = 1/s^2
    if kernel_type == 'quadratic':
        clf = SVC(kernel='poly', degree=2, C=C_value, coef0=1.0)
    clf.fit(x,y)
    # Boolean mask over the n points: True for support vectors
    sv = np.zeros(n,dtype=bool)
    sv[clf.support_] = True
    notsv = np.logical_not(sv)
    # Determine the x1- and x2- limits of the plot
    x1min = min(x[:,0]) - 1
    x1max = max(x[:,0]) + 1
    x2min = min(x[:,1]) - 1
    x2max = max(x[:,1]) + 1
    plt.xlim(x1min,x1max)
    plt.ylim(x2min,x2max)
    # Plot the data points, enlarging those that are support vectors
    plt.plot(x[(y==1)*notsv,0], x[(y==1)*notsv,1], 'ro')
    plt.plot(x[(y==1)*sv,0], x[(y==1)*sv,1], 'ro', markersize=10)
    plt.plot(x[(y==-1)*notsv,0], x[(y==-1)*notsv,1], 'k^')
    plt.plot(x[(y==-1)*sv,0], x[(y==-1)*sv,1], 'k^', markersize=10)
    # Construct a grid of points and evaluate classifier at each grid points
    grid_spacing = 0.05
    xx1, xx2 = np.meshgrid(np.arange(x1min, x1max, grid_spacing), np.arange(x2min, x2max, grid_spacing))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    Z = clf.decision_function(grid)
    # Quantize the values to -1, -0.5, 0, 0.5, 1 for display purposes
    for i in range(len(Z)):
        Z[i] = min(Z[i],1.0)
        Z[i] = max(Z[i],-1.0)
        if (Z[i] > 0.0) and (Z[i] < 1.0):
            Z[i] = 0.5
        if (Z[i] < 0.0) and (Z[i] > -1.0):
            Z[i] = -0.5
    # Show boundary and margin using a color plot
    Z = Z.reshape(xx1.shape)
    plt.pcolormesh(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2)
    plt.show()
```

## 2. Experiments with the quadratic kernel

Let's try out SVM on some examples, starting with the quadratic kernel.

```
learn_and_display_SVM('data1.txt', 'quadratic', 1)
```

Also try `data2.txt` through `data5.txt`. Also try changing the value of `C` (the third parameter) to see how that affects the boundary and margin.

```
learn_and_display_SVM('data2.txt', 'quadratic', 1)
```

## 3. Experiments with the RBF kernel

Now experiment with the RBF kernel, on the same five data sets. This time there are two parameters to play with: `C` and `sigma`.

```
learn_and_display_SVM('data1.txt', 'rbf', 10.0, 10.0)
learn_and_display_SVM('data2.txt', 'rbf', 10.0, 10.0)
```

## 4. The kernel Perceptron

<font color="magenta">**For you to do:**</font> Implement the kernel Perceptron algorithm as specified in lecture. 
Your algorithm should allow both the quadratic and RBF kernel, and should follow roughly the same signature as the SVM routine above:
* `learn_and_display_Perceptron(datafile, kernel_type, s_value)`

Recall that the Perceptron algorithm does not always converge; you will need to explicitly check for this.
- http://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics.pairwise

```
from sklearn.metrics.pairwise import rbf_kernel, polynomial_kernel
from numpy import linalg as LA

a = np.arange(8).reshape(4, 2)
b = np.array([3, 0])
a[0,]-b
LA.norm(a[0,]-b, 2) # l2 norm distance
a
a[1,:].reshape(1, 2)
rbf_kernel(a, b.reshape(1, 2), gamma=.5)
np.exp(-1/2 * np.power(LA.norm(a[3,]-b,2), 2)) # rbf kernel
aa = np.zeros((4,))
aa[3] = 3
aa

def kernel_f(x, z, kernel_type='rbf', s=1.0):
    """Kernel evaluations k(x_i, z) between every training point and one query point.

    x: full training set (n x 2); z: a single data point shaped (1, 2).
    Returns an (n, 1) matrix of kernel values.
    NOTE(review): `s` is passed straight through as gamma here, whereas the
    SVM cell above uses gamma = 1/s^2 — confirm which scaling is intended.
    NOTE(review): an unknown kernel_type leaves K unbound (NameError).
    """
    if kernel_type == 'rbf':
        K = rbf_kernel(x, z, gamma=s)
    if kernel_type == 'quadratic':
        K = polynomial_kernel(x, z, degree=2, gamma=s)
    return K # K is a n*1 matrix

def evaluate_classifier(X, y, alpha, b, z, kernel_type='rbf', s=1.0):
    """Kernel-perceptron prediction for one point z.

    Returns sign(sum_i alpha_i * y_i * k(x_i, z) + b); note np.sign can
    return 0 when the argument is exactly on the boundary.
    """
    K = kernel_f(X, z.reshape(1,2), kernel_type=kernel_type, s=s)
    # print(K)
    y_hat = np.sign(np.sum(np.multiply(alpha.reshape(-1,1), np.multiply(y.reshape(-1,1), K.reshape(-1,1)))) + b)
    return y_hat
```

**converged means all data points have be classified correctly**

```
def train_perceptron(x,y,n_iters=100, kernel_type='rbf', s=1.0):
    """Kernel perceptron training loop.

    Repeats full passes over a random permutation of the data, bumping
    alpha[j] and b on every mistake (y_j * y_hat_j <= 0; the <= also counts
    boundary points, where np.sign returns 0, as mistakes).  Stops when a
    full pass makes no mistakes (converged) or after n_iters passes.
    Returns (alpha, b, converged).
    """
    n, d = x.shape
    alpha = np.zeros(n)
    b = 0
    iters = 0
    done = False
    converged = True
    while not(done):
        done = True
        I = np.random.permutation(n)  # visit points in random order each pass
        for i in range(n):
            j = I[i]
            z = x[j, :].reshape(1, 2)
            y_j = y[j]
            # K = kernel_f(x, z)
            y_hat_j = evaluate_classifier(X=x, y=y.reshape(n, 1), alpha=alpha, b=b, z=z, kernel_type=kernel_type, s=s)
            if y_j*y_hat_j <= 0:
                alpha[j] += 1
                b += y_j
                done = False
        iters += 1
        if iters > n_iters:
            done = True
            converged = False
    if converged:
        print("Perceptron algorithm: iterations until convergence: ", iters)
    else:
        print("Perceptron algorithm: did not converge within the specified number of iterations")
    return alpha, b, converged

###
### Any auxiliary functions that you need
###

def learn_and_display_Perceptron(datafile, kernel_type='rbf', s_value=1.0, max_iter=100):
    """Train the kernel perceptron on a 2-D data file and plot the result.

    Points with alpha > 0 (updated at least once) are drawn enlarged; the
    decision regions are drawn only if training converged.
    """
    data = np.loadtxt(datafile)
    n,d = data.shape
    # max_iter = 100
    # Create training set x and labels y
    x = data[:,0:2]
    y = data[:,2]
    alpha,b,converged = train_perceptron(x,y,n_iters=max_iter,kernel_type=kernel_type,s=s_value)
    print(alpha, b)
    sv = alpha.astype(bool)  # points whose alpha was ever incremented
    # sv[clf.support_] = True
    notsv = np.logical_not(sv)
    # Determine the x1- and x2- limits of the plot
    x1min = min(x[:,0]) - 1
    x1max = max(x[:,0]) + 1
    x2min = min(x[:,1]) - 1
    x2max = max(x[:,1]) + 1
    plt.xlim(x1min,x1max)
    plt.ylim(x2min,x2max)
    # Plot the data points
    plt.plot(x[(y==1)*notsv,0], x[(y==1)*notsv,1], 'ro')
    plt.plot(x[(y==1)*sv,0], x[(y==1)*sv,1], 'ro', markersize=10)
    plt.plot(x[(y==-1)*notsv,0], x[(y==-1)*notsv,1], 'k^')
    plt.plot(x[(y==-1)*sv,0], x[(y==-1)*sv,1], 'k^', markersize=10)
    # Construct a grid of points at which to evaluate the classifier
    if converged:
        grid_spacing = 0.05
        # classify all points in grid, each grid's area is 0.05*0.05
        xx1, xx2 = np.meshgrid(np.arange(x1min, x1max, grid_spacing), np.arange(x2min, x2max, grid_spacing))
        grid = np.c_[xx1.ravel(), xx2.ravel()]
        Z = np.array([evaluate_classifier(x,y,alpha,b,pt,kernel_type=kernel_type,s=s_value) for pt in grid])
        # Show the classifier's boundary using a color plot
        Z = Z.reshape(xx1.shape)
        plt.pcolormesh(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-3, vmax=3)
    plt.show()
```

<font color="magenta">Experiment with your routine, on the same five data sets.</font>

```
learn_and_display_Perceptron('data5.txt', kernel_type='quadratic', max_iter=10000)
learn_and_display_Perceptron('data2.txt', kernel_type='rbf', max_iter=10000)
```

#### some operations of numpy

```
alpha = np.zeros(10)
alpha[0] = 1
print(alpha)
y = np.ones(10)
K = np.random.rand(10).reshape(10,1)
K
np.multiply(alpha.reshape(-1,1), y.reshape(-1,1)) 
np.multiply(K, np.multiply(alpha, y)) # need to be very careful about the shape of each element # 2^2 elements in each dimension, great way to do Cartesian product xx1, xx2 = np.meshgrid(np.arange(1, 2, 0.5), np.arange(3, 4, 0.5)) grid = np.c_[xx1.ravel(), xx2.ravel()] grid [print(i) for i in grid] sv = np.zeros(4, dtype=bool) # convert 0 to False, non-zero to True sv alpha = np.array([0, 2, 3, 0]) alpha alpha.astype(bool) np.sign(-1), np.sign(2), np.sign(0) ```
github_jupyter
# `logictools` WaveDrom Tutorial [WaveDrom](http://wavedrom.com) is a tool for rendering digital timing waveforms. The waveforms are defined in a simple textual format. This notebook will show how to render digital waveforms using the pynq library. The __`logictools`__ overlay uses the same format as WaveDrom to specify and generate real signals on the board. A full tutorial of WaveDrom can be found [here](http://wavedrom.com/tutorial.html) ### Step 1: Import the `draw_wavedrom()` method from the pynq library ``` from pynq.lib.logictools.waveform import draw_wavedrom ``` A simple function to add wavedrom diagrams into a jupyter notebook. It utilizes the wavedrom java script library. <font color="DodgerBlue">**Example usage:**</font> ```python from pynq.lib.logictools.waveform import draw_wavedrom clock = {'signal': [{'name': 'clk', 'wave': 'h....l...'}]} draw_wavedrom(clock) ``` <font color="DodgerBlue">**Method:**</font> ```python def draw_wavedrom(data, width=None): # Note the optional argument width forces the width in pixels ``` ### Step 2: Specify and render a waveform ``` from pynq.lib.logictools.waveform import draw_wavedrom clock = {'signal': [{'name': 'clock_0', 'wave': 'hlhlhlhlhlhlhlhl'}], 'foot': {'tock': 1}, 'head': {'text': 'Clock Signal'}} draw_wavedrom(clock) ``` ### Step 3: Adding more signals to the waveform ``` from pynq.lib.logictools.waveform import draw_wavedrom pattern = {'signal': [{'name': 'clk', 'wave': 'hl' * 8}, {'name': 'clkn', 'wave': 'lh' * 8}, {'name': 'data0', 'wave': 'l.......h.......'}, {'name': 'data1', 'wave': 'h.l...h...l.....'}], 'foot': {'tock': 1}, 'head': {'text': 'Pattern'}} draw_wavedrom(pattern) ``` __Adding multiple wave groups and spaces__ ``` from pynq.lib.logictools.waveform import draw_wavedrom pattern_group = {'signal': [['Group1', {'name': 'clk', 'wave': 'hl' * 8}, {'name': 'clkn', 'wave': 'lh' * 8}, {'name': 'data0', 'wave': 'l.......h.......'}, {'name': 'data1', 'wave': 'h.l...h...l.....'}], {}, ['Group2', 
{'name': 'data2', 'wave': 'l...h..l.h......'}, {'name': 'data3', 'wave': 'l.h.' * 4}]], 'foot': {'tock': 1}, 'head': {'text': 'Pattern'}} draw_wavedrom(pattern_group) ``` # WaveDrom for real-time pattern generation and trace analysis ### The __`logictools`__ overlay uses WaveJSON format to specify and generate real signals on the board. ![](./images/logictools_block_diagram.png) * As shown in the figure above, the Pattern Generator is an output-only block that specifies a sequence of logic values (patterns) which appear on the output pins of the ARDUINO interface. The logictools API for Pattern Generator accepts **WaveDrom** specification syntax with some enhancements. * The Trace Analyzer is an input-only block that captures and records all the IO signals. These signals may be outputs driven by the generators or inputs to the PL that are driven by external circuits. The Trace Analyzer allows us to verify that the output signals we have specified from the generators are being applied correctly. It also allows us to debug and analyze the operation of the external interface. * The signals generated or captured by both the blocks can be displayed in the notebook by populating the WaveJSON dictionary that we have seen in this notebook. Users can access this dictionary through the provided API to extend or modify the waveform with special annotations. * we use a subset of the wave tokens that are allowed by WaveDrom to specify the waveforms for the Pattern Generator. However, users can call the `draw_waveform()` method on the dictionary populated by the Trace Analyzer to extend and modify the dictionary with annotations. __In the example below, we are going to generate 3 signals on the Arduino interface pins D0, D1 and D2 using the Pattern Generator. Since all IOs are accessible to the Trace analyzer, we will capture the data on the pins as well. This operation will serve as an internal loopback. 
__ ### Step 1: Download the `logictools` overlay and specify the pattern The pattern to be generated is specified in the WaveJSON format. The Waveform class is used to display the specified waveform. ``` from pynq.lib.logictools import Waveform from pynq.overlays.logictools import LogicToolsOverlay from pynq.lib.logictools import PatternGenerator logictools_olay = LogicToolsOverlay('logictools.bit') loopback_test = {'signal': [ ['stimulus', {'name': 'output0', 'pin': 'D0', 'wave': 'lh' * 8}, {'name': 'output1', 'pin': 'D1', 'wave': 'l.h.' * 4}, {'name': 'output2', 'pin': 'D2', 'wave': 'l...h...' * 2}], {}, ['analysis', {'name': 'input0', 'pin': 'D0'}, {'name': 'input1', 'pin': 'D1'}, {'name': 'input2', 'pin': 'D2'}]], 'foot': {'tock': 1}, 'head': {'text': 'loopback_test'}} waveform = Waveform(loopback_test) waveform.display() ``` **Note:** Since there are no captured samples at this moment, the analysis group will be empty. ### Step 2: Run the pattern generator and trace the loopback signals. This step populates the WaveJSON dict with the captured trace analyzer samples. The dict can now serve as an output that we can further modify. It is shown in the next step. ``` pattern_generator = logictools_olay.pattern_generator pattern_generator.trace(num_analyzer_samples=16) pattern_generator.setup(loopback_test, stimulus_group_name='stimulus', analysis_group_name='analysis') pattern_generator.run() pattern_generator.show_waveform() ``` ### Step 3: View the output waveJSON dict. 
``` import pprint output_wavejson = pattern_generator.waveform.waveform_dict pprint.pprint(output_wavejson) ``` ![](./images/waveform_output_dictionary.png) ### Step 4: Extending the output waveJSON dict with state annotation ``` state_list = ['S0', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S0', 'S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7'] color_dict = {'white': '2', 'yellow': '3', 'orange': '4', 'blue': '5'} output_wavejson['signal'].extend([{}, ['Annotation', {'name': 'state', 'wave': color_dict['yellow'] * 8 + color_dict['blue'] * 8, 'data': state_list}]]) ``` __Note: __ The color_dict is a color code map as defined by WaveDrom ``` draw_wavedrom(output_wavejson) ```
github_jupyter
# ะœะฐั‚ะตะผะฐั‚ะธั‡ะตัะบะฐั ะพะฟั‚ะธะผะธะทะฐั†ะธั ะœะฝะพะณะธะต ะผะฐั‚ะตะผะฐั‚ะธั‡ะตัะบะธะต ะผะพะดะตะปะธ ะทะฐะดะฐั‡ ัะฒะพะดัั‚ัั ะบ ะฝะฐั…ะพะถะดะตะฝะธัŽ ะผะฐะบัะธะผัƒะผะพะฒ ะธะปะธ ะผะธะฝะธะผัƒะผะพะฒ. ะะฐะฟั€ะธะผะตั€, ะฒ ัะบะพะฝะพะผะธั‡ะตัะบะธั… ะทะฐะดะฐั‡ะฐั… ะฝะตะพะฑั…ะพะดะธะผะพ ะผะธะฝะธะผะธะทะธั€ะพะฒะฐั‚ัŒ ะทะฐั‚ั€ะฐั‚ั‹ ะธ ะผะฐะบัะธะผะธะทะธั€ะพะฒะฐั‚ัŒ ะฟั€ะธะฑั‹ะปัŒ. ะ‘ะพะปัŒัˆะธะฝัั‚ะฒะพ ะทะฐะดะฐั‡ ะผะฐัˆะธะฝะฝะพะณะพ ะพะฑัƒั‡ะตะฝะธั ั‚ะฐะบะถะต ัะฒะพะดัั‚ัั ะบ ะฝะฐั…ะพะถะดะตะฝะธัŽ ะผะธะฝะธะผัƒะผะฐ ะธะปะธ ะผะฐะบัะธะผัƒะผะฐ. ะงะฐั‰ะต ะฒัะตะณะพ ะทะฐะดะฐั‡ะฐ ั„ะพั€ะผัƒะปะธั€ัƒะตั‚ัั ะบะฐะบ ะฝะฐั…ะพะถะดะตะฝะธะต ั‚ะฐะบะธั… ะฟะฐั€ะฐะผะตั‚ั€ะพะฒ ะผะพะดะตะปะธ, ะฟั€ะธ ะบะพั‚ะพั€ั‹ั… ั€ะฐะทะฝะธั†ะฐ ะผะตะถะดัƒ ั„ะฐะบั‚ะธั‡ะตัะบะธะผะธ ะธ ะฟั€ะตะดัะบะฐะทะฐะฝะฝะฐะผะธ ะทะฝะฐั‡ะตะฝะธัะผะธ ะดะฐะฝะฝั‹ั… ะดะพะปะถะฝะฐ ะฑั‹ั‚ัŒ ะผะธะฝะธะผะฐะปัŒะฝะพะน. ะะฐะฟั€ะธะผะตั€, ะผั‹ ั…ะพั‚ะธะผ ัะพะทะดะฐั‚ัŒ ะผะพะดะตะปัŒ, ะบะพั‚ะพั€ะฐั ะดะพะปะถะฝะฐ ะฟั€ะตะดัะบะฐะทั‹ะฒะฐั‚ัŒ ั†ะตะฝัƒ ะบะฒะฐั€ั‚ะธั€ั‹ ะฟะพ ะตั‘ ั…ะฐั€ะฐะบั‚ะตั€ะธัั‚ะธะบะฐะผ. ะŸั€ะธ ัั‚ะพะผ ะฝะตะพะฑั…ะพะดะธะผะพ ะผะธะฝะธะผะธะทะธั€ะพะฒะฐั‚ัŒ ั€ะฐะทะฝะธั†ัƒ ะผะตะถะดัƒ ั„ะฐะบั‚ะธั‡ะตัะบะธะผะธ ะธ ะฟั€ะตะดัะบะฐะทะฐะฝะฝั‹ะผะธ ั†ะตะฝะฐะผะธ. ะ˜ะปะธ ะตัะปะธ ะผะพะดะตะปัŒ ะดะพะปะถะฝะฐ ะฟั€ะตะดัะบะฐะทะฐั‚ัŒ ะฒะตั€ะพัั‚ะฝะพัั‚ัŒ ะบะฐะบะพะณะพ ะปะธะฑะพ ะทะฐะฑะพะปะตะฒะฐะฝะธั ะฟะพ ะดะฐะฝะฝั‹ะผ ัะธะผะฟั‚ะพะผะฐะผ, ั‚ะพ ะฝัƒะถะฝะพ ะผะฐะบัะธะผะธะทะธั€ะพะฒะฐั‚ัŒ ะฒะตั€ะพัั‚ะฝะพัั‚ัŒ ะฟั€ะตะดัะบะฐะทะฐะฝะธั ะฑะพะปะตะทะฝะธ ัƒ ั€ะตะฐะปัŒะฝั‹ั… ะฑะพะปัŒะฝั‹ั… ะธ ะผะธะฝะธะผะธะทั€ะพะฒะฐั‚ัŒ ะฒะตั€ะพัั‚ะฝะพัั‚ัŒ ัƒ ะทะดะพั€ะพะฒั‹ั… ะปัŽะดะตะน. ะ—ะฐะดะฐั‡ะธ, ะฒ ะบะพั‚ะพั€ั‹ั… ะฝะตะพะฑั…ะพะดะธะผะพ ะฝะฐะนั‚ะธ ะผะธะฝะธะผัƒะผ ะธะปะธ ะผะฐะบัะธะผัƒะผ ะฝะฐะทั‹ะฒะฐัŽั‚ัั ะทะฐะดะฐั‡ะฐะผะธ ะพะฟั‚ะธะผะธะทะฐั†ะธะธ. 
ะšะฐะบ ั‚ะพะปัŒะบะพ ะทะฐะดะฐั‡ะฐ ัั„ะพั€ะผัƒะปะธั€ะพะฒะฐะฝะฐ ะฒ ะฒะธะดะต ะทะฐะดะฐั‡ะธ ะพะฟั‚ะธะผะธะทะฐั†ะธะธ, ั‚ะพ ะฟะพัะฒะปัะตั‚ัั ะฒะพะทะผะพะถะฝะพัั‚ัŒ ั€ะตัˆะธั‚ัŒ ะตั‘ ะณะพั‚ะพะฒั‹ะผ ะฝะฐะฑะพั€ะพะผ ะผะตั‚ะพะดะพะฒ. ะ•ัะปะธ ัƒ ะฝะฐั ะตัั‚ัŒ ั„ัƒะฝะบั†ะธั $f(x)$, ั‚ะพ ะทะฝะฐั‡ะตะฝะธั $x$, ะฒ ะบะพั‚ะพั€ั‹ั… ัั‚ะฐ ั„ัƒะฝะบั†ะธั ะดะพัั‚ะธะณะฐะตั‚ ะผะธะฝะธะผัƒะผะฐ ะธะปะธ ะผะฐะบัะธะผัƒะผะฐ ะฝะฐะทั‹ะฒะฐะตั‚ัั ั‚ะพั‡ะบะพะน ัะบัั‚ั€ะตะผัƒะผะฐ. ะะฐะฟั€ะธะผะตั€, ะฟะฐั€ะฐะฑะพะปะฐ $f(x)=x^2$ ะดะพัั‚ะธะณะฐะตั‚ ัะฒะพะตะณะพ ะผะธะฝะธะผัƒะผะฐ ะฒ ั‚ะพั‡ะบะต $x=0$. ``` %matplotlib inline %run code/math_examples.py draw_parabola() ``` ะงะฐัั‚ะพ ะฝะฐ ะฟั€ะฐะบั‚ะธะบะต ะผะธะฝะธะผัƒะผ ะฝัƒะถะฝะพ ะฝะฐะนั‚ะธ ะดะปั ะผะฝะพะณะพะผะตั€ะฝะพะน ั„ัƒะฝะบั†ะธะธ. ะะฐะฟั€ะธะผะตั€, ะฟะฐั€ะพะฑะพะปะพะธะด ะพะฟะธัั‹ะฒะฐะตั‚ัั ั„ัƒะฝะบั†ะธะตะน $f(x, y) = x^2 + y^2$ ะธ ะดะพัั‚ะธะณะฐะตั‚ ัะฒะพะตะณะพ ะผะธะฝะธะผัƒะผะฐ ะฒ ั‚ะพั‡ะบะต $x=0, y=0$ ``` draw_paraboloid() ``` ะžะดะฝะฐะบะพ ะฝะฐะนั‚ะธ ั‚ะพั‡ะบัƒ ัะบัั‚ั€ะตะผัƒะผะฐ ะดะปั ะฟั€ะพะธะทะฒะพะปัŒะฝะพะน ั„ัƒะฝะบั†ะธะธ ะบั€ะฐะนะฝะต ัะปะพะถะฝะพ, ั‚ะฐะบ ะบะฐะบ ัƒ ั„ัƒะฝะบั†ะธะธ ะผะพะถะตั‚ ะฑั‹ั‚ัŒ ะผะฝะพะถะตัั‚ะฒะพ ะปะพะบะฐะปัŒะฝั‹ั… ะผะธะฝะธะผัƒะผะพะฒ ะธะปะธ ะผะฐะบัะธะผัƒะผะพะฒ. ะะฐะฟั€ะธะผะตั€, ะฝะธะถะต ะฟั€ะธะฒะตะดะตะฝ ะณั€ะฐั„ะธะบ ั„ัƒะฝะบั†ะธะธ, ัƒ ะบะพั‚ะพั€ะพะน ะฝะตัะบะพะปัŒะบะพ ะปะพะบะฐะปัŒะฝั‹ั… ั‚ะพั‡ะตะบ ัะบัั‚ั€ะตะผัƒะผะฐ. ``` draw_mishra_bird() ``` ะฃ ะฝะตะบะพั‚ะพั€ั‹ั… ั„ัƒะฝะบั†ะธะน ะฒะพะพะฑั‰ะต ะผะพะถะตั‚ ะฝะต ะฑั‹ั‚ัŒ ะฝะธ ะผะฐะบัะธะผัƒะผะพะฒ ะฝะธ ะผะธะฝะธะผัƒะผะพะฒ. 
ะะธะถะต ะฟั€ะธะฒะตะดะตะฝ ะณั€ะฐั„ะธะบ ั‚ะฐะบะพะน ั„ัƒะฝะบั†ะธะธ ``` draw_hyperbolic_paraboloid() ``` ะขะฐะบ ะบะฐะบ ะฒ ะพะฑั‰ะตะผ ะฒะธะดะต ะดะปั ะฟั€ะพะธะทะฒะพะปัŒะฝะพะน ั„ัƒะฝะบั†ะธะธ ั€ะตัˆะธั‚ัŒ ะทะฐะดะฐั‡ัƒ ะพะฟั‚ะธะผะธะทะฐั†ะธะธ ะบั€ะฐะนะฝะต ัะปะพะถะฝะพ, ั‚ะพ ะฝะฐ ะฟั€ะฐะบั‚ะธะบะต ะทะฐะดะฐั‡ะธ ะพะฟั‚ะธะผะธะทะฐั†ะธะธ ัะฒะพะดัั‚ัั ะบ ั‚ะฐะบะธะผ ั„ัƒะฝะบั†ะธัะผ, ะดะปั ะบะพั‚ะพั€ั‹ั… ะพั‚ะฝะพัะธั‚ะตะปัŒะฝะพ ะปะตะณะบะพ ะผะพะถะฝะพ ั€ะตัˆะธั‚ัŒ ัั‚ัƒ ะทะฐะดะฐั‡ัƒ. ะžะดะฝะธะผ ะธะท ัะฟะพัะพะฑะพะฒ ะฝะฐั…ะพะถะดะตะฝะธั ัะบัั‚ั€ะตะผัƒะผะพะฒ ัะฒะปัะตั‚ัั ะฝะฐั…ะพะถะดะตะฝะธะต ั‚ะฐะบะธั… ั‚ะพั‡ะตะบ ั„ัƒะฝะบั†ะธะธ, ะฟั€ะธ ะบะพั‚ะพั€ั‹ั… ะฟั€ะพะธะทะฒะพะดะฝะฐั ัั‚ะพะน ั„ัƒะฝะบั†ะธะธ ั€ะฐะฒะฝะฐ ะฝัƒะปัŽ: $f'(x) = 0$. ะ˜ะฝั‚ัƒะธั‚ะธะฒะฝะพ ัั‚ะพ ะผะพะถะฝะพ ะพะฑัŠััะฝะธั‚ัŒ ั‚ะตะผ, ั‡ั‚ะพ ั„ัƒะฝะบั†ะธั, ะตัะปะธ ัƒ ะฝะตะต ััƒั‰ะตัั‚ะฒัƒะตั‚ ะฟั€ะพะธะทะฒะพะดะฝะฐั, ะฒะตะดะตั‚ ัะตะฑั ะบะฐะบ ะบะฐัะฐั‚ะตะปัŒะฝะฐั ะดะปั ะฝะตะฑะพะปัŒัˆะพะณะพ ะธะฝั‚ะตั€ะฒะฐะปะฐ. ะ•ัะปะธ ะบะฐัะฐั‚ะตะปัŒะฝะฐั ะฒ ะทะฐะดะฐะฝะฝะพะน ั‚ะพั‡ะบะต ะธะผะตะตั‚ ะพัั‚ั€ั‹ะน ัƒะณะพะป ั ะพััŒัŽ x, ั‚ะพ ัั‚ะพ ะทะฝะฐั‡ะธั‚, ั‡ั‚ะพ ั„ัƒะฝะบั†ะธั ั€ะฐัั‚ะตั‚ ะฒะฑะปะธะทะธ ัั‚ะพะน ั‚ะพั‡ะบะธ. ะ•ัะปะธ ะบะฐัะฐั‚ะตะปัŒะฝะฐั ะธะผะตะตั‚ ั‚ัƒะฟะพะน ัƒะณะพะป ั ะพััŒัŽ x, ั‚ะพ ัั‚ะพ ะทะฝะฐั‡ะธั‚, ั‡ั‚ะพ ั„ัƒะฝะบั†ะธั ัƒะฑั‹ะฒะฐะตั‚ ะฒะฑะปะธะทะธ ัั‚ะพะน ั‚ะพั‡ะบะธ. ะ”ะปั ะฟั€ะพัั‚ั‹ั… ั„ัƒะฝะบั†ะธะน ะผะพะถะฝะพ ะฝะฐะฟั€ัะผัƒัŽ ั€ะตัˆะธั‚ัŒ ัƒั€ะฐะฒะฝะตะฝะธะต $f'(x) = 0$ ะธ ะฝะฐะนั‚ะธ ั‚ะพั‡ะบัƒ ัะบัั‚ั€ะตะผัƒะผะฐ ะฒ ะฐะฝะฐะปะธั‚ะธั‡ะตัะบะพะผ ะฒะธะดะต (ั‚.ะต. ะฒ ะฒะธะดะต ั„ะพั€ะผัƒะปั‹). ะะฐะฟั€ะธะผะตั€, ะดะปั ะฟะฐั€ะฐะฑะพะปั‹ ะฟั€ะพะธะทะฒะพะดะฝะฐั ั€ะฐะฒะฝะฐ $f'(x) = 2x$ ะธ ัะพัั‚ะฐะฒะธะฒ ัƒั€ะฐะฒะฝะตะฝะธะต $2x=0$ ะผะพะถะฝะพ ะฝะฐะนั‚ะธ, ั‡ั‚ะพ $x=0$. ะ”ะปั ะฑะพะปะตะต ัะปะพะถะฝั‹ั… ั„ัƒะฝะบั†ะธะน, ะผะพะถะฝะพ ัะปะตะดะพะฒะฐั‚ัŒ ะฝะฐะฟั€ะฐะฒะปะตะฝะธัŽ ะบะฐัะฐั‚ะตะปัŒะฝะพะน ะธ ะฝะฐะนั‚ะธ ะฑะปะธะถะฐะนะถัƒัŽ ั‚ะพั‡ะบัƒ ัะบัั‚ั€ะตะผัƒะผะฐ. 
ะšะฐะบ ะฑั‹ะปะพ ัะบะฐะทะฐะฝะพ ะฒั‹ัˆะต, ัƒ ั„ัƒะฝะบั†ะธะธ ะผะพะถะตั‚ ะฑั‹ั‚ัŒ ะฝะตัะบะพะปัŒะบะพ ัะบัั‚ั€ะตะผัƒะผะพะฒ ะธ ะฒ ั‚ะฐะบะพะผ ัะปัƒั‡ะฐะต ะฝะตะพะฑั…ะพะดะธะผะพ ะฝะฐะนั‚ะธ ั‚ะพั‡ะบัƒ, ะณะดะต ั„ัƒะฝะบั†ะธั ะดะพัั‚ะธะณะฐะตั‚ ะฝะฐะธะผะตะฝัŒัˆะตะณะพ ะธะปะธ ะฝะฐะธะฑะพะปัŒัˆะตะณะพ ะทะฝะฐั‡ะตะฝะธั ัั€ะตะดะธ ะฒัะตั… ะปะพะบะฐะปัŒะฝั‹ั… ัะบัั‚ั€ะตะผัƒะผะพะฒ. ะขะฐะบะฐั ั‚ะพั‡ะบะฐ ะฝะฐะทั‹ะฒะฐะตั‚ัั ะณะปะพะฑะฐะปัŒะฝะพะน ั‚ะพั‡ะบะพะน ัะบัั‚ั€ะตะผัƒะผะฐ. ะะธะถะต ะฟั€ะธะฒะตะดะตะฝ ะฟั€ะธะผะตั€ ะปะพะผะฐะฝะพะน ะปะธะฝะธะธ, ะบะพั‚ะพั€ะฐั ะฐะฟั€ะพะบัะธะผะธั€ัƒะตั‚ ะฟะฐั€ะฐะฑะพะปัƒ. ะ’ ั‚ะพั‡ะบะต $x=0$, ะณะดะต ะปะธะฝะธั ัั‚ะฐะปะฐ ะณะพั€ะธะทะพะฝั‚ะฐะปัŒะฝะพะน, ั„ัƒะฝะบั†ะธั ะดะพัั‚ะธะณะฐะตั‚ ะฝะฐะธะผะตะฝัŒัˆะตะณะพ ะทะฝะฐั‡ะตะฝะธั. ``` draw_parabola(8) ``` ะ’ ัะปัƒั‡ะฐะต ั‚ั€ะตั…ะผะตั€ะฝะพะณะพ ะฟั€ะพัั‚ั€ะฐะฝัั‚ะฒะฐ ะฒะผะตัั‚ะพ ะบะฐัะฐั‚ะตะปัŒะฝะพะน ะปะธะฝะธะธ ะธัะฟะพะปัŒะทัƒะตั‚ัั ะบะฐัะฐั‚ะตะปัŒะฝะฐั ะฟะปะพัะบะพัั‚ัŒ. ะ”ะปั ะฝะตะฑะพะปัŒัˆะพะณะพ ะธะฝั‚ะตั€ะฒะฐะปะฐ ั„ัƒะฝะบั†ะธัŽ ะผะพะถะฝะพ ะฐะฟั€ะพะบัะธะผะธั€ะพะฒะฐั‚ัŒ ัั‚ะพะน ะบะฐัะฐั‚ะตะปัŒะฝะพะน ะฟะปะพัะบะพัั‚ัŒัŽ. ะะธะถะต ะฟั€ะธะฒะตะดะตะฝ ะฟั€ะธะผะตั€ ะฐะฟั€ะพะบัะธะผะฐั†ะธะธ ะฟะฐั€ะฐะฑะพะปะพะธะดะฐ ะบะฐัะฐั‚ะตะปัŒะฝั‹ะผะธ ะฟะปะพัะบะพัั‚ัะผะธ. ะ’ ั‚ะพั‡ะบะต $x=0, y=0$, ะณะดะต ะฟะปะพัะบะพัั‚ัŒ ัั‚ะฐะปะฐ ะณะพั€ะธะทะพะฝั‚ะฐะปัŒะฝะพะน, ั„ัƒะฝะบั†ะธั ะดะพัั‚ะธะณะฐะตั‚ ะฝะฐะธะผะตะฝัŒัˆะตะณะพ ะทะฝะฐั‡ะตะฝะธั. ``` draw_paraboloid(8) ```
github_jupyter
``` from mxnet import nd from mxnet.gluon import nn def conv_block(channels): out = nn.Sequential() out.add( nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(channels, kernel_size=3, padding=1) ) return out class DenseBlock(nn.Block): def __init__(self, layers, growth_rate, **kwargs): super(DenseBlock, self).__init__(**kwargs) self.net = nn.Sequential() for i in range(layers): self.net.add(conv_block(growth_rate)) def forward(self, x): for layer in self.net: out = layer(x) x = nd.concat(x, out, dim=1) return x dblk = DenseBlock(2, 10) dblk.initialize() x = nd.random.uniform(shape=(4,3,8,8)) dblk(x).shape def transition_block(channels): out = nn.Sequential() out.add( nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(channels, kernel_size=1), nn.AvgPool2D(pool_size=2, strides=2) ) return out tblk = transition_block(10) tblk.initialize() tblk(x).shape init_channels = 64 growth_rate = 32 block_layers = [6, 12, 24, 16] num_classes = 10 def dense_net(): net = nn.Sequential() # add name_scope on the outermost Sequential with net.name_scope(): # first block net.add( nn.Conv2D(init_channels, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1) ) # dense blocks channels = init_channels for i, layers in enumerate(block_layers): net.add(DenseBlock(layers, growth_rate)) channels += layers * growth_rate if i != len(block_layers)-1: net.add(transition_block(channels//2)) # last block net.add( nn.BatchNorm(), nn.Activation('relu'), nn.AvgPool2D(pool_size=1), nn.Flatten(), nn.Dense(num_classes) ) return net import sys sys.path.append('..') import gluonbook as gb from mxnet import gluon from mxnet import init train_data, test_data = gb.load_data_fashion_mnist( batch_size=64, resize=32) ctx = gb.try_gpu() net = dense_net() net.initialize(ctx=ctx, init=init.Xavier()) loss = gluon.loss.SoftmaxCrossEntropyLoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1}) gb.train(train_data, 
test_data, net, loss, trainer, ctx, num_epochs=1) ``` ## ๅฐ็ป“ * DenseNet้€š่ฟ‡ๅฐ†ResNet้‡Œ็š„`+`ๆ›ฟๆขๆˆ`concat`ไปŽ่€Œ่Žทๅพ—ๆ›ด็จ ๅฏ†็š„่ฟžๆŽฅใ€‚ ## ็ปƒไน  - DenseNet่ฎบๆ–‡ไธญๆๅˆฐ็š„ไธ€ไธชไผ˜็‚นๆ˜ฏๅ…ถๆจกๅž‹ๅ‚ๆ•ฐๆฏ”ResNetๆ›ดๅฐ๏ผŒๆƒณๆƒณไธบไป€ไนˆ๏ผŸ - DenseNet่ขซไบบ่ฏŸ็—…็š„ไธ€ไธช้—ฎ้ข˜ๆ˜ฏๅ†…ๅญ˜ๆถˆ่€—่ฟ‡ๅคšใ€‚็œŸ็š„ไผš่ฟ™ๆ ทๅ—๏ผŸๅฏไปฅๆŠŠ่พ“ๅ…ฅๆขๆˆ$224\times 224$๏ผˆ้œ€่ฆๆ”นๆœ€ๅŽ็š„`AvgPool2D`ๅคงๅฐ๏ผ‰๏ผŒๆฅ็œ‹็œ‹ๅฎž้™…๏ผˆGPU๏ผ‰ๅ†…ๅญ˜ๆถˆ่€—ใ€‚ - ่ฟ™้‡Œ็š„FashionMNISTๆœ‰ๅฟ…่ฆ็”จ100+ๅฑ‚็š„็ฝ‘็ปœๅ—๏ผŸๅฐ่ฏ•ๅฐ†ๅ…ถๆ”น็ฎ€ๅ•็œ‹็œ‹ๆ•ˆๆžœใ€‚
github_jupyter
<a href="https://colab.research.google.com/github/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_Industrial/notebooks/Colab_UNet_Industrial_TF_TFHub_inference_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` # Copyright 2019 NVIDIA Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;"> # UNet Industrial Inference Demo with TensorFlow Hub ## Overview In this notebook, we will demo the process of inference with NVIDIA pre-trained UNet Industrial defects detection TensorFlow Hub modules. NVIDIA pre-trained U-Net models for defect detection are adapted from the original version of the [U-Net model](https://arxiv.org/abs/1505.04597) which is a convolutional auto-encoder for 2D image segmentation. U-Net was first introduced by Olaf Ronneberger, Philip Fischer, and Thomas Brox in the paper: [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597). ### Requirement 1. Before running this notebook, please set the Colab runtime environment to GPU via the menu *Runtime => Change runtime type => GPU*. ``` !nvidia-smi ``` The below code checks whether a Tensor-Core GPU is present. 
Tensor Cores can accelerate large matrix operations by performing mixed-precision matrix multiply and accumulate calculations in a single operation. ``` %tensorflow_version 1.x import tensorflow as tf print(tf.__version__) # This notebook runs on TensorFlow 1.x. from tensorflow.python.client import device_lib def check_tensor_core_gpu_present(): local_device_protos = device_lib.list_local_devices() for line in local_device_protos: if "compute capability" in str(line): compute_capability = float(line.physical_device_desc.split("compute capability: ")[-1]) if compute_capability>=7.0: return True print("Tensor Core GPU Present:", check_tensor_core_gpu_present()) tensor_core_gpu = check_tensor_core_gpu_present() ``` 2. Next, we clone the NVIDIA Github UNet_Industrial repository and set up the workspace. ``` !git clone https://github.com/NVIDIA/DeepLearningExamples %%bash cd DeepLearningExamples git checkout master import os WORKSPACE_DIR='/content/DeepLearningExamples/TensorFlow/Segmentation/UNet_Industrial/notebooks' os.chdir(WORKSPACE_DIR) print (os.getcwd()) !pip install tensorflow_hub==0.6.0 ``` ## Data download We will first download some data for testing purposes, in particular, the [Weakly Supervised Learning for Industrial Optical Inspection (DAGM 2007)](https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html) dataset. > The competition is inspired by problems from industrial image processing. In order to satisfy their customers' needs, companies have to guarantee the quality of their products, which can often be achieved only by inspection of the finished product. Automatic visual defect detection has the potential to reduce the cost of quality assurance significantly. > > The competitors have to design a stand-alone algorithm which is able to detect miscellaneous defects on various background textures. 
> > The particular challenge of this contest is that the algorithm must learn, without human intervention, to discern defects automatically from a weakly labeled (i.e., labels are not exact to the pixel level) training set, the exact characteristics of which are unknown at development time. During the competition, the programs have to be trained on new data without any human guidance. **Source:** https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html ``` ! ./download_and_preprocess_dagm2007_public.sh ./data ``` The final data directory should look like: ``` ./data raw_images public Class1 Class2 Class3 Class4 Class5 Class6 Class1_def Class2_def Class3_def Class4_def Class5_def Class6_def private zip_files ``` Each data directory contains training images corresponding to one of the first 6 types of defects. ## Load UNet TF-Hub modules from Google Drive (Optional) This step allows you to connect and load pretrained UNet TF-Hub modules from Google Drive (only if you have modules saved there - see this [notebook](https://colab.research.google.com/github/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_Industrial/notebooks/Colab_UNet_Industrial_TF_TFHub_export.ipynb) on UNet TF-Hub module creation and export to Google Drive). Execute the below cell to authorize Colab to access your Google Drive content, then copy the saved TF-Hub modules to Colab. ``` from google.colab import drive drive.mount('/content/gdrive') !cp -r "/content/gdrive/My Drive/NVIDIA/Unet_modules" . !ls Unet_modules ``` ## Inference with UNet TF-Hub modules Next, we will load one of the pretrained UNet TF-Hub modules (corresponding to one of the 10 classes of the DAGM 2007 dataset) and carry out inference. 
In order to load TF-Hub modules, there are several options: - Load from a local cache or directory - Load from a remote repository ``` import tensorflow_hub as hub # Loading from a local cache/directory #module = hub.Module("Unet_modules/Class_1", trainable=False) # Loading from a remote repository. The 10 NVIDIA UNet TF-Hub modules are available at # https://tfhub.dev/nvidia/unet/industrial/class_1/1 (similarly for class 2, 3 ...) and # https://developer.download.nvidia.com/compute/redist/Binary_Files/unet_tfhub_modules/class_{1..10} module = hub.Module("https://tfhub.dev/nvidia/unet/industrial/class_1/1") # or class_2, class_3 etc... #module = hub.Module("https://developer.download.nvidia.com/compute/redist/Binary_Files/unet_tfhub_modules/class_1/1.tar.gz") # or cls_as2, class_3 etc... print(module.get_signature_names()) print(module.get_input_info_dict()) # When no signature is given, considers it as 'default' print(module.get_output_info_dict()) ``` As seen, this module expects inputs as grayscale images of size 512x512, and produce masks of the same size. ``` # Load a test image import numpy as np %matplotlib inline import matplotlib.pyplot as plt import matplotlib.image as mpimg img = mpimg.imread('./data/raw_images/public/Class1_def/1.png') plt.figure(figsize = (10,10)); plt.imshow(img, cmap='gray'); ``` As we can see in this figure, there exists a defective area in the top left corner. We will now start a TF session and carry out inference on the normalized test image with the loaded TF-Hub module. 
``` # Image preprocessing img = np.expand_dims(img, axis=2) img = np.expand_dims(img, axis=0) img = (img-0.5)/0.5 output = module(img) print(output.shape) import tensorflow as tf with tf.Session() as sess: sess.run([tf.global_variables_initializer(), tf.tables_initializer()]) pred = sess.run(output) # Print out model predicted mask plt.figure(figsize = (10,10)); plt.imshow(np.squeeze(pred), cmap='gray'); ``` As expected, the TF-Hub module points out the correct defective area in this image. Please feel free to try out other defective images for Class 1 within `./data/raw_images/public/Class1_def/`, or load the other UNet modules and test data for other classes from 1 to 10. ``` !ls ./data/raw_images/public/Class1_def/ ``` # Conclusion In this notebook, we have walked through the process of loading a pretrained UNet-Industrial TF-Hub module and carrying out inference on a test image. ## What's next Now it's time to try the UNet-Industrial TF Hub modules on your own data. ``` ```
github_jupyter
##### Copyright 2021 The TensorFlow Cloud Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # TensorFlow Cloud <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/cloud/tutorials/overview.ipynb"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/cloud/blob/master/g3doc/tutorials/overview.ipynb""><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/cloud/blob/master/g3doc/tutorials/overview.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/cloud/tutorials/overview.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> <td> <a href="https://kaggle.com/kernels/welcome?src=https://github.com/tensorflow/cloud/blob/master/g3doc/tutorials/overview.ipynb" target="blank"> <img width="90" src="https://www.kaggle.com/static/images/site-logo.png" alt="Kaggle logo">Run in Kaggle </a> </td> </table> TensorFlow Cloud is a library that makes it easier to do training and hyperparameter tuning of Keras models on Google Cloud. 
Using TensorFlow Cloud's `run` API, you can send your model code directly to your Google Cloud account, and use Google Cloud compute resources without needing to login and interact with the Cloud UI (once you have set up your project in the console). This means that you can use your Google Cloud compute resources directly from inside a Python notebook: a notebook just like this one! You can also send models to Google Cloud from a plain `.py` Python script. ## Simple example This is a simple introductory example to demonstrate how to train a model remotely using [TensorFlow Cloud](https://tensorflow.org/cloud) and Google Cloud. You can just read through it to get an idea of how this works, or you can run the notebook in Google Colab. Running the notebook requires connecting to a Google Cloud account and entering your credentials and project ID. See [Setting Up and Connecting To Your Google Cloud Account](https://www.tensorflow.org/cloud/tutorials/google_cloud_project_setup_instructions) if you don't have an account yet or are not sure how to set up a project in the console. ## Import required modules ``` import tensorflow as tf tf.version.VERSION ! pip install -q tensorflow-cloud import tensorflow_cloud as tfc print(tfc.__version__) import sys # `os` is required by the path-building and environment cells below # (os.path.join for GCS paths, os.environ for GOOGLE_CLOUD_PROJECT). import os ``` ## Project Configurations Set project parameters. If you don't know what your `GCP_PROJECT_ID` or `GCS_BUCKET` should be, see [Setting Up and Connecting To Your Google Cloud Account](google_cloud_project_setup_instructions.ipynb). The `JOB_NAME` is optional, and you can set it to any string. If you are doing multiple training experiments (for example) as part of a larger project, you may want to give each of them a unique `JOB_NAME`. ``` # Set Google Cloud Specific parameters # TODO: Please set GCP_PROJECT_ID to your own Google Cloud project ID. GCP_PROJECT_ID = 'YOUR_PROJECT_ID' #@param {type:"string"} # TODO: set GCS_BUCKET to your own Google Cloud Storage (GCS) bucket. 
GCS_BUCKET = 'YOUR_GCS_BUCKET_NAME' #@param {type:"string"} # DO NOT CHANGE: Currently only the 'us-central1' region is supported. REGION = 'us-central1' # OPTIONAL: You can change the job name to any string. JOB_NAME = 'mnist' #@param {type:"string"} # Setting location were training logs and checkpoints will be stored GCS_BASE_PATH = f'gs://{GCS_BUCKET}/{JOB_NAME}' TENSORBOARD_LOGS_DIR = os.path.join(GCS_BASE_PATH,"logs") MODEL_CHECKPOINT_DIR = os.path.join(GCS_BASE_PATH,"checkpoints") SAVED_MODEL_DIR = os.path.join(GCS_BASE_PATH,"saved_model") ``` ## Authenticating the notebook to use your Google Cloud Project This code authenticates the notebook, checking your valid Google Cloud credentials and identity. It is inside the `if not tfc.remote()` block to ensure that it is only run in the notebook, and will not be run when the notebook code is sent to Google Cloud. Note: For Kaggle Notebooks click on "Add-ons"->"Google Cloud SDK" before running the cell below. ``` # Using tfc.remote() to ensure this code only runs in notebook if not tfc.remote(): # Authentication for Kaggle Notebooks if "kaggle_secrets" in sys.modules: from kaggle_secrets import UserSecretsClient UserSecretsClient().set_gcloud_credentials(project=GCP_PROJECT_ID) # Authentication for Colab Notebooks if "google.colab" in sys.modules: from google.colab import auth auth.authenticate_user() os.environ["GOOGLE_CLOUD_PROJECT"] = GCP_PROJECT_ID ``` ## Model and data setup From here we are following the basic procedure for setting up a simple Keras model to run classification on the MNIST dataset. ### Load and split data Read raw data and split to train and test data sets. ``` (x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data() x_train = x_train.reshape((60000, 28 * 28)) x_train = x_train.astype('float32') / 255 ``` ### Create a model and prepare for training Create a simple model and set up a few callbacks for it. 
``` from tensorflow.keras import layers model = tf.keras.Sequential([ tf.keras.layers.Dense(512, activation='relu', input_shape=(28 * 28,)), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) ``` ### Quick validation training We'll train the model for one (1) epoch just to make sure everything is set up correctly, and we'll wrap that training command in `if not` `tfc.remote`, so that it only happens here in the runtime environment in which you are reading this, not when it is sent to Google Cloud. ``` if not tfc.remote(): # Run the training for 1 epoch and a small subset of the data to validate setup model.fit(x=x_train[:100], y=y_train[:100], validation_split=0.2, epochs=1) ``` ## Prepare for remote training The code below will only run when the notebook code is sent to Google Cloud, not inside the runtime in which you are reading this. First, we set up callbacks which will: * Create logs for [TensorBoard](https://www.tensorflow.org/tensorboard). * Create [checkpoints](/guide/checkpoint) and save them to the checkpoints directory specified above. * Stop model training if loss is not improving sufficiently. Then we call `model.fit` and `model.save`, which (when this code is running on Google Cloud) which actually run the full training (100 epochs) and then save the trained model in the GCS Bucket and directory defined above. 
``` if tfc.remote(): # Configure Tensorboard logs callbacks=[ tf.keras.callbacks.TensorBoard(log_dir=TENSORBOARD_LOGS_DIR), tf.keras.callbacks.ModelCheckpoint( MODEL_CHECKPOINT_DIR, save_best_only=True), tf.keras.callbacks.EarlyStopping( monitor='loss', min_delta =0.001, patience=3)] model.fit(x=x_train, y=y_train, epochs=100, validation_split=0.2, callbacks=callbacks) model.save(SAVED_MODEL_DIR) ``` ## Start the remote training TensorFlow Cloud takes all the code from its local execution environment (this notebook), wraps it up, and sends it to Google Cloud for execution. (That's why the `if` and `if not` `tfc.remote` wrappers are important.) This step will prepare your code from this notebook for remote execution and then start a remote training job on Google Cloud Platform to train the model. First we add the `tensorflow-cloud` Python package to a `requirements.txt` file, which will be sent along with the code in this notebook. You can add other packages here as needed. Then a GPU and a CPU image are specified. You only need to specify one or the other; the GPU is used in the code that follows. 
Finally, the heart of TensorFlow cloud: the call to `tfc.run`. When this is executed inside this notebook, all the code from this notebook, and the rest of the files in this directory, will be packaged and sent to Google Cloud for execution. The parameters on the `run` method specify the details of the execution environment and the distribution strategy (if any) to be used. Once the job is submitted you can go to the next step to monitor the job's progress via Tensorboard. ``` # If you are using a custom image you can install modules via requirements # txt file. with open('requirements.txt','w') as f: f.write('tensorflow-cloud\n') # Optional: Some recommended base images. If you provide none the system # will choose one for you. Note: the GPU variable must point at the # `tf2-gpu` image and the CPU variable at the `tf2-cpu` image. TF_GPU_IMAGE= "gcr.io/deeplearning-platform-release/tf2-gpu.2-5" TF_CPU_IMAGE= "gcr.io/deeplearning-platform-release/tf2-cpu.2-5" # Submit a single node training job using GPU. tfc.run( distribution_strategy='auto', requirements_txt='requirements.txt', docker_config=tfc.DockerConfig( parent_image=TF_GPU_IMAGE, image_build_bucket=GCS_BUCKET ), chief_config=tfc.COMMON_MACHINE_CONFIGS['K80_1X'], job_labels={'job': JOB_NAME} ) ``` ## Training Results ### Reconnect your Colab instance Most remote training jobs are long running. If you are using Colab, it may time out before the training results are available. In that case, **rerun the following sections in order** to reconnect and configure your Colab instance to access the training results. 1. Import required modules 2. Project Configurations 3. Authenticating the notebook to use your Google Cloud Project **DO NOT** rerun the rest of the code. ### Load Tensorboard While the training is in progress you can use Tensorboard to view the results. Note the results will show only after your training has started. This may take a few minutes. 
``` %load_ext tensorboard %tensorboard --logdir $TENSORBOARD_LOGS_DIR ``` ## Load your trained model Once training is complete, you can retrieve your model from the GCS Bucket you specified above. ``` trained_model = tf.keras.models.load_model(SAVED_MODEL_DIR) trained_model.summary() ```
github_jupyter
# Confidence Interval: In this notebook you will find: - Get confidence intervals for predicted survival curves using XGBSE estimators; - How to use XGBSEBootstrapEstimator, a meta estimator for bagging; - A nice function to help us plot survival curves. ``` import matplotlib.pyplot as plt plt.style.use('bmh') from IPython.display import set_matplotlib_formats set_matplotlib_formats('retina') # to easily plot confidence intervals def plot_ci(mean, upper_ci, lower_ci, i=42, title='Probability of survival $P(T \geq t)$'): # plotting mean and confidence intervals plt.figure(figsize=(12, 4), dpi=120) plt.plot(mean.columns,mean.iloc[i]) plt.fill_between(mean.columns, lower_ci.iloc[i], upper_ci.iloc[i], alpha=0.2) plt.title(title) plt.xlabel('Time [days]') plt.ylabel('Probability') plt.tight_layout() ``` ## Metrabic We will be using the Molecular Taxonomy of Breast Cancer International Consortium (METABRIC) dataset from [pycox](https://github.com/havakv/pycox#datasets) as base for this example. ``` from xgbse.converters import convert_to_structured from pycox.datasets import metabric import numpy as np # getting data df = metabric.read_df() df.head() ``` ## Split and Time Bins Split the data in train and test, using sklearn API. We also setup the TIME_BINS array, which will be used to fit the survival curve. 
``` from xgbse.converters import convert_to_structured from sklearn.model_selection import train_test_split # splitting to X, T, E format X = df.drop(['duration', 'event'], axis=1) T = df['duration'] E = df['event'] y = convert_to_structured(T, E) # splitting between train, and validation X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state = 0) TIME_BINS = np.arange(15, 315, 15) TIME_BINS ``` ## Calculating confidence intervals We will be using the XGBSEKaplanTree estimator to fit the model and predict a survival curve for each point in our test data, and via <i>return_ci</i> parameter we will get upper and lower bounds for the confidence interval. ``` from xgbse import XGBSEKaplanTree, XGBSEBootstrapEstimator from xgbse.metrics import concordance_index, approx_brier_score # xgboost parameters to fit our model PARAMS_TREE = { 'objective': 'survival:cox', 'eval_metric': 'cox-nloglik', 'tree_method': 'hist', 'max_depth': 10, 'booster':'dart', 'subsample': 1.0, 'min_child_weight': 50, 'colsample_bynode': 1.0 } ``` ### Numerical Form The KaplanTree and KaplanNeighbors models support estimation of confidence intervals via the Exponential Greenwood formula. ``` %%time # fitting xgbse model xgbse_model = XGBSEKaplanTree(PARAMS_TREE) xgbse_model.fit(X_train, y_train, time_bins=TIME_BINS) # predicting mean, upper_ci, lower_ci = xgbse_model.predict(X_test, return_ci=True) # print metrics print(f"C-index: {concordance_index(y_test, mean)}") print(f"Avg. Brier Score: {approx_brier_score(y_test, mean)}") # plotting CIs plot_ci(mean, upper_ci, lower_ci) ``` ### Non-parametric Form We can also use the XGBSEBootstrapEstimator to wrap any XGBSE model and get confidence intervals via bagging, which also slighty increase our performance at the cost of computation time. 
``` %%time # base model as XGBSEKaplanTree base_model = XGBSEKaplanTree(PARAMS_TREE) # bootstrap meta estimator bootstrap_estimator = XGBSEBootstrapEstimator(base_model, n_estimators=100) # fitting the meta estimator bootstrap_estimator.fit(X_train, y_train, time_bins=TIME_BINS) # predicting mean, upper_ci, lower_ci = bootstrap_estimator.predict(X_test, return_ci=True) # print metrics print(f"C-index: {concordance_index(y_test, mean)}") print(f"Avg. Brier Score: {approx_brier_score(y_test, mean)}") # plotting CIs plot_ci(mean, upper_ci, lower_ci) ```
github_jupyter
## Copyright 2021 Antoine Simoulin. <i>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Icons made by <a href="https://www.flaticon.com/authors/freepik" title="Freepik">Freepik</a>, <a href="https://www.flaticon.com/authors/pixel-perfect" title="Pixel perfect">Pixel perfect</a>, <a href="https://www.flaticon.com/authors/becris" title="Becris">Becris</a>, <a href="https://www.flaticon.com/authors/smashicons" title="Smashicons">Smashicons</a>, <a href="https://www.flaticon.com/authors/srip" title="srip">srip</a>, <a href="https://www.flaticon.com/authors/adib-sulthon" title="Adib">Adib</a>, <a href="https://www.flaticon.com/authors/flat-icons" title="Flat Icons">Flat Icons</a> and <a href="https://www.flaticon.com/authors/dinosoftlabs" title="Pixel perfect">DinosoftLabs</a> from <a href="https://www.flaticon.com/" title="Flaticon"> www.flaticon.com</a></i> # TP 3 : Words Embeddings <img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/tp3-header.png?raw=True" width="1000"> On va s'appuyer sur le corpus collectรฉ par <span class="badge badge-secondary">([Panckhurst et al., 2016](#panckhurst-2016))</span> qui rassemble 88,000 sms collectรฉs dans la rรฉgion de Montpellier. Le corpus a รฉtรฉ dรฉ-identifiรฉ (en particulier, les noms sont remplacรฉs par [ _forename_ ]). Pour chaque sms, on a identifiรฉ les Emojis dans le texte. Il y avait beaucoup de type d'Emojis. 
Dans le TP, ils ont รฉtรฉ simplifiรฉs selon le tableau suivant. Tous les Emojis de la colonne `Emoji list` ont รฉtรฉ remplacรฉ par l'emoji de la colonne `Generic`. Dans le TP les Emojis n'apparaissent pas dans le texte du sms car on cherche ร  les prรฉdire. | Generic Emoji | Emoji list | |:--------------:|:------------------------------------------------------------------:| | ๐Ÿ˜ƒ | '=P', ':)', ':P', '=)', ':p', ':d', ':-)', '=D', ':D', '^^' | | ๐Ÿ˜ฒ | ':O', 'o_o', ':o', ':&' | | ๐Ÿ˜” | '"-.-'''", '<_>', '-_-', "--'", "-.-'", '-.-', "-.-''", "-\_-'" | | ๐Ÿ˜  | ':/', ':-/', ':-(', ':(', ':-<' | | ๐Ÿ˜† | '>.<', 'ยค.ยค', '<>','><', '*.*', 'xd', 'XD', 'xD', 'x)',';)', ';-)' | | ๐Ÿ˜ | '</3', '<3' | Finalement pour le TP, on a filtrรฉ le jeu de donnรฉes pour ne conserver que les sms contenant qu'un seul Emoji. On a par ailleurs <i>down samplรฉ</i> les classes majoritaires pour limiter le dรฉsรฉquilibre du jeu de donnรฉes. En effet les sms avec un smiley ๐Ÿ˜ƒ รฉtait largement sur-reprรฉsentรฉs. <b>L'objet du TP est de prรฉdire l'รฉmoji associรฉ ร  chaque message. 
Pour cela on vectorisera le texte en utilisant les mรฉthodes d'embeddings.</b> ``` %%capture # Check environment if 'google.colab' in str(get_ipython()): IN_COLAB = True else: IN_COLAB = False if IN_COLAB: # โš ๏ธ Execute only if running in Colab !pip install -q scikit-learn==0.23.2 matplotlib==3.1.3 pandas==1.1.3 gensim==3.8.1 torch==1.6.0 torchvision==0.7.0 !pip install skorch==0.10.0 # then restart runtime environment from gensim.models import KeyedVectors from collections import Counter import numpy as np import pandas as pd import re from sklearn.base import BaseEstimator, TransformerMixin from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import LinearSVC from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, roc_auc_score import os, sys # IPython automatically reload all changed code %load_ext autoreload %autoreload 2 # Inline Figures with matplotlib %matplotlib inline %config InlineBackend.figure_format='retina' # import extrenal modules import urllib.request class_names = ['happy', 'joke', 'astonished', 'angry', 'bored', 'heart'] repo_url = 'https://raw.githubusercontent.com/AntoineSimoulin/m2-data-sciences/master/' _ = urllib.request.urlretrieve(repo_url + 'src/plots.py', 'plots.py') if not os.path.exists('smileys'): os.makedirs('smileys') for c in class_names: _ = urllib.request.urlretrieve( repo_url + 'TP3%20-%20Word%20Embeddings/smileys/{}.png'.format(c), 'smileys/{}.png'.format(c)) ``` On va utiliser les embeddings dรฉjร  entrainรฉ que nous avons manipulรฉ au cours prรฉcรฉdent. Pour limiter la taille du fichier d'embeddings, on a sauvegardรฉ que les `10,000` mots les plus frรฉquents. 
<b>Vous devez rรฉcupรฉrer le fichier d'embeddings ainsi que le jeu de donnรฉes directement sur le [Moodle](https://moodle.u-paris.fr/course/view.php?id=11048).</b> ``` w2v_model = KeyedVectors.load_word2vec_format("oscar.fr.300.10k.model") w2v_model.init_sims(replace=True) len(w2v_model.vocab) # On crรฉe un array avec les 10,000 premiers mots et on crรฉe le dictionnaire de vocabulaire word_count = {k: w2v_model.vocab[k].count for k in w2v_model.vocab} word_count = Counter(word_count) word_count.most_common(10) idx2w = {i: w for (i, (w, f)) in enumerate(word_count.most_common(10000), 2)} idx2w[0] = 'unk' idx2w[1] = 'pad' w2idx = {w: i for (i, (w, f)) in enumerate(word_count.most_common(10000), 2)} w2idx['unk'] = 0 w2idx['pad'] = 1 embeddings_vectors = [w2v_model[w] for (w, f) in word_count.most_common(10000)] word2vec_embeddings = np.vstack(embeddings_vectors) word2vec_embeddings = np.concatenate((np.zeros_like(word2vec_embeddings[0:2]), word2vec_embeddings), 0) word2vec_embeddings.shape w2idx['Oh'] word2vec_embeddings[3664][:10] w2v_model['Oh'][:10] dataset = pd.read_csv('emojis.csv') dataset.head() dataset.loc[3, 'sms'] class_names = ['happy', 'joke', 'astonished', 'angry', 'bored', 'heart'] dataset.shape ``` On va utiliser la mรชme fonction de tokenization qui a รฉtรฉ utilisรฉe pour entrainer les embeddings.
```
# Token regex, matching (in order): emoticons (':)', ':P', ':-O', ...),
# number sequences, runs of one repeated non-alphanumeric character,
# and words (optional trailing apostrophe kept, final dot excluded).
token_pattern = re.compile(r"(\->|(?::\)|:-\)|:\(|:-\(|;\);-\)|:-O|8-|:P|:D|:\||:S|:\$|:@|8o\||\+o\(|\(H\)|\(C\)|\(\?\))|(?:[\d.,]+)|([^\s\w0-9])\2*|(?:[\w0-9\.]+['โ€™]?)(?<!\.))")

def tokenize(text):
    """Tokenize *text* with the same pattern used to train the embeddings."""
    # re.findall returns one tuple of groups per match; group 0 is the full token.
    tokens = [groups[0] for groups in re.findall(token_pattern, str(text))]
    tokens = [t.strip() for t in tokens]
    return tokens

dataset['tokens'] = dataset['sms'].apply(tokenize)
dataset.head()
```

### Exploration de donnรฉes

<hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Observer la distribution des classes.</p> </div> <hr>

```
# Class distribution over the six emoji labels.
dataset[["happy", "joke", "astonished", "angry", "bored", "heart"]].sum().plot.bar(color='#970137', title="dataset distribution");
```

<hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Evaluer la proportion de tokens qui sont hors du vocabulaire des embeddings.</p> </div> <hr>

```
# %load solutions/unk.py
# Out-of-vocabulary rate: share of tokens missing from the embedding vocab.
tokens_not_in_voc = []
for sms in dataset['tokens']:
    for t in sms:
        tokens_not_in_voc.append(t not in w2idx)
print("On a {:.2f}% des tokens hors du vocabulaire".format(sum(tokens_not_in_voc) / len(tokens_not_in_voc) * 100))
```

### Vectorization

Les embeddings de mots permettent de reprรฉsenter chaque <i>token</i> par un vecteur. Pour obtenir un vecteur qui reprรฉsente le sms, on va agrรฉger les diffรฉrents mots du texte. On considรฉrera plusieurs fonctions d'agrรฉgation : la somme, la moyenne, me maximum ou le minimum. En pratique nous verrons dans le dernier cours d'ouverture qu'il existe des mรฉthodes plus รฉvoluรฉes pour composer les mots de la phrase. Nรฉanmoins une simple fonction d'agrรฉgation nous donnera dรฉjร  une bonne <i>baseline</i>.
<img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/model.png?raw=True" width="500"> <hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Ecrire une fonction qui permet de vectoriser un sms.</p> </div> <hr> ``` # %load solutions/vectorize_1.py def vectorize(tokens, agg_method='mean'): token_embeddings_arr = np.array([w2v_model[t] for t in tokens if t in w2v_model.vocab]) if not len(token_embeddings_arr): return np.zeros_like(w2v_model['roi']) # Agrรฉger les reprรฉsentations de chaque token. # Le vecteur de sortie doit รชtre de taille (300, ) if agg_method == 'mean': sentence_embedding = np.mean(token_embeddings_arr, axis=0) elif agg_method == 'max': sentence_embedding = np.max(token_embeddings_arr, axis=0) elif agg_method == 'sum': sentence_embedding = np.sum(token_embeddings_arr, axis=0) return sentence_embedding vectorize(dataset['tokens'][0], agg_method='max') ``` On voudrait attribuer un poids moins important aux embeddings des mots moins caractรฉristiques. Pour รงa, on voudrait pondรฉrer la contribution des vecteurs de chaque mot en fonction de leur score TF-IDF. 
<img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/model-tfidf.png?raw=True" width="700">

<hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Utiliser la pondรฉration TF-IDF pour pondรฉrer chacun des vecteurs.</p> </div> <hr>

```
# Fit tf-idf on the already-tokenized column: identity tokenizer, no lowercasing.
tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False)
tfidf_vectorizer.fit(dataset['tokens'])
# Mappings between tf-idf feature names and their column indices.
w2idx_tfidf = {w: idx for (idx, w) in enumerate(tfidf_vectorizer.get_feature_names())}
idx_tfidf2w = {idx: w for (idx, w) in enumerate(tfidf_vectorizer.get_feature_names())}

# %load solutions/vectorize_2.py
def vectorize(tokens, agg_method='mean', tfidf_vectorizer=None):
    """Embed a tokenized sms; 'tfidf' weights tokens by their tf-idf score."""
    token_embeddings_arr = np.array([w2v_model[t] for t in tokens if t in w2v_model.vocab])
    if not len(token_embeddings_arr):
        # No in-vocabulary token: zero vector of the embedding size.
        return np.zeros_like(w2v_model['roi'])
    # Aggregate the per-token representations.
    # The output vector must have shape (300, )
    if agg_method == 'mean':
        sentence_embedding = np.mean(token_embeddings_arr, axis=0)
    elif agg_method == 'max':
        sentence_embedding = np.max(token_embeddings_arr, axis=0)
    elif agg_method == 'sum':
        sentence_embedding = np.sum(token_embeddings_arr, axis=0)
    elif agg_method == 'tfidf':
        # Per-token tf-idf weights looked up via the global w2idx_tfidf.
        # NOTE(review): w2idx_tfidf[t] raises KeyError for a token present in
        # the w2v vocab but absent from the fitted tf-idf vocabulary, and a
        # single-token sms squeezes the weights to 0-d — the later Vectorizer
        # class handles both cases differently; confirm inputs here.
        tf_idf_w = tfidf_vectorizer.transform([tokens]).todense().transpose()
        tf_idf_w = np.squeeze([tf_idf_w[w2idx_tfidf[t]] for t in tokens if t in w2v_model.vocab])
        sentence_embedding = np.average(token_embeddings_arr, weights=tf_idf_w, axis=0)
    # NOTE(review): an unrecognized agg_method leaves sentence_embedding unbound.
    return sentence_embedding

X = [vectorize(sms) for sms in dataset['tokens']]
X = np.array(X)
print(X.shape)
```

On va intรฉgrer la fonction `vectorize` dans un module compatible avec les fonctions de `sklearn`. <hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Intรฉgrer votre fonction de vectorization dans la classe Vectorizer ci-dessous.
Vous devez simoplement la copier/coller en replaรงant tfidf_vectorizer par self.tfidf_vectorizer car c'est maintenant un attribut de la class</p> </div> <hr> ``` # 6 choses ร  faire pour l'excercice sur la class Vectorizer : # copier votre fonction vectorize dans la class # ajouter l'argument self dans la fonction vectorize # supprimer l'argument tfidf_vectorizer de la fonction vectorize # remplacer toutes les occurences de agg_method par self.agg_method dans la fonction vectorize # supprimer l'argument agg_method de la fonction vectorize # remplacer toutes les occurences de w2idx_tfidf par self.w2idx_tfidf dans la fonction vectorize # %load solutions/vectorizer.py class Vectorizer(BaseEstimator, TransformerMixin): def __init__(self, agg_method='mean', normalize=False): self.agg_method = agg_method self.normalize = normalize self.tfidf_vectorizer = TfidfVectorizer(tokenizer=lambda x: x, lowercase=False, token_pattern=None) def vectorize(self, tokens): token_embeddings_arr = np.array([w2v_model[t] for t in tokens if t in w2v_model.vocab]) if len(token_embeddings_arr) == 0: sentence_embedding = np.zeros_like(w2v_model['roi']) elif len(token_embeddings_arr) == 1: sentence_embedding = np.squeeze(token_embeddings_arr) elif len(token_embeddings_arr) > 1: if self.agg_method == 'mean': sentence_embedding = np.mean(token_embeddings_arr, axis=0) elif self.agg_method == 'max': sentence_embedding = np.max(token_embeddings_arr, axis=0) elif self.agg_method == 'sum': sentence_embedding = np.sum(token_embeddings_arr, axis=0) elif self.agg_method == 'tfidf': tf_idf_w = self.tfidf_vectorizer.transform([tokens]).todense().transpose() tf_idf_w = np.squeeze([tf_idf_w[self.w2idx_tfidf.get(t, 0)] for t in tokens if t in w2v_model.vocab]) sentence_embedding = np.average(token_embeddings_arr, weights=tf_idf_w, axis=0) return sentence_embedding def _vectorize(self, tokens): return vectorize(tokens) def fit(self, X, y=None): self.tfidf_vectorizer.fit(X['tokens']) self.w2idx_tfidf = {w: idx 
for (idx, w) in enumerate(self.tfidf_vectorizer.get_feature_names())} self.idx_tfidf2w = {idx: w for (idx, w) in enumerate(self.tfidf_vectorizer.get_feature_names())} return self def transform(self, X, y=None, eps=1e-12): X = [self.vectorize(t) for t in X['tokens']] X = np.array(X) if self.normalize: X = X / np.linalg.norm(X + eps, axis=1, keepdims=True) return X vectorizer = Vectorizer(agg_method='tfidf') X = vectorizer.fit_transform(dataset) X.shape ``` ### Classification On compare deux algorithmes de classification : Une rรฉgression logistique et un SVM ou l'on pรฉnalise les classes majoritaires. ``` X_train, X_test = train_test_split( dataset, test_size=0.33, random_state=42) y_train = X_train[['happy', 'joke', 'astonished', 'angry', 'bored', 'heart']].astype(int).values y_train = [x.tolist().index(1) for x in y_train] y_test = X_test[['happy', 'joke', 'astonished', 'angry', 'bored', 'heart']].astype(int).values y_test = [x.tolist().index(1) for x in y_test] len(y_train) X_train.shape LogReg_pipeline = Pipeline([ ('vect', Vectorizer('tfidf')), ('clf', OneVsRestClassifier(LogisticRegression(solver='sag'))), ]) # Training logistic regression model on train data LogReg_pipeline.fit(X_train, y_train) # Infering data on test set prediction_LogReg = LogReg_pipeline.predict(X_test) SVC_pipeline = Pipeline([ ('vect', Vectorizer('tfidf')), ('clf', OneVsRestClassifier(SVC(kernel='linear', class_weight='balanced', # penalize probability=True), n_jobs=-1)) ]) SVC_pipeline.fit(X_train, y_train) prediction_SVC = SVC_pipeline.predict(X_test) ``` ### Evaluation ``` import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix from plots import plot_confusion_matrix print('Test accuracy is {}'.format(accuracy_score(y_test, prediction_SVC))) print('Test ROC socre is {}'.format(roc_auc_score(np.eye(np.max(y_test) + 1)[y_test], SVC_pipeline.predict_proba(X_test), multi_class='ovo'))) plot_confusion_matrix(confusion_matrix(y_test, prediction_SVC), 
classes=class_names, title='Confusion matrix, without normalization') print('Test accuracy is {}'.format(accuracy_score(y_test, prediction_LogReg))) print('Test ROC socre is {}'.format(roc_auc_score(np.eye(np.max(y_test) + 1)[y_test], LogReg_pipeline.predict_proba(X_test), multi_class='ovo'))) plot_confusion_matrix(confusion_matrix(y_test, prediction_LogReg), classes=class_names, title='Confusion matrix, without normalization') ``` <hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Quelle mesure de performance vous semble le plus adaptรฉe pour ce cas d'usage ?</p> </div> <hr> La mesure de performance dรฉpend รฉvidemment du contexte d'รฉvaluation du cas d'usage. Par exemple, si l'on est dans un cas de classification binaire ou l'on cherche ร  distinguer des spams d'emails normaux, les spams reprรฉsenteront peut รชtre 5% du jeu de donnรฉes. Un algorithme qui prรฉdirait toujours "not spam" aurait une prรฉcision de 95%, ce qui est รฉvidemment inutile. Dans notre cas, le jeu de donnรฉes est lรฉgรจrement dรฉsรฉquilibrรฉ. Quand on observe les rรฉsultats prรฉcรฉdent, la rรฉgression logistique obtient une meilleure [prรฉcision](https://scikit-learn.org/stable/modules/model_evaluation.html#accuracy-score) (37,3%) que le SVM (32,5%). Mais les matrices de confusions rรฉvรจlent que pour la rรฉgression logistique, les classes minoritaires (astonished et bored) ne sont jamais prรฉdite. La prรฉcision ne traduit donc pas ce phรฉnomรจne. A l'inverse, AUC (Area Under the Curve) correspond comme son nom l'indique ร  l'aire sous la courbe [ROC](https://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics). Cette mรฉtrique traduit mieux la capacitรฉ des mรฉthodes ร  sรฉparer les classes puisqu'elle est meilleure pour le SVM (0.684) que pour la rรฉgression logistique (0.677). L'utilisation de l'AUC รฉcarte les modรจles reprรฉsentatifs, mais pas discriminants. Dans notre cas d'usage, la mesure ROC semble ainsi plus adaptรฉe. 
<hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Comparer les rรฉsultats obtenus avec les deux algorithmes de classification</p> </div> <hr> Comme on l'a mentionnรฉ prรฉcรฉdemment, la rรฉgression logistique semble prioriser les classes majoritaires pour obtenir une meilleure prรฉcision globale. Cette derniรจre est beaucoup plus performante que le SVM pour prรฉdire les classes "happy" ou "joke". Dans le SVM, on a appliquรฉ une pรฉnalitรฉ selon la distribution du jeu de donnรฉes (paramรจtre `class_weight='balanced'`) et ainsi on prรฉdit les classes minoritaires. Chacune des classes indรฉpendamment semble mieux modรฉlisรฉe et on voit ressortir la diagonale qui caractรฉrise ce comportement. Quand on observe le carrรฉ 2x2 dans le coin supรฉrieur gauche de la matrice de confusion, on constate que les deux modรจles semblent avoir du mal ร  distinguer les deux classes "happy" et "joke". Ces humeurs sont sans doute trop proches et difficiles ร  distinguer. <hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Comparer les diffรฉrentes mรฉthodes d'agrรฉgation proposรฉes.
(Mean, Max, Sum, Moyenne pondรฉrรฉe par le TF-IDF)</p> </div> <hr> ``` test_svc = {'acc': {}, 'roc': {}} test_logreg = {'acc': {}, 'roc': {}} for agg_method in ['mean', 'max', 'sum', 'tfidf']: print('Computing agg method: {}'.format(agg_method)) LogReg_pipeline = Pipeline([ ('vect', Vectorizer(agg_method)), ('clf', OneVsRestClassifier(LogisticRegression(solver='sag'))),]) SVC_pipeline = Pipeline([ ('vect', Vectorizer(agg_method)), ('clf', OneVsRestClassifier(SVC(kernel='linear', class_weight='balanced', # penalize probability=True), n_jobs=-1))]) SVC_pipeline.fit(X_train, y_train) prediction_SVC = SVC_pipeline.predict(X_test) LogReg_pipeline.fit(X_train, y_train) prediction_LogReg = LogReg_pipeline.predict(X_test) test_svc['acc'][agg_method] = accuracy_score(y_test, prediction_SVC) test_svc['roc'][agg_method] = roc_auc_score(np.eye(np.max(y_test) + 1)[y_test], SVC_pipeline.predict_proba(X_test), multi_class='ovo') test_logreg['acc'][agg_method] = accuracy_score(y_test, prediction_LogReg) test_logreg['roc'][agg_method] = roc_auc_score(np.eye(np.max(y_test) + 1)[y_test], LogReg_pipeline.predict_proba(X_test), multi_class='ovo') print('Test accuracy for SVC with agg method {} is {}'.format(agg_method, test_svc['acc'][agg_method])) print('Test ROC score for ROC with agg method {} is {}'.format(agg_method, test_svc['roc'][agg_method])) x_svc = [v for (k, v)in test_svc['acc'].items()] y_svc = [v for (k, v)in test_svc['roc'].items()] x_logreg = [v for (k, v)in test_logreg['acc'].items()] y_logreg = [v for (k, v)in test_logreg['roc'].items()] labels = ['svc', 'svc', 'svc', 'svc', 'logreg', 'logreg', 'logreg', 'logreg'] colors = ['#970137', '#970137', '#970137', '#970137', 'black', 'black', 'black', 'black'] fig, ax = plt.subplots(figsize=(5, 5)) ax.scatter(x_svc, y_svc, color='#970137', label='balanced svc') for i, agg in enumerate(['mean', 'max', 'sum', 'tfidf']): label = "{}".format(agg) plt.annotate(label, (x_svc[i], y_svc[i]), textcoords="offset points", 
xytext=(10,10), ha='center', rotation=0, fontsize=15, fontweight='black') ax.scatter(x_logreg, y_logreg, color='black', label='logreg') for i, agg in enumerate(['mean', 'max', 'sum', 'tfidf']): label = "{}".format(agg) plt.annotate(label, (x_logreg[i], y_logreg[i]), textcoords="offset points", xytext=(10,10), ha='center', rotation=0, fontsize=15, fontweight='black') ax.set_xlabel('Accuracy') ax.set_ylabel('ROC') ax.set_title('Method comparison') ax.legend(loc=4) plt.show(); ``` On peut comparer les mรฉthodes d'aggrรฉgation et l'algorithme de classification en fonction de la mesure de prรฉcision ou ROC. On retrouve ce qu'on a observรฉ prรฉcรฉdemment : `LogReg` obtient de meilleures prรฉcisions, et `SVM` obtient de meilleurs ROC (sauf dans le cas de la mรฉthode `max`). Quelque soit l'algorithme de classification ou la mesure de performance, la mรฉthode d'aggrรฉgation `max` semble รชtre la moins adaptรฉe (la prรฉcision et les scores ROC sont moins bons). En considรฉrant le score ROC comme la mesure de performance la plus adaptรฉe, on peut dire que les diffรฉrentes mรฉthodes d'aggrรฉgation ont des scores proches. <hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice (Bonus) :</b> Comparer les rรฉsultats obtenus avec un rรฉseau de neurones rรฉcurent (RNN).</p> </div> <hr> Nous ferons une ouverture sur les rรฉseaux de neurones et leur utilisation pour le texte lors de la derniรจre sรฉance. Nรฉanmoins, cela peut รชtre une bonne occasion de se familiariser avec leur utilisation. Nous allons utiliser la librairie [skorch](https://github.com/skorch-dev/skorch) qui est un wrapper de la librairie [PyTorch](https://pytorch.org/) compatible avec [scikit-learn](https://scikit-learn.org/). Cela permet en particulier de simplifier les aspects d'optimisation. 
Ici on utilise un rรฉseau rรฉcurent de type LSTM <span class="badge badge-secondary">([Cho and al., 2014](#cho-2014)</span>, <span class="badge badge-secondary">[Hochreiter and Schmidhuber, 1997](#schmidhuber-1997))</span>. Les rรฉseaux de neurones rรฉcurrents modรฉlisent les phrases comme des sรฉquences dโ€™embeddings de mots. Ils traitent lโ€™entrรฉe sรฉquentiellement. A chaque รฉtape, le vecteur de sortie est calculรฉ en fonction de lโ€™embedding du mot courant et de lโ€™รฉtat cachรฉ prรฉcรฉdent. <img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/lstm.png?raw=True" width="700"> ``` import torch from torch import nn from skorch import NeuralNet, NeuralNetClassifier from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence import torch.nn.functional as F from sklearn.utils.class_weight import compute_class_weight class RNNClassifier(nn.Module): def __init__(self, n_classes, embeddings_weights, hidden_dim=100, embedding_dim=300, dropout=0.5): super(RNNClassifier, self).__init__() self.embeddings = nn.Embedding.from_pretrained(embeddings_weights, sparse=True) self.embeddings.weight.requires_grad = False self.lstm = nn.LSTM(embedding_dim, hidden_dim) self.dense = nn.Linear(hidden_dim, n_classes) self.dropout = dropout self.drop = nn.Dropout(self.dropout) def forward(self, X, **kwargs): X, X_len = X X = self.embeddings(X) # On utilise une mรฉthode de pytorch pour tenir compte de la longueur des phrases # et ainsi s'adapter au padding. 
X_packed = pack_padded_sequence(X, X_len, batch_first=True, enforce_sorted=False) X_packed, (h, c) = self.lstm(X_packed)# [1][0] # .transpose(0, 1) # https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html#torch.nn.LSTM X, output_lengths = pad_packed_sequence(X_packed, batch_first=True) # X = torch.sigmoid(X) out = F.softmax(self.dense(h.squeeze()), dim=-1) return out class_weights = compute_class_weight( 'balanced', classes=range(len(class_names)), y=y_train) # On va donner un poids plus important aux classes minoritaires # mais pas proportionnel ร  leur distribution pour ne pas trop les favoriser # au dรฉtriment de la prรฉcision globale class_weights = [1, 1, 1.3, 1, 1.3, 1] class_weights = torch.tensor(class_weights, dtype=torch.float) net = NeuralNetClassifier( # NeuralNet RNNClassifier(len(class_names), torch.tensor(word2vec_embeddings)), max_epochs=10, lr=0.001, optimizer=torch.optim.Adam, criterion=torch.nn.NLLLoss, criterion__weight=class_weights ) sequences = [torch.tensor([w2idx.get(t, 0) for t in tokens]) for tokens in X_train['tokens']] sequences_length = torch.tensor([len(s) for s in sequences]) # On "pad" les sรฉquences pour qu'elles aient toutes la mรชme longueur. padded_sequences = pad_sequence(sequences, batch_first=True, padding_value=1) net.fit((padded_sequences, sequences_length), torch.tensor(y_train)) sequences_test = [torch.tensor([w2idx.get(t, 0) for t in tokens]) for tokens in X_test['tokens']] sequences_test_length = torch.tensor([len(s) for s in sequences_test]) # On "pad" les sรฉquences pour qu'elles aient toutes la mรชme longueur. 
padded_sequences_test = pad_sequence(sequences_test, batch_first=True, padding_value=1) prediction_LSTM = net.predict((padded_sequences_test, sequences_test_length)) print('Test accuracy is {}'.format(accuracy_score(y_test, prediction_LSTM))) print('Test ROC socre is {}'.format(roc_auc_score(np.eye(np.max(y_test) + 1)[y_test], net.predict_proba((padded_sequences_test, sequences_test_length)), multi_class='ovo'))) plot_confusion_matrix(confusion_matrix(y_test, prediction_LSTM), classes=class_names, title='Confusion matrix, without normalization') ``` Dans ce cas, l'apport des rรฉseaux de neurones n'est pas รฉvident. Nรฉnamoins en rรจgle gรฉnรฉrale, les mรฉthodes de Deep Learning sont plus performantes, en particulier quand le nombre de donnรฉes augmente. L'utilisation des mรฉthodes de down-sampling ou up-sampling peut s'avรฉrer fastidieux (on va se priver de donnรฉes ou en utiliser d'autres plusieurs fois. La sรฉlection des donnรฉes doit se faire prรฉcisรฉmment pour ne pas impacter les capacitรฉs de gรฉnรฉralisation de l'algorithme). Nous avons prรฉfรฉrรฉ ici utiliser un algorithme qui pรฉnalise les classes majoritaires et une mesure d'erreur adaptรฉe. Il existe un bon article de blog pour gรฉrer les classes dรฉsรฉquilibrรฉes : https://elitedatascience.com/imbalanced-classes. On peut se faire une idรฉe des limites et des points fort de l'algorithme en regardant des prรฉdictions. 
``` humors = ['happy', 'astonished', 'bored', 'angry', 'joke', 'heart'] meta_smiley = [b'\xF0\x9F\x98\x83'.decode("utf-8"), b'\xF0\x9F\x98\xB2'.decode("utf-8"), b'\xF0\x9F\x98\x94'.decode("utf-8"), b'\xF0\x9F\x98\xA0'.decode("utf-8"), b'\xF0\x9F\x98\x86'.decode("utf-8"), b'\xF0\x9F\x98\x8D'.decode("utf-8")] humor_2_emoji = {h: ms for (h, ms) in zip(humors, meta_smiley)} X_test.shape for _ in range(10): idx = np.random.randint(0, len(X_test)) emojis = humor_2_emoji[class_names[prediction_SVC[idx]]] true_emojis = humor_2_emoji[class_names[y_test[idx]]] print(X_test['sms'].values[idx], '(Pred)', emojis, '(True)', true_emojis, '\n') ``` ### Visualisation On peut aussi essayer de visualiser plus globalement les reprรฉsentations. Pour รงa on peut utiliser des algorithmes de rรฉduction de dimension pour visualiser nos donnรฉes. On a dรฉjร  parlรฉ de UMAP et t-SNE. De maniรจre intutive, l'algorithme projete les reprรฉsentations dans un espace de plus faible dimension en s'efforcant de respecter les distances entre les points entre l'espace de dรฉpart et d'arrivรฉe. Il permet de visualiser facilement les donnรฉes. On va utiliser l'outil `Tensorboard` qui intรจgre les principales mรฉthodes de rรฉduction de dimensions. 
``` from pathlib import Path from PIL import Image import os from os import listdir from os.path import isfile, join from torchvision import transforms from torch.utils.tensorboard import SummaryWriter import torch import tensorflow as tf import tensorboard as tb tf.io.gfile = tb.compat.tensorflow_stub.io.gfile pil_img = Image.open('./smileys/happy.png').convert('RGB') pil_img = pil_img.resize((100, 100)) smileys_images = [f for f in listdir('./smileys') if isfile(join('./smileys', f))] imgs_tb = {} for s in smileys_images: pil_img = Image.open(os.path.join('smileys', s)).convert('RGB') pil_img = pil_img.resize((25, 25)) pil_to_tensor = transforms.ToTensor()(pil_img).unsqueeze_(0) imgs_tb[Path(os.path.join('smileys', s)).stem] = pil_to_tensor writer_embeddings = SummaryWriter(log_dir=os.path.join("./tfb/")) vectorizer = Vectorizer(agg_method='tfidf', normalize=True) emb_test = vectorizer.fit_transform(X_test) writer_embeddings.add_embedding(torch.tensor(emb_test), metadata=[(r, s, l) for (r, s, l) in zip( X_test['sms'].values, [humor_2_emoji[class_names[y]] for y in y_test], [humor_2_emoji[class_names[y]] for y in prediction_SVC]) ], label_img=torch.cat([imgs_tb[class_names[y]] for y in y_test]), metadata_header=['sms','label', 'prediction'], tag="SMS-EMB-CLS") ``` Pour visualiser les reprรฉsentations, lancer un tensorboard. Dans un terminal, se placer dans le dossier ou est รฉxรฉcutรฉ le notebook et exรฉcuter: ``` tensorboard --logdir ./tfb/ ``` Dans **Colab** on va lancer le tensorboard directement dans le notebook en รฉxรฉcutant les cellules suivante : ``` %load_ext tensorboard ``` ``` %tensorboard --logdir ./tfb/ ``` ``` # Load the TensorBoard notebook extension %load_ext tensorboard from tensorboard import notebook notebook.list() # View open TensorBoard instances # Control TensorBoard display. 
If no port is provided, # the most recently launched TensorBoard is used notebook.display(port=6006, height=1000); ``` <hr> <div class="alert alert-info" role="alert"> <p><b>๐Ÿ“ Exercice :</b> Utiliser les mรฉthodes UMAP, PCA et t-SNE pour projeter les donnรฉes. Comparez les diffรฉrentes mรฉthodes de projections et interprรฉtez qualitativement les propriรฉtรฉs de vos reprรฉsentations.</p> </div> <hr> A premiรจre vue, il est plus difficile d'analyser les projections des sms que celle des embeddings de mots. En effet quelque soit la mรฉthode de projection, les documents sont moins bien sรฉparรฉs et l'analyse semble moins directe. On observe cependant des diffรฉrences entre les mรฉthodes de projection. De maniรจre gรฉnรฉrale, les sms ne sont pas forcรฉment bien sรฉparรฉs. Il semblerait que les clusters s'expliquent gรฉnรฉralement pour des raisons qui sont indรฉpendantes de la sรฉmantique. Par exemple, les messages avec des noms dรฉidentifiรฉs (รฉtiquette \[_forename_\]), les messages courts, avec un recoupement lexical important: "bonne chance" et "bon courage" ou encore avec ou des horaires des durรฉes. Finalement, il est intรฉressant de voir que l'on peut jouer sur les hyper-paramรจtres. Pour le t-SNE, on peut ajouter un degrรฉ de supervision qui permet d'amรฉliorer la dรฉfinition des clusters en fonction des labels. Pour UMAP on peut faire รฉvoluer le nomre de voisins et ainsi la forme du nuage de point. Par ailleurs, UMAP semble lรฉgรจrement plus rapide en terme de temps de calcul et de convergence des projections. La compatibilitรฉ entre Jupyter/Colab et Tensorboard est un parfois instable (c.f. https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks). Si vous รชtes sur Colab, vous pouvez tรฉlรฉcharger le dossier directement sur votre ordinateur. Tรฉlรฉchargez le .zip, sur votre ordinateur, dezipรฉ le. 
``` !zip -r tfb.zip ./tfb/ ``` Sur votre ordinateur, dans un terminal, se placer dans le dossier ou est le notebook et exรฉcuter: ``` tensorboard --logdir ./tfb/ ``` Vous devriez avoir un visuel comme ci-dessous. Vous pouvez cliquer sur un sms et vous avez ร  droite les sms les plus proches en terme de distance cosine comme nous l'avons fait pour word2vec. Par ailleurs chaque sms est reprรฉsentรฉ par le smiley correspondant. Vous pouvez faire varier les mรฉthodes de projection dans le panneau de gauche. <img src="https://github.com/AntoineSimoulin/m2-data-sciences/blob/master/TP3%20-%20Word%20Embeddings/tfb-viz.png?raw=True" width="1000"> ## ๐Ÿ“š References > <div id="panckhurst-2016">Panckhurst, Rachel, et al. <a href=https://hal.archives-ouvertes.fr/hal-01485560> 88milSMS. A corpus of authentic text messages in French.</a> Banque de corpus CoMeRe. Chanier T.(รฉd)-Ortolang: Nancy (2016).</div> > <div id="schmidhuber-1997">Sepp Hochreiter, Jรผrgen Schmidhuber. <a href=https://dl.acm.org/doi/10.1162/neco.1997.9.8.1735> Long Short-Term Memory.</a> Neural Comput. 9(8): 1735-1780 (1997).</div> > <div id="cho-2014">Kyunghyun Cho, Bart van Merrienboer, ร‡aglar Gรผlรงehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, Yoshua Bengio: <a href=https://doi.org/10.3115/v1/d14-1179> Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation.</a> EMNLP 2014: 1724-1734.</div>
github_jupyter
## Our Mission ## Spam detection is one of the major applications of Machine Learning in the interwebs today. Pretty much all of the major email service providers have spam detection systems built in and automatically classify such mail as 'Junk Mail'. In this mission we will be using the Naive Bayes algorithm to create a model that can classify SMS messages as spam or not spam, based on the training we give to the model. It is important to have some level of intuition as to what a spammy text message might look like. Usually they have words like 'free', 'win', 'winner', 'cash', 'prize' and the like in them as these texts are designed to catch your eye and in some sense tempt you to open them. Also, spam messages tend to have words written in all capitals and also tend to use a lot of exclamation marks. To the recipient, it is usually pretty straightforward to identify a spam text and our objective here is to train a model to do that for us! Being able to identify spam messages is a binary classification problem as messages are classified as either 'Spam' or 'Not Spam' and nothing else. Also, this is a supervised learning problem, as we will be feeding a labelled dataset into the model, that it can learn from, to make future predictions. ### Step 0: Introduction to the Naive Bayes Theorem ### Bayes Theorem is one of the earliest probabilistic inference algorithms. It was developed by Reverend Bayes (which he used to try and infer the existence of God no less), and still performs extremely well for certain use cases. It's best to understand this theorem using an example. Let's say you are a member of the Secret Service and you have been deployed to protect the Democratic presidential nominee during one of his/her campaign speeches. Being a public event that is open to all, your job is not easy and you have to be on the constant lookout for threats. So one place to start is to put a certain threat-factor for each person. 
So based on the features of an individual, like the age, sex, and other smaller factors like whether the person is carrying a bag, looks nervous, etc., you can make a judgment call as to whether that person is a viable threat. If an individual ticks all the boxes up to a level where it crosses a threshold of doubt in your mind, you can take action and remove that person from the vicinity. Bayes Theorem works in the same way, as we are computing the probability of an event (a person being a threat) based on the probabilities of certain related events (age, sex, presence of bag or not, nervousness of the person, etc.). One thing to consider is the independence of these features amongst each other. For example if a child looks nervous at the event then the likelihood of that person being a threat is not as much as say if it was a grown man who was nervous. To break this down a bit further, here there are two features we are considering, age AND nervousness. Say we look at these features individually, we could design a model that flags ALL persons that are nervous as potential threats. However, it is likely that we will have a lot of false positives as there is a strong chance that minors present at the event will be nervous. Hence by considering the age of a person along with the 'nervousness' feature we would definitely get a more accurate result as to who are potential threats and who aren't. This is the 'Naive' bit of the theorem where it considers each feature to be independent of each other which may not always be the case and hence that can affect the final judgement. In short, Bayes Theorem calculates the probability of a certain event happening (in our case, a message being spam) based on the joint probabilistic distributions of certain other events (in our case, the appearance of certain words in a message). We will dive into the workings of Bayes Theorem later in the mission, but first, let us understand the data we are going to work with. 
### Step 1.1: Understanding our dataset ### We will be using a dataset originally compiled and posted on the UCI Machine Learning repository which has a very good collection of datasets for experimental research purposes. If you're interested, you can review the [abstract](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection) and the original [compressed data file](https://archive.ics.uci.edu/ml/machine-learning-databases/00228/) on the UCI site. For this exercise, however, we've gone ahead and downloaded the data for you. ** Here's a preview of the data: ** <img src="images/dqnb.png" height="1242" width="1242"> The columns in the data set are currently not named and as you can see, there are 2 columns. The first column takes two values, 'ham' which signifies that the message is not spam, and 'spam' which signifies that the message is spam. The second column is the text content of the SMS message that is being classified. >**Instructions:** * Import the dataset into a pandas dataframe using the **read_table** method. The file has already been downloaded, and you can access it using the filepath 'smsspamcollection/SMSSpamCollection'. Because this is a tab separated dataset we will be using '\\t' as the value for the 'sep' argument which specifies this format. * Also, rename the column names by specifying a list ['label, 'sms_message'] to the 'names' argument of read_table(). * Print the first five values of the dataframe with the new column names. ``` ''' Solution ''' import pandas as pd # Dataset available using filepath 'smsspamcollection/SMSSpamCollection' df = pd.read_table('smsspamcollection/SMSSpamCollection', sep='\t', header=None, names=['label', 'sms_message']) # Output printing out first 5 rows df.head() ``` ### Step 1.2: Data Preprocessing ### Now that we have a basic understanding of what our dataset looks like, lets convert our labels to binary variables, 0 to represent 'ham'(i.e. not spam) and 1 to represent 'spam' for ease of computation. 
You might be wondering why do we need to do this step? The answer to this lies in how scikit-learn handles inputs. Scikit-learn only deals with numerical values and hence if we were to leave our label values as strings, scikit-learn would do the conversion internally(more specifically, the string labels will be cast to unknown float values). Our model would still be able to make predictions if we left our labels as strings but we could have issues later when calculating performance metrics, for example when calculating our precision and recall scores. Hence, to avoid unexpected 'gotchas' later, it is good practice to have our categorical values be fed into our model as integers. >**Instructions:** * Convert the values in the 'label' colum to numerical values using map method as follows: {'ham':0, 'spam':1} This maps the 'ham' value to 0 and the 'spam' value to 1. * Also, to get an idea of the size of the dataset we are dealing with, print out number of rows and columns using 'shape'. ``` ''' Solution ''' df['label'] = df.label.map({'ham':0, 'spam':1}) print(df.shape) df.head() # returns (rows, columns) ``` ### Step 2.1: Bag of words ### What we have here in our data set is a large collection of text data (5,572 rows of data). Most ML algorithms rely on numerical data to be fed into them as input, and email/sms messages are usually text heavy. Here we'd like to introduce the Bag of Words(BoW) concept which is a term used to specify the problems that have a 'bag of words' or a collection of text data that needs to be worked with. The basic idea of BoW is to take a piece of text and count the frequency of the words in that text. It is important to note that the BoW concept treats each word individually and the order in which the words occur does not matter. 
Using a process which we will go through now, we can covert a collection of documents to a matrix, with each document being a row and each word(token) being the column, and the corresponding (row,column) values being the frequency of occurrance of each word or token in that document. For example: Lets say we have 4 documents as follows: `['Hello, how are you!', 'Win money, win from home.', 'Call me now', 'Hello, Call you tomorrow?']` Our objective here is to convert this set of text to a frequency distribution matrix, as follows: <img src="images/countvectorizer.png" height="542" width="542"> Here as we can see, the documents are numbered in the rows, and each word is a column name, with the corresponding value being the frequency of that word in the document. Lets break this down and see how we can do this conversion using a small set of documents. To handle this, we will be using sklearns [count vectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer) method which does the following: * It tokenizes the string(separates the string into individual words) and gives an integer ID to each token. * It counts the occurrance of each of those tokens. **Please Note:** * The CountVectorizer method automatically converts all tokenized words to their lower case form so that it does not treat words like 'He' and 'he' differently. It does this using the `lowercase` parameter which is by default set to `True`. * It also ignores all punctuation so that words followed by a punctuation mark (for example: 'hello!') are not treated differently than the same words not prefixed or suffixed by a punctuation mark (for example: 'hello'). It does this using the `token_pattern` parameter which has a default regular expression which selects tokens of 2 or more alphanumeric characters. * The third parameter to take note of is the `stop_words` parameter. 
Stop words refer to the most commonly used words in a language. They include words like 'am', 'an', 'and', 'the' etc. By setting this parameter value to `english`, CountVectorizer will automatically ignore all words(from our input text) that are found in the built in list of english stop words in scikit-learn. This is extremely helpful as stop words can skew our calculations when we are trying to find certain key words that are indicative of spam. We will dive into the application of each of these into our model in a later step, but for now it is important to be aware of such preprocessing techniques available to us when dealing with textual data. ### Step 2.2: Implementing Bag of Words from scratch ### Before we dive into scikit-learn's Bag of Words(BoW) library to do the dirty work for us, let's implement it ourselves first so that we can understand what's happening behind the scenes. **Step 1: Convert all strings to their lower case form.** Let's say we have a document set: ``` documents = ['Hello, how are you!', 'Win money, win from home.', 'Call me now.', 'Hello, Call hello you tomorrow?'] ``` >>**Instructions:** * Convert all the strings in the documents set to their lower case. Save them into a list called 'lower_case_documents'. You can convert strings to their lower case in python by using the lower() method. ``` ''' Solution: ''' documents = ['Hello, how are you!', 'Win money, win from home.', 'Call me now.', 'Hello, Call hello you tomorrow?'] lower_case_documents = [] for i in documents: lower_case_documents.append(i.lower()) print(lower_case_documents) ``` **Step 2: Removing all punctuation** >>**Instructions:** Remove all punctuation from the strings in the document set. Save them into a list called 'sans_punctuation_documents'. 
``` ''' Solution: ''' sans_punctuation_documents = [] import string for i in lower_case_documents: sans_punctuation_documents.append(i.translate(str.maketrans('', '', string.punctuation))) print(sans_punctuation_documents) ``` **Step 3: Tokenization** Tokenizing a sentence in a document set means splitting up a sentence into individual words using a delimiter. The delimiter specifies what character we will use to identify the beginning and the end of a word(for example we could use a single space as the delimiter for identifying words in our document set.) >>**Instructions:** Tokenize the strings stored in 'sans_punctuation_documents' using the split() method. Store the final document set in a list called 'preprocessed_documents'. ``` ''' Solution: ''' preprocessed_documents = [] for i in sans_punctuation_documents: preprocessed_documents.append(i.split(' ')) print(preprocessed_documents) ``` **Step 4: Count frequencies** Now that we have our document set in the required format, we can proceed to counting the occurrence of each word in each document of the document set. We will use the `Counter` method from the Python `collections` library for this purpose. `Counter` counts the occurrence of each item in the list and returns a dictionary with the key as the item being counted and the corresponding value being the count of that item in the list. >>**Instructions:** Using the Counter() method and preprocessed_documents as the input, create a dictionary with the keys being each word in each document and the corresponding values being the frequncy of occurrence of that word. Save each Counter dictionary as an item in a list called 'frequency_list'. ``` ''' Solution ''' frequency_list = [] import pprint from collections import Counter for i in preprocessed_documents: frequency_counts = Counter(i) frequency_list.append(frequency_counts) pprint.pprint(frequency_list) ``` Congratulations! You have implemented the Bag of Words process from scratch! 
As we can see in our previous output, we have a frequency distribution dictionary which gives a clear view of the text that we are dealing with. We should now have a solid understanding of what is happening behind the scenes in the `sklearn.feature_extraction.text.CountVectorizer` method of scikit-learn. We will now implement `sklearn.feature_extraction.text.CountVectorizer` method in the next step. ### Step 2.3: Implementing Bag of Words in scikit-learn ### Now that we have implemented the BoW concept from scratch, let's go ahead and use scikit-learn to do this process in a clean and succinct way. We will use the same document set as we used in the previous step. ``` ''' Here we will look to create a frequency matrix on a smaller document set to make sure we understand how the document-term matrix generation happens. We have created a sample document set 'documents'. ''' documents = ['Hello, how are you!', 'Win money, win from home.', 'Call me now.', 'Hello, Call hello you tomorrow?'] ``` >>**Instructions:** Import the sklearn.feature_extraction.text.CountVectorizer method and create an instance of it called 'count_vector'. ``` ''' Solution ''' from sklearn.feature_extraction.text import CountVectorizer count_vector = CountVectorizer() ``` **Data preprocessing with CountVectorizer()** In Step 2.2, we implemented a version of the CountVectorizer() method from scratch that entailed cleaning our data first. This cleaning involved converting all of our data to lower case and removing all punctuation marks. CountVectorizer() has certain parameters which take care of these steps for us. They are: * `lowercase = True` The `lowercase` parameter has a default value of `True` which converts all of our text to its lower case form. 
* `token_pattern = (?u)\\b\\w\\w+\\b` The `token_pattern` parameter has a default regular expression value of `(?u)\\b\\w\\w+\\b` which ignores all punctuation marks and treats them as delimiters, while accepting alphanumeric strings of length greater than or equal to 2, as individual tokens or words. * `stop_words` The `stop_words` parameter, if set to `english` will remove all words from our document set that match a list of English stop words which is defined in scikit-learn. Considering the size of our dataset and the fact that we are dealing with SMS messages and not larger text sources like e-mail, we will not be setting this parameter value. You can take a look at all the parameter values of your `count_vector` object by simply printing out the object as follows: ``` ''' Practice node: Print the 'count_vector' object which is an instance of 'CountVectorizer()' ''' print(count_vector) ``` >>**Instructions:** Fit your document dataset to the CountVectorizer object you have created using fit(), and get the list of words which have been categorized as features using the get_feature_names() method. ``` ''' Solution: ''' count_vector.fit(documents) count_vector.get_feature_names() ``` The `get_feature_names()` method returns our feature names for this dataset, which is the set of words that make up our vocabulary for 'documents'. >>**Instructions:** Create a matrix with the rows being each of the 4 documents, and the columns being each word. The corresponding (row, column) value is the frequency of occurrance of that word(in the column) in a particular document(in the row). You can do this using the transform() method and passing in the document data set as the argument. The transform() method returns a matrix of numpy integers, you can convert this to an array using toarray(). 
Call the array 'doc_array' ``` ''' Solution ''' doc_array = count_vector.transform(documents).toarray() doc_array ``` Now we have a clean representation of the documents in terms of the frequency distribution of the words in them. To make it easier to understand our next step is to convert this array into a dataframe and name the columns appropriately. >>**Instructions:** Convert the array we obtained, loaded into 'doc_array', into a dataframe and set the column names to the word names(which you computed earlier using get_feature_names(). Call the dataframe 'frequency_matrix'. ``` ''' Solution ''' frequency_matrix = pd.DataFrame(doc_array, columns = count_vector.get_feature_names()) frequency_matrix ``` Congratulations! You have successfully implemented a Bag of Words problem for a document dataset that we created. One potential issue that can arise from using this method out of the box is the fact that if our dataset of text is extremely large(say if we have a large collection of news articles or email data), there will be certain values that are more common that others simply due to the structure of the language itself. So for example words like 'is', 'the', 'an', pronouns, grammatical contructs etc could skew our matrix and affect our analyis. There are a couple of ways to mitigate this. One way is to use the `stop_words` parameter and set its value to `english`. This will automatically ignore all words(from our input text) that are found in a built in list of English stop words in scikit-learn. Another way of mitigating this is by using the [tfidf](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer) method. This method is out of scope for the context of this lesson. ### Step 3.1: Training and testing sets ### Now that we have understood how to deal with the Bag of Words problem we can get back to our dataset and proceed with our analysis. 
Our first step in this regard would be to split our dataset into a training and testing set so we can test our model later. >>**Instructions:** Split the dataset into a training and testing set by using the train_test_split method in sklearn. Split the data using the following variables: * `X_train` is our training data for the 'sms_message' column. * `y_train` is our training data for the 'label' column * `X_test` is our testing data for the 'sms_message' column. * `y_test` is our testing data for the 'label' column Print out the number of rows we have in each our training and testing data. ``` ''' Solution ''' # split into training and testing sets from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(df['sms_message'], df['label'], random_state=1) print('Number of rows in the total set: {}'.format(df.shape[0])) print('Number of rows in the training set: {}'.format(X_train.shape[0])) print('Number of rows in the test set: {}'.format(X_test.shape[0])) ``` ### Step 3.2: Applying Bag of Words processing to our dataset. ### Now that we have split the data, our next objective is to follow the steps from Step 2: Bag of words and convert our data into the desired matrix format. To do this we will be using CountVectorizer() as we did before. There are two steps to consider here: * Firstly, we have to fit our training data (`X_train`) into `CountVectorizer()` and return the matrix. * Secondly, we have to transform our testing data (`X_test`) to return the matrix. Note that `X_train` is our training data for the 'sms_message' column in our dataset and we will be using this to train our model. `X_test` is our testing data for the 'sms_message' column and this is the data we will be using(after transformation to a matrix) to make predictions on. We will then compare those predictions with `y_test` in a later step. For now, we have provided the code that does the matrix transformations for you! 
``` ''' [Practice Node] The code for this segment is in 2 parts. Firstly, we are learning a vocabulary dictionary for the training data and then transforming the data into a document-term matrix; secondly, for the testing data we are only transforming the data into a document-term matrix. This is similar to the process we followed in Step 2.3 We will provide the transformed data to students in the variables 'training_data' and 'testing_data'. ''' ''' Solution ''' # Instantiate the CountVectorizer method count_vector = CountVectorizer() # Fit the training data and then return the matrix training_data = count_vector.fit_transform(X_train) # Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer() testing_data = count_vector.transform(X_test) ``` ### Step 4.1: Bayes Theorem implementation from scratch ### Now that we have our dataset in the format that we need, we can move onto the next portion of our mission which is the algorithm we will use to make our predictions to classify a message as spam or not spam. Remember that at the start of the mission we briefly discussed the Bayes theorem but now we shall go into a little more detail. In layman's terms, the Bayes theorem calculates the probability of an event occurring, based on certain other probabilities that are related to the event in question. It is composed of a prior(the probabilities that we are aware of or that is given to us) and the posterior(the probabilities we are looking to compute using the priors). Let us implement the Bayes Theorem from scratch using a simple example. Let's say we are trying to find the odds of an individual having diabetes, given that he or she was tested for it and got a positive result. In the medical field, such probabilies play a very important role as it usually deals with life and death situatuations. We assume the following: `P(D)` is the probability of a person having Diabetes. 
It's value is `0.01` or in other words, 1% of the general population has diabetes(Disclaimer: these values are assumptions and are not reflective of any medical study). `P(Pos)` is the probability of getting a positive test result. `P(Neg)` is the probability of getting a negative test result. `P(Pos|D)` is the probability of getting a positive result on a test done for detecting diabetes, given that you have diabetes. This has a value `0.9`. In other words the test is correct 90% of the time. This is also called the Sensitivity or True Positive Rate. `P(Neg|~D)` is the probability of getting a negative result on a test done for detecting diabetes, given that you do not have diabetes. This also has a value of `0.9` and is therefore correct, 90% of the time. This is also called the Specificity or True Negative Rate. The Bayes formula is as follows: <img src="images/bayes_formula.png" height="242" width="242"> * `P(A)` is the prior probability of A occuring independantly. In our example this is `P(D)`. This value is given to us. * `P(B)` is the prior probability of B occuring independantly. In our example this is `P(Pos)`. * `P(A|B)` is the posterior probability that A occurs given B. In our example this is `P(D|Pos)`. That is, **the probability of an individual having diabetes, given that, that individual got a positive test result. This is the value that we are looking to calculate.** * `P(B|A)` is the likelihood probability of B occuring, given A. In our example this is `P(Pos|D)`. This value is given to us. 
Putting our values into the formula for Bayes theorem we get: `P(D|Pos) = (P(D) * P(Pos|D)) / P(Pos)` The probability of getting a positive test result `P(Pos)` can be calculated using the Sensitivity and Specificity as follows: `P(Pos) = [P(D) * Sensitivity] + [P(~D) * (1-Specificity)]` ``` ''' Instructions: Calculate probability of getting a positive test result, P(Pos) ''' ''' Solution (skeleton code will be provided) ''' # P(D) p_diabetes = 0.01 # P(~D) p_no_diabetes = 0.99 # Sensitivity or P(Pos|D) p_pos_diabetes = 0.9 # Specificity or P(Neg/~D) p_neg_no_diabetes = 0.9 # P(Pos) p_pos = (p_diabetes * p_pos_diabetes) + (p_no_diabetes * (1 - p_neg_no_diabetes)) print('The probability of getting a positive test result P(Pos) is: {}'.format(p_pos)) ``` ** Using all of this information we can calculate our posteriors as follows: ** The probability of an individual having diabetes, given that, that individual got a positive test result: `P(D/Pos) = (P(D) * Sensitivity) / P(Pos)` The probability of an individual not having diabetes, given that, that individual got a positive test result: `P(~D/Pos) = (P(~D) * (1-Specificity)) / P(Pos)` The sum of our posteriors will always equal `1`. ``` ''' Instructions: Compute the probability of an individual having diabetes, given that, that individual got a positive test result. In other words, compute P(D|Pos). The formula is: P(D|Pos) = (P(D) * P(Pos|D)) / P(Pos) ''' ''' Solution ''' # P(D|Pos) p_diabetes_pos = (p_diabetes * p_pos_diabetes) / p_pos print('Probability of an individual having diabetes, given that that individual got a positive test result is:\ ',format(p_diabetes_pos)) ''' Instructions: Compute the probability of an individual not having diabetes, given that, that individual got a positive test result. In other words, compute P(~D|Pos). The formula is: P(~D|Pos) = (P(~D) * P(Pos|~D)) / P(Pos) Note that P(Pos/~D) can be computed as 1 - P(Neg/~D).
Therefore: P(Pos/~D) = p_pos_no_diabetes = 1 - 0.9 = 0.1 ''' ''' Solution ''' # P(Pos/~D) p_pos_no_diabetes = 0.1 # P(~D|Pos) p_no_diabetes_pos = (p_no_diabetes * p_pos_no_diabetes) / p_pos print ('Probability of an individual not having diabetes, given that individual got a positive test result is:'\ ,p_no_diabetes_pos) ``` Congratulations! You have implemented Bayes theorem from scratch. Your analysis shows that even if you get a positive test result, there is only an 8.3% chance that you actually have diabetes and a 91.67% chance that you do not have diabetes. This is of course assuming that only 1% of the entire population has diabetes which of course is only an assumption. ** What does the term 'Naive' in 'Naive Bayes' mean ? ** The term 'Naive' in Naive Bayes comes from the fact that the algorithm considers the features that it is using to make the predictions to be independent of each other, which may not always be the case. So in our Diabetes example, we are considering only one feature, that is the test result. Say we added another feature, 'exercise'. Let's say this feature has a binary value of `0` and `1`, where the former signifies that the individual exercises less than or equal to 2 days a week and the latter signifies that the individual exercises greater than or equal to 3 days a week. If we had to use both of these features, namely the test result and the value of the 'exercise' feature, to compute our final probabilities, Bayes' theorem would fail. Naive Bayes' is an extension of Bayes' theorem that assumes that all the features are independent of each other. ### Step 4.2: Naive Bayes implementation from scratch ### Now that you have understood the ins and outs of Bayes Theorem, we will extend it to consider cases where we have more than one feature.
Let's say that we have two political parties' candidates, 'Jill Stein' of the Green Party and 'Gary Johnson' of the Libertarian Party and we have the probabilities of each of these candidates saying the words 'freedom', 'immigration' and 'environment' when they give a speech: * Probability that Jill Stein says 'freedom': 0.1 ---------> `P(F|J)` * Probability that Jill Stein says 'immigration': 0.1 -----> `P(I|J)` * Probability that Jill Stein says 'environment': 0.8 -----> `P(E|J)` * Probability that Gary Johnson says 'freedom': 0.7 -------> `P(F|G)` * Probability that Gary Johnson says 'immigration': 0.2 ---> `P(I|G)` * Probability that Gary Johnson says 'environment': 0.1 ---> `P(E|G)` And let us also assume that the probablility of Jill Stein giving a speech, `P(J)` is `0.5` and the same for Gary Johnson, `P(G) = 0.5`. Given this, what if we had to find the probabilities of Jill Stein saying the words 'freedom' and 'immigration'? This is where the Naive Bayes'theorem comes into play as we are considering two features, 'freedom' and 'immigration'. Now we are at a place where we can define the formula for the Naive Bayes' theorem: <img src="images/naivebayes.png" height="342" width="342"> Here, `y` is the class variable or in our case the name of the candidate and `x1` through `xn` are the feature vectors or in our case the individual words. The theorem makes the assumption that each of the feature vectors or words (`xi`) are independent of each other. To break this down, we have to compute the following posterior probabilities: * `P(J|F,I)`: Probability of Jill Stein saying the words Freedom and Immigration. Using the formula and our knowledge of Bayes' theorem, we can compute this as follows: `P(J|F,I)` = `(P(J) * P(F|J) * P(I|J)) / P(F,I)`. Here `P(F,I)` is the probability of the words 'freedom' and 'immigration' being said in a speech. * `P(G|F,I)`: Probability of Gary Johnson saying the words Freedom and Immigration. 
Using the formula, we can compute this as follows: `P(G|F,I)` = `(P(G) * P(F|G) * P(I|G)) / P(F,I)` ``` ''' Instructions: Compute the probability of the words 'freedom' and 'immigration' being said in a speech, or P(F,I). The first step is multiplying the probabilities of Jill Stein giving a speech with her individual probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_j_text The second step is multiplying the probabilities of Gary Johnson giving a speech with his individual probabilities of saying the words 'freedom' and 'immigration'. Store this in a variable called p_g_text The third step is to add both of these probabilities and you will get P(F,I). ''' ''' Solution: Step 1 ''' # P(J) p_j = 0.5 # P(F/J) p_j_f = 0.1 # P(I/J) p_j_i = 0.1 p_j_text = p_j * p_j_f * p_j_i print(p_j_text) ''' Solution: Step 2 ''' # P(G) p_g = 0.5 # P(F/G) p_g_f = 0.7 # P(I/G) p_g_i = 0.2 p_g_text = p_g * p_g_f * p_g_i print(p_g_text) ''' Solution: Step 3: Compute P(F,I) and store in p_f_i ''' p_f_i = p_j_text + p_g_text print('Probability of words freedom and immigration being said are: ', format(p_f_i)) ``` Now we can compute the probability of `P(J|F,I)`, that is the probability of Jill Stein saying the words Freedom and Immigration and `P(G|F,I)`, that is the probability of Gary Johnson saying the words Freedom and Immigration. 
``` ''' Instructions: Compute P(J|F,I) using the formula P(J|F,I) = (P(J) * P(F|J) * P(I|J)) / P(F,I) and store it in a variable p_j_fi ''' ''' Solution ''' p_j_fi = p_j_text / p_f_i print('The probability of Jill Stein saying the words Freedom and Immigration: ', format(p_j_fi)) ''' Instructions: Compute P(G|F,I) using the formula P(G|F,I) = (P(G) * P(F|G) * P(I|G)) / P(F,I) and store it in a variable p_g_fi ''' ''' Solution ''' p_g_fi = p_g_text / p_f_i print('The probability of Gary Johnson saying the words Freedom and Immigration: ', format(p_g_fi)) ``` And as we can see, just like in the Bayes' theorem case, the sum of our posteriors is equal to 1. Congratulations! You have implemented the Naive Bayes' theorem from scratch. Our analysis shows that there is only a 6.6% chance that Jill Stein of the Green Party uses the words 'freedom' and 'immigration' in her speech as compared to the 93.3% chance for Gary Johnson of the Libertarian party. Another more generic example of Naive Bayes' in action is when we search for the term 'Sacramento Kings' in a search engine. In order for us to get the results pertaining to the Sacramento Kings NBA basketball team, the search engine needs to be able to associate the two words together and not treat them individually, in which case we would get results of images tagged with 'Sacramento' like pictures of city landscapes and images of 'Kings' which could be pictures of crowns or kings from history when what we are looking to get are images of the basketball team. This is a classic case of the search engine treating the words as independent entities and hence being 'naive' in its approach. Applying this to our problem of classifying messages as spam, the Naive Bayes algorithm *looks at each word individually and not as associated entities* with any kind of link between them.
In the case of spam detectors, this usually works as there are certain red flag words which can almost guarantee its classification as spam, for example emails with words like 'viagra' are usually classified as spam. ### Step 5: Naive Bayes implementation using scikit-learn ### Thankfully, sklearn has several Naive Bayes implementations that we can use and so we do not have to do the math from scratch. We will be using sklearns `sklearn.naive_bayes` method to make predictions on our dataset. Specifically, we will be using the multinomial Naive Bayes implementation. This particular classifier is suitable for classification with discrete features (such as in our case, word counts for text classification). It takes in integer word counts as its input. On the other hand Gaussian Naive Bayes is better suited for continuous data as it assumes that the input data has a Gaussian(normal) distribution. ``` ''' Instructions: We have loaded the training data into the variable 'training_data' and the testing data into the variable 'testing_data'. Import the MultinomialNB classifier and fit the training data into the classifier using fit(). Name your classifier 'naive_bayes'. You will be training the classifier using 'training_data' and y_train' from our split earlier. ''' ''' Solution ''' from sklearn.naive_bayes import MultinomialNB naive_bayes = MultinomialNB() naive_bayes.fit(training_data, y_train) ''' Instructions: Now that our algorithm has been trained using the training data set we can now make some predictions on the test data stored in 'testing_data' using predict(). Save your predictions into the 'predictions' variable. ''' ''' Solution ''' predictions = naive_bayes.predict(testing_data) ``` Now that predictions have been made on our test set, we need to check the accuracy of our predictions. ### Step 6: Evaluating our model ### Now that we have made predictions on our test set, our next goal is to evaluate how well our model is doing. 
There are various mechanisms for doing so, but first let's do quick recap of them. ** Accuracy ** measures how often the classifier makes the correct prediction. Itโ€™s the ratio of the number of correct predictions to the total number of predictions (the number of test data points). ** Precision ** tells us what proportion of messages we classified as spam, actually were spam. It is a ratio of true positives(words classified as spam, and which are actually spam) to all positives(all words classified as spam, irrespective of whether that was the correct classification), in other words it is the ratio of `[True Positives/(True Positives + False Positives)]` ** Recall(sensitivity)** tells us what proportion of messages that actually were spam were classified by us as spam. It is a ratio of true positives(words classified as spam, and which are actually spam) to all the words that were actually spam, in other words it is the ratio of `[True Positives/(True Positives + False Negatives)]` For classification problems that are skewed in their classification distributions like in our case, for example if we had a 100 text messages and only 2 were spam and the rest 98 weren't, accuracy by itself is not a very good metric. We could classify 90 messages as not spam(including the 2 that were spam but we classify them as not spam, hence they would be false negatives) and 10 as spam(all 10 false positives) and still get a reasonably good accuracy score. For such cases, precision and recall come in very handy. These two metrics can be combined to get the F1 score, which is weighted average of the precision and recall scores. This score can range from 0 to 1, with 1 being the best possible F1 score. We will be using all 4 metrics to make sure our model does well. For all 4 metrics whose values can range from 0 to 1, having a score as close to 1 as possible is a good indicator of how well our model is doing. 
``` ''' Instructions: Compute the accuracy, precision, recall and F1 scores of your model using your test data 'y_test' and the predictions you made earlier stored in the 'predictions' variable. ''' ''' Solution ''' from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score print('Accuracy score: ', format(accuracy_score(y_test, predictions))) print('Precision score: ', format(precision_score(y_test, predictions))) print('Recall score: ', format(recall_score(y_test, predictions))) print('F1 score: ', format(f1_score(y_test, predictions))) ``` ### Step 7: Conclusion ### One of the major advantages that Naive Bayes has over other classification algorithms is its ability to handle an extremely large number of features. In our case, each word is treated as a feature and there are thousands of different words. Also, it performs well even with the presence of irrelevant features and is relatively unaffected by them. The other major advantage it has is its relative simplicity. Naive Bayes' works well right out of the box and tuning it's parameters is rarely ever necessary, except usually in cases where the distribution of the data is known. It rarely ever overfits the data. Another important advantage is that its model training and prediction times are very fast for the amount of data it can handle. All in all, Naive Bayes' really is a gem of an algorithm! Congratulations! You have succesfully designed a model that can efficiently predict if an SMS message is spam or not! Thank you for learning with us!
github_jupyter
## Precision-Recall Curves in Multiclass For multiclass classification, we have 2 options: - determine a PR curve for each class. - determine the overall PR curve as the micro-average of all classes Let's see how to do both. ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.datasets import load_wine from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.multiclass import OneVsRestClassifier # to convert the 1-D target vector in to a matrix from sklearn.preprocessing import label_binarize from sklearn.metrics import precision_recall_curve from yellowbrick.classifier import PrecisionRecallCurve ``` ## Load data (multiclass) ``` # load data data = load_wine() data = pd.concat([ pd.DataFrame(data.data, columns=data.feature_names), pd.DataFrame(data.target, columns=['target']), ], axis=1) data.head() # target distribution: # multiclass and (fairly) balanced data.target.value_counts(normalize=True) # separate dataset into train and test X_train, X_test, y_train, y_test = train_test_split( data.drop(labels=['target'], axis=1), # drop the target data['target'], # just the target test_size=0.3, random_state=0) X_train.shape, X_test.shape # the target is a vector with the 3 classes y_test[0:10] ``` ## Train ML models The dataset we are using is very, extremely simple, so I am creating dumb models intentionally, that is few trees and very shallow for the random forests and few iterations for the logit. This is, so that we can get the most out of the PR curves by inspecting them visually. ### Random Forests The Random Forests in sklearn are not trained as a 1 vs Rest. 
So in order to produce a 1 vs rest probability vector for each class, we need to wrap this estimator with another one from sklearn: - [OneVsRestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html) ``` # set up the model, wrapped by the OneVsRestClassifier rf = OneVsRestClassifier( RandomForestClassifier( n_estimators=10, random_state=39, max_depth=1, n_jobs=4, ) ) # train the model rf.fit(X_train, y_train) # produce the predictions (as probabilities) y_train_rf = rf.predict_proba(X_train) y_test_rf = rf.predict_proba(X_test) # note that the predictions are an array of 3 columns # first column: the probability of an observation of being of class 0 # second column: the probability of an observation of being of class 1 # third column: the probability of an observation of being of class 2 y_test_rf[0:10, :] pd.DataFrame(y_test_rf).sum(axis=1)[0:10] # The final prediction is that of the biggest probabiity rf.predict(X_test)[0:10] ``` ### Logistic Regression The Logistic regression supports 1 vs rest automatically though its multi_class parameter: ``` # set up the model logit = LogisticRegression( random_state=0, multi_class='ovr', max_iter=10, ) # train logit.fit(X_train, y_train) # obtain the probabilities y_train_logit = logit.predict_proba(X_train) y_test_logit = logit.predict_proba(X_test) # note that the predictions are an array of 3 columns # first column: the probability of an observation of being of class 0 # second column: the probability of an observation of being of class 1 # third column: the probability of an observation of being of class 2 y_test_logit[0:10, :] # The final prediction is that of the biggest probabiity logit.predict(X_test)[0:10] ``` ## Precision-Recall Curve ### Per class with Sklearn ``` # with label_binarize we transform the target vector # into a multi-label matrix, so that it matches the # outputs of the models # then we have 1 class per column y_test = label_binarize(y_test, 
classes=[0, 1, 2]) y_test[0:10, :] # now we determine the precision and recall at different thresholds # considering only the probability vector for class 2 and the true # target for class 2 # so we treat the problem as class 2 vs rest p, r, thresholds = precision_recall_curve(y_test[:, 2], y_test_rf[:, 2]) # precision values p # recall values r # threhsolds examined thresholds ``` Go ahead and examine the precision and recall for the other classes see how these values change. ``` # now let's do these for all classes and capture the results in # dictionaries, so we can plot the values afterwards # determine the Precision and recall # at various thresholds of probability # in a 1 vs all fashion, for each class precision_rf = dict() recall_rf = dict() # for each class for i in range(3): # determine precision and recall at various thresholds # in a 1 vs all fashion precision_rf[i], recall_rf[i], _ = precision_recall_curve( y_test[:, i], y_test_rf[:, i]) precision_rf # plot the curves for each class for i in range(3): plt.plot(recall_rf[i], precision_rf[i], label='class {}'.format(i)) plt.xlabel("recall") plt.ylabel("precision") plt.legend(loc="best") plt.title("precision vs. recall curve") plt.show() # and now for the logistic regression precision_lg = dict() recall_lg = dict() # for each class for i in range(3): # determine precision and recall at various thresholds # in a 1 vs all fashion precision_lg[i], recall_lg[i], _ = precision_recall_curve( y_test[:, i], y_test_logit[:, i]) plt.plot(recall_lg[i], precision_lg[i], label='class {}'.format(i)) plt.xlabel("recall") plt.ylabel("precision") plt.legend(loc="best") plt.title("precision vs. 
recall curve") plt.show() # and now, just because it is a bit difficult to compare # between models, we plot the PR curves class by class, # but the 2 models in the same plot # for each class for i in range(3): plt.plot(recall_lg[i], precision_lg[i], label='logit class {}'.format(i)) plt.plot(recall_rf[i], precision_rf[i], label='rf class {}'.format(i)) plt.xlabel("recall") plt.ylabel("precision") plt.legend(loc="best") plt.title("precision vs. recall curve for class{}".format(i)) plt.show() ``` We see that the Random Forest does a better job for all classes. ### Micro-average with sklearn In order to do this, we concatenate all the probability vectors 1 after the other, and so we do with the real values. ``` # probability vectors for all classes in 1-d vector y_test_rf.ravel() # see that the unravelled prediction vector has 3 times the size # of the origina target len(y_test), len(y_test_rf.ravel()) # A "micro-average": quantifying score on all classes jointly # for random forests precision_rf["micro"], recall_rf["micro"], _ = precision_recall_curve( y_test.ravel(), y_test_rf.ravel(), ) # for logistic regression precision_lg["micro"], recall_lg["micro"], _ = precision_recall_curve( y_test.ravel(), y_test_logit.ravel(), ) # now we plot them next to each other i = "micro" plt.plot(recall_lg[i], precision_lg[i], label='logit micro {}') plt.plot(recall_rf[i], precision_rf[i], label='rf micro {}') plt.xlabel("recall") plt.ylabel("precision") plt.legend(loc="best") plt.title("precision vs. recall curve for class{}".format(i)) plt.show() ``` ## Yellowbrick ### Per class with Yellobrick https://www.scikit-yb.org/en/latest/api/classifier/prcurve.html **Note:** In the cells below, we are passing to Yellobrick classes a model that is already fit. When we fit() the Yellobrick class, it will check if the model is fit, in which case it will do nothing. 
If we pass a model that is not fit, and a multiclass target, Yellowbrick will wrap the model automatically with a 1 vs Rest classifier. Check Yellowbrick's documentation for more details. ``` visualizer = PrecisionRecallCurve( rf, per_class=True, cmap="cool", micro=False, ) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.show() # Finalize and show the figure visualizer = PrecisionRecallCurve( logit, per_class=True, cmap="cool", micro=False, cv=0.05, ) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.show() # Finalize and show the figure ``` ### Micro Yellowbrick ``` visualizer = PrecisionRecallCurve( rf, cmap="cool", micro=True, ) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.show() # Finalize and show the figure visualizer = PrecisionRecallCurve( logit, cmap="cool", micro=True, cv=0.05, ) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data visualizer.show() # Finalize and show the figure ``` That's all for PR curves
github_jupyter
``` from __future__ import division, absolute_import import sys import os import numpy as np import random import pickle import time import h5py import pandas as pd from plotnine import * from imblearn import over_sampling from collections import Counter from tables import * import matplotlib.pyplot as plt from matplotlib_venn import venn3 import sklearn from sklearn import preprocessing from sklearn.cluster import MiniBatchKMeans #root absPath = '/home/angela3/imbalance_pcm_benchmark/' sys.path.insert(0, absPath) from src.imbalance_functions import * np.random.seed(8) random.seed(8) protein_type = "GPCRs" #"kinases" activity_file = "".join((absPath, "data/", protein_type, "_activity.csv")) activity_df = pd.read_csv(activity_file, sep="\t") print(activity_df.info()) print(activity_df.head()) Counter(activity_df["label"]) unique_prots = activity_df["DeepAffinity Protein ID"].drop_duplicates().tolist() print("There are",len(unique_prots),"different proteins") if not os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/"))): os.makedirs("".join((absPath, "data/", protein_type, "/resampling_before_clustering/"))) with open("".join((absPath, "data/", protein_type, "/", protein_type, "_prots.pickle")), 'wb') as handle: pickle.dump(unique_prots, handle, protocol=pickle.HIGHEST_PROTOCOL) unique_comps = activity_df["DeepAffinity Compound ID"].drop_duplicates().tolist() print("There are",len(unique_comps),"different compounds") activity_df.groupby(["DeepAffinity Protein ID", "label"])["label"].count() list_ratios = [] for prot in unique_prots: ratio_actives_inactives = computing_active_inactive_ratio(activity_df, prot) dicti = {"DeepAffinity Protein ID" : prot, "ratio_actives_inactives": ratio_actives_inactives} list_ratios.append(dicti) df_ratios = pd.DataFrame(list_ratios) df_ratios.head() (ggplot(df_ratios, aes("ratio_actives_inactives")) + geom_histogram()) # for each protein, save a sub-dataframe with the data if not 
os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/"))): os.makedirs("".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/"))) #loading fingeprints file_fps = "".join((absPath,"raw_data/dcid_fingerprint.tsv" )) fps_df = pd.read_csv(file_fps, sep="\t") fps_df.info() for prot in unique_prots: sub_prot = activity_df[activity_df["DeepAffinity Protein ID"] == prot] prot_fps = pd.merge(sub_prot[["DeepAffinity Protein ID", "DeepAffinity Compound ID", "label", "Canonical SMILE", "Sequence", "family"]], fps_df, "left", on=["DeepAffinity Compound ID"]) file_subdf = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/", prot, ".csv")) prot_fps.to_csv(file_subdf) # how many unique compounds are according to fingerprints? activity_with_fps = pd.merge(activity_df, fps_df, "left", on=["DeepAffinity Compound ID"]) activity_with_fps.info() activity_with_fps.drop_duplicates("Fingerprint Feature") activity_with_fps.drop_duplicates("DeepAffinity Compound ID") ``` ### Balancing data ``` for prot in unique_prots: print(prot) file_subdf = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/", prot, ".csv")) sub_prot = pd.read_csv(file_subdf) print(sub_prot.shape) prots_passing_smote = [] for prot in unique_prots: print(prot) sm = over_sampling.SMOTE(sampling_strategy=1.0, random_state=42) file_subdf = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/subdataframes/", prot, ".csv")) sub_prot = pd.read_csv(file_subdf) sub_prot["separated_fps"] = sub_prot["Fingerprint Feature"].apply(separating_fps) X = pd.DataFrame.from_dict(dict(zip(sub_prot["separated_fps"].index, sub_prot["separated_fps"].values))).transpose() Y = sub_prot["label"].values print(X.shape) try: Xmot, Ymot = sm.fit_resample(X, Y) print(Xmot.shape) prots_passing_smote.append(prot) except: print(" The specified ratio required to remove samples from the 
minority class while trying to generate new samples.") continue #print(Counter(Ymot)) # para cada proteina, guardar un sub-dataframe con los datos if not os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/resampled_data/"))): os.makedirs("".join((absPath, "data/", protein_type, "/resampling_before_clustering/resampled_data/"))) pickle_path = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/resampled_data/", prot, ".pickle")) with open(pickle_path, 'wb') as handle: pickle.dump((Xmot, Ymot), handle, protocol=pickle.HIGHEST_PROTOCOL) print("There are", len(prots_passing_smote), "proteins to which SMOTE can be applied") #with open("".join((absPath, "data/", protein_type, "/smote_prots.pickle")), 'wb') as handle: # pickle.dump(prots_passing_smote, handle, protocol=pickle.HIGHEST_PROTOCOL) df_lists = [] for prot in prots_passing_smote: print(prot) #Load data pickle pickle_path = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/resampled_data/", prot, ".pickle")) with open(pickle_path, 'rb') as handle: Xmot, Ymot = pickle.load(handle) df = Xmot.copy() df["Y"] = Ymot print(len(Ymot)) df["prot"] = prot df_lists.append(df) df_complete = pd.concat(df_lists) df_complete["fingerprint"] = df_complete[[i for i in range(881)]].apply(lambda row: "".join(row.values.astype(str)), axis=1) df_complete.info() df_complete.head() unique_compounds_df = df_complete.drop_duplicates(subset="fingerprint", keep="first", ignore_index=True) unique_compounds_df.info() names_comps = ["c"+str(i) for i in range(unique_compounds_df.shape[0])] unique_compounds_df["comp_ID"] = names_comps unique_compounds_df.head() unique_compounds_df[["fingerprint", "comp_ID"]].to_csv("".join((absPath, "data/", protein_type, "/resampling_before_clustering/unique_compounds.csv"))) ``` ### Clustering data ``` nclusters = 100 batch_size = 1000 sample_indices = np.arange(0, unique_compounds_df.shape[0]) sample_indices = 
np.random.permutation(sample_indices) #double checking compounds_df_filtered = unique_compounds_df.drop(["prot", "Y"], axis=1)#[["fingerprint", "comp_ID"]] print(compounds_df_filtered.head()) print(compounds_df_filtered.info()) generate_batches = batch_generator(batch_size, compounds_df_filtered, sample_indices) #K-Means model = MiniBatchKMeans(n_clusters=nclusters, init='k-means++', compute_labels=True) sse = {} labels = [] comp_ids = [] clusters_centers = {} #i=0 for i, batch in enumerate(generate_batches): print("Iteration ", i) t0 = time.time() df = pd.DataFrame(batch) print(df["comp_ID"]) comp_ids.append(df["comp_ID"]) to_array = df.drop(['comp_ID'], axis=1).values to_array = preprocessing.scale(to_array) model.partial_fit(to_array) print("The inertia for the batch %s is %s" % (i, model.inertia_)) t_mini_batch = time.time() - t0 print(t_mini_batch) sse[i] = model.inertia_ labels.append(model.labels_) clusters_centers[i] = model.cluster_centers_ plt.figure() plt.plot(list(sse.keys()), list(sse.values())) plt.show() labels_array = np.hstack(labels) comp_ids_list = np.hstack(comp_ids) #Joining compound IDs and cluster labels compound_clusters = pd.DataFrame({'comp_ID':comp_ids_list, 'cluster_label':labels_array}) compound_clusters.info() compound_clusters.to_csv("".join((absPath, "data/", protein_type, "/resampling_before_clustering/compound_clusters.csv")), header=True) compounds_df_filtered["fingerprint"] = compounds_df_filtered[[i for i in range(881)]].apply(lambda row: "".join(row.values.astype(str)), axis=1) compounds_df_filtered.head() #merging compound names, compound clusters and fingerprints activity_with_IDs = pd.merge(df_complete[["prot", "Y", "fingerprint"]], compounds_df_filtered, on=["fingerprint"], how='left') activity_with_IDs.info() activity_with_IDs.head() activity_df_clusters = pd.merge(activity_with_IDs, compound_clusters, on=["comp_ID"], how="left") activity_df_clusters.info() activity_df_clusters.head() 
activity_df_clusters.to_csv("".join((absPath, "data/", protein_type, "/resampling_before_clustering/activity_clusters.csv")), sep="\t", header=True) ``` ### Training test split ``` nfolds = 10 print(activity_df_clusters.info()) print(activity_df_clusters.head()) #How many pairs are there for each cluster? label_count = activity_df_clusters["cluster_label"].value_counts() print(label_count) if not os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/"))): os.makedirs("".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/"))) # Histogram bins = np.arange(0, 100, 1) # fixed bin size plt.xlim([min(activity_df_clusters.loc[:,'cluster_label'])-5, max(activity_df_clusters.loc[:,'cluster_label'])+5]) plt.hist(activity_df_clusters['cluster_label'], bins=bins, alpha=0.5, edgecolor='black', linewidth=1.2) plt.title('Distribution of compounds clusters') plt.xlabel('clusters') plt.ylabel('samples per cluster') plt.show() plt.savefig("".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/histogram_clusters.png"))) #we create nfolds differents splitting partitions for i in range(nfolds): compounds_classif = accumulated_size_clusters(activity_df_clusters) compounds_classif = training_test_split(compounds_classif, 80, 10, 10, i) #Joining smiles-label dataframe with label information dataframe name_column = "splitting_" + str(i) activity_df_clusters = pd.merge(compounds_classif.loc[:, ["cluster_label", name_column]], activity_df_clusters, on="cluster_label") #Checking that all the partitions are more or less the same size for i in range(nfolds): name_column = "splitting_" + str(i) print(activity_df_clusters[name_column].value_counts()) activity_df_clusters.info() #Now we should check number of actives/inactives per splitting fold count_list = [] for i in range(nfolds): name_column = "splitting_" + str(i) count_split = 
activity_df_clusters.loc[:,[name_column, "Y"]].groupby([name_column, "Y"]).size().unstack(fill_value=0) count_split_df = pd.DataFrame(count_split) count_split_df_melt = pd.melt(count_split_df.reset_index(), id_vars = name_column, value_vars=[0.0,1.0]) count_split_df_melt["splitting_fold"] = name_column count_split_df_melt = count_split_df_melt.rename(columns = {name_column:"split_set"}) print(count_split) count_list.append(count_split_df_melt) #preparing dataframe to check labels distribution across splitting sets count_list_df = pd.concat(count_list, axis=0) vals_to_replace = {0:'training_set', 1:'validation_set', 2:'test_set'} vals_to_replace2 = dict(zip(["splitting_"+str(i) for i in range(nfolds)], range(nfolds))) count_list_df['split_set'] = count_list_df['split_set'].map(vals_to_replace) count_list_df['splitting_fold'] = count_list_df['splitting_fold'].map(vals_to_replace2) p = (ggplot(count_list_df, aes(y="value")) + geom_bar(aes(x="factor(splitting_fold)", fill="Y"), stat="identity") + facet_grid(".~split_set") + xlab("splitting fold") + ylab("number of compounds")+ theme(legend_title=element_blank())) p ggsave(filename="".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/activity_distribution_clusters.pdf")), plot=p, dpi=300) p #Now we should check coincidence between clusters in splitting folds cluster_labels_list = [] for i in range(nfolds): cluster_labels = {} name_column = "splitting_" + str(i) count_split_cluster = activity_df_clusters.loc[:,[name_column, "cluster_label"]].groupby([name_column, "cluster_label"]).size().unstack(fill_value=0) #binarizing binary_df = count_split_cluster >0 cluster_lists = binary_df.apply(lambda x: binary_df.columns[x == True], axis=1) for idx in range(len(cluster_lists)): cluster_labels[idx] = list(cluster_lists[idx]) cluster_labels_list.append(cluster_labels) #print(count_split_cluster) # Now we compute sets for drawing venn diagrams for i in range(nfolds): len_train = 
len(cluster_labels_list[i][0]) len_val = len(cluster_labels_list[i][1]) len_test = len(cluster_labels_list[i][2]) len_train_val = len(set(cluster_labels_list[i][0]). intersection(set(cluster_labels_list[i][1]))) len_train_test = len(set(cluster_labels_list[i][0]). intersection(set(cluster_labels_list[i][2]))) len_val_test = len(set(cluster_labels_list[i][1]). intersection(set(cluster_labels_list[i][2]))) len_train_val_test = len(set(cluster_labels_list[i][0]). intersection(set(cluster_labels_list[i][1])).intersection(set(cluster_labels_list[i][2]))) plt.figure(figsize=(4,4)) venn3(subsets = (len_train, len_val, len_train_val, len_test, len_train_test, len_val_test, len_train_val_test), set_labels = ("training", "validation", "test"), alpha = 0.5) plt.title("splitting fold " + str(i)) plt.savefig("".join((absPath, "data/", protein_type, "/resampling_before_clustering/preprocessing_figures/venn_clusters_", str(i), ".png"))) #falta la info de las secuencias, uniprot ID, prot_family activity_file = "".join((absPath, "data/", protein_type, "_activity.csv")) activity_df = pd.read_csv(activity_file, sep="\t") print(activity_df.info()) print(activity_df.head()) unique_prots_df = activity_df[activity_df["DeepAffinity Protein ID"].isin(unique_prots)].drop_duplicates(["DeepAffinity Protein ID"]) unique_prots_df = unique_prots_df[["DeepAffinity Protein ID", "Uniprot ID", "Sequence", "family"]] unique_prots_df.info() activity_df_clusters.rename(columns={"prot": "DeepAffinity Protein ID"}, inplace=True) activity_df_clusters.info() activity_clusters_prot_info = pd.merge(activity_df_clusters, unique_prots_df, "left", on=["DeepAffinity Protein ID"]) print(activity_clusters_prot_info.info()) print(activity_clusters_prot_info.head()) #despues de dividir en training, test y validation #saving data into a HDF5 #Defining HDF5 table-type for storing data class Protein_Compound_Complex(IsDescription): #CID = UInt16Col() da_comp_id = StringCol(4) da_prot_id = StringCol(4) uniprot_id = 
StringCol(6) #activity = Float16Col() label = UInt16Col() #canonical_smiles = StringCol(100) sequence = StringCol(2000) prot_family = StringCol(5) comp_cluster = UInt16Col() splitting_0 = UInt8Col() splitting_1 = UInt8Col() splitting_2 = UInt8Col() splitting_3 = UInt8Col() splitting_4 = UInt8Col() splitting_5 = UInt8Col() splitting_6 = UInt8Col() splitting_7 = UInt8Col() splitting_8 = UInt8Col() splitting_9 = UInt8Col() fingerprint = StringCol(900) #open a HDF5 file with write options file_h5 = open_file("".join((absPath, "data/", protein_type,"/resampling_before_clustering/compounds_activity.h5")), "w") root = file_h5.root group = file_h5.create_group(root, "activity") table = file_h5.create_table('/activity', "prot_comp", Protein_Compound_Complex) pair = table.row for index,row in activity_clusters_prot_info.iterrows(): #pair["CID"] = row["CID"] pair["da_comp_id"] = row["comp_ID"] pair["da_prot_id"] = row["DeepAffinity Protein ID"] pair["uniprot_id"] = row["Uniprot ID"] #pair["activity"] = row["activity"] pair["label"] = row["Y"] #pair["canonical_smiles"] = row["Canonical SMILE"] pair["sequence"] = row["Sequence"] pair["prot_family"] = row["family"] pair["comp_cluster"] = row["cluster_label"] pair["fingerprint"] = row["fingerprint"] for i in range(nfolds): name_col = "splitting_" + str(i) pair[name_col] = row[name_col] pair.append() table.flush() file_h5.close() #Opening HDF5 with data filee = "".join((absPath, "data/", protein_type,"/resampling_before_clustering/compounds_activity.h5")) f = h5py.File(filee, 'r') group = '/activity' table = "prot_comp" #shuffling data indices n_samples = len(f[group][table]) sample_indices = np.arange(0, n_samples) sample_indices = np.random.permutation(sample_indices) #creating folder to storage splitting lists if it does not exist if not os.path.exists("".join((absPath, "data/", protein_type, "/resampling_before_clustering/splitting_lists/"))): os.makedirs("".join((absPath, "data/", protein_type, 
"/resampling_before_clustering/splitting_lists/"))) for i in range(nfolds): column_name = "splitting_" + str(i) training_list, validation_list, test_list = splitting_division(f, group, table, sample_indices, column_name) pickle_filename = "".join((absPath, "data/", protein_type, "/resampling_before_clustering/splitting_lists/", column_name, "_list.pickle")) with open(pickle_filename, "wb") as handle: pickle.dump((training_list, validation_list, test_list), handle) ```
github_jupyter
``` import graphlab as gl import pickle import pandas as pd import numpy as np from collections import Counter data_items = pickle.load(open("/Users/marvinbertin/Github/family_style_chat_bot/data/user_by_cuisine_by_dish_ratings.pkl", 'rb')) data_cuisine = pickle.load(open("/Users/marvinbertin/Github/family_style_chat_bot/data/user_by_cuisine_ratings.pkl", 'rb')) df_cuisine = pd.DataFrame(data_cuisine) df_cuisine.head() df_cuisine.pivot(index='user_id', columns="item_id") data_items.keys() class group_recommender(object): def __init__(self, cuisine_sf, dict_cuisine_items): self.cuisine = gl.SFrame(cuisine_sf) self.cuisine_items = dict_cuisine_items def recommend(self, group_list): group_name = "_".join(group_list) sf_avg_user = self.cuisine.filter_by(group_list, "user_id") \ .groupby(key_columns='item_id', operations={'rating': gl.aggregate.MEAN('rating')}) sf_avg_user.add_column(gl.SArray([group_name] * len(sf_avg_user)), "user_id") # print sf_avg_user sf_new = self.cuisine.append(sf_avg_user) model = gl.recommender.create(sf_new, target='rating') results = model.recommend([group_name], exclude_known=False) # print results result_cuisine = results["item_id"][:3] option_list = [] for cuisine in result_cuisine: sf_items = gl.SFrame(self.cuisine_items[cuisine]) model_items = gl.recommender.create(sf_items, target='rating') results_items = model_items.recommend(group_list, exclude_known=False, k = 2) # print results_items if cuisine == "Pizza": group_size = len(group_list) num_pizza = int(group_size / 1.5) item_results = [item for item, count in Counter(results_items["item_id"]).most_common()][:num_pizza] option_list.append(("Pizza Party!", item_results)) else: group_size = len(group_list) item_results = [item for item, count in Counter(results_items["item_id"]).most_common()][:group_size] option_list.append((cuisine, item_results)) return option_list group_list = np.random.choice(df_cuisine["user_id"].unique(), size = 2, replace=False) print group_list model = 
group_recommender(df_cuisine, data_items) result = model.recommend(group_list) result ```
github_jupyter
``` %matplotlib inline %reload_ext autoreload %autoreload 2 # ๅคš่กŒ่พ“ๅ‡บ from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" from fastai import * from fastai.text import * doc(Config) ``` - IMDB ็ฒพ็ฎ€ๆ•ฐๆฎ ``` path = untar_data(URLs.IMDB) path path.ls() BATCH = 32 # data_lm = (TextList.from_folder(path) # #Inputs: all the text files in path # .filter_by_folder(include=['train', 'test']) # #We may have other temp folders that contain text files so we only keep what's in train and test # .split_by_rand_pct(0.1) # #We randomly split and keep 10% (10,000 reviews) for validation # .label_for_lm() # #We want to do a language model so we label accordingly, ่‡ชๅทฑๅฐฑๆ˜ฏ่‡ชๅทฑ็š„ๆ ‡็ญพ # .databunch(bs=BATCH)) # data_lm.save('data_lm') data_lm = load_data(path, 'data_lm', bs=BATCH) ``` ## ๅˆ›ๅปบๆจกๅž‹ ``` learn_lm = language_model_learner(data_lm, Transformer, drop_mult=0.3) learn_lm.loss_func learn_lm.opt_func learn_lm.model ``` ### ็ผ–็ ๅ™จ ``` encoder = get_model(learn_lm.model)[0] encoder ``` - ็”Ÿๆˆๅ‡็š„่ฏ„่ฎบ๏ผŒไฝฟ็”จ่ฏญ่จ€ๆจกๅž‹้ข„ๆต‹ ``` TEXT = "The color of the sky is" N_WORDS = 40 N_SENTENCES = 2 print("\n\n".join(learn_lm.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES))) ``` - temperature ๆŽงๅˆถ็”Ÿๆˆๆ–‡ๆœฌ็š„้šๆœบๆ€ง ``` print("\n\n".join(learn_lm.predict(TEXT, N_WORDS, temperature=0.1) for _ in range(N_SENTENCES))) ``` - ้ข„่ฎญ็ปƒๆ•ฐๆฎ้›† ``` tr_itos = pickle.load(open(Config().model_path()/'transformer/itos_tfmer.pkl', 'rb')) tr_itos[:10] len(tr_itos), len(data_lm.vocab.itos) # wiki ่ฏๆฑ‡๏ผŒ IMDB ่ฏๆฑ‡ unks[:16] ``` ## ็ฒพ่ฐƒๆจกๅž‹ ``` learn_lm.lr_find() learn_lm.recorder.plot(skip_end=10) learn_lm.fit_one_cycle(1, 1e-2, moms=(0.8, 0.7)) learn_lm.save('transformer_fit_1') learn_lm=None gc.collect() ``` - ไฟๅญ˜ ``` data_lm = load_data(path, 'data_lm', bs=BATCH//2) learn_lm = language_model_learner(data_lm, Transformer, drop_mult=0.3) learn_lm.unfreeze() 
learn_lm.load('transformer_fit_1'); learn_lm.fit_one_cycle(1, 1e-3, moms=(0.8,0.7)) learn_lm.save('transformer_fit_2') learn_lm=None gc.collect() learn_lm = language_model_learner(data_lm, Transformer, drop_mult=0.3) learn_lm.load('transformer_fit_2'); TEXT = "i liked this movie because" N_WORDS = 40 N_SENTENCES = 2 print("\n\n".join(learn_lm.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES))) print("\n\n".join(learn_lm.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES))) learn_lm.save_encoder('fine_tuned_enc') learn_lm = None gc.collect() ``` ## ่ฏญไน‰ๅˆ†็ฑปไปปๅŠก ``` learn_c = None gc.collect() path = untar_data(URLs.IMDB) BATCH = 16 # data_clas = (TextList.from_folder(path, vocab=data_lm.vocab) # #grab all the text files in path # .split_by_folder(valid='test') # #split by train and valid folder (that only keeps 'train' and 'test' so no need to filter) # .label_from_folder(classes=['neg', 'pos']) # #label them all with their folders # .databunch(bs=BATCH)) # data_clas.save('imdb_textlist_class') data_clas = load_data(path, 'imdb_textlist_class', bs=BATCH//4) data_clas.show_batch() learn_c = text_classifier_learner(data_clas, Transformer, drop_mult=0.5) learn_c.model learn_c.loss_func learn_c.load_encoder('fine_tuned_enc') learn_c.lr_find() learn_c.recorder.plot() learn_c.fit_one_cycle(3, 5e-3, moms=(0.8, 0.7)) learn_c.save('fine1') learn_c = None gc.collect() ```
github_jupyter
``` %matplotlib inline %config InlineBackend.figure_format = 'retina' import asyncio import aiohttp import json import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import requests import seaborn as sns from ast import literal_eval from collections import defaultdict pd.options.display.max_rows = 200 pd.options.display.max_columns = 50 from IPython.core.display import display, HTML display(HTML("<style>.container { width:80% !important; }</style>")) # Copied from pyencoded-tools/encodedcc.py to avoid dependency. class ENC_Key: def __init__(self, keyfile, keyname): if os.path.isfile(str(keyfile)): keys_f = open(keyfile, 'r') keys_json_string = keys_f.read() keys_f.close() keys = json.loads(keys_json_string) else: keys = keyfile key_dict = keys[keyname] self.authid = key_dict['key'] self.authpw = key_dict['secret'] self.server = key_dict['server'] if not self.server.endswith("/"): self.server += "/" class ENC_Connection(object): def __init__(self, key): self.headers = {'content-type': 'application/json', 'accept': 'application/json'} self.server = key.server self.auth = (key.authid, key.authpw) # Define key if private data desired. key = ENC_Key(os.path.expanduser("~/keypairs.json"), 'prod') ``` ## Get accessions for all replaced items ``` # Pull accession of all Items with replaced status. url = 'https://www.encodeproject.org/search/'\ '?type=File&type=Dataset&type=Donor&type=Library'\ '&type=Pipeline&type=Biosample&type=AntibodyLot&status=replaced'\ '&limit=all&format=json' r = requests.get(url, auth=(key.authid, key.authpw)) search_results = r.json()['@graph'] len(search_results) accessions = set() for result in search_results: accessions.add(result['accession']) len(accessions) ``` ## Search for each accession and check length of results ``` # loop.close() # loop = asyncio.new_event_loop() # asyncio.set_event_loop(loop) # Asyncio request. 
result_length = [] bad_accessions = [] request_auth = aiohttp.BasicAuth(key.authid, key.authpw) async def get_json(url, sem): async with sem: async with aiohttp.ClientSession() as session: async with session.get(url, auth=request_auth) as resp: return await resp.json() async def get_request(accession, sem): url = 'https://www.encodeproject.org/'\ 'search/?type=Item&accession={}'\ '&limit=all&format=json'.format(accession) result = await get_json(url, sem) search_results = result['@graph'] num_results = len(search_results) result_length.append({'accession': accession, 'result_length': num_results}) if num_results > 1: bad_accessions.append({'accession': accession, 'results': search_results}) sem = asyncio.Semaphore(20) loop = asyncio.get_event_loop() loop.run_until_complete(asyncio.gather(*[get_request(accession, sem) for accession in accessions])); # # Search for each accession, count number of results. # counter = 0 # result_length = [] # bad_accessions = [] # for accession in accessions: # url = 'https://www.encodeproject.org/search/'\ # '?type=Item&accession={}'\ # '&limit=all&format=json'.format(accession) # r = requests.get(url, auth=(key.authid, key.authpw)) # search_results = r.json()['@graph'] # result_length.append({'accession': accession, # 'result_length': len(search_results)}) # if len(search_results) > 1: # bad_accessions.append({'accession': accession, # 'results': search_results}) # counter += 1 # if counter % 100 == 0: # print(".", end="") # if counter % 1000 == 0: # print("\n") # Make sure search results returned for each accession. 
#assert len(accessions) == counter pd.DataFrame(result_length).result_length.value_counts() len(bad_accessions) bad_accessions[0] duplicate_accession_data = [] for bad in bad_accessions: for item in bad['results']: duplicate_accession_data.append({'accession': item['accession'], 'file_format': item['file_format'], 'status': item['status'], 'dataset': item['dataset']}) duplicate_accessions = pd.DataFrame(duplicate_accession_data) duplicate_accessions.dataset.value_counts() ``` ## Associate duplicate accessions to Experiment lab. ``` experiment_list = duplicate_accessions.dataset.unique() search_ids = "&@id=".join(experiment_list) url = 'https://www.encodeproject.org/search/'\ '?type=Item&limit=all&frame=embedded&@id={}'.format(search_ids) r = requests.get(url, auth=(key.authid, key.authpw)) search_results = r.json()['@graph'] search_id_map = {} for experiment in search_results: search_id_map[experiment['@id']] = experiment['lab']['name'] duplicate_accessions['lab'] = duplicate_accessions.dataset.apply(lambda x: search_id_map[x]) print(*sorted(duplicate_accessions.lab.unique()), sep='\n') list(duplicate_accessions.accession.unique()) duplicate_accessions[duplicate_accessions.status == "replaced"].groupby(['lab', 'accession', 'status', 'file_format']).count().sort_index(0)[[]] duplicate_accessions.groupby(['lab', 'status', 'dataset', 'accession', 'file_format']).count().sort_index(1, 0) duplicate_accessions.groupby(['accession', 'status', 'file_format', 'lab', 'dataset', 'file_format']).count().sort_index(1, 0).unstack() duplicate_accessions ``` ## Data for all replaced Items ``` # Grab data of all replaced Items. 
replaced_data = [] url = 'https://www.encodeproject.org/search/'\ '?type=File&type=Dataset&type=Donor&type=Library'\ '&type=Pipeline&type=Biosample&type=AntibodyLot&status=replaced'\ '&frame=embedded&limit=all&format=json' r = requests.get(url, auth=(key.authid, key.authpw)) search_results = r.json()['@graph'] na = 'not_available' for result in search_results: sub_by = result.get('submitted_by', {}) if isinstance(sub_by, str): submitted_by = sub_by else: submitted_by = sub_by.get('title', na) lab = result.get('lab', {}) if isinstance(lab, str): lab_name = lab else: lab_name = lab.get('name', na) item_data = {'accession': result['accession'], 'submitted_by': submitted_by, 'derived_from': result.get('derived_from', na), 'superseded_by': result.get('superseded_by', na), 'supersedes': result.get('supersedes', na), '@id': result['@id'], 'alternate_accessions': result.get('alternate_accessions', na), 'dataset': result.get('dataset', na), 'lab_name': lab_name, 'date_created': result.get('date_created', na), '@type': result['@type'][0], 'output_type': result.get('output_type', na), 'file_format': result.get('file_format', na), 'assembly': result.get('assembly', na), 'paired_with': result.get('paired_with', na), 'paired_end': result.get('paired_end', na), 'file_format_type': result.get('file_format_type', na), 'technical_replicates': result.get('technical_replicates', na), 'replicate_uuid': result.get('replicate', {}).get('uuid', na), 'md5sum': result.get('md5sum', na), 'content_md5sum': result.get('content_md5sum', na), 'status': result['status'], 'product_id': result.get('product_id', na), 'culture_start_date': result.get('culture_start_date', na), 'biosample_type': result.get('biosample_type', na), 'description': result.get('description', na), 'treatments': result.get('treatments', na) } replaced_data.append(item_data) replaced_data[900] len(replaced_data) def parse_lab_name(lab): if isinstance(lab, str): parse_lab = lab.replace("/", "").replace("labs", "") return 
parse_lab else: return lab[0] rd = pd.DataFrame(replaced_data) rd.lab_name = rd.lab_name.apply(lambda x: parse_lab_name(x)) rd.loc[rd.assembly.apply(lambda x: len(x) == 0), 'assembly'] = 'empty_list' rd.loc[rd.superseded_by.apply(lambda x: len(x) == 0), 'superseded_by'] = 'empty_list' rd.loc[rd.supersedes.apply(lambda x: len(x) == 0), 'supersedes'] = 'empty_list' rd.loc[rd.derived_from.apply(lambda x: len(x) == 0), 'derived_from'] = 'empty_list' rd.loc[rd.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list' rd.loc[rd.alternate_accessions.apply(lambda x: len(x) == 0), 'alternate_accessions'] = 'empty_list' rd.loc[rd.treatments.apply(lambda x: len(x) == 0), 'treatments'] = 'empty_list' ``` ## Check to see if replacement is similar to replaced (optional) ``` def drop_unique_fields(data): drop_fields = ['@id', '@accession', 'md5sum', 'content_md5sum', 'date_created'] data = {k: v for k, v in data.items() if k not in drop_fields} return data replacement_data = [] broken_pair = defaultdict(list) for accession in rd.accession.unique(): replaced_values = rd[rd.accession == accession].to_dict(orient='records')[0] url = 'https://www.encodeproject.org/{}/?format=json'.format(accession) r = requests.get(url, auth=(key.authid, key.authpw)) if (r.status_code == 200): result = r.json() sub_by = result.get('submitted_by', {}) if isinstance(sub_by, str): submitted_by = sub_by else: submitted_by = sub_by.get('title', na) lab = result.get('lab', {}) if isinstance(lab, str): lab_name = lab else: lab_name = lab.get('name', na) item_data = {'accession': result['accession'], 'submitted_by': submitted_by, '@id': result['@id'], 'alternate_accessions': result.get('alternate_accessions', na), 'dataset': result.get('dataset', na), 'lab_name': lab_name, 'date_created': result.get('date_created', na), '@type': result['@type'][0], 'output_type': result.get('output_type', na), 'file_format': result.get('file_format', na), 'assembly': result.get('assembly', 
na), 'paired_with': result.get('paired_with', na), 'paired_end': result.get('paired_end', na), 'file_format_type': result.get('file_format_type', na), 'technical_replicates': result.get('technical_replicates', na), 'replicate_uuid': result.get('replicate', {}).get('uuid', na), 'md5sum': result.get('md5sum', na), 'content_md5sum': result.get('content_md5sum', na), 'status': result['status'], 'product_id': result.get('product_id', na), 'culture_start_date': result.get('culture_start_date', na), 'biosample_type': result.get('biosample_type', na), 'description': result.get('description', na), 'treatments': result.get('treatments', na) } item_temp = pd.DataFrame([item_data]) item_temp.lab_name = item_temp.lab_name.apply(lambda x: parse_lab_name(x)) item_temp.loc[item_temp.assembly.apply(lambda x: len(x) == 0), 'assembly'] = 'empty_list' item_temp.loc[item_temp.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list' item_temp.loc[item_temp.alternate_accessions.apply(lambda x: len(x) == 0), 'alternate_accessions'] = 'empty_list' item_temp.loc[item_temp.treatments.apply(lambda x: len(x) == 0), 'treatments'] = 'empty_list' item_temp = item_temp.to_dict(orient='records')[0] replaced_dict = drop_unique_fields(replaced_values) replacement_dict = drop_unique_fields(replaced_dict) if replaced_dict != replacement_dict: broken_pair['accession'].append(item_data) replacement_data.append(item_data) len(replacement_data) ``` ## Data for portal redirect of replaced accessions ``` # loop.close() # loop = asyncio.new_event_loop() # asyncio.set_event_loop(loop) # Asyncio request. 
# For every replaced accession, hit the portal directly: a 404 means the
# accession does not redirect to any live Item; otherwise capture the Item
# the portal redirects to.
# NOTE(review): this redefines get_request from the earlier cell with a
# different signature — cells must be run top-to-bottom.
replaced_by_file = []
na = 'not_available'
async def get_request(session, accession):
    url = 'https://www.encodeproject.org/{}'.format(accession)
    async with session.get(url, auth=request_auth, timeout=None) as response:
        if response.status == 404:
            item_data = {'searched_accession': accession,
                         'redirected_to_accession': 'no_result'}
            replaced_by_file.append(item_data)
        else:
            result = await response.json()
            sub_by = result.get('submitted_by', {})
            if isinstance(sub_by, str):
                submitted_by = sub_by
            else:
                submitted_by = sub_by.get('title', na)
            lab = result.get('lab', {})
            if isinstance(lab, str):
                lab_name = lab
            else:
                lab_name = lab.get('name', na)
            item_data = {'accession': result['accession'],
                         'submitted_by': submitted_by,
                         'derived_from': result.get('derived_from', na),
                         'superseded_by': result.get('superseded_by', na),
                         'supersedes': result.get('supersedes', na),
                         '@id': result['@id'],
                         'alternate_accessions': result.get('alternate_accessions', na),
                         'dataset': result.get('dataset', na),
                         'lab_name': lab_name,
                         'date_created': result.get('date_created', na),
                         '@type': result['@type'][0],
                         'output_type': result.get('output_type', na),
                         'file_format': result.get('file_format', na),
                         'assembly': result.get('assembly', na),
                         'paired_with': result.get('paired_with', na),
                         'paired_end': result.get('paired_end', na),
                         'file_format_type': result.get('file_format_type', na),
                         'technical_replicates': result.get('technical_replicates', na),
                         'replicate_uuid': result.get('replicate', {}).get('uuid', na),
                         'md5sum': result.get('md5sum', na),
                         'content_md5sum': result.get('content_md5sum', na),
                         'status': result['status'],
                         'product_id': result.get('product_id', na),
                         'culture_start_date': result.get('culture_start_date', na),
                         'biosample_type': result.get('biosample_type', na),
                         'description': result.get('description', na),
                         'treatments': result.get('treatments', na)}
            replaced_by_file.append(item_data)
    # Crude progress indicator.
    if len(replaced_by_file) % 100 == 0:
        print(len(replaced_by_file))
async def create_session(accessions, loop):
    connector = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
    async with aiohttp.ClientSession(connector=connector, loop=loop) as session:
        results = await asyncio.gather(*[get_request(session, accession) for accession in accessions])
loop = asyncio.get_event_loop()
loop.run_until_complete(create_session(accessions, loop))
len(replaced_by_file)
len(accessions)
# (Two commented-out alternate implementations — a semaphore-based asyncio
# variant and a plain synchronous requests loop producing the same
# replaced_by_file records — were removed for readability.)
len(accessions)
len(replaced_by_file)
rbf = pd.DataFrame(replaced_by_file)
rbf = rbf.fillna('is_null')
rbf.lab_name = rbf.lab_name.apply(lambda x: parse_lab_name(x))
# Same empty-list -> sentinel normalization as applied to rd.
rbf.loc[rbf.assembly.apply(lambda x: len(x) == 0), 'assembly'] = 'empty_list'
rbf.loc[rbf.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list'
rbf.loc[rbf.superseded_by.apply(lambda x: len(x) == 0), 'superseded_by'] = 'empty_list'
rbf.loc[rbf.supersedes.apply(lambda x: len(x) == 0), 'supersedes'] = 'empty_list'
rbf.loc[rbf.derived_from.apply(lambda x: len(x) == 0), 'derived_from'] = 'empty_list'
# Reload a previously exported snapshot of non-redirecting replaced items.
df = pd.read_excel('replaced_items_no_redirect_06_12_2017.xlsx')
df.shape
df['@type'].value_counts()
dff = df[df['@type'] == 'File']
dff.dataset
def get_assay_type(experiment):
    # experiment is an @id path, e.g. '/experiments/ENCSR.../'.
    url = 'https://www.encodeproject.org{}?format=json'.format(experiment)
    r = requests.get(url, auth=(key.authid, key.authpw))
    result = r.json()
    return result.get('assay_term_name', 'na')
def get_lab_name(experiment):
    url = 'https://www.encodeproject.org/{}/?format=json'.format(experiment)
    r = requests.get(url, auth=(key.authid, key.authpw))
    result = r.json()
    return result.get('lab', {}).get('name', 'na')
dff.dataset
# One request per row — slow but simple.
dff['assay_type'] = dff.dataset.apply(lambda x: get_assay_type(x))
dff.assay_type.value_counts()
dff['experiment_lab'] = dff.dataset.apply(lambda x: get_lab_name(x))
#rbf.to_csv("replaced_by_search.tsv", sep="\t")
```

## Merge redirect data with replaced Item data

```
# Replaced items whose accession does not redirect anywhere on the portal.
no_redirect_accessions = rd[rd.accession.isin(rbf[rbf.redirected_to_accession == "no_result"].searched_accession.values)]
no_redirect_accessions = no_redirect_accessions.sort_values('@type').reset_index(drop=True)
no_redirect_accessions.loc[no_redirect_accessions.description.apply(lambda x: len(x) == 0), 'description'] = 'empty_string'
no_redirect_accessions['status'].value_counts()
no_redirect_accessions.content_md5sum.value_counts()
no_redirect_accessions.description.value_counts()
no_redirect_accessions.lab_name.value_counts()
no_redirect_accessions['@type'].value_counts()
no_redirect_accessions[no_redirect_accessions.md5sum != "not_available"].accession.unique()
len(no_redirect_accessions[no_redirect_accessions.md5sum != "not_available"].accession.unique())
len(no_redirect_accessions[no_redirect_accessions.md5sum == 'not_available'].accession.unique())
#.to_excel('replaced_items_no_redirect_06_12_2017.xlsx')
```

## Search for possible replacement files with same MD5sum

```
# (A commented-out synchronous requests version of the md5 search below was
# removed for readability.)
# Recycle the event loop before the next batch of requests.
loop.close()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
possible_replacements = defaultdict(list)
async def get_request(session, md5):
    # Find non-replaced Items sharing this md5sum (%21 is '!', i.e. status != replaced).
    url = 'https://www.encodeproject.org/search/'\
        '?type=Item&md5sum={}&status%21=replaced'\
        '&frame=embedded&limit=all&format=json'.format(md5)
    async with session.get(url, auth=request_auth) as response:
        r = await response.json()
        results = r['@graph']
        if len(results) == 0:
            item_data = {'md5sum': md5,
                         'accession': 'no_result'}
            possible_replacements[md5].append(item_data)
        else:
            for result in results:
                lab = result.get('lab', {})
                if isinstance(lab, str):
                    lab_name = lab
                else:
                    lab_name = lab.get('name', na)
                possible_replacements[md5].append({'accession': result['accession'],
                                                   '@id': result['@id'],
                                                   'alternate_accessions': result.get('alternate_accessions', na),
                                                   'dataset': result.get('dataset', na),
                                                   'lab_name': lab_name,
                                                   'date_created': result.get('date_created', na),
                                                   '@type': result['@type'][0],
                                                   'output_type': result.get('output_type', na),
                                                   'file_format': result.get('file_format', na),
                                                   'assembly': result.get('assembly', na),
                                                   'paired_with': result.get('paired_with', na),
                                                   'paired_end': result.get('paired_end', na),
                                                   'file_format_type': result.get('file_format_type', na),
                                                   'technical_replicates': result.get('technical_replicates', na),
                                                   'replicate_uuid': result.get('replicate', {}).get('uuid', na),
                                                   'md5sum': result.get('md5sum', na),
                                                   'content_md5sum': result.get('content_md5sum', na),
                                                   'status': result['status']
                                                   })
async def create_session(md5s, loop):
    conn = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
    async with aiohttp.ClientSession(connector=conn, loop=loop) as session:
        results = await asyncio.gather(*[get_request(session, md5) for md5 in md5s])
loop = asyncio.get_event_loop()
# [1:] skips the first unique md5sum — presumably the 'not_available'
# sentinel; TODO confirm ordering guarantees that.
loop.run_until_complete(create_session(no_redirect_accessions.md5sum.unique()[1:], loop))
len(possible_replacements)
possible_replacements
# NOTE(review): the comprehension's loop variable 'key' rebinds the
# module-level ENC_Key instance — any later cell using key.authid breaks.
possible_merge = [item for key, value in possible_replacements.items() for item in value if item['accession'] != 'no_result']
possible_merge = pd.DataFrame(possible_merge)
possible_merge = possible_merge.rename(columns={'accession': 'possible_redirect_accession', 'status': 'possible_redirect_status'})
possible_merge.loc[possible_merge.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list'
possible_merge.shape
# Replaced files whose md5sum matched nothing live on the portal.
no_matches = no_redirect_accessions[~(no_redirect_accessions.md5sum.isin(possible_merge.md5sum.values))].reset_index(drop=True)
# Pair each md5-matched candidate with the replaced record; _old columns come
# from the replaced item, _new from the candidate replacement.
pm = possible_merge.merge(no_redirect_accessions, how='left', suffixes=('_new', '_old'),
                          on=['md5sum', '@type', 'file_format', 'file_format_type'])[['md5sum', 'accession', 'status',
                                                                                      'possible_redirect_accession', 'possible_redirect_status',
                                                                                      '@type', 'file_format', 'file_format_type',
                                                                                      'assembly_old', 'assembly_new',
                                                                                      'dataset_old', 'dataset_new',
                                                                                      'date_created_old', 'date_created_new',
                                                                                      'lab_name_old', 'lab_name_new',
                                                                                      'technical_replicates_old', 'technical_replicates_new',
                                                                                      '@id_old', '@id_new',
                                                                                      'output_type_old', 'output_type_new',
                                                                                      'paired_end_old', 'paired_end_new',
                                                                                      'paired_with_old', 'paired_with_new',
                                                                                      'replicate_uuid_old', 'replicate_uuid_new',
                                                                                      'alternate_accessions_old', 'alternate_accessions_new',
                                                                                      'content_md5sum_old', 'content_md5sum_new']]
pm#.to_excel('possible_redirect_accessions_for_replaced_files_06_12_2017.xlsx')
no_redirect_accessions[no_redirect_accessions.accession == 'ENCFF133IYK']
pm.shape
len(pm.accession.unique())
# Candidates in the very same dataset: safe to treat as the replacement.
replacements_exact_match = pm[(pm.dataset_old == pm.dataset_new)].reset_index(drop=True)
replacements_exact_match.shape
replacements_exact_match[[col for col in replacements_exact_match]]
replacements_different = pm[~(pm.dataset_old == pm.dataset_new)].reset_index(drop=True)
replacements_different.shape
replacements_different
# Different datasets but same MD5. Have to update replaced file to have replacement dataset.
replacements_update_dataset = replacements_different[['@id_old', 'dataset_new']].rename(columns={'@id_old': '@id', 'dataset_new': 'dataset'})
#replacements_update_dataset.to_csv('../../update_dataset_of_replaced_filed_matching_md5_06_27_2017.tsv', index=False, sep='\t')
# Now set exact match
# Build the patch: replacement keeps its accession, replaced accession moves
# into alternate_accessions.
replacements_patch = replacements_exact_match[['possible_redirect_accession', 'accession']].rename(columns={'accession': 'alternate_accessions:array',
                                                                                                            'possible_redirect_accession': 'accession'})
replacements_patch = replacements_patch.sort_values("alternate_accessions:array")
replacements_patch.shape
# Collapse multiple replaced accessions per replacement into one CSV cell.
flat_list_patch = []
for accession in replacements_patch.accession.unique():
    data = {'accession': accession,
            'alternate_accessions:array': ", ".join(replacements_patch[replacements_patch.accession == accession]\
                                                    ['alternate_accessions:array'].values)}
    flat_list_patch.append(data)
replacements_patch_flat_list = pd.DataFrame(flat_list_patch)
#replacements_patch_flat_list.to_csv('../../replaced_with_matching_replacements_patch_06_27_2017.tsv', sep="\t", index=False)
#replacements_different.sort_values('possible_redirect_accession').to_excel('replaced_same_md5_mismatched_dataset_06_14_2017.xlsx', index=False) ``` ## Extract the MD5sums with no matching replacements ``` no_matching_md5_replacements = [item['md5sum'] for key, value in possible_replacements.items() for item in value if item['accession'] == 'no_result'] len(pd.DataFrame(list(set(no_matching_md5_replacements))).rename(columns={0: 'md5sum'}).merge(no_redirect_accessions, how='left', on='md5sum')['accession'].unique()) ``` ## Search for similar types of Files for possible replacement ``` no_redirect_file = no_redirect_accessions[no_redirect_accessions['@type'] == 'File'].reset_index(drop=True) no_redirect_file na = 'not_available' possible_replacements = defaultdict(list) async def get_request_two(session, url, r): async with session.get(url, auth=request_auth) as response_two: result_one = await response_two.json() search_results = result_one['@graph'] if len(search_results) == 0: possible_replacements[r['accession']].append({'accession': r['accession'], 'possible_replacement_accession': 'no_result'}) for result in search_results: lab = result.get('lab', {}) sub_by = result.get('submitted_by', {}) if isinstance(sub_by, str): submitted_by = sub_by else: submitted_by = sub_by.get('title', na) if isinstance(lab, str): lab_name = lab else: lab_name = lab.get('name', na) possible_replacements[r['accession']].append({'accession': r['accession'], 'possible_replacement_accession': result['accession'], '@id': result['@id'], 'alternate_accessions': result.get('alternate_accessions', na), 'dataset': result.get('dataset', na), 'lab_name': lab_name, 'date_created': result.get('date_created', na), '@type': result['@type'][0], 'output_type': result.get('output_type', na), 'file_format': result.get('file_format', na), 'assembly': result.get('assembly', na), 'paired_with': result.get('paired_with', na), 'paired_end': result.get('paired_end', na), 'file_format_type': 
result.get('file_format_type', na), 'technical_replicates': result.get('technical_replicates', na), 'replicate_uuid': result.get('replicate', {}).get('uuid', na), 'md5sum': result.get('md5sum', na), 'content_md5sum': result.get('content_md5sum', na), 'status': result['status'], 'submitted_by': submitted_by, 'derived_from': result.get('derived_from', na), 'superseded_by': result.get('superseded_by', na), 'supersedes': result.get('supersedes', na) }) async def get_request_one(session, file_id): url = 'https://www.encodeproject.org/{}/?format=json'.format(file_id) async with session.get(url, auth=request_auth) as response_one: result_one = await response_one.json() r = result_one file_format = r['file_format'] output_type = r['output_type'] dataset = r['dataset'] assembly = r.get('assembly', '*') try: assay_term_name = r['quality_metrics'][0]['assay_term_name'] url = 'https://www.encodeproject.org/search/?type=File&file_format={}'\ '&output_type={}&quality_metrics.assay_term_name={}'\ '&dataset={}&assembly={}&format=json&frame=embedded'\ '&status!=replaced'.format(file_format, output_type, assay_term_name, dataset, assembly) except IndexError: url = 'https://www.encodeproject.org/search/?type=File&file_format={}'\ '&output_type={}&dataset={}&assembly={}&format=json&frame=embedded'\ '&status!=replaced'.format(file_format, output_type, dataset, assembly) if assembly == '*': url = url.replace('&assembly=*', '&assembly!=*') result_two = await get_request_two(session, url, r) async def create_session(file_ids, loop): conn = aiohttp.TCPConnector(keepalive_timeout=10, limit=100) async with aiohttp.ClientSession(connector=conn, loop=loop) as session: results = await asyncio.gather(*[get_request_one(session, file_id) for file_id in file_ids]) loop = asyncio.get_event_loop() loop.run_until_complete(create_session(no_redirect_file['@id'].unique(), loop)) len(possible_replacements) possible_replacements ``` ## Fill in empty_lists for list fields ``` replacement_search = 
pd.DataFrame([item for key, value in possible_replacements.items() for item in value]) replacement_search = replacement_search.fillna('isnull') replacement_search.loc[replacement_search.alternate_accessions.apply(lambda x: len(x) == 0), 'alternate_accessions'] = 'empty_list' replacement_search.loc[replacement_search.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list' replacement_search.loc[replacement_search.superseded_by.apply(lambda x: len(x) == 0), 'superseded_by'] = 'empty_list' replacement_search.loc[replacement_search.supersedes.apply(lambda x: len(x) == 0), 'supersedes'] = 'empty_list' replacement_search.loc[replacement_search.derived_from.apply(lambda x: len(x) == 0), 'derived_from'] = 'empty_list' no_redirect_file.loc[no_redirect_accessions.alternate_accessions.apply(lambda x: len(x) == 0), 'alternate_accessions'] = 'empty_list' no_redirect_file.loc[no_redirect_accessions.technical_replicates.apply(lambda x: len(x) == 0), 'technical_replicates'] = 'empty_list' no_redirect_file.loc[no_redirect_file.superseded_by.apply(lambda x: len(x) == 0), 'superseded_by'] = 'empty_list' no_redirect_file.loc[no_redirect_file.supersedes.apply(lambda x: len(x) == 0), 'supersedes'] = 'empty_list' no_redirect_file.loc[no_redirect_file.derived_from.apply(lambda x: len(x) == 0), 'derived_from'] = 'empty_list' rsm = replacement_search.merge(no_redirect_file, how='left', suffixes=('_new', '_old'), on=['accession']) rsm.shape ``` ## Substitute replaced file_ids with replacement file_ids in derived_from fields ``` rsm = rsm[~(rsm.status_new.isin(['revoked', 'deleted']))] # Extract lookup table from data with just one result. # If derived_from File doesn't redirect then look up and see possible replacement. # Use that as fill in value of comparison. 
# One-result candidates whose technical replicates match the replaced File —
# the most trustworthy pairs, used to resolve derived_from references.
dfl = rsm[(rsm.possible_replacement_accession != 'no_result')
          & (rsm.technical_replicates_old == rsm.technical_replicates_new)].drop_duplicates('accession', keep=False).reset_index(drop=True)
dfl.shape
rsm[(rsm.possible_replacement_accession != 'no_result')
    & (rsm.technical_replicates_old == rsm.technical_replicates_new)].drop_duplicates('accession', keep=False).reset_index(drop=True).shape
# Create from previous iterations below.
# NOTE(review): matching_rep is not defined anywhere in this file — it comes
# from a later/earlier notebook run (out-of-order execution); this cell fails
# on a fresh kernel.
derived_from_lookup = pd.concat([dfl, matching_rep.drop_duplicates('accession', keep=False)], axis=0).drop_duplicates('accession').reset_index(drop=True)
len(derived_from_lookup.accession.unique())
def get_json(id):
    # NOTE(review): parameter shadows builtin id(), and this synchronous
    # get_json replaces the async one defined earlier in the notebook.
    url = 'https://www.encodeproject.org/{}/?format=json'.format(id)
    return requests.get(url, auth=(key.authid, key.authpw))
def parse_derived_from(x):
    """Rewrite UUID-style derived_from @ids to accession-style @ids,
    resolving replaced files through the portal redirect or, failing that,
    through derived_from_lookup. Sentinel strings pass through untouched."""
    if len(x) == 0 or x == 'not_available':
        return x
    new_list = []
    for y in x:
        y_id = y.split('/')[2]
        if y_id.startswith('ENC'):
            # Already an accession-style @id; keep as-is.
            new_list.append(y)
            continue
        else:
            r = get_json(y)
            try:
                accession = r.json()['accession']
                r = get_json(accession)
                if r.status_code == 404:
                    # Pull from local lookup table.
                    try:
                        accession_replacement = derived_from_lookup[derived_from_lookup.accession == accession]\
                            .possible_replacement_accession.values[0]
                        new_list.append('/files/{}/'.format(accession_replacement))
                    # If no results returned from one-result table.
                    except IndexError:
                        new_list.append(y)
                else:
                    accession_replacement = r.json()['accession']
                    new_list.append('/files/{}/'.format(accession_replacement))
            except KeyError:
                # Item JSON had no 'accession'; keep original and log it.
                print(y)
                print(x)
                new_list.append(y)
    return new_list
rsm_derived_from_old = rsm.derived_from_old.apply(lambda x: parse_derived_from(x))
rsm.derived_from_old = rsm_derived_from_old
rsm
# NOTE(review): cols is defined further down this notebook — out-of-order use.
rsm[~(rsm['@id_old'].isin(['/files/d9e23f37-9b33-41b9-b9df-0700ca87bc75/',
                           '/files/3efeced1-a3c5-4131-a721-7c5f743350a9/',
                           '/files/9fe192e9-af81-46f5-a16f-4d6b5cda577c/']))
    & (rsm.supersedes_new != 'not_available')][cols]
```

## Parse lists for comparison

```
# parse_list sorts and joins; applied to a sentinel STRING it sorts its
# characters, so lazy_dict maps those scrambled forms back to the sentinels.
lazy_dict = {'_,e,i,l,m,p,s,t,t,y': 'empty_list',
             'i,l,l,n,s,u': 'isnull',
             '_,a,a,a,b,e,i,l,l,n,o,t,v': 'not_available'}
def parse_list(x):
    # Canonicalize a list (or string) into a sorted comma-joined string so
    # _old/_new values compare order-independently.
    return ','.join([y.strip() for y in sorted(x)])
rsm.date_created_old = rsm.date_created_old.apply(lambda x: pd.to_datetime(x))
for field in ['technical_replicates_old', 'technical_replicates_new',
              'superseded_by_old', 'superseded_by_new',
              'supersedes_old', 'supersedes_new',
              'derived_from_old', 'derived_from_new']:
    rsm[field] = rsm[field].apply(lambda x: parse_list(x)).apply(lambda x: lazy_dict[x] if x in lazy_dict.keys() else x)
rsm[rsm.technical_replicates_old != rsm.technical_replicates_new][['technical_replicates_old', 'technical_replicates_new']]
rsm[rsm.accession == 'ENCFF721IVN'][cols]
rsm[rsm.derived_from_old != rsm.derived_from_new][['derived_from_old', 'derived_from_new']]
```

## Matching content_md5sum, ready to patch

```
# Same content_md5sum (and it is actually present) => safe to patch directly.
rsm_patch = rsm[(rsm.content_md5sum_old == rsm.content_md5sum_new)
                & (rsm.content_md5sum_old != 'not_available')].reset_index(drop=True)
# Column ordering used for display throughout the rest of the notebook.
first_cols = ['accession', 'possible_replacement_accession']
cols = first_cols + [col for col in sorted(rsm_patch.columns, reverse=True) if col not in first_cols]
rsm_patch[cols]
#rsm_patch[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession', 'accession': 'alternate_accessions:list'}).to_csv('../../matching_content_md5sum_patch_06_29_2017.tsv', sep='\t', index=False)
```

## Remove files to be patched

```
rsm = rsm[~(rsm.accession.isin(rsm_patch.accession.values))].reset_index(drop=True)
```

## Total Files that need replacement

```
len(rsm.accession.unique())
```

## Possible replacement with zero results

```
rsm_zero_result = rsm[rsm.possible_replacement_accession == 'no_result'].reset_index(drop=True)
len(rsm_zero_result.accession.unique())
rsm_zero_result.submitted_by_old.value_counts()
rsm_zero_result[cols]
# To set to deleted because no conservative IDR anymore.
#rsm_zero_result.loc[rsm_zero_result.submitted_by_old == 'J. Seth Strattan', 'status_old'] = 'deleted'
#rsm_zero_result[rsm_zero_result.submitted_by_old == 'J. Seth Strattan'][['@id_old', 'status_old']].rename(columns={'status_old': 'status', '@id_old': '@id'}).to_csv('../../zero_match_replaced_to_deleted_patch_06_28_2017.tsv', sep='\t', index=False)
```

### Check for superseded_by/supersedes field

```
rsm_zero_result.superseded_by_old.value_counts()
```

## Possible replacement with one result

```
# drop_duplicates(keep=False) leaves only accessions with exactly one candidate.
rsm_one_result = rsm[rsm.possible_replacement_accession != 'no_result'].drop_duplicates('accession', keep=False).reset_index(drop=True)
len(rsm_one_result)
rsm_one_result.submitted_by_old.value_counts()
rsm_one_result = rsm_one_result[cols]
rsm_one_result[rsm_one_result.submitted_by_old == "Diane Trout"]
```

### Check for superseded_by/supersedes field

```
rsm_one_result.superseded_by_old.value_counts()
rsm_one_result.supersedes_old.value_counts()
rsm_one_result.superseded_by_new.value_counts()
#rsm_one_result.supersedes_new.value_counts()
```

### Files that should be revoked instead of replaced?
```
# One-result candidates that already carry a superseded_by value: these should
# likely be revoked rather than replaced, so they are split off here.
rsm_one_result[(rsm_one_result.superseded_by_old != 'empty_list')][cols]
rsm_one_result_patch = rsm_one_result[(rsm_one_result.superseded_by_old != 'empty_list')].reset_index(drop=True)
rsm_one_result_patch[['accession', 'superseded_by_old']]
```

### Remove files with superseded_by values

```
rsm_one_result = rsm_one_result[~(rsm_one_result.accession.isin(rsm_one_result_patch.accession.values))].reset_index(drop=True)
rsm_one_result.shape
rsm_one_result[rsm_one_result.derived_from_old != rsm_one_result.derived_from_new][cols].submitted_by_old.value_counts()  #[['derived_from_old', 'derived_from_new']].values
rsm_one_result[(rsm_one_result.derived_from_old != rsm_one_result.derived_from_new) & (rsm_one_result.submitted_by_old == 'Anna Vlasova')][cols][['accession', 'possible_replacement_accession', 'derived_from_old', 'derived_from_new']]
rsm[(rsm['@type_old'] == 'File')]['@id_old'].unique()
```

### Replacements with one result and matching derived_from files != not_available

```
rsm_one_result[(rsm_one_result.derived_from_old == rsm_one_result.derived_from_new) & (rsm_one_result.derived_from_old != 'not_available')].shape
```

### Replacements with one result and derived_from both equal to not_available

```
rsm_one_result[(rsm_one_result.derived_from_old == rsm_one_result.derived_from_new) & (rsm_one_result.derived_from_old == 'not_available')]
# Patch one of Diane's that has missing derived_from but otherwise equal
# dp = rsm_one_result[(rsm_one_result.derived_from_old == rsm_one_result.derived_from_new)
#                     & (rsm_one_result.derived_from_old == 'not_available')]
# dp[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
#                                                                     'accession': 'alternate_accessions:list'}).to_csv('../../one_match_missing_derived_from_patch_06_28_2017.tsv', sep='\t', index=False)
# Patch 58 narrowPeaks with one match after dropping revoked/deleted from possible replacements
# rsm_one_result[['possible_replacement_accession',
#                 'accession']].rename(columns={'possible_replacement_accession': 'accession',
#                                               'accession': 'alternate_accessions:list'}).to_csv('../../one_match_after_dropping_deleted_revoked_patch_06_30_2017.tsv', sep='\t', index=False)
```

### Replacements with one result where derived_from_old but not derived_from_new equal to not_available

```
rsm_one_result[(rsm_one_result.derived_from_old != rsm_one_result.derived_from_new) & (rsm_one_result.derived_from_old == 'not_available')].shape
```

### Replacements with one result where derived_from_new but not derived_from_old equal to not_available

```
rsm_one_result[(rsm_one_result.derived_from_old != rsm_one_result.derived_from_new) & (rsm_one_result.derived_from_new == 'not_available')]
```

### Replacements with one result where either are not_available

```
rsm_one_result[(rsm_one_result.derived_from_old == 'not_available') | (rsm_one_result.derived_from_new == 'not_available')]
```

### Replacements with one result where derived_from not matching

```
rsm_one_result[rsm_one_result.derived_from_old != rsm_one_result.derived_from_new].shape
```

### Replacements with one result where derived_from is matching

```
rsm_one_result[rsm_one_result.derived_from_old == rsm_one_result.derived_from_new].shape
# Full matches: one candidate AND matching, non-sentinel derived_from.
rsm_one_result_full_match = rsm_one_result[(rsm_one_result.derived_from_old == rsm_one_result.derived_from_new) & (rsm_one_result.derived_from_old != 'not_available')][cols].reset_index(drop=True)
rsm_one_result_full_match
len(rsm_one_result_full_match.possible_replacement_accession.unique())
rsm_one_result_full_match[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession', 'accession': 'alternate_accessions:list'})
```

### Replacements with one result with no matching derived_from

```
rsm_one_result_no_match = rsm_one_result[~(rsm_one_result.accession.isin(rsm_one_result_full_match.accession.values))][cols].reset_index(drop=True)
rsm_one_result_no_match.shape
rsm_one_result_no_match
rsm_one_result_no_match.file_format_type_new.value_counts()
rsm_one_result_no_match[rsm_one_result_no_match.file_format_type_new == "not_available"]
rsm_one_result_no_match[['derived_from_new', 'derived_from_old']].values
rsm_one_result_no_match[rsm_one_result_no_match.submitted_by_old == 'J. Seth Strattan']
# Patch these narrowPeaks that match except for derived_from because upstream Files changed.
sp = rsm_one_result_no_match[rsm_one_result_no_match.submitted_by_old == 'J. Seth Strattan'][['possible_replacement_accession', 'accession']]
sp.rename(columns={'possible_replacement_accession': 'accession', 'accession': 'alternate_accessions:list'})#.to_csv('../../one_match_derived_from_mismatch_patch_06_28_2017.tsv', index=False, sep='\t')
```

## Possible replacement with many results

```
# Accessions with more than one candidate row.
rsm_multi_result = rsm[rsm.duplicated('accession', keep=False)].reset_index(drop=True)
len(rsm_multi_result.accession.unique())
rsm_multi_result.drop_duplicates('accession', keep='first').reset_index().submitted_by_old.value_counts()
rsm_multi_result[rsm_multi_result.accession == 'ENCFF719FSK']
```

### Groups add back up to total number of accessions?

```
# Sanity check: zero/one/patched/multi partitions must cover every accession.
assert len(rsm_zero_result) + len(rsm_one_result) + len(rsm_one_result_patch) + len(rsm_multi_result.accession.unique()) == len(rsm.accession.unique())
```

### Does matching on technical replicates and derived_from reduce number of possible replacements with many results?
```
# Multi-result candidates where both technical_replicates and derived_from
# already match between old file and candidate.
matching_rep = rsm_multi_result[(rsm_multi_result.technical_replicates_old == rsm_multi_result.technical_replicates_new) & (rsm_multi_result.derived_from_old == rsm_multi_result.derived_from_new)].reset_index(drop=True)
len(matching_rep.accession.unique())
```

### Multiresults that now only have one result after matching on technical_replicate and derived_from

```
len(matching_rep.drop_duplicates('accession', keep=False).accession.unique())
rsm_multi_one_result = matching_rep.drop_duplicates('accession', keep=False)[cols].reset_index(drop=True)
rsm_multi_one_result[cols]
# rsm_multi_one_result[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
#                                                                                      'accession': 'alternate_accessions:list'}).to_csv('../../multi_one_match_patch_06_27_2017.tsv',
#                                                                                                                                       index=False, sep='\t')
# Patch multiresults that have one match when matched on tech_rep (only narrowPeaks)
# multi_one_narrow_peaks = rsm_multi_result[(rsm_multi_result.technical_replicates_old == rsm_multi_result.technical_replicates_new)
#                                           & (rsm_multi_result.file_format_type_old == 'narrowPeak')].drop_duplicates('accession', keep=False).reset_index(drop=True)
# multi_one_narrow_peaks[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
#                                                                                        'accession': 'alternate_accessions:list'}).to_csv('../../multi_narrow_peaks_tech_rep_match_patch_06_30_2017.tsv', sep='\t', index=False)
```

### Multiresults that still have more than one result after matching on technical_replicate and derived_from

```
len(matching_rep[matching_rep.duplicated('accession', keep=False)].accession.unique())
```

### Group by accession and possible_replacement

```
# Rebuild display column order (accession pair first, rest reverse-sorted,
# alternate_accessions columns excluded).
cols = ['accession','possible_replacement_accession']
cols = cols + [x for x in sorted(rsm.columns, reverse=True) if (x not in cols) and (x not in ['alternate_accessions_new', 'alternate_accessions_old'])]
mr = matching_rep[matching_rep.duplicated('accession', keep=False)].groupby(cols).count().reset_index()
matching_rep[matching_rep.duplicated('accession', keep=False)].groupby(cols).count()
# # Patch pointing to in progress replacement instead of deleted replacement.
# in_prog_multi_patch = mr[(mr.status_new == 'in progress')
#                          & (mr.accession.isin(['ENCFF219IZI',
#                                                'ENCFF362CIL',
#                                                'ENCFF522EVZ',
#                                                'ENCFF526SQT',
#                                                'ENCFF554QRY',
#                                                'ENCFF799OIZ',
#                                                'ENCFF826MUG',
#                                                'ENCFF832XOD',
#                                                'ENCFF833LEK']))]
# # in_prog_multi_patch[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
# #                                                                                     'accession': 'alternate_accessions:list'})\
# #     .to_csv('../../multi_result_point_to_in_progress_patch_06_28_2017.tsv', index=False, sep='\t')
# in_prog_multi_patch
# # Patch pointing to released replacement instead of revoked replacement.
# released_multi_patch = mr[(mr.status_new == 'released')
#                           & (mr.accession.isin(['ENCFF311CTD',
#                                                 'ENCFF442FSP',
#                                                 'ENCFF521DYG',
#                                                 'ENCFF660PBO',
#                                                 'ENCFF723DLE',
#                                                 'ENCFF758WLI',
#                                                 'ENCFF803YCX',
#                                                 'ENCFF809POG']))]
# # released_multi_patch[['possible_replacement_accession', 'accession']].rename(columns={'possible_replacement_accession': 'accession',
# #                                                                                      'accession': 'alternate_accessions:list'})\
# #     .to_csv('../../multi_result_point_to_released_patch_06_28_2017.tsv', index=False, sep='\t')
# released_multi_patch
# # Patch these as deleted because merged fasta that was never released
# mr.loc[mr.submitted_by_old == 'Xintao Wei', 'status_old'] = 'deleted'
# mr[mr.submitted_by_old == 'Xintao Wei'].drop_duplicates('accession')[['@id_old', 'status_old']].rename(columns={'status_old': 'status', '@id_old': '@id'}).to_csv('../../two_match_to_deleted_patch_06_29_2017.tsv', sep='\t', index=False)
```

## Multiresults that don't match on technical_replicates or derived_from

```
no_matching_rep = rsm_multi_result[~(rsm_multi_result.accession.isin(matching_rep.accession.unique()))].reset_index(drop=True)
len(no_matching_rep.accession.unique())
# NOTE(review): multi_tech_match is referenced here but only assigned in a
# later cell below — this notebook's cells were executed out of order;
# running top-to-bottom would raise NameError here. TODO confirm intended order.
no_matching_rep[~(no_matching_rep.accession.isin(multi_tech_match.accession)) & (no_matching_rep.submitted_by_old == "J. Seth Strattan")]['@id_old'].unique()
```

### Multiresults that have matching technical_replicates but not derived_from

```
len(no_matching_rep[(no_matching_rep.technical_replicates_old == no_matching_rep.technical_replicates_new) & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].accession.unique())
no_matching_rep[(no_matching_rep.technical_replicates_old == no_matching_rep.technical_replicates_new) & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].drop_duplicates('accession', keep=False)
multi_tech_match = no_matching_rep[(no_matching_rep.technical_replicates_old == no_matching_rep.technical_replicates_new) & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)]
no_matching_rep[(no_matching_rep.technical_replicates_old == no_matching_rep.technical_replicates_new) & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].groupby(cols).count()
multi_tech_match.superseded_by_old.value_counts()
multi_tech_match[multi_tech_match.superseded_by_old == 'empty_list'][cols]
multi_tech_match[multi_tech_match.supersedes_new != 'not_available'][cols]
multi_tech_match.supersedes_old.value_counts()
```

### One result after matching on technical_replicate

```
multi_tech_one_match = multi_tech_match.drop_duplicates('accession', keep=False)
len(multi_tech_match.drop_duplicates('accession', keep=False).accession.unique())
multi_tech_one_match.submitted_by_old.value_counts()
pd.crosstab(multi_tech_one_match.output_type_old, multi_tech_one_match.submitted_by_old, margins=False)
multi_tech_one_match
# Delete because no matching derived_from
#multi_tech_one_match[['@id_old', 'status_old']].rename(columns={'@id_old': '@id', 'status_old': 'status'}).to_csv('../../no_matching_derived_from_delete_patch_07_03_2017.tsv', index=False, sep='\t')
multi_tech_one_match.file_format_old.value_counts()
multi_tech_one_match[(multi_tech_one_match.output_type_old != 'alignments')][cols]
multi_tech_one_match[(multi_tech_one_match.submitted_by_old == 'Xintao Wei') & (multi_tech_one_match.output_type_old != 'alignments')][cols]#[['@id_old', 'possible_replacement_accession']].values
multi_tech_one_match.groupby(cols).count()
multi_tech_one_match.file_format_type_old.value_counts()
multi_tech_one_match[multi_tech_one_match.submitted_by_old == "Jean Davidson"][cols]
```

### Multiresult after matching on technical_replicate

```
len(multi_tech_match[multi_tech_match.duplicated('accession', keep=False)].accession.unique())
mtm = multi_tech_match[multi_tech_match.duplicated('accession', keep=False)]
mtm.groupby(cols).count()
mtm[mtm.submitted_by_old == 'Jean Davidson'].groupby(cols).count()
mtm[mtm.submitted_by_old == 'J. Seth Strattan'].groupby(cols).count()
```

### Multiresults that have matching derived_from but not technical_replicates

```
no_matching_rep[(no_matching_rep.technical_replicates_old != no_matching_rep.technical_replicates_new) & (no_matching_rep.derived_from_old == no_matching_rep.derived_from_new)].shape
no_matching_rep[(no_matching_rep.technical_replicates_old != no_matching_rep.technical_replicates_new) & (no_matching_rep.derived_from_old == no_matching_rep.derived_from_new)].groupby(cols).count()
```

### Multiresults that have mismatching derived_from and technical_replicates

```
len(no_matching_rep[(no_matching_rep.technical_replicates_old != no_matching_rep.technical_replicates_new) & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].accession.unique())
no_matching_rep[(no_matching_rep.technical_replicates_old != no_matching_rep.technical_replicates_new) & (no_matching_rep.derived_from_old != no_matching_rep.derived_from_new)].groupby(cols).count()
cols = ['accession','possible_replacement_accession']
cols = cols + [x for x in sorted(matching_rep.columns, reverse=True) if (x not in cols) and (x not in ['alternate_accessions_new', 'alternate_accessions_old'])]
no_matching_rep.groupby(cols).count()
```

## Accessions of multiple results that don't have matching technical_replicates or derived_from

```
mis_matching_rep = rsm_multi_result[~(rsm_multi_result.accession.isin(matching_rep.accession))].reset_index(drop=True)
len(mis_matching_rep.accession.unique())
mis_matching_rep[['technical_replicates_old','technical_replicates_new', 'derived_from_old', 'derived_from_new']]
```

## Pull all accessions ready for patching

```
# Union of every group deemed safe to patch in the sections above.
replacement_patch = pd.concat([rsm_patch, rsm_one_result_full_match, rsm_multi_one_result])
# Squash list for patching: one row per replacement accession, with all
# replaced accessions joined into a single alternate_accessions value.
patch_list = []
for replacement_accession in replacement_patch.possible_replacement_accession.unique():
    values = replacement_patch[replacement_patch.possible_replacement_accession == replacement_accession]['accession']
    accession_list = []
    for val in values:
        accession_list.append(val)
    patch_list.append({'accession': replacement_accession, 'alternate_accessions:array': ', '.join(accession_list)})
patch_data = pd.DataFrame(patch_list)
#patch_data.to_csv("replaced_with_matching_replacements_patch_06_21_2017.tsv", sep="\t", index=False)
# NOTE(review): sns (seaborn) is used here but not imported in the visible
# portion of this notebook — presumably imported in an earlier cell; verify.
with sns.plotting_context("notebook", font_scale=1.5):
    fig = plt.figure(figsize=[14, 8])
    sns.set_style('whitegrid')
    sns.stripplot(x='date_created_old', data=rsm[rsm.possible_replacement_accession == 'no_result'], size=10, color='black', alpha=0.8)
```

## Biosamples

```
biosamples = no_redirect_accessions[no_redirect_accessions['@type'] == 'Biosample']
biosamples.submitted_by.value_counts()
na = 'not_available'
possible_replacements = defaultdict(list)


# NOTE(review): the three async helpers below are File-oriented search
# helpers; the biosample loop further down is synchronous (requests) and does
# not call them in the visible cells.
async def get_request_two(session, url, r):
    """Run a File search URL and append every hit (or a 'no_result' marker)
    to possible_replacements keyed by the replaced file's accession."""
    async with session.get(url, auth=request_auth) as response_two:
        result_one = await response_two.json()
        search_results = result_one['@graph']
        if len(search_results) == 0:
            possible_replacements[r['accession']].append({'accession': r['accession'], 'possible_replacement_accession': 'no_result'})
        for result in search_results:
            lab = result.get('lab', {})
            sub_by = result.get('submitted_by', {})
            # Embedded objects may come back as @id strings or dicts.
            if isinstance(sub_by, str):
                submitted_by = sub_by
            else:
                submitted_by = sub_by.get('title', na)
            if isinstance(lab, str):
                lab_name = lab
            else:
                lab_name = lab.get('name', na)
            possible_replacements[r['accession']].append({'accession': r['accession'],
                                                          'possible_replacement_accession': result['accession'],
                                                          '@id': result['@id'],
                                                          'alternate_accessions': result.get('alternate_accessions', na),
                                                          'dataset': result.get('dataset', na),
                                                          'lab_name': lab_name,
                                                          'date_created': result.get('date_created', na),
                                                          '@type': result['@type'][0],
                                                          'output_type': result.get('output_type', na),
                                                          'file_format': result.get('file_format', na),
                                                          'assembly': result.get('assembly', na),
                                                          'paired_with': result.get('paired_with', na),
                                                          'paired_end': result.get('paired_end', na),
                                                          'file_format_type': result.get('file_format_type', na),
                                                          'technical_replicates': result.get('technical_replicates', na),
                                                          'replicate_uuid': result.get('replicate', {}).get('uuid', na),
                                                          'md5sum': result.get('md5sum', na),
                                                          'content_md5sum': result.get('content_md5sum', na),
                                                          'status': result['status'],
                                                          'submitted_by': submitted_by,
                                                          'derived_from': result.get('derived_from', na),
                                                          'superseded_by': result.get('superseded_by', na),
                                                          'supersedes': result.get('supersedes', na)
                                                          })


async def get_request_one(session, file_id):
    """Fetch a replaced file and build a search URL for candidate
    replacements (same format/output_type/dataset/assembly), then delegate
    the search to get_request_two()."""
    url = 'https://www.encodeproject.org/{}/?format=json'.format(file_id)
    async with session.get(url, auth=request_auth) as response_one:
        result_one = await response_one.json()
        r = result_one
        file_format = r['file_format']
        output_type = r['output_type']
        dataset = r['dataset']
        assembly = r.get('assembly', '*')
        try:
            # Narrow by assay when a quality metric exposes it.
            assay_term_name = r['quality_metrics'][0]['assay_term_name']
            url = 'https://www.encodeproject.org/search/?type=File&file_format={}'\
                '&output_type={}&quality_metrics.assay_term_name={}'\
                '&dataset={}&assembly={}&format=json&frame=embedded'\
                '&status!=replaced'.format(file_format, output_type, assay_term_name, dataset, assembly)
        except IndexError:
            url = 'https://www.encodeproject.org/search/?type=File&file_format={}'\
                '&output_type={}&dataset={}&assembly={}&format=json&frame=embedded'\
                '&status!=replaced'.format(file_format, output_type, dataset, assembly)
        if assembly == '*':
            # No assembly on the old file: search for files with any assembly.
            url = url.replace('&assembly=*', '&assembly!=*')
        result_two = await get_request_two(session, url, r)


async def create_session(file_ids, loop):
    """Fan out get_request_one() over all file_ids on one pooled session."""
    conn = aiohttp.TCPConnector(keepalive_timeout=10, limit=100)
    async with aiohttp.ClientSession(connector=conn, loop=loop) as session:
        results = await asyncio.gather(*[get_request_one(session, file_id) for file_id in file_ids])


na = 'not_available'
possible_biosample_replacements = defaultdict(list)
# Synchronous biosample candidate search: for each replaced biosample, query
# the portal for non-replaced biosamples with the same product/health/culture
# metadata (falling back to description) and record old/new field pairs.
for biosample_id in biosamples['@id'].unique():
    r = requests.get('https://www.encodeproject.org/{}/?format=json'.format(biosample_id), auth=(key.authid, key.authpw))
    r = r.json()
    lab_old = r.get('lab', {})
    if isinstance(lab_old, str):
        lab_name_old = lab_old
    else:
        lab_name_old = lab_old.get('name', na)
    donor_old = r.get('donor', {})
    if isinstance(donor_old, str):
        donor_name_old = donor_old
    else:
        donor_name_old = donor_old.get('@id', na)
    sub_by_old = r.get('submitted_by', {})
    if isinstance(sub_by_old, str):
        submitted_by_old = sub_by_old
    else:
        submitted_by_old = sub_by_old.get('title', na)
    try:
        product_id = r['product_id']
        health_status = r['health_status']
        culture_start_date = r['culture_start_date']
        url = 'https://www.encodeproject.org/search/'\
            '?type=Biosample&product_id={}'\
            '&health_status={}&culture_start_date={}'\
            '&status%21=replaced&format=json&frame=embedded'.format(product_id, health_status, culture_start_date)
    except KeyError:
        description = r['description']
        url = 'https://www.encodeproject.org/search/'\
            '?type=Biosample&description={}'\
            '&status%21=replaced&format=json&frame=embedded'.format(description)
    search_results = requests.get(url, auth=(key.authid, key.authpw))
    search_results = search_results.json()['@graph']
    if len(search_results) == 0:
        possible_biosample_replacements[r['accession']].append({'accession': r['accession'], 'possible_replacement_accession': 'no_result'})
    for result in search_results:
        lab_new = result.get('lab', {})
        if isinstance(lab_new, str):
            lab_name_new = lab_new
        else:
            lab_name_new = lab_new.get('name', na)
        donor_new = result.get('donor', {})
        if isinstance(donor_new, str):
            donor_name_new = donor_new
        else:
            donor_name_new = donor_new.get('@id', na)
        sub_by_new = result.get('submitted_by', {})
        if isinstance(sub_by_new, str):
            submitted_by_new = sub_by_new
        else:
            submitted_by_new = sub_by_new.get('title', na)
        # NOTE(review): alternate_accessions_new is taken from r (the OLD
        # object) and alternate_accessions_old from result (the NEW one) —
        # these keys look swapped relative to every other _old/_new pair.
        possible_biosample_replacements[r['accession']].append({'accession': r['accession'],
                                                                'possible_replacement_accession': result['accession'],
                                                                '@id_old': r['@id'],
                                                                '@id_new': result['@id'],
                                                                'alternate_accessions_new': r.get('alternate_accessions', na),
                                                                'alternate_accessions_old': result.get('alternate_accessions', na),
                                                                'donor_old': donor_name_old,
                                                                'donor_new': donor_name_new,
                                                                'lab_name_old': lab_name_old,
                                                                'lab_name_new': lab_name_new,
                                                                'date_created_old': r.get('date_created', na),
                                                                'date_created_new': result.get('date_created', na),
                                                                '@type_old': r['@type'][0],
                                                                '@type_new': result['@type'][0],
                                                                'status_old': r['status'],
                                                                'status_new': result['status'],
                                                                'product_id_old': r.get('product_id', na),
                                                                'product_id_new': result.get('product_id', na),
                                                                'health_status_old': r.get('health_status', na),
                                                                'health_status_new': result.get('health_status', na),
                                                                'culture_start_date_old': r.get('culture_start_date', na),
                                                                'culture_start_date_new': result.get('culture_start_date', na),
                                                                'biosample_type_old': r['biosample_type'],
                                                                'biosample_type_new': result['biosample_type'],
                                                                'treatment_old': r['treatments'],
                                                                'treatment_new': result['treatments'],
                                                                'biosample_term_name_old': r['biosample_term_name'],
                                                                'biosample_term_name_new': result['biosample_term_name'],
                                                                'summary_old': r['summary'],
                                                                'summary_new': result['summary'],
                                                                'description_old': r['description'],
                                                                'description_new': result['description'],
                                                                'pooled_from_old': r.get('pooled_from', na),
                                                                'pooled_from_new': result.get('pooled_from', na),
                                                                'part_of_old': r.get('part_of', na),
                                                                'part_of_new': result.get('part_of', na),
                                                                'culture_harvest_date_old': r.get('culture_harvest_date', na),
                                                                'culture_harvest_date_new': result.get('culture_harvest_date', na),
                                                                'passage_number_old': r.get('passage_number', na),
                                                                'passage_number_new': result.get('passage_number', na),
                                                                'lot_id_old': r.get('lot_id', na),
                                                                'lot_id_new': result.get('lot_id', na),
                                                                'submitted_by_old': submitted_by_old,
                                                                'submitted_by_new': submitted_by_new
                                                                })
len(possible_biosample_replacements)
possible_biosample_replacements
# Flatten {accession: [rows]} into one DataFrame and canonicalize sentinels,
# mirroring the File workflow above.
replacement_search = pd.DataFrame([item for key, value in possible_biosample_replacements.items() for item in value])
replacement_search = replacement_search.fillna('isnull')
replacement_search.loc[replacement_search.alternate_accessions_old.apply(lambda x: len(x) == 0), 'alternate_accessions_old'] = 'empty_list'
replacement_search.loc[replacement_search.alternate_accessions_new.apply(lambda x: len(x) == 0), 'alternate_accessions_new'] = 'empty_list'
#replacement_search.loc[replacement_search.pooled_from_old.apply(lambda x: len(x) == 0), 'pooled_from_old'] = 'empty_list'
#replacement_search.loc[replacement_search.pooled_from_new.apply(lambda x: len(x) == 0), 'pooled_from_new'] = 'empty_list'
replacement_search.shape
lazy_dict = {'_,e,i,l,m,p,s,t,t,y': 'empty_list', 'i,l,l,n,s,u': 'isnull', '_,a,a,a,b,e,i,l,l,n,o,t,v': 'not_available'}


def parse_list(x):
    """Canonicalize a list (or string) as a sorted, comma-joined string so
    list-valued fields can be compared with ==."""
    return ','.join([y.strip() for y in sorted(x)])


replacement_search.date_created_old = replacement_search.date_created_old.apply(lambda x: pd.to_datetime(x))
replacement_search.date_created_new = replacement_search.date_created_new.apply(lambda x: pd.to_datetime(x))
for field in ['treatment_new', 'treatment_old', 'alternate_accessions_old', 'alternate_accessions_new', 'pooled_from_new', 'pooled_from_old', 'part_of_new', 'part_of_old']:
    replacement_search[field] = replacement_search[field].apply(lambda x: parse_list(x)).apply(lambda x: lazy_dict[x] if x in lazy_dict.keys() else x)
# NOTE(review): several names below (bcols, bs_patch, biosamples_multi_match,
# bs) are referenced before the cells that assign them — this dump preserves
# an out-of-order notebook execution; do not run top-to-bottom as-is.
bcols
biosamples_one_match = replacement_search.drop_duplicates('accession', keep=False)
first_cols = ['accession', 'possible_replacement_accession']
bcols = first_cols + [col for col in sorted(biosamples_one_match.columns, reverse=True) if col not in first_cols]
biosamples[biosamples['@id'].isin(replacement_search['@id_old'])].shape
biosamples_one_match[bcols].lab_name_old.value_counts()
biosamples_one_match[bcols]
# Squash one-match biosamples into one row per replacement for TSV patching.
flat_patch = []
for replacement in bs_patch.possible_replacement_accession.unique():
    data = {'accession': replacement, 'alternate_accessions:array': ", ".join(bs_patch[bs_patch.possible_replacement_accession == replacement].accession.values)}
    flat_patch.append(data)
fp = pd.DataFrame(flat_patch)
# fp.to_csv('../../biosample_one_match_patch_07_03_2017.tsv', sep='\t', index=False)
biosamples_multi_match[biosamples_multi_match.accession.isin(bs_multi_match.accession)]
# NOTE(review): (bs.donor_old == bs.donor_old) compares a column to itself and
# is always True (barring NaN) — most likely meant bs.donor_new; confirm.
bs_multi_match = bs[(bs.donor_old == bs.donor_old) & (bs.passage_number_old == bs.passage_number_new) & (bs.lot_id_old == bs.lot_id_new) & (bs.product_id_old == bs.product_id_new) & (bs.culture_harvest_date_old == bs.culture_harvest_date_new) & (bs.culture_start_date_old == bs.culture_start_date_new)]
bs = biosamples_multi_match#.drop_duplicates('accession').shape
bs_multi_match.submitted_by_old.value_counts()
bs_multi_match.groupby(bcols).count()
ANTIBODIES: product_id=A301-145A @type=AntibodyLot targets.gene_name: "NCOR1", antigen_description: "Nuclear Receptor corepressor 1; N-CoR, TRAC1, KIAA1047, hN-CoR", source.title: "Bethyl Labs", https://www.encodeproject.org/search/?type=AntibodyLot&targets.gene_name=NCOR1&source.title=Bethyl+Labs&product_id=A301-145A&status%21=replaced BIOSAMPLE biosample_type: "immortalized cell line", treatment: [ ] lab.name: "gene-yeo" culture_start_date: "2015-06-12", health_status: "hepatocellular carcinoma", product_id: "HB-8065", biosample_term_name: "HepG2", @type: "Biosample" donor.@id: "/human-donors/ENCDO000AAC/", summary: "Homo sapiens HepG2 immortalized cell line", life_stage: "child", source.title: "ATCC", biosample_term_name: "HepG2", https://www.encodeproject.org/search/ ?type=Biosample&product_id=HB-8065 &health_status=hepatocellular+carcinoma &culture_start_date=2015-06-12&status%21=replaced FILE quality_metrics.assay_term_name: "ChIP-seq", file_type: "bam", assembly: "hg19", lab.name: "encode-processing-pipeline", output_category: "alignment", analysis_step_version.analysis_step.name: "bwa-raw-alignment-step-v-1", biological_replicates: 1 technical_replicates: [ "1_1" https://www.encodeproject.org/search/?type=File&file_format=bam &output_type=alignments&quality_metrics.assay_term_name=ChIP-seq &dataset=%2Fexperiments%2FENCSR021JFW%2F&assembly=hg19 LIBRARY nucleic_acid_term_name: "DNA", library_size_selection_method: "SPRI beads", strand_specificity: false, fragmentation_method: "shearing (Covaris S2)", aliases: "tim-reddy:hic_dex.t0_brep1_lib" lab: "/labs/tim-reddy/", crosslinking_method: "formaldehyde", biosample.summary: "Homo sapiens A549 immortalized cell line" biosample.biosample_term_name: "A549" https://www.encodeproject.org/search/?type=Library &lab=%2Flabs%2Fthomas-gingeras%2F &nucleic_acid_term_name=polyadenylated+mRNA &strand_specificity=true&depleted_in_term_name=rRNA &biosample.biosample_term_name=NCI-H460 &biosample.%40id=%2Fbiosamples%2FENCBS814QPR%2F&status%21=replaced ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` #@title ##### Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Bayesian Neural Network <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/python/experimental/nn/examples/bnn_mnist_advi.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/python/experimental/nn/examples/bnn_mnist_advi.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> ### 1 Imports ``` import sys import time import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn import metrics as sklearn_metrics import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import tensorflow_datasets as tfds import tensorflow_probability as tfp from tensorflow_probability.python.internal import prefer_static # Globally Enable XLA. # tf.config.optimizer.set_jit(True) try: physical_devices = tf.config.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(physical_devices[0], True) except: # Invalid device or cannot modify virtual devices once initialized. 
pass tfb = tfp.bijectors tfd = tfp.distributions tfn = tfp.experimental.nn ``` ### 2 Load Dataset ``` dataset_name = 'emnist' batch_size = 32 [train_dataset, eval_dataset], datasets_info = tfds.load( name=dataset_name, split=['train', 'test'], with_info=True, as_supervised=True, shuffle_files=True) def _preprocess(image, label): image = tf.cast(image, dtype=tf.float32) / 255. if dataset_name == 'emnist': image = tf.transpose(image, perm=[1, 0, 2]) label = tf.cast(label, dtype=tf.int32) return image, label train_size = datasets_info.splits['train'].num_examples eval_size = datasets_info.splits['test'].num_examples num_classes = datasets_info.features['label'].num_classes image_shape = datasets_info.features['image'].shape if dataset_name == 'emnist': import string yhuman = np.array(list(string.digits + string.ascii_uppercase + string.ascii_lowercase)) else: yhuman = np.range(num_classes).astype(np.int32) if True: orig_train_size = train_size train_size = int(10e3) train_dataset = train_dataset.shuffle(orig_train_size // 7).repeat(1).take(train_size) train_dataset = tfn.util.tune_dataset( train_dataset, batch_size=batch_size, shuffle_size=int(train_size / 7), preprocess_fn=_preprocess) if True: orig_eval_size = eval_size eval_size = int(10e3) eval_dataset = eval_dataset.shuffle(orig_eval_size // 7).repeat(1).take(eval_size) eval_dataset = tfn.util.tune_dataset( eval_dataset, repeat_count=None, preprocess_fn=_preprocess) x, y = next(iter(eval_dataset.batch(10))) tfn.util.display_imgs(x, yhuman[y.numpy()]); ``` ### 3 Define Model ``` #@title Optional Custom Posterior def make_posterior( kernel_shape, bias_shape, dtype=tf.float32, kernel_initializer=None, bias_initializer=None, kernel_name='posterior_kernel', bias_name='posterior_bias'): if kernel_initializer is None: kernel_initializer = tf.initializers.glorot_uniform() if bias_initializer is None: bias_initializer = tf.zeros make_loc = lambda shape, init, name: tf.Variable( # pylint: disable=g-long-lambda init(shape, 
dtype=dtype), name=name + '_loc') make_scale = lambda shape, name: tfp.util.TransformedVariable( # pylint: disable=g-long-lambda tf.fill(shape, tf.constant(0.01, dtype)), tfb.Chain([tfb.Shift(1e-5), tfb.Softplus()]), name=name + '_scale') return tfd.JointDistributionSequential([ tfd.Independent( tfd.Normal(loc=make_loc(kernel_shape, kernel_initializer, kernel_name), scale=make_scale(kernel_shape, kernel_name)), reinterpreted_batch_ndims=prefer_static.size(kernel_shape), name=kernel_name), tfd.Independent( tfd.Normal(loc=make_loc(bias_shape, bias_initializer, bias_name), scale=make_scale(bias_shape, bias_name)), reinterpreted_batch_ndims=prefer_static.size(bias_shape), name=bias_name), ]) #@title Optional Custom Prior def make_prior( kernel_shape, bias_shape, dtype=tf.float32, kernel_initializer=None, # pylint: disable=unused-argument bias_initializer=None, # pylint: disable=unused-argument kernel_name='prior_kernel', bias_name='prior_bias'): k = tfd.MixtureSameFamily( tfd.Categorical(tf.zeros(3, dtype)), tfd.StudentT( df=[1,1.,1.], loc=[0,3,-3], scale=tf.constant([1, 10, 10], dtype))) #df=[0.5, 1., 1.], loc=[0, 2, -2], scale=tf.constant([0.25, 5, 5], dtype))) b = tfd.Normal(0, tf.constant(1000, dtype)) return tfd.JointDistributionSequential([ tfd.Sample(k, kernel_shape, name=kernel_name), tfd.Sample(b, bias_shape, name=bias_name), ]) max_pool = tf.keras.layers.MaxPooling2D( # Has no tf.Variables. pool_size=(2, 2), strides=(2, 2), padding='SAME', data_format='channels_last') def batchnorm(axis): def fn(x): m = tf.math.reduce_mean(x, axis=axis, keepdims=True) v = tf.math.reduce_variance(x, axis=axis, keepdims=True) return (x - m) / tf.math.sqrt(v) return fn maybe_batchnorm = batchnorm(axis=[-4, -3, -2]) # maybe_batchnorm = lambda x: x bnn = tfn.Sequential([ lambda x: 2. * tf.cast(x, tf.float32) - 1., # Center. 
tfn.ConvolutionVariationalReparameterization( input_size=1, output_size=8, filter_shape=5, padding='SAME', init_kernel_fn=tf.initializers.he_uniform(), penalty_weight=1 / train_size, # penalty_weight=1e2 / train_size, # Layer specific "beta". # make_posterior_fn=make_posterior, # make_prior_fn=make_prior, name='conv1'), maybe_batchnorm, tf.nn.leaky_relu, tfn.ConvolutionVariationalReparameterization( input_size=8, output_size=16, filter_shape=5, padding='SAME', init_kernel_fn=tf.initializers.he_uniform(), penalty_weight=1 / train_size, # penalty_weight=1e2 / train_size, # Layer specific "beta". # make_posterior_fn=make_posterior, # make_prior_fn=make_prior, name='conv2'), maybe_batchnorm, tf.nn.leaky_relu, max_pool, # [28, 28, 8] -> [14, 14, 8] tfn.ConvolutionVariationalReparameterization( input_size=16, output_size=32, filter_shape=5, padding='SAME', init_kernel_fn=tf.initializers.he_uniform(), penalty_weight=1 / train_size, # penalty_weight=1e2 / train_size, # Layer specific "beta". # make_posterior_fn=make_posterior, # make_prior_fn=make_prior, name='conv3'), maybe_batchnorm, tf.nn.leaky_relu, max_pool, # [14, 14, 16] -> [7, 7, 16] tfn.util.flatten_rightmost(ndims=3), tfn.AffineVariationalReparameterizationLocal( input_size=7 * 7 * 32, output_size=num_classes - 1, penalty_weight=1. 
/ train_size, # make_posterior_fn=make_posterior, # make_prior_fn=make_prior, name='affine1'), tfb.Pad(), lambda x: tfd.Categorical(logits=x, dtype=tf.int32), ], name='BNN') # bnn_eval = tfn.Sequential([l for l in bnn.layers if l is not maybe_batchnorm], # name='bnn_eval') bnn_eval = bnn print(bnn.summary()) ``` ### 4 Loss / Eval ``` def compute_loss_bnn(x, y, beta=1., is_eval=False): d = bnn_eval(x) if is_eval else bnn(x) nll = -tf.reduce_mean(d.log_prob(y), axis=-1) kl = bnn.extra_loss loss = nll + beta * kl return loss, (nll, kl), d train_iter_bnn = iter(train_dataset) def train_loss_bnn(): x, y = next(train_iter_bnn) loss, (nll, kl), _ = compute_loss_bnn(x, y) return loss, (nll, kl) opt_bnn = tf.optimizers.Adam(learning_rate=0.003) fit_bnn = tfn.util.make_fit_op( train_loss_bnn, opt_bnn, bnn.trainable_variables, grad_summary_fn=lambda gs: tf.nest.map_structure(tf.norm, gs)) #@title Eval Helpers def all_categories(d): num_classes = tf.shape(d.logits_parameter())[-1] batch_ndims = tf.size(d.batch_shape_tensor()) expand_shape = tf.pad( [num_classes], paddings=[[0, batch_ndims]], constant_values=1) return tf.reshape(tf.range(num_classes, dtype=d.dtype), expand_shape) def rollaxis(x, shift): return tf.transpose(x, tf.roll(tf.range(tf.rank(x)), shift=shift, axis=0)) def compute_eval_stats(y, d, threshold=None): # Assume we have evidence `x`, targets `y`, and model function `dnn`. 
all_pred_log_prob = tf.math.log_softmax(d.logits, axis=-1) yhat = tf.argmax(all_pred_log_prob, axis=-1) pred_log_prob = tf.reduce_max(all_pred_log_prob, axis=-1) # all_pred_log_prob = d.log_prob(all_categories(d)) # yhat = tf.argmax(all_pred_log_prob, axis=0) # pred_log_prob = tf.reduce_max(all_pred_log_prob, axis=0) # Alternative #1: # all_pred_log_prob = rollaxis(all_pred_log_prob, shift=-1) # pred_log_prob, yhat = tf.math.top_k(all_pred_log_prob, k=1, sorted=False) # Alternative #2: # yhat = tf.argmax(all_pred_log_prob, axis=0) # pred_log_prob = tf.gather(rollaxis(all_pred_log_prob, shift=-1), # yhat, # batch_dims=len(d.batch_shape)) if threshold is not None: keep = pred_log_prob > tf.math.log(threshold) pred_log_prob = tf.boolean_mask(pred_log_prob, keep) yhat = tf.boolean_mask(yhat, keep) y = tf.boolean_mask(y, keep) hit = tf.equal(y, tf.cast(yhat, y.dtype)) avg_acc = tf.reduce_mean(tf.cast(hit, tf.float32), axis=-1) num_buckets = 10 ( avg_calibration_error, acc, conf, cnt, edges, bucket, ) = tf.cond(tf.size(y) > 0, lambda: tfp.stats.expected_calibration_error_quantiles( hit, pred_log_prob, num_buckets=num_buckets, log_space_buckets=True), lambda: (tf.constant(np.nan), tf.fill([num_buckets], np.nan), tf.fill([num_buckets], np.nan), tf.fill([num_buckets], np.nan), tf.fill([num_buckets + 1], np.nan), tf.constant([], tf.int64))) return avg_acc, avg_calibration_error, (acc, conf, cnt, edges, bucket) eval_iter_bnn = iter(eval_dataset.batch(2000).repeat()) @tfn.util.tfcompile def eval_bnn(threshold=None, num_inferences=5): x, y = next(eval_iter_bnn) loss, (nll, kl), d = compute_loss_bnn(x, y, is_eval=True) if num_inferences > 1: before_avg_predicted_log_probs = tf.map_fn( lambda _: tf.math.log_softmax(bnn(x).logits, axis=-1), elems=tf.range(num_inferences), dtype=loss.dtype) d = tfd.Categorical(logits=tfp.math.reduce_logmeanexp( before_avg_predicted_log_probs, axis=0)) avg_acc, avg_calibration_error, (acc, conf, cnt, edges, bucket) = \ compute_eval_stats(y, d, 
threshold=threshold) n = tf.reduce_sum(cnt, axis=0) return loss, (nll, kl, avg_acc, avg_calibration_error, n) ``` ### 5 Train ``` DEBUG_MODE = False tf.config.experimental_run_functions_eagerly(DEBUG_MODE) num_train_epochs = 2. # @param { isTemplate: true} num_evals = 50 # @param { isTemplate: true dur_sec = dur_num = 0 num_train_steps = int(num_train_epochs * train_size) for i in range(num_train_steps): start = time.time() trn_loss, (trn_nll, trn_kl), g = fit_bnn() stop = time.time() dur_sec += stop - start dur_num += 1 if i % int(num_train_steps / num_evals) == 0 or i == num_train_steps - 1: tst_loss, (tst_nll, tst_kl, tst_acc, tst_ece, tst_tot) = eval_bnn() f, x = zip(*[ ('it:{:5}', opt_bnn.iterations), ('ms/it:{:6.4f}', dur_sec / max(1., dur_num) * 1000.), ('tst_acc:{:6.4f}', tst_acc), ('tst_ece:{:6.4f}', tst_ece), ('tst_tot:{:5}', tst_tot), ('trn_loss:{:6.4f}', trn_loss), ('tst_loss:{:6.4f}', tst_loss), ('tst_nll:{:6.4f}', tst_nll), ('tst_kl:{:6.4f}', tst_kl), ('sum_norm_grad:{:6.4f}', sum(g)), ]) print(' '.join(f).format(*[getattr(x_, 'numpy', lambda: x_)() for x_ in x])) sys.stdout.flush() dur_sec = dur_num = 0 # if i % 1000 == 0 or i == maxiter - 1: # bnn.save('/tmp/bnn.npz') ``` ### 6 Evaluate ``` #@title More Eval Helpers @tfn.util.tfcompile def compute_log_probs_bnn(x, num_inferences): lp = tf.map_fn(lambda _: tf.math.log_softmax(bnn_eval(x).logits, axis=-1), elems=tf.range(num_inferences), dtype=tf.float32) log_mean_prob = tfp.math.reduce_logmeanexp(lp, axis=0) # ovr = "one vs rest" log_avg_std_ovr_prob = tfp.math.reduce_logmeanexp(lp + tf.math.log1p(-lp), axis=0) #log_std_prob = 0.5 * tfp.math.log_sub_exp(log_mean2_prob, log_mean_prob * 2.) 
tiny_ = np.finfo(lp.dtype.as_numpy_dtype).tiny log_std_prob = 0.5 * tfp.math.reduce_logmeanexp( 2 * tfp.math.log_sub_exp(lp + tiny_, log_mean_prob), axis=0) return log_mean_prob, log_std_prob, log_avg_std_ovr_prob num_inferences = 50 num_chunks = 10 eval_iter_bnn = iter(eval_dataset.batch(eval_size // num_chunks)) @tfn.util.tfcompile def all_eval_labels_and_log_probs_bnn(): def _inner(_): x, y = next(eval_iter_bnn) return x, y, compute_log_probs_bnn(x, num_inferences) x, y, (log_probs, log_std_probs, log_avg_std_ovr_prob) = tf.map_fn( _inner, elems=tf.range(num_chunks), dtype=(tf.float32, tf.int32,) + ((tf.float32,) * 3,)) return ( tf.reshape(x, (-1,) + image_shape), tf.reshape(y, [-1]), tf.reshape(log_probs, [-1, num_classes]), tf.reshape(log_std_probs, [-1, num_classes]), tf.reshape(log_avg_std_ovr_prob, [-1, num_classes]), ) ( x_, y_, log_probs_, log_std_probs_, log_avg_std_ovr_prob_, ) = all_eval_labels_and_log_probs_bnn() #@title Run Eval x, y, log_probs, log_std_probs, log_avg_std_ovr_prob = ( x_, y_, log_probs_, log_std_probs_, log_avg_std_ovr_prob_) yhat = tf.argmax(log_probs, axis=-1) max_log_probs = tf.gather(log_probs, yhat, batch_dims=1) max_log_std_probs = tf.gather(log_std_probs, yhat, batch_dims=1) max_log_avg_std_ovr_prob = tf.gather(log_avg_std_ovr_prob, yhat, batch_dims=1) # Sort by ascending confidence. 
score = max_log_probs # Mean #score = -max_log_std_probs # 1 / Sigma #score = max_log_probs - max_log_std_probs # Mean / Sigma #score = abs(tf.math.expm1(max_log_std_probs - (max_log_probs + tf.math.log1p(-max_log_probs)))) idx = tf.argsort(score) score = tf.gather(score, idx) x = tf.gather(x, idx) y = tf.gather(y, idx) yhat = tf.gather(yhat, idx) hit = tf.cast(tf.equal(y, tf.cast(yhat,y.dtype)), tf.int32) log_probs = tf.gather(log_probs, idx) max_log_probs = tf.gather(max_log_probs, idx) log_std_probs = tf.gather(log_std_probs, idx) max_log_std_probs = tf.gather(max_log_std_probs, idx) log_avg_std_ovr_prob = tf.gather(log_avg_std_ovr_prob, idx) max_log_avg_std_ovr_prob = tf.gather(max_log_avg_std_ovr_prob, idx) d = tfd.Categorical(logits=log_probs) max_log_probs = tf.reduce_max(log_probs, axis=-1) keep = tf.range(500,eval_size) #threshold = 0.95; # keep = tf.where(max_log_probs > tf.math.log(threshold))[..., 0] x_keep = tf.gather(x, keep) y_keep = tf.gather(y, keep) log_probs_keep = tf.gather(log_probs, keep) yhat_keep = tf.gather(yhat, keep) d_keep = tfd.Categorical(logits=log_probs_keep) ( avg_acc, ece, (acc, conf, cnt, edges, bucket), ) = tfn.util.tfcompile(lambda: compute_eval_stats(y, d))() ( avg_acc_keep, ece_keep, (acc_keep, conf_keep, cnt_keep, edges_keep, bucket_keep), ) = tfn.util.tfcompile(lambda: compute_eval_stats(y_keep, d_keep))() print('Accuracy (all) : {}'.format(avg_acc)) print('Accuracy (certain) : {}'.format(avg_acc_keep)) print('ECE (all) : {}'.format(ece)) print('ECE (certain) : {}'.format(ece_keep)) print('Number undecided: {}'.format(eval_size - tf.size(keep))) print('Most uncertain:') ss = (6,12); n = np.prod(ss); s = ss+image_shape tfn.util.display_imgs( tf.reshape(x[:n], s), yhuman[tf.reshape(y[:n], ss).numpy()]) print(tf.reshape(hit[:n], ss).numpy()) print(yhuman[tf.reshape(yhat[:n], ss).numpy()]) print('Least uncertain:') tfn.util.display_imgs( tf.reshape(x[-n:], s), yhuman[tf.reshape(y[-n:], ss).numpy()]) print(tf.reshape(hit[-n:],
ss).numpy()) print(yhuman[tf.reshape(yhat[-n:], ss).numpy()]) a = tf.math.exp(max_log_probs) b = tf.math.exp(max_log_std_probs) plt.plot(a, b, '.', label='observed'); #sns.jointplot(a.numpy(), b.numpy()) plt.xlabel('mean'); plt.ylabel('std'); p = tf.linspace(0.,1,100) plt.plot(p, tf.math.sqrt(p * (1 - p)), label='theoretical'); b = max_log_probs # b = tf.boolean_mask(b, b < 0.) sns.distplot(tf.math.exp(b).numpy(), bins=20); plt.xlabel('Posterior Mean Pred Prob'); plt.ylabel('Freq'); b = max_log_std_probs tiny_ = np.finfo(b.dtype.as_numpy_dtype).tiny b = tf.boolean_mask(b, b > tf.math.log(tiny_)) sns.distplot(tf.math.exp(b).numpy(), bins=20); plt.xlabel('Posterior Std. Pred Prob'); plt.ylabel('Freq'); b = max_log_avg_std_ovr_prob sns.distplot(tf.math.exp(b).numpy(), bins=20); plt.xlabel('Posterior Avg Std. Pred Prob (OVR)'); plt.ylabel('Freq'); #@title Avg One-vs-Rest AUC try: bnn_auc = sklearn_metrics.roc_auc_score( y_keep, log_probs_keep, average='macro', multi_class='ovr') print('Avg per class AUC:\n{}'.format(bnn_auc)) except TypeError: bnn_auc = np.array([ sklearn_metrics.roc_auc_score(tf.equal(y_keep, i), log_probs_keep[:, i]) for i in range(num_classes)]) print('Avg per class AUC:\n{}'.format(bnn_auc.mean())) ``` ### 7 Appendix: Compare against DNN ``` max_pool = tf.keras.layers.MaxPooling2D( # Has no tf.Variables. pool_size=(2, 2), strides=(2, 2), padding='SAME', data_format='channels_last') maybe_batchnorm = batchnorm(axis=[-4, -3, -2]) # maybe_batchnorm = lambda x: x dnn = tfn.Sequential([ lambda x: 2. * tf.cast(x, tf.float32) - 1., # Center. 
tfn.Convolution( input_size=1, output_size=8, filter_shape=5, padding='SAME', init_kernel_fn=tf.initializers.he_uniform(), name='conv1'), maybe_batchnorm, tf.nn.leaky_relu, tfn.Convolution( input_size=8, output_size=16, filter_shape=5, padding='SAME', init_kernel_fn=tf.initializers.he_uniform(), name='conv2'), maybe_batchnorm, tf.nn.leaky_relu, max_pool, # [28, 28, 8] -> [14, 14, 8] tfn.Convolution( input_size=16, output_size=32, filter_shape=5, padding='SAME', init_kernel_fn=tf.initializers.he_uniform(), name='conv3'), maybe_batchnorm, tf.nn.leaky_relu, max_pool, # [14, 14, 16] -> [7, 7, 16] tfn.util.flatten_rightmost(ndims=3), tfn.Affine( input_size=7 * 7 * 32, output_size=num_classes - 1, name='affine1'), tfb.Pad(), lambda x: tfd.Categorical(logits=x, dtype=tf.int32), ], name='DNN') # dnn_eval = tfn.Sequential([l for l in dnn.layers if l is not maybe_batchnorm], # name='dnn_eval') dnn_eval = dnn print(dnn.summary()) def compute_loss_dnn(x, y, is_eval=False): d = dnn_eval(x) if is_eval else dnn(x) nll = -tf.reduce_mean(d.log_prob(y), axis=-1) return nll, d train_iter_dnn = iter(train_dataset) def train_loss_dnn(): x, y = next(train_iter_dnn) nll, _ = compute_loss_dnn(x, y) return nll, None opt_dnn = tf.optimizers.Adam(learning_rate=0.003) fit_dnn = tfn.util.make_fit_op( train_loss_dnn, opt_dnn, dnn.trainable_variables, grad_summary_fn=lambda gs: tf.nest.map_structure(tf.norm, gs)) eval_iter_dnn = iter(eval_dataset.batch(2000).repeat()) @tfn.util.tfcompile def eval_dnn(threshold=None): x, y = next(eval_iter_dnn) loss, d = compute_loss_dnn(x, y, is_eval=True) avg_acc, avg_calibration_error, _ = compute_eval_stats( y, d, threshold=threshold) return loss, (avg_acc, avg_calibration_error) num_train_epochs = 2.
# @param { isTemplate: true} num_evals = 25 # @param { isTemplate: true} dur_sec = dur_num = 0 num_train_steps = int(num_train_epochs * train_size) for i in range(num_train_steps): start = time.time() trn_loss, _, g = fit_dnn() stop = time.time() dur_sec += stop - start dur_num += 1 if i % int(num_train_steps / num_evals) == 0 or i == num_train_steps - 1: tst_loss, (tst_acc, tst_ece) = eval_dnn() f, x = zip(*[ ('it:{:5}', opt_dnn.iterations), ('ms/it:{:6.4f}', dur_sec / max(1., dur_num) * 1000.), ('tst_acc:{:6.4f}', tst_acc), ('tst_ece:{:6.4f}', tst_ece), ('trn_loss:{:6.4f}', trn_loss), ('tst_loss:{:6.4f}', tst_loss), ('sum_norm_grad:{:6.4f}', sum(g)), ]) print(' '.join(f).format(*[getattr(x_, 'numpy', lambda: x_)() for x_ in x])) sys.stdout.flush() dur_sec = dur_num = 0 # if i % 1000 == 0 or i == maxiter - 1: # dnn.save('/tmp/dnn.npz') #@title Run Eval eval_iter_dnn = iter(eval_dataset.batch(eval_size)) @tfn.util.tfcompile def compute_log_probs_dnn(): x, y = next(eval_iter_dnn) lp = tf.math.log_softmax(dnn_eval(x).logits, axis=-1) return x, y, lp x, y, log_probs = compute_log_probs_dnn() max_log_probs = tf.reduce_max(log_probs, axis=-1) idx = tf.argsort(max_log_probs) x = tf.gather(x, idx) y = tf.gather(y, idx) log_probs = tf.gather(log_probs, idx) max_log_probs = tf.gather(max_log_probs, idx) yhat = tf.argmax(log_probs, axis=-1) d = tfd.Categorical(logits=log_probs) hit = tf.cast(tf.equal(y, tf.cast(yhat, y.dtype)), tf.int32) #threshold = 1.-1e-5 #keep = tf.where(max_log_probs >= np.log(threshold))[..., 0] keep = tf.range(500, eval_size) x_keep = tf.gather(x, keep) y_keep = tf.gather(y, keep) yhat_keep = tf.gather(yhat, keep) log_probs_keep = tf.gather(log_probs, keep) max_log_probs_keep = tf.gather(max_log_probs, keep) hit_keep = tf.gather(hit, keep) d_keep = tfd.Categorical(logits=log_probs_keep) ( avg_acc, ece, (acc, conf, cnt, edges, bucket), ) = tfn.util.tfcompile(lambda: compute_eval_stats(y, d))() (
avg_acc_keep, ece_keep, (acc_keep, conf_keep, cnt_keep, edges_keep, bucket_keep), ) = tfn.util.tfcompile(lambda: compute_eval_stats(y_keep, d_keep))() print('Number of examples undecided: {}'.format(eval_size - tf.size(keep))) print('Accurary before excluding undecided ones: {}'.format(avg_acc)) print('Accurary after excluding undecided ones: {}'.format(avg_acc_keep)) print('ECE before/after.', ece.numpy(), ece_keep.numpy()) print('Most uncertain:') ss = (6,12); n = np.prod(ss); s = ss+image_shape tfn.util.display_imgs( tf.reshape(x[:n], s), yhuman[tf.reshape(y[:n], ss).numpy()]) print(tf.reshape(hit[:n], ss).numpy()) print(yhuman[tf.reshape(yhat[:n], ss).numpy()]) print('Least uncertain:') tfn.util.display_imgs( tf.reshape(x[-n:], s), yhuman[tf.reshape(y[-n:], ss).numpy()]) print(tf.reshape(hit[-n:], ss).numpy()) print(yhuman[tf.reshape(yhat[-n:], ss).numpy()]) b = max_log_probs + tf.math.log1p(-max_log_probs); b=tf.boolean_mask(b,b<-1e-12) sns.distplot(tf.math.exp(b).numpy(), bins=20); #@title Avg One-vs-Rest AUC try: dnn_auc = sklearn_metrics.roc_auc_score( y_keep, log_probs_keep, average='macro', multi_class='ovr') print('Avg per class AUC:\n{}'.format(dnn_auc)) except TypeError: dnn_auc = np.array([ sklearn_metrics.roc_auc_score(tf.equal(y_keep, i), log_probs_keep[:, i]) for i in range(num_classes)]) print('Avg per class AUC:\n{}'.format(dnn_auc.mean())) ```
github_jupyter
# Structured Dataset Profiling with Lens ### Find the code This notebook can be found on [github](https://github.com/credo-ai/credoai_lens/blob/develop/docs/notebooks/lens_demos/dataset_profiling.ipynb). ## Contents 1. [What is Covered](#What-is-Covered) 2. [Introduction](#Introduction) 3. [Dataset](#Dataset) 4. [Running Lens](#Running-Lens) ## What is Covered <a name="What-is-Covered"></a> * **Domain:** * Applications that rely on structured datasets. * **ML task:** * Exploratory data analysis for model training, validation, and testing with structured datasets. ## Introduction <a name="Introduction"></a> Structured data conforms to a tabular format with relationships between the different rows and columns. Many machine learning models are trained, validated, and tested on structured datasets. Exploratory analysis of a structured dataset provides insights for a more informed assessment of the ML model. The Lens Dataset Profiling module uses pandas_profiling to enable this analysis by generating data profiles. ## Dataset <a name="Dataset"></a> The [Census Adult Dataset](https://archive.ics.uci.edu/ml/datasets/adult) is from the Census Bureau, and the label is whether a given adult makes more than $50K a year based on attributes such as sex and education. The dataset provides 13 input variables that are a mixture of categorical, ordinal, and numerical data types. The complete list of variables is as follows: Age, Workclass, Education, Education Number of Years, Marital-status, Occupation, Relationship, Race, Sex, Capital-gain, Capital-loss, Hours-per-week, and Native-country. ``` import numpy as np # Imports for demo data from credoai.data import fetch_censusincome # Base Lens imports import credoai.lens as cl import credoai.assessment as assess cl.set_logging_level('info') # set default format for image displays. 
Change to 'png' if 'svg' is failing %config InlineBackend.figure_formats = ['svg'] data = fetch_censusincome() df = data['data'].copy() df['target'] = data['target'] df.head(3) ``` Prepare missing values ``` df = df.replace("\\?", np.nan, regex=True) ``` ## Running Lens <a name="Running-Lens"></a> The first step is creating a Lens CredoData artifact. This will hold the structured dataset and the meta information needed for doing the assessment. CredoData has the following parameters: `name` : an arbitrary name that you want to assign to the object (str) `data` : dataset dataframe that includes all features and labels (pd.DataFrame) `label_key` : name of the label column in your data, like "label" (`str`) ``` label_key = 'target' categorical_features_keys = ['workclass', 'education', 'marital.status', 'occupation', 'relationship', 'race', 'sex', 'native.country'] # Set up the data artifact credo_data = cl.CredoData(name='census-income', data=df, label_key=label_key) lens = cl.Lens(data=credo_data, assessments=[assess.DatasetProfilingAssessment]) results = lens.run_assessments().display_results() ```
github_jupyter
# Training a Linear Model to Predict Length of Stay The [Population Health Management Solution](https://github.com/Azure/cortana-intelligence-population-health-management/tree/master/Azure%20Data%20Lake) uses U-SQL queries in Data Lake Analytics to apply trained models to input data. The solution copies pre-trained models to an Azure Data Lake Store account (under the folder `forphmdeploymentbyadf`) for this purpose. In this notebook, we provide the code demonstrating how xgboost models can be created for this purpose. (See the notebook named "Length Of Stay Models -- lm" for the code used to create the linear models employed by the solution.) Running this notebook will create the models used in this solution and store them in a folder named `myxgboostLOSmodelsfolder` in your current working directory. ## Outline <a id="BackToTop"></a> * [Problem Description](#probdesc) * [Get Data](#getdata) * [Data Description](#datadesc) * [Data Exploration](#dataexp) * [Feature Engineering](#featureeng) * [Create the Model](#model) * [Results](#results) ## Problem Description <a id="probdesc"></a> ### Why predict the length of a hospital stay? Recent legislative changes have standardized payments for procedures performed, regardless of the number of days a patient actually spends in the hospital. Hospitals are therefore strongly incentivized to use resources more efficiently and find ways to accommodate more patients with the same volume of resources. An accurate prediction of each patient's length of stay can help hospitals: 1. Manage bed occupancy 2. Effectively schedule elective admissions 3. Improve patient satisfaction during their hospital stay Extended lengths of stay costs hospitals millions of dollars a year. By identifying patients at risk for an extended stay, they can take proactive measures to formulate a treatment plan to reduce the expected length of stay. ### When should the prediction be used? 
Hospitals want to predict the length of each patient's stay at the time of admission and provide this information to the admitting nurse or staff. Our model is trained using encounter-level records for a million or so patients from 23 hospitals (obtained from the Healthcare Cost and Utilization Project, or [HCUP](https://www.hcup-us.ahrq.gov/)) and is suitable for use on similar patient populations, though we recommend that hospitals retrain the model using their own historical patient data for best results. To be applied to newly-admitted patients, the model must be trained using only features that are available for each patient at the time of their admission. [Back To Top](#BackToTop) <a id="getdata"></a> ## Get Data Here, we download copies of the training data (~2 GB) from the web to your current working directory. This will likely take a few minutes, depending on your bandwidth: ``` # clear workspace and collect garbage rm(list=ls()) gc() Sys.time() url1 <- "https://phm.blob.core.windows.net/models/core_data.csv" url2 <- "https://phm.blob.core.windows.net/models/charges_data.csv" url3 <- "https://phm.blob.core.windows.net/models/severity_data.csv" url4 <- "https://phm.blob.core.windows.net/models/dxpr_data.csv" pathd <- getwd() dest1 <- paste(pathd, "/core_data.csv", sep='') dest2 <- paste(pathd, "/charges_data.csv", sep='') dest3 <- paste(pathd, "/severity_data.csv", sep='') dest4 <- paste(pathd, "/dxpr_data.csv", sep='') download.file(url1, dest1) download.file(url2, dest2) download.file(url3, dest3) download.file(url4, dest4) Sys.time() ``` We now read the input files into memory (this will likely take a few minutes, given the combined size of the files): ``` Sys.time() dat_core <- read.csv(dest1) dat_chrg <- read.csv(dest2) dat_sevr <- read.csv(dest3) dat_dxpr <- read.csv(dest4) Sys.time() ``` Confirm that the input files were downloaded and read fully by checking the data dimensions: ``` dim(dat_core) # expected: 1103172 x 248 dim(dat_chrg) # expected: 
1103172 x 155 dim(dat_sevr) # expected: 1103172 x 30 dim(dat_dxpr) # expected: 1103172 x 180 ``` Merge the input files and delete the intermediate variables: ``` dat <- merge(merge(merge(dat_sevr, dat_chrg, by="KEY"), dat_core, by="KEY"), dat_dxpr, by="KEY") dim(dat) # expected: 1103172 x 610 rm(dat_core, dat_chrg, dat_sevr, dat_dxpr) ``` [Back To Top](#BackToTop) <a id="datadesc"></a> ## Data Description We list the columns contained in the dataset below. A full description of the columns in this dataset can be found in the [data dictionary](https://www.hcup-us.ahrq.gov/db/state/siddist/siddistvarnote2013.jsp). ``` names(dat) ``` We perform a few sanity checks on the integrity of the downloaded data, ensuring that the month column contains the values for months and so on: ``` unique(dat$AYEAR) unique(dat$AMONTH) length(unique(dat$AMONTH)) unique(dat$FEMALE) ``` The dataset currently contains 610 columns, but some are not suitable for use with our model because they are not available at the time of a patient's admission (when the length-of-stay prediction will be made). 
We will use just the following 135 columns for building the length-of-stay model: ``` cols_demographic <- c('AGE', 'FEMALE', 'RACE', 'MEDINCSTQ', 'PSTATE', 'ZIP', 'HOSPST', 'PAY1', 'PAY2', 'PAY3') cols_admitinfo <- c('KEY', 'VisitLink', 'DSHOSPID', 'ATYPE', 'AMONTH', 'PointOfOriginUB04', 'TRAN_IN') cols_Diagnosis_present_on_admission <- grep('DXPOA', names(dat), value=T) cols_ECode_present_on_admission <- grep('E_POA', names(dat), value=T) cols_ICD9_CM_Chronic_Condition_Indicators <- grep('^CHRON[0-9]', names(dat), value=T) cols_Chronic_Condition_Indicators_BodySystem <- grep('^CHRONB', names(dat), value=T) cols_comorbidity_measure_ICD9_CMcodes <- grep('^CM_', names(dat), value=T) cols_primaryDiagnosis <- c('DX1', 'DXCCS1', 'DXMCCS1') cols_counts <- c('NDX', 'NCHRONIC') cols_Target <- c('LOS') cols4los <- c(cols_demographic, cols_admitinfo, cols_Diagnosis_present_on_admission, cols_ECode_present_on_admission, cols_ICD9_CM_Chronic_Condition_Indicators, cols_Chronic_Condition_Indicators_BodySystem, cols_comorbidity_measure_ICD9_CMcodes, cols_primaryDiagnosis, cols_counts, cols_Target) length(cols4los) ``` We now reduce the dataset to just the columns of interest: ``` dat4los <- dat[,cols4los] dim(dat4los) ``` [Back To Top](#BackToTop) ## Data Exploration <a id="dataexp"></a> ### Categorical features The dataset contains 13 categorical features. 
'DSHOSPID', 'FEMALE', 'RACE', 'ATYPE', 'AMONTH', 'PointOfOriginUB04', 'TRAN_IN', 'MEDINCSTQ', 'PSTATE', 'PAY1', 'DXCCS1', 'DXMCCS1', 'ZIP' Below, we enumerate some of these features and show their possible values: ``` cat_cols <- c('DSHOSPID', 'FEMALE', 'RACE', 'ATYPE', 'AMONTH', 'PointOfOriginUB04', 'TRAN_IN', 'MEDINCSTQ', 'PSTATE', 'PAY1', 'DXMCCS1') apply(dat4los[, cat_cols], 2, FUN=function(x){length(unique(x))}) apply(dat4los[, cat_cols], 2, FUN=function(x){unique(x)}) ``` ### Distribution of Length Of Stay Below, we plot the distribution of values for our model's prediction target, `LOS` (length of stay): ``` losbreaks <- c(0, 2, 4, 6, 10, 20, 365) loslabels <- c('vshort', 'short', 'medium', 'long', 'vlong', 'extreme') losdist <- data.frame(table(cut(as.numeric(dat4los$LOS), breaks=losbreaks, labels=loslabels))) options(repr.plot.width=7, repr.plot.height=6) bp <- barplot(losdist$Freq, names.arg=losdist$Var1, main="Length of Stay") bp legend("topright", fill=c("grey"), c('vshort (0-2d)', 'short (2-4d)', 'medium (4-6d)', 'long (6-10d)', 'vlong (10-20d)', 'extreme (20-365d)')) ``` [Back To Top](#BackToTop) <a id="featureeng"></a> ## Feature Engineering Create some additional features from raw data. ### Count diagnoses present on admission Find the columns named DXPOA1 - DXPOA25, convert them from string to integer values, perform the count, then delete the original DXPOA columns: ``` indDXPOA <- grep('DXPOA', names(dat4los)) dat4los[, indDXPOA] <- apply(dat4los[, indDXPOA], 2, FUN=function(x){ifelse(x=='Y', 1, x)}) dat4los[, indDXPOA] <- apply(dat4los[, indDXPOA], 2, FUN=function(x){ifelse(x==1, x, 0)}) dat4los$num_DXPOA <- apply(dat4los[, indDXPOA], 1, FUN=function(x){length(x[x=='1'])}) dat4los <- dat4los[, -indDXPOA] ``` ### Count external causes of injury present on admission Delete the original binary indicators. 
``` indE_POA <- grep('E_POA', names(dat4los)) dat4los[, indE_POA] <- apply(dat4los[, indE_POA], 2, FUN=function(x){ifelse(x=='Y', 1, x)}) dat4los[, indE_POA] <- apply(dat4los[, indE_POA], 2, FUN=function(x){ifelse(x==1, x, 0)}) dat4los$num_E_POA <- apply(dat4los[, indE_POA], 1, FUN=function(x){length(x[x=='1'])}) dat4los <- dat4los[, -indE_POA] ``` ### Count the number of body systems affected by chronic conditions Delete the original binary indicators (both at the body system and individual condition levels). ``` indchronB <- grep('^CHRONB[0-9]', names(dat4los)) dat4los$num_uCHRONB <- apply(dat4los[indchronB], 1, FUN=function(x){length(unique(x[!is.na(x)]))}) dat4los <- dat4los[, -indchronB] # Won't use these for anything either indCHRON <- grep('^CHRON[0-9]', names(dat4los)) dat4los <- dat4los[, -indCHRON] ``` ### Count number of payers (medicare, medicaid, private insurance, ...) Delete the mostly-missing columns `PAY2` and `PAY3` when done. ``` indPAY <- grep('PAY', names(dat4los), value=T) dat4los$num_PAY <- apply(dat4los[, grep('PAY', names(dat4los), value=T)], 1, FUN=function(x){length(x[!is.na(x)])}) dat4los$PAY2 <- NULL dat4los$PAY3 <- NULL ``` ### Count pre-existing (comorbid) conditions Delete the original binary indicators when done. ``` indCM <- grep('CM_', names(dat4los)) dat4los$num_CM <- apply(dat4los[, indCM], 1, FUN=function(x){(length(x[x==1]))}) dat4los <- dat4los[, -indCM] ``` ### Remove rows with invalid point of origin values ``` indgood <- grep('[0-9A-Za-z]', dat4los$PointOfOriginUB04) dat4los <- dat4los[indgood, ] ``` ### Remove `HOSPT` and `DX1` columns We will apply the model to simulated data where there is only one state, so we will derive no benefit from including the hospital state (`HOSPST`) column. We will also remove the `DX1` column (we'll use the more specific `DXCCS1` feature instead). 
``` dat4los <- dat4los[, !names(dat4los) %in% c('HOSPST')] dat4los <- dat4los[, !names(dat4los) %in% c('DX1')] ``` ### Keep just the first three digits of the zip code Allows us to group hospitals by larger geographical regions. ``` dat4los$ZIP3 <- substr(dat4los$ZIP, 1, 3) dat4los$ZIP <- NULL ``` After this, we are left with only 24 columns for modeling: ``` dim(dat4los) ``` ## Cast features and save the cleaned data Now we ensure that the data types for these columns are properly defined as categorical or numeric: ``` # make these columns categorical cat_cols <- c('DSHOSPID', 'FEMALE', 'RACE', 'ATYPE', 'AMONTH', 'PointOfOriginUB04', 'TRAN_IN', 'MEDINCSTQ', 'PSTATE', 'PAY1', 'DXCCS1', 'DXMCCS1', 'ZIP3') makecatg <- sapply(dat4los[, cat_cols], FUN=function(x){as.factor(x)}) makecatg <- as.data.frame(makecatg) dat4los[, cat_cols] <- makecatg # make these columns numeric cat_num <- c('AGE', 'LOS', 'NDX', 'NCHRONIC', 'num_DXPOA', 'num_E_POA', 'num_uCHRONB', 'num_PAY', 'num_CM') makenum <- sapply(dat4los[, cat_num], FUN=function(x){as.numeric(x)}) makenum <- as.data.frame(makenum) dat4los[, cat_num] <- makenum data_mod <- dat4los str(data_mod) ``` As a sanity check, we enumerate the possible values of the categorical features specifically: ``` levelinfo <- sapply(dat4los[, cat_cols], FUN=function(x){unique(x)}) str(levelinfo) levelinfo[['FEMALE']] levelinfo[['RACE']] save(levelinfo, file='listoflevels.Rdata') ``` ### Define function to evaluate model performance Computes and returns the following common regression evaluation metrics: - Mean Absolute Error (MAE) - Root Mean Squared Error (RMSE) - Coefficient of determination (Rsquare), as a percentage - Relative Absolute Error (RAE) - Relative Squared Error (RSE) ``` regression_res <- function(Target, Prediction){ res <- data.frame(Target=Target, Scored.Labels=Prediction) res$delta <- abs(res$Target - res$Scored.Labels) res$percdelta <- res$delta / res$Target n <- nrow(res) MAE <- sum(res$delta) / n RMSE <- 
sqrt(sum(res$delta ^ 2) / n) Rsquare <- (cor(res$Target, res$Scored.Labels) ^ 2) * 100 RAE <- sum(res$delta) / sum(abs(mean(res$Target) - res$Target)) RSE <- sum(res$delta ^ 2) / sum(abs(mean(res$Target) - res$Target) ^ 2) results <- data.frame(MAE=MAE, RMSE=RMSE, RAE=RAE, RSE=RSE, Rsq=Rsquare) return(results) } #create a data frame for storing training and testing metrics res_train <- data.frame(model_name='hosp_X_LOSmodel', MAE=0, RMSE=0, RAE=0, RSE=0, Rsq=0) res_train <- res_train[-1,] res_test <- data.frame(model_name='hosp_X_LOSmodel', MAE=0, RMSE=0, RAE=0, RSE=0, Rsq=0) res_test <- res_test[-1,] ``` [Back To Top](#BackToTop) ## Create the Models <a id="model"></a> We will create 10 models for 10 individual hospitals and an additional model for all the other hospitals. ``` selected_hosp <- c('hosp_1', 'hosp_2', 'hosp_3', 'hosp_4', 'hosp_5', 'hosp_6', 'hosp_7', 'hosp_8', 'hosp_9', 'hosp_10') allotherhosp <- unique(data_mod$DSHOSPID)[!unique(data_mod$DSHOSPID) %in% selected_hosp] allotherhosp <- as.character(allotherhosp) allotherhosp ``` For each model, we will split the available data into training and test sets with the following approach: - Attempt to evenly split the data in each level of `DXCCS1` (the categorical feature with the most levels) to create an initial train/test partitioning. - Because some categorical levels are rare, we may find that the training dataset does not contain some levels that are present in the test set. Remedy this by moving any such rows from the test set to the training set. 
- Ensure that no patients are present in both the training and test sets (by transferring half of any offending patients to the training set, and the other half to the test set) - Drop columns that we do not want to use for testing or training: - `myrownum`, `KEY`, and `VisitLink`, because they are uninformative and could result in overfitting - `DSHOSPID`, because most of our models will be trained on data from a single hospital ``` # Create a folder in current working directory to save the trained models. wrdir <- getwd() modeldir <- 'xgboostLOSmodelsfolder' dir.create(file.path(wrdir, modeldir), showWarnings=FALSE) modelsLocation <- paste(wrdir, modeldir, sep='/') modelsLocation <- paste0(modelsLocation, '/') # create a data frame for storing training and testing metrics res_train <- data.frame(model_name='hosp_X_LOSmodel', MAE=0, RMSE=0, RAE=0, RSE=0, Rsq=0) res_train <- res_train[-1,] res_test <- data.frame(model_name='hosp_X_LOSmodel', MAE=0, RMSE=0, RAE=0, RSE=0, Rsq=0) res_test <- res_test[-1,] shaheen_lib_path = 'C:/dsvm/notebooks/HealthcareSolution/shaheen_lib' library("caret", lib.loc=shaheen_lib_path) library("xgboost", lib.loc=shaheen_lib_path) if(!require(xgboost)) { install.packages("xgboost") library(xgboost) } if(!require(caret)) { install.packages("caret") library(caret) } ``` Train all of the models and store data on their performance: ``` for (h in 1:(length(selected_hosp)+1)){ # subset data for the hospital to build a model for cat('h=',h,'\n') if(h==(length(selected_hosp)+1)){ cat('allotherhosp',allotherhosp,'\n') sub_data_mod <- subset(data_mod,data_mod$DSHOSPID %in% allotherhosp) cat(unique(as.character(sub_data_mod$DSHOSPID)),'\n') model_name <- paste('allotherhosp','_LOSmodel',sep='') model_name <- paste0(modelsLocation, model_name) cat('model_name =',model_name,'\n') } else { cat('selected_hosp[h]',selected_hosp[h],'\n') sub_data_mod <- subset(data_mod,data_mod$DSHOSPID %in% selected_hosp[h]) 
cat(unique(as.character(sub_data_mod$DSHOSPID)),'\n') model_name <- paste(unique(as.character(sub_data_mod$DSHOSPID)),'_LOSmodel',sep='') model_name <- paste0(modelsLocation, model_name) cat('model_name =',model_name,'\n') } dim(sub_data_mod) # sub_data_mod contains data for a hospital, build LOS model for this hospital and save the model with name model_name sub_data_mod <- sub_data_mod[complete.cases(sub_data_mod),] # ensure all rows are complete # convert categorical variable into numeric vector using One Hot Encoding # --- one-hot-encoding categorical features ohe_feats <- cat_cols oheformula <- as.formula(paste('~',paste(cat_cols,collapse=' + '),sep=' ')) dummies <- dummyVars(oheformula, data = sub_data_mod) df_all_ohe <- as.data.frame(predict(dummies, newdata = sub_data_mod)) # takes time df_all_combined <- cbind(sub_data_mod[,-c(which(colnames(sub_data_mod) %in% ohe_feats))],df_all_ohe) # split df_all_combined into train and test # 60% of the sample size for training smp_size <- floor(0.6 * nrow(df_all_combined)) set.seed(18) ## set the seed split.index <- sample(seq_len(nrow(df_all_combined)), size = smp_size,replace=F) train_xgb <- df_all_combined[split.index, ] test_xgb <- df_all_combined[-split.index, ] #now have the data split into training and testing ~60% training and 40% testing #================================================================== #ensuring unique patients in train and test #================================================================== # check how many patients overlap # table(unique(test_xgb$VisitLink) %in% unique(train_xgb$VisitLink)) # table(unique(train_xgb$VisitLink) %in% unique(test_xgb$VisitLink)) # patient ids that occur in both train and test vk <- unique(test_xgb$VisitLink)[unique(test_xgb$VisitLink) %in% unique(train_xgb$VisitLink)] vk1 <- vk[1:round(length(vk)/2)] vk2 <- vk[(round(length(vk)/2)+1) : length(vk)] torm4mtest_xgb <- which(test_xgb$VisitLink %in% vk1) # patient ids to remove from test train_xgb <- 
rbind(train_xgb,test_xgb[torm4mtest_xgb,]) # append rows with these patients to train test_xgb <- test_xgb[-torm4mtest_xgb,] # remove these patient rows from test torm4mtrain_xgb <- which(train_xgb$VisitLink %in% vk2) # patient ids to remove from train test_xgb <- rbind(test_xgb,train_xgb[torm4mtrain_xgb,]) # append rows with these patients to test train_xgb <- train_xgb[-torm4mtrain_xgb,] # remove these patient rows from train # confirm unique patients in training and test data # table(unique(test_xgb$VisitLink) %in% unique(train_xgb$VisitLink)) # table(unique(train_xgb$VisitLink) %in% unique(test_xgb$VisitLink)) #----------------------------------------- # remove unnecessary cols train_xgb$myrownum <- NULL test_xgb$myrownum <- NULL train_xgb$KEY <- NULL test_xgb$KEY <- NULL train_xgb$VisitLink <- NULL test_xgb$VisitLink <- NULL train_xgb$DSHOSPID <- NULL test_xgb$DSHOSPID <- NULL # now have the data split into training and testing ~60% training and ~40% testing #--------------------------------------------- labels_xgb <- train_xgb$LOS #remove LOS column from training data grep('LOS',names(train_xgb)) train_xgb_2 <- train_xgb[-grep('LOS',names(train_xgb))] #---------------------------------------------------------------- #Tune and Run the model rm(xgb_LOSmod) set.seed(18) xgb_LOSmod <- xgboost(data = data.matrix(train_xgb_2), label = labels_xgb, booster = "gbtree", eta = 0.1, gamma = 0, max_depth = 3, nround=25, subsample = 0.5, colsample_bytree = 0.5, eval_metric = "rmse", objective = "reg:linear" ) # save model to binary local file xgb.save(xgb_LOSmod, model_name) # should return TRUE # Object "xgb_LOSmod" is an xgboost model. 
# To load binary model to R we would - xgb.load(model_name) # check how the model does on test data - save in res_test y_pred <- predict(xgb_LOSmod, data.matrix(test_xgb)) Target <- test_xgb$LOS res_test_xgboost <- regression_res(Target,y_pred) tst <- data.frame(model_name=as.character(as.data.frame(strsplit(model_name,split = '/xgboostLOSmodelsfolder/'))[2,1])) tst <- cbind(tst,res_test_xgboost) res_test <- rbind(res_test, tst) # model results - training data - save in res_train y_predTr <- predict(xgb_LOSmod, data.matrix(train_xgb)) TargetTr <- train_xgb$LOS res_train_xgboost <- regression_res(TargetTr,y_predTr) trn <- data.frame(model_name=as.character(as.data.frame(strsplit(model_name,split = '/xgboostLOSmodelsfolder/'))[2,1])) trn <- cbind(trn,res_train_xgboost) res_train <- rbind(res_train, trn) } ``` [Back To Top](#BackToTop) ## Model Performance <a id="results"></a> ``` cat('Performance on training data\n') res_train cat('Performance on test data\n') res_test ```
github_jupyter
<a href="https://colab.research.google.com/github/EmilSkaaning/DeepStruc/blob/main/DeepStruc.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # DeepStruc **Github:** https://github.com/EmilSkaaning/DeepStruc **Paper:** DeepStruc: Towards structure solution from pair distribution function data using deep generative models **Questions:** andy@chem.ku.dk or etsk@chem.ku.dk Welcome to DeepStruc, a Deep Generative Model (DGM) that learns the relation between PDF and atomic structure and thereby solves a structure based on a PDF! This script guides you through a simple example of how to use DeepStruc to predict a structure from a given PDF. Afterwards, you can upload a PDF and use DeepStruc to predict the structure. # First install requirements for DeepStruc (this step takes 5 - 10 minutes) ``` %%capture !git clone https://github.com/EmilSkaaning/DeepStruc !pip3 install torch==1.10.1+cpu torchvision==0.11.2+cpu torchaudio==0.10.1+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html !pip install pytorch_lightning torch-geometric==1.7.2 torch-scatter !pip3 install torch-sparse -f https://data.pyg.org/whl/torch-1.10.1+cpu.html !pip install matplotlib==3.4.3 ase nglview ipywidgets from google.colab import output, files from ase.io import read from ase.visualize import view from IPython.display import Image import shutil import os os.chdir("DeepStruc") ``` # Example of how to use DeepStruc on a simulated dataset We here provide an example of how to use DeepStruc on simulated data. The script can take either a single PDF or a directory of PDFs as input. Be aware that the PDF(s) will be made to have an r-range between 2 - 30 Å in steps of 0.01 Å (2800 points PDF). Any data outside this range will not be used. Check the data format of our datasets (often made with PDFGui) if in doubt. ``` PDFFile = "/data/PDFs_simulated/FCC_h_3_k_6_l_7.gr" # Path to PDF(s). 
Nstructures = 10 # Number of samples/structures generated for each unique PDF structure = 0 # Which of the Nstructures to visualize. (Goes from 0 to Nstructures - 1) sigma = 3 # Sample to '-s' sigma in the normal distribution plot = True # Plots sampled structures on top of DeepStruc training data. ``` **Uncomment the following line to use DeepStruc on experimental PDF(s) from your local computer.** <br> Some browsers do not support this upload option. Use Google Chrome or simply upload the file manually in the left menu in the 'DeepStruc-main' folder. ``` #PDFFile = list(files.upload())[0] # Upload PDF(s) from local computer ``` ## Predict with DeepStruc ``` # Use DeepStruc on the uploaded PDF(s) !python predict.py -d $PDFFile -n $Nstructures -s $sigma -p $plot -i $structure # Get the latest results all_subdirs = [d for d in os.listdir('.') if os.path.isdir(d)] latest_subdir = max(all_subdirs, key=os.path.getmtime) # Plot the latent space Image(latest_subdir + '/PDFs.png', width = 480, height = 360) ``` **The raw input PDF and the normalised PDF.** The raw input PDF is normalised to have the highest peak at G(r) = 1 and to be in between r = 2 Å and 30 Å. ## Visualization of the two-dimensional latent space (compressed feature space of the structures) ``` # Plot the latent space Image(latest_subdir + '/ls.png', width = 900, height = 360) ``` **The two-dimensional latent space with location of the input.** The size of the points relates to the size of the embedded structure. Each point is coloured according to its structure type, FCC (light blue), octahedral (dark grey), decahedral (orange), BCC (green), icosahedral (dark blue), HCP (pink) and SC (red). Each point in the latent space corresponds to a structure based on its simulated PDF. Test data points are plotted on top of the training and validation data, which is made semi-transparent. 
The latent space locations of the reconstructed structures from the input are shown with black markers and the specific reconstructed structure that is shown in the next box is shown with a black and white marker. ## Visualization of a reconstructed structure ``` # Get folder of structures subfolder = [f.path for f in os.scandir(latest_subdir) if f.is_dir()] # Define which structure to plot and plot it output.enable_custom_widget_manager() view(read(subfolder[0] + "/" + os.listdir(subfolder[0])[structure]) , viewer='ngl') ``` **The reconstructed structure from the input.** The reconstructed structure is indicated in the latent space above using a black and white marker. **Be aware** that DeepStruc is only designed to predict mono-metallic nanoparticles (MMNP) of up to 200 atoms. If the PDF file is not an MMNP, it is highly likely that DeepStruc will not output a meaningful structure. ## Download the latest results ``` # Download the latest results shutil.make_archive(latest_subdir, 'zip', latest_subdir) files.download(latest_subdir + ".zip") ``` # Cite If you use DeepStruc, please consider citing our paper. Thanks in advance! ``` @article{kjær2022DeepStruc, title={DeepStruc: Towards structure solution from pair distribution function data using deep generative models}, author={Emil T. S. Kjær, Andy S. Anker, Marcus N. Weng, Simon J. L. Billinge, Raghavendra Selvan, Kirsten M. Ø. Jensen}, year={2022}} ``` # LICENSE This project is licensed under the Apache License Version 2.0, January 2004 - see the LICENSE file at https://github.com/EmilSkaaning/DeepStruc/blob/main/LICENSE.md for details.
github_jupyter
``` import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # sns.set_context('paper', font_scale=2) def get_talon_nov_colors(samples=None, how='normal'): c_dict = {'Known': '#009E73', 'ISM': '#0072B2', 'NIC': '#D55E00', 'NNC': '#E69F00', 'Antisense': '#000000', 'Intergenic': '#CC79A7', 'Genomic': '#F0E442'} if how == 'light_40': c_dict = {'Known': '#66c5ab', 'ISM':'#66aad1', 'NIC': '#e69e66', 'NNC': '#f0c566', 'Antisense': '#666666', 'Intergenic': '#e0afca', 'Genomic': '#f6ef8e'} elif how == 'light_20': c_dict = {'Known': '#33b18f', 'ISM':'#338ec1', 'NIC': '#dd7e33', 'NNC': '#ebb233', 'Antisense': '#333333', 'Intergenic': '#d694b9', 'Genomic': '#f3e968'} order = ['Known', 'ISM', 'NIC', 'NNC', 'Antisense', 'Intergenic', 'Genomic'] if samples: keys = c_dict.keys() pop_list = [] for key in keys: if key not in samples: pop_list.append(key) for p in pop_list: del c_dict[p] order = [o for o in order if o in samples] return c_dict, order def compute_prop_support(sj_file, ab_file, opref): sj_df = pd.read_csv(sj_file, sep='\t') ab = pd.read_csv(ab_file, sep='\t') # merge with ab to get novelty info ab = ab[['annot_transcript_id', 'transcript_novelty']] sj_df = sj_df.merge(ab, how='left', left_on='tid', right_on='annot_transcript_id') # count up splice junctions per transcript sjs_per_t = sj_df[['tid', 'sj_id']].groupby('tid').count() sjs_per_t.reset_index(inplace=True) sjs_per_t.rename({'sj_id':'total_sjs'}, axis=1, inplace=True) # groupby transcript id and illumina support sj_df = sj_df[['tid', 'transcript_novelty', 'illumina_support', 'sj_id']].groupby(['tid', 'transcript_novelty', 'illumina_support']).count() sj_df.reset_index(inplace=True) sj_df.rename({'sj_id':'n_sjs'}, axis=1, inplace=True) # merge with total sjs and calc % supported sj_df = sj_df.merge(sjs_per_t, how='left', on='tid') sj_df['perc_supported'] = (sj_df.n_sjs/sj_df.total_sjs)*100 # remove unsupported bois sj_df = sj_df.loc[sj_df.illumina_support == True] # drop antisense, intergenic, 
and genomic cause they bad novs = ['Known', 'ISM', 'NIC', 'NNC'] sj_df = sj_df.loc[sj_df.transcript_novelty.isin(novs)] # plot plot plt.figure(figsize=(8.5,8.5)) # sns.set(font_scale=1.50) # font sizes plt.rc('font', size=14) c_dict, order = get_talon_nov_colors(novs) ax = sns.violinplot(data=sj_df, x='transcript_novelty', y='perc_supported', order=order, palette=c_dict, saturation=1, linewidth=0.5) ax.set_ylabel('% SJs in transcript supported by Illumina') ax.set_xlabel('Isoform Novelty') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) fname = '{}_illumina_sj_support.pdf'.format(opref) plt.savefig(fname) # average and median of each category for n in novs: temp = sj_df.loc[sj_df.transcript_novelty==n] print() print('Mean % Illumina supported SJs for {}: {}'.format(n, temp.perc_supported.mean())) print('Median % Illumina supported SJs for {}: {}'.format(n, temp.perc_supported.median())) def plot_unsupp_sjs_by_nov(sj_file, sj_nov_file, oprefix): sj_df = pd.read_csv(sj_file, sep='\t') nov_df = pd.read_csv(sj_nov_file, sep='\t', header=None, usecols=[0,1,2,3,9], names=['chrom', 'start', 'stop', 'strand', 'novelty']) nov_df['sj_id'] = nov_df.chrom+'_'+nov_df.start.astype(str)+'_'+nov_df.stop.astype(str)+'_'+nov_df.strand.astype(str) nov_df = nov_df[['sj_id', 'novelty']] # limit to just sjs with no illumina support sj_df = sj_df.loc[sj_df.illumina_support == False] # merge with novelty of splice junctions sj_df = sj_df.merge(nov_df, how='left', on='sj_id') sj_df = sj_df[['sj_id', 'novelty']] sj_df.drop_duplicates(inplace=True) # groupby and count # of Illumina-supported SJs per SJ novelty category sj_df = sj_df.groupby('novelty').count() sj_df.reset_index(inplace=True) sj_df.rename({'sj_id':'sj_count'}, axis=1, inplace=True) # plot plot plt.figure(figsize=(8.5,8.5)) plt.rc('font', size=14) # sns.set(font_scale=1.50) c_dict, order = get_talon_nov_colors(['Known', 'NIC', 'NNC']) ax = sns.barplot(data=sj_df, x='novelty', y='sj_count', 
palette=c_dict, hue_order=order, saturation=1) ax.set_ylabel('Number of SJs unsupported by Illumina') ax.set_xlabel('SJ Novelty') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) add_n(ax, sj_df, 'sj_count') fname = '{}_ill_unsupported_sj_novelty.pdf'.format(oprefix) plt.savefig(fname) ``` ## PacBio ``` def add_n(ax, data, feature): total = data[feature].sum() ylim = ax.get_ylim()[1] for p in ax.patches: percentage = '{:,.0f}'.format(p.get_height()) x_scale = 0.03*len(percentage) x = p.get_x() + p.get_width() / 2 - x_scale y = p.get_y() + p.get_height() + ylim*0.01 ax.annotate(percentage, (x, y), size = 15) sj_file = 'pb_GM12878_sj_tid_support.tsv' ab_file = 'pb_ont_talon_abundance_filtered.tsv' compute_prop_support(sj_file, ab_file, 'figures/PB_GM12878') sj_file = 'pb_GM12878_sj_tid_support.tsv' sj_nov_file = 'pb_talon_GM12878_sjs_novelty.tab' plot_unsupp_sjs_by_nov(sj_file, sj_nov_file, 'figures/PB_GM12878') ``` ## ONT ``` sj_file = 'ont_GM12878_sj_tid_support.tsv' ab_file = 'pb_ont_talon_abundance_filtered.tsv' compute_prop_support(sj_file, ab_file, 'figures/ONT_GM12878') sj_file = 'ont_GM12878_sj_tid_support.tsv' sj_nov_file = 'ont_talon_GM12878_sjs_novelty.tab' plot_unsupp_sjs_by_nov(sj_file, sj_nov_file, 'figures/ONT_GM12878') sj_support = 'pb_GM12878_sj_tid_support.tsv' ab = 'pb_ont_talon_abundance_filtered.tsv' sj_nov = 'pb_talon_GM12878_sjs_novelty.tab' nov = comput_suport(sj_support, ab, sj_nov) plot_plot(nov, 'pb_GM12878') sj_support = 'ont_GM12878_sj_tid_support.tsv' ab = 'pb_ont_talon_abundance_filtered.tsv' sj_nov = 'ont_talon_GM12878_sjs_novelty.tab' nov = comput_suport(sj_support, ab, sj_nov) plot_plot(nov, 'ont_GM12878') def comput_suport(sj_support, ab, sj_nov): df = pd.read_csv(sj_support, sep='\t') ab = pd.read_csv(ab, sep='\t') ab = ab[['annot_transcript_id', 'transcript_novelty']] df = df.merge(ab, how='left', left_on='tid', right_on='annot_transcript_id') support_df = df.loc[~df.tid.duplicated()] 
print(len(support_df.index)) support_df = support_df[['tid', 'transcript_novelty']].groupby('transcript_novelty').count() support_df.reset_index(inplace=True) support_df.rename({'tid':'n_tids'}, axis=1, inplace=True) # merge with information about sj support nov_df = pd.read_csv(sj_nov, sep='\t', header=None, usecols=[0,1,2,3,9], names=['chrom', 'start', 'stop', 'strand', 'sj_novelty']) nov_df['sj_id'] = nov_df.chrom+'_'+nov_df.start.astype(str)+'_'+nov_df.stop.astype(str)+'_'+nov_df.strand.astype(str) nov_df = nov_df[['sj_id', 'sj_novelty']] # merge with novelty of splice junctions df = df.merge(nov_df, how='left', on='sj_id') # count sjs per transcript sjs_per_t = df[['tid', 'transcript_novelty', 'sj_id']].groupby(['tid', 'transcript_novelty']).count() sjs_per_t.reset_index(inplace=True) sjs_per_t.rename({'sj_id':'total_sjs'}, axis=1, inplace=True) # what novelty are the remaining ones wrt gencode splice junctions? temp = df[['tid', 'transcript_novelty', 'illumina_support', 'sj_id']].groupby(['tid', 'transcript_novelty', 'illumina_support']).count() temp.reset_index(inplace=True) temp.rename({'sj_id':'n_sjs_illumina'}, axis=1, inplace=True) temp = temp.loc[temp.illumina_support == True] # merge in with sjs per t sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_illumina']], how='left', on='tid') # count the number of sjs per transcript that are supported by illumina or gencode df['ill_or_known'] = (df.illumina_support==True)|(df.sj_novelty=='Known') temp = df[['tid', 'ill_or_known', 'sj_id']].groupby(['tid', 'ill_or_known']).count() temp.reset_index(inplace=True) temp.rename({'sj_id':'n_sjs_ill_known'}, axis=1, inplace=True) temp = temp.loc[temp.ill_or_known == True] # merge in with sjs per t sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_ill_known']], how='left', on='tid') # count the number of sjs/sss per transcript that are supported by illumina or gencode df['ill_known_nic'] = (df.illumina_support==True)|(df.sj_novelty.isin(['NIC', 'Known'])) temp = df[['tid', 
'ill_known_nic', 'sj_id']].groupby(['tid', 'ill_known_nic']).count() temp.reset_index(inplace=True) temp.rename({'sj_id':'n_sjs_ill_known_nic'}, axis=1, inplace=True) temp = temp.loc[temp.ill_known_nic == True] # merge in with sjs per t sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_ill_known_nic']], how='left', on='tid') # fill nans with 0 sjs_per_t.fillna(0, inplace=True) sjs_per_t['total_percent'] = 100 # sjs_per_t['ill_percent'] = (sjs_per_t.n_sjs_illumina/sjs_per_t.total_sjs)*100 # sjs_per_t['ill_known_percent'] = (sjs_per_t.n_sjs_ill_known/sjs_per_t.total_sjs)*100 # sjs_per_t['ill_known_nic_percent'] = (sjs_per_t.n_sjs_ill_known_nic/sjs_per_t.total_sjs)*100 sjs_per_t['full_ill_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_illumina sjs_per_t['full_ill_known_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_ill_known sjs_per_t['full_ill_known_nic_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_ill_known_nic sjs_per_t.head() nov = sjs_per_t[['tid', 'transcript_novelty']].groupby('transcript_novelty').count() nov.reset_index(inplace=True) nov.rename({'tid': 'n_transcripts'},axis=1, inplace=True) temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_support']].groupby(['transcript_novelty', 'full_ill_support']).count() temp.reset_index(inplace=True) temp = temp.loc[temp.full_ill_support == True] temp.rename({'tid': 'ill_support'}, axis=1, inplace=True) temp = temp[['transcript_novelty', 'ill_support']] nov = nov.merge(temp, how='left', on='transcript_novelty') temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_known_support']].groupby(['transcript_novelty', 'full_ill_known_support']).count() temp.reset_index(inplace=True) temp = temp.loc[temp.full_ill_known_support == True] temp.rename({'tid': 'ill_known_support'}, axis=1, inplace=True) temp = temp[['transcript_novelty', 'ill_known_support']] nov = nov.merge(temp, how='left', on='transcript_novelty') temp = sjs_per_t[['tid', 'transcript_novelty', 
'full_ill_known_nic_support']].groupby(['transcript_novelty', 'full_ill_known_nic_support']).count() temp.reset_index(inplace=True) temp = temp.loc[temp.full_ill_known_nic_support == True] temp.rename({'tid': 'ill_known_nic_support'}, axis=1, inplace=True) temp = temp[['transcript_novelty', 'ill_known_nic_support']] nov = nov.merge(temp, how='left', on='transcript_novelty') cols = ['ill', 'ill_known', 'ill_known_nic'] for c in cols: nov['{}_perc'.format(c)] = (nov['{}_support'.format(c)]/nov['n_transcripts'])*100 nov['total_percent'] =100 return nov df = pd.read_csv('pb_GM12878_sj_tid_support.tsv', sep='\t') ab = pd.read_csv('pb_ont_talon_abundance_filtered.tsv', sep='\t') ab = ab[['annot_transcript_id', 'transcript_novelty']] df = df.merge(ab, how='left', left_on='tid', right_on='annot_transcript_id') support_df = df.loc[~df.tid.duplicated()] print(len(support_df.index)) support_df = support_df[['tid', 'transcript_novelty']].groupby('transcript_novelty').count() support_df.reset_index(inplace=True) support_df.rename({'tid':'n_tids'}, axis=1, inplace=True) # merge with information about sj support nov_df = pd.read_csv('pb_talon_GM12878_sjs_novelty.tab', sep='\t', header=None, usecols=[0,1,2,3,9], names=['chrom', 'start', 'stop', 'strand', 'sj_novelty']) nov_df['sj_id'] = nov_df.chrom+'_'+nov_df.start.astype(str)+'_'+nov_df.stop.astype(str)+'_'+nov_df.strand.astype(str) nov_df = nov_df[['sj_id', 'sj_novelty']] # merge with novelty of splice junctions df = df.merge(nov_df, how='left', on='sj_id') # count sjs per transcript sjs_per_t = df[['tid', 'transcript_novelty', 'sj_id']].groupby(['tid', 'transcript_novelty']).count() sjs_per_t.reset_index(inplace=True) sjs_per_t.rename({'sj_id':'total_sjs'}, axis=1, inplace=True) print(len(sjs_per_t.index)) # what novelty are the remaining ones wrt gencode splice junctions? 
temp = df[['tid', 'transcript_novelty', 'illumina_support', 'sj_id']].groupby(['tid', 'transcript_novelty', 'illumina_support']).count() temp.reset_index(inplace=True) temp.rename({'sj_id':'n_sjs_illumina'}, axis=1, inplace=True) temp = temp.loc[temp.illumina_support == True] # merge in with sjs per t sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_illumina']], how='left', on='tid') sjs_per_t.head() # count the number of sjs per transcript that are supported by illumina or gencode df['ill_or_known'] = (df.illumina_support==True)|(df.sj_novelty=='Known') temp = df[['tid', 'ill_or_known', 'sj_id']].groupby(['tid', 'ill_or_known']).count() temp.reset_index(inplace=True) temp.rename({'sj_id':'n_sjs_ill_known'}, axis=1, inplace=True) temp = temp.loc[temp.ill_or_known == True] # merge in with sjs per t sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_ill_known']], how='left', on='tid') # count the number of sjs/sss per transcript that are supported by illumina or gencode df['ill_known_nic'] = (df.illumina_support==True)|(df.sj_novelty.isin(['NIC', 'Known'])) temp = df[['tid', 'ill_known_nic', 'sj_id']].groupby(['tid', 'ill_known_nic']).count() temp.reset_index(inplace=True) temp.rename({'sj_id':'n_sjs_ill_known_nic'}, axis=1, inplace=True) temp = temp.loc[temp.ill_known_nic == True] # merge in with sjs per t sjs_per_t = sjs_per_t.merge(temp[['tid', 'n_sjs_ill_known_nic']], how='left', on='tid') # fill nans with 0 sjs_per_t.fillna(0, inplace=True) sjs_per_t['total_percent'] = 100 # sjs_per_t['ill_percent'] = (sjs_per_t.n_sjs_illumina/sjs_per_t.total_sjs)*100 # sjs_per_t['ill_known_percent'] = (sjs_per_t.n_sjs_ill_known/sjs_per_t.total_sjs)*100 # sjs_per_t['ill_known_nic_percent'] = (sjs_per_t.n_sjs_ill_known_nic/sjs_per_t.total_sjs)*100 sjs_per_t['full_ill_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_illumina sjs_per_t['full_ill_known_support'] = sjs_per_t.total_sjs == sjs_per_t.n_sjs_ill_known sjs_per_t['full_ill_known_nic_support'] = sjs_per_t.total_sjs == 
sjs_per_t.n_sjs_ill_known_nic sjs_per_t.head() nov = sjs_per_t[['tid', 'transcript_novelty']].groupby('transcript_novelty').count() nov.reset_index(inplace=True) nov.rename({'tid': 'n_transcripts'},axis=1, inplace=True) nov.head() temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_support']].groupby(['transcript_novelty', 'full_ill_support']).count() temp.reset_index(inplace=True) temp = temp.loc[temp.full_ill_support == True] temp.rename({'tid': 'ill_support'}, axis=1, inplace=True) temp = temp[['transcript_novelty', 'ill_support']] nov = nov.merge(temp, how='left', on='transcript_novelty') temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_known_support']].groupby(['transcript_novelty', 'full_ill_known_support']).count() temp.reset_index(inplace=True) temp = temp.loc[temp.full_ill_known_support == True] temp.rename({'tid': 'ill_known_support'}, axis=1, inplace=True) temp = temp[['transcript_novelty', 'ill_known_support']] nov = nov.merge(temp, how='left', on='transcript_novelty') temp = sjs_per_t[['tid', 'transcript_novelty', 'full_ill_known_nic_support']].groupby(['transcript_novelty', 'full_ill_known_nic_support']).count() temp.reset_index(inplace=True) temp = temp.loc[temp.full_ill_known_nic_support == True] temp.rename({'tid': 'ill_known_nic_support'}, axis=1, inplace=True) temp = temp[['transcript_novelty', 'ill_known_nic_support']] nov = nov.merge(temp, how='left', on='transcript_novelty') nov cols = ['ill', 'ill_known', 'ill_known_nic'] for c in cols: nov['{}_perc'.format(c)] = (nov['{}_support'.format(c)]/nov['n_transcripts'])*100 nov['total_percent'] =100 nov def plot_plot(nov, opref): c_dict, order = get_talon_nov_colors(['Known', 'ISM', 'NIC', 'NNC', 'Intergenic', 'Antisense']) c_dict_40, order = get_talon_nov_colors(['Known', 'ISM', 'NIC', 'NNC', 'Intergenic', 'Antisense'], how='light_40') c_dict_20, order = get_talon_nov_colors(['Known', 'ISM', 'NIC', 'NNC', 'Intergenic', 'Antisense'], how='light_20') # plotting 
plt.figure(figsize=(8.5,8.5)) sns.set(font_scale=1.5, style="whitegrid") # font sizes plt.rc('font', size=14) top_plot = sns.barplot(x='transcript_novelty', y='total_percent', data=nov, color='white', order=order, edgecolor='black') p2 = sns.barplot(x='transcript_novelty', y='ill_known_nic_perc', data=nov, palette=c_dict_40, order=order, edgecolor='black', saturation=1) p2 = sns.barplot(x='transcript_novelty', y='ill_known_perc', data=nov, palette=c_dict_20, saturation=1, order=order, edgecolor='black') bottom_plot = sns.barplot(x='transcript_novelty', y='ill_perc', data=nov, palette=c_dict, saturation=1, order=order, edgecolor='black') topbar = plt.Rectangle((0,0),1,1,fc='white', edgecolor='black') bottombar = plt.Rectangle((0,0),1,1,fc='#0000A3', edgecolor='black') # plt.title('{} SJ Support by Isoform Novelty'.format(args.sample_name)) plt.xlabel('') bottom_plot.set_ylabel("Percent of Isoforms with 100% SJ Support") for ntype, p in zip(order, bottom_plot.patches): height = p.get_height() bottom_plot.text(p.get_x()+p.get_width()/2., height + .3, 'n={}'.format(nov.loc[nov['transcript_novelty']==ntype]['n_transcripts'].values[0]), ha="center") # bottom_plot.set_xticklabels(bottom_plot.get_xticklabels(), fontsize=14) # fontsize of the x and y labels fname = '{}_sj_support_isoform.pdf'.format(opref) plt.savefig(fname) plot_plot(nov, 'PB_GM12878') temp.loc[temp.tid == 'ENCODEHT000217262'] df.loc[df.tid == 'ENCODEHT000209674'] sjs_per_t.head() # wtf is ISM doing here sjs_per_t.loc[sjs_per_t.n_sjs_ill_known != sjs_per_t.n_sjs_ill_known_nic].transcript_novelty.unique() sjs_per_t.loc[sjs_per_t.tid == 'ENCODEHT000274449'] df.loc[df.tid == 'ENCODEHT000274449'] sjs_per_t.loc[(sjs_per_t.n_sjs_ill_known != sjs_per_t.n_sjs_ill_known_nic)&(sjs_per_t.transcript_novelty == 'ISM')] df.loc[df.tid == 'ENCODEHT000230837'] sjs_per_t.loc[sjs_per_t.n_sjs_illumina != sjs_per_t.n_sjs_ill_known] sjs_per_t.loc[sjs_per_t.tid == 'ENCODEHT000274449'] ```
github_jupyter
CER001 - Generate a Root CA certificate ======================================= If a Certificate Authority certificate for the test environment has never been generated, generate one using this notebook. If a Certificate Authority has been generated in another cluster, and you want to reuse the same CA for multiple clusters, then use CER002/CER003 to download and upload the already generated Root CA. - [CER002 - Download existing Root CA certificate](../cert-management/cer002-download-existing-root-ca.ipynb) - [CER003 - Upload existing Root CA certificate](../cert-management/cer003-upload-existing-root-ca.ipynb) Consider using one Root CA certificate for all non-production clusters in each environment, as this reduces the number of Root CA certificates that need to be uploaded to clients connecting to these clusters. Steps ----- ### Parameters ``` import getpass common_name = "SQL Server Big Data Clusters Test CA" country_name = "US" state_or_province_name = "Illinois" locality_name = "Chicago" organization_name = "Contoso" organizational_unit_name = "Finance" email_address = f"{getpass.getuser()}@contoso.com" days = "825" # Max supported validity period on MacOS 10.15+ 'Catalina' (https://support.apple.com/en-us/HT210176) test_cert_store_root = "/var/opt/secrets/test-certificates" ``` ### Common functions Define helper functions used in this notebook. 
``` # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) 
to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportabilty, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) if which_binary == None: if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: return output else: return elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: return output def load_json(filename): """Load a json file from disk and return the contents""" with open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that 
should be applied to the stderr of the running executable""" try: # Load this notebook as json to get access to the expert rules in the notebook metadata. # j = load_json("cer001-create-root-ca.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. # print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: # rules that have 9 elements are the injected (output) rules (the ones we want). Rules # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029, # not ../repair/tsg029-nb-name.ipynb) if len(rule) == 9: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! 
if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']} ``` ### Get the Kubernetes namespace for the big data cluster Get the namespace of the Big Data Cluster use the kubectl command line interface . **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster. - set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio. 
```
# Place Kubernetes namespace name for BDC into 'namespace' variable

# Prefer an explicit override from the environment; otherwise discover the
# namespace by its MSSQL_CLUSTER label via kubectl (through the `run` helper).
if "AZDATA_NAMESPACE" in os.environ:
    namespace = os.environ["AZDATA_NAMESPACE"]
else:
    try:
        namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
    except:
        # Discovery failed — emit actionable HINTs, then re-raise the original error.
        from IPython.display import Markdown
        print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
        display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
        display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
        raise

print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
```

### Create a temporary directory to stage files

```
# Create a temporary directory to hold configuration files

import tempfile

temp_dir = tempfile.mkdtemp()

print(f"Temporary directory created: {temp_dir}")
```

### Helper function to save configuration files to disk

```
# Define helper function 'save_file' to save configuration files to the temporary directory created above
import os
import io

def save_file(filename, contents):
    # Write `contents` as UTF-8 with Unix newlines into temp_dir/filename.
    # newline='\n' keeps the openssl config portable when generated on Windows.
    with io.open(os.path.join(temp_dir, filename), "w", encoding='utf8', newline='\n') as text_file:
        text_file.write(contents)

    print("File saved: " + os.path.join(temp_dir, filename))

print("Function `save_file` defined successfully.")
```

### Certificate configuration file

```
# openssl configuration used to generate the Root CA certificate; the f-string
# interpolates the notebook parameters defined above.
certificate = f"""
[ ca ]
default_ca = CA_default   # The default ca section

[ CA_default ]
default_days = 1000          # How long to certify for
default_crl_days = 30        # How long before next CRL
default_md = sha256          # Use public key default MD
preserve = no                # Keep passed DN ordering

x509_extensions = ca_extensions # The extensions to add to the cert
email_in_dn = no             # Don't concat the email in the DN
copy_extensions = copy       # Required to copy SANs from CSR to cert

[ req ]
default_bits = 2048
default_keyfile = {test_cert_store_root}/cakey.pem
distinguished_name = ca_distinguished_name
x509_extensions = ca_extensions
string_mask = utf8only

[ ca_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_default = {country_name}

stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default = {state_or_province_name}

localityName = Locality Name (eg, city)
localityName_default = {locality_name}

organizationName = Organization Name (eg, company)
organizationName_default = {organization_name}

organizationalUnitName = Organizational Unit (eg, division)
organizationalUnitName_default = {organizational_unit_name}

commonName = Common Name (e.g. server FQDN or YOUR name)
commonName_default = {common_name}

emailAddress = Email Address
emailAddress_default = {email_address}

[ ca_extensions ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
basicConstraints = critical, CA:true
keyUsage = keyCertSign, cRLSign
"""

save_file("ca.openssl.cnf", certificate)
```

### Get name of the 'Running' `controller` `pod`

```
# Place the name of the 'Running' controller pod in variable `controller`

controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)

print(f"Controller pod name: {controller}")
```

### Create folder on controller to hold Test Certificates

```
run(f'kubectl exec {controller} -n {namespace} -c controller -- bash -c "mkdir -p {test_cert_store_root}" ')
```

### Copy certificate configuration to `controller` `pod`

```
import os

cwd = os.getcwd()
os.chdir(temp_dir)  # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line

run(f'kubectl cp ca.openssl.cnf {controller}:{test_cert_store_root}/ca.openssl.cnf -c controller -n {namespace}')

os.chdir(cwd)
```

### Generate certificate

```
# Build the openssl command and run it inside the controller container.
cmd = f"openssl req -x509 -config {test_cert_store_root}/ca.openssl.cnf -newkey rsa:2048 -sha256 -nodes -days {days} -out {test_cert_store_root}/cacert.pem -outform PEM -subj '/C={country_name}/ST={state_or_province_name}/L={locality_name}/O={organization_name}/OU={organizational_unit_name}/CN={common_name}'"

run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "{cmd}"')
```

### Clean up temporary directory for staging configuration files

```
# Delete the temporary directory used to hold configuration files

import shutil

shutil.rmtree(temp_dir)

print(f'Temporary directory deleted: {temp_dir}')

print('Notebook execution complete.')
```

Related
-------

- [CER002 - Download existing Root CA certificate](../cert-management/cer002-download-existing-root-ca.ipynb)
- [CER003 - Upload existing Root CA certificate](../cert-management/cer003-upload-existing-root-ca.ipynb)
- [CER010 - Install generated Root CA locally](../cert-management/cer010-install-generated-root-ca-locally.ipynb)
github_jupyter
# New start......

```
# NOTE(review): this is Colab scratch code — it mixes IPython magics (%cd, !, %%writefile)
# with Python and only runs inside a notebook, not as a plain .py file.
%cd /content/PyHelpers
!ls -a
!git add .
!git commit -m 'commit 1 from colabs'
# !cat '/content/PyHelpers/Libs/OptimalPrime.ipynb'
# !git clone https://github.com/bxck75/PyHelpers.git
import os
import subprocess
from IPython.display import clear_output
!python /content/PyHelpers/__main__.py
# clear_output()
# help('__main__')

%%writefile /content/PyHelpers/__main__.py
import os
import subprocess
from Libs import BigHelp
global _ROOT_FOLDER_
global _LIB_
_ROOT_FOLDER_='/content/PyHelpers/'
_LIB_=_ROOT_FOLDER_+'Libs/'
import subprocess
import importlib.util as impylib
# print(len(H.Me(['globx',_LIB_,'*.py'])))
import argparse
parser = argparse.ArgumentParser()
# NOTE(review): 'defaults' is not a valid add_argument keyword — should be 'default';
# as written this raises TypeError at import time.
parser.add_argument('--dev', defaults=False, help='foo help')
args = parser.parse_args()

class Stimpy_Imp:
    # Loads helper modules from the PyHelpers Libs folder into globals() via importlib.
    def __init__(self,dev=False):
        self.dev = dev
        # NOTE(review): system_cmd is declared without `self` and requires (cmd, args, vals);
        # this zero-argument call fails — confirm intent.
        self.cmd_com = self.system_cmd()
        # main helper loading
        self.RootHelperFile ='BigHelp'
        self.H = self.load_lib(self.RootHelperFile)
        # module list loading
        self.H = self.load_lib('live_list', _ROOT_FOLDER_)
        self.H = self.load_lib('experimental_list', _ROOT_FOLDER_)
        # Live modules list
        self.libs_list = [
            'ZipUp',
            'RepCoList',
            'send_mail'
        ]
        # experimental modules list
        self.libs_experimental_list = self.libs_list +[
            'GdriveD',
            'RamGpu',
            'GitAid',
            'Fileview',
            'custom_functions',
            'FiFyFo',
        ]
        self.list_to_load =self.libs_list
        if self.dev == True:
            self.list_to_load = self.libs_experimental_list
        # load the modules into the globals
        for lib in self.list_to_load:
            globals()[lib] = self.load_lib(str(lib))

    def load_lib(self,lib_file,folder=_LIB_+'/'):
        '''
        1Get the specs from the file
        2Make a module outof the specs
        3init the module into the globals()[name] (same as import...)
        '''
        module_path = folder+lib_file+'.py'
        print('Module : '+ lib_file +' Loaded!')
        if self.dev==True:
            print(module_path)
        # Build a module object directly from the .py file path and execute it.
        dummy = impylib.spec_from_file_location("module.name",module_path )
        Imp = impylib.module_from_spec(dummy)
        dummy.loader.exec_module(Imp)
        return Imp

    def system_cmd(cmd,args,vals):
        # NOTE(review): missing `self`; `cmd` parameter is immediately overwritten
        # by a placeholder list — this function cannot run a real command as-is.
        cmd = ['', '--arg', 'value']
        proc_out=[]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        for line in p.stdout:
            proc_out.append(line)
            # print(line)
        p.wait()
        print(p.returncode)
        print(proc_out)

# #Big Helpers Loading
# from pathlib import Path
# import os, inspect
# try:
#     os.system('rm -r /content/sample_data')
#     os.system('rm -r /content/ProjectPrimer.py')
# except:
#     print('No default garbage to remove')
# # lib_file=Path('/content/lib/Helpers.py')
# # if not lib_file.is_file():
# #     os.system('wget https://raw.githubusercontent.com/bxck75/PyHelpers/master/BigHelp.py -O /content/lib/BigHelp.py')
# # os.chdir('/content/')
# # import the biggest help
# from Libs.BigHelp import Helpers
# # installing done......bring in the helpers!
# global H
# H=Helpers()
# # bring in the helpers!
# H.Me(['cml','echo "Pull in the helpers!"'])
# H.prime=['bxck75/PyHelpers']
# H.Me(['inst_reps',H.prime,'/content/lib',True,True])
# H.Me(['cml','echo "All Done!"'])
# # os.chdir('/content/')
# os.system('rm -r /content/lib/BigHelp.py')

# check the new module
Stimpy_Imp()
# print(dir(experimental))
# help(sys.argv[2])
# NOTE(review): SI is never defined — this print raises NameError.
print(SI)
print(experimental_list)

%%writefile /content/PyHelpers/live_list.py
live_mods=[
    'ZipUp',
    'RepCoList',
    'send_mail'
]

%%writefile /content/PyHelpers/experimental_list.py
x_mods=[
    'GdriveD',
    'RamGpu',
    'GitAid',
    'Fileview',
    'custom_functions',
    'FiFyFo',
]

'''Own functions list'''
def get_gdrive_dataset(pack, DS_root='datasets',GD_root='datasets'):
    # Mount Google Drive, copy the dataset archive `pack` locally, unzip and clean up.
    import google
    from google.colab import drive
    drive.mount('/content/drive', force_remount=True)
    H.GD_ROOT=GD_root+'/'
    H.DS_ROOT=DS_root+'/'
    os.chdir(H.gdrive_root+H.GD_ROOT)
    H.Me(['mkd',[DS_root,'models'],H.pix_root])
    H.Me(['cml','cp -r '+pack+' '+H.pix_root+DS_root])
    os.chdir(H.pix_root+DS_root)
    H.Me(['cml','unzip -q '+pack])
    H.Me(['cml','rm -r '+pack])
    os.chdir(H.pix_root)

def MethHelp(libs):
    os_help=H.Me(['vdir',libs])
    #make a list containing libs values of os_help
    listOfLibs = [x[0] for x in os_help]
    #make a list containing libs method values of os_help
    listOfMethods= [x[1] for x in os_help]
    # Create a zipped list of tuples from above lists
    zippedList = list(zip(listOfLibs, listOfMethods[0:5]))
    zippedList
    # request help on method from list
    return zippedList

def loadTboard():
    '''load tensorboard'''
    import datetime, os
    # install tensorboard
    # H.Me(['cml','pip install -q tensorflow'])
    # Load the TensorBoard notebook extension
    try:
        %load_ext tensorboard
    except:
        %reload_ext tensorboard

# !wget https://raw.githubusercontent.com/bxck75/PyHelpers/master/ProjectPrimer.py
# !python ProjectPrimer.py
from lib.PyHelpers.ProjectPrimer import H
from IPython.display import clear_output
from lib import PyHelpers
from lib.PyHelpers import RepCoList,RamGpu,GdriveD,ZipUp
H.Me(['vdir',[ZipUp]])
clear_output()
from lib import PyHelpers
from lib.PyHelpers import RepCoList,RamGpu,GdriveD,ZipUp,BigHelp
H.zip_to_drive = ZipUp.ZipUp
H.Me(['vdir',[H.zip_to_drive]])
sheit_to = H.zip_to_drive('sample_data',                 # name of zipfile
                          '/content/drive/My Drive',     # folder to push the zip to
                          '/content/sample_data')        # folder to zip
print(sheit_to.ZipUp)
H.Me(['vdir',[H.zip_to_drive]])
# H.Me(['vdir',[BigHelp,GdriveD]])

%cd /content/
import os
os.system('pip install -U -q PyDrive')
from google.colab import files
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import zipfile
import os
import sys

class ZipUp:
    # Zips a local folder and uploads the archive to Google Drive via PyDrive.
    def __init__(self, zipname, foldername, target_dir):
        ''' init the details for the zip and push'''
        self.zipname = zipname
        self.foldername = foldername
        self.target_dir = target_dir

    @property
    def ZipUp(self):
        ''' define the zip_n_push property'''
        if( self.zipname !='' and self.foldername != '' and self.target_dir != '' ):
            self.zipfolder()
            self.g_login()
            self.make_push()
            self.status = self.status + self.get_id()
        return self.status

    def zipfolder(self):
        ''' zip the selected folder to the target dir with '''
        zipobj = zipfile.ZipFile(self.zipname + '.zip', 'w', zipfile.ZIP_DEFLATED)
        rootlen = len(self.target_dir) + 1
        for base, dirs, files in os.walk(self.target_dir):
            for file in files:
                fn = os.path.join(base, file)
                zipobj.write(fn, fn[rootlen:])
        self.status='zipped '

    def g_login(self):
        ''' Authenticate and create the PyDrive client.'''
        auth.authenticate_user()
        gauth = GoogleAuth()
        gauth.credentials = GoogleCredentials.get_application_default()
        self.drive = GoogleDrive(gauth)
        self.status= self.status+'and '

    def make_push(self):
        ''' Create & upload a file text file.'''
        # NOTE(review): get_id() looks up an *existing* Drive file by title, so this
        # updates a prior upload if present (or creates new when get_id returns None).
        file1 = self.drive.CreateFile({'id':self.get_id()})
        file1.SetContentFile(self.zipname+".zip")
        file1.Upload()
        self.status= self.status+'pushed! (id) '

    def get_id(self):
        query = "title = '"+self.zipname+".zip'"
        file_list = self.drive.ListFile({'q': query}).GetList()
        for file in file_list:
            if file['labels']['trashed'] ==False:
                return file['id']

if __name__ == "__main__":
    item_to = ZipUp('sample_data','/content/drive/My Drive','/content/lib/PyHelpers')
    print(item_to.ZipUp)

from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive

def g_login():
    ''' Authenticate and create the PyDrive client.'''
    auth.authenticate_user()
    gauth = GoogleAuth()
    gauth.credentials = GoogleCredentials.get_application_default()
    return GoogleDrive(gauth)

drive = g_login()
query = "title = 'sample_data.zip'"
file_list = drive.ListFile({'q': query}).GetList()
for file in file_list:
    if file['labels']['trashed'] ==False:
        print('-' * 10)
        print(file['id'])
        # print(file['downloadUrl'])
        # print('-' * 10)
        # first parent id
        parent_id = file['parents'][0]['id']
        print(parent_id)
        # print('-' * 10)
        # x = drive.CreateFile({'id': parent_id})
        # x.FetchMetadata()
        # print(x)

%cd /content/lib/PyHelpers
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))

# Install Keras with pip
!pip install -q keras
import keras
# >>> Using TensorFlow backend.

# Install GraphViz with apt
!apt-get install graphviz -y

# Here's the easiest way to do so, IMO, with a little direction from here.
# In a 3 step process, first invoke a file selector within your notebook with this:
from google.colab import files
uploaded = files.upload()

# After your file(s) is/are selected, use the following to iterate the uploaded files in order
# to find their key names, using:
for fn in uploaded.keys():
    print('User uploaded file "{name}" with length {length} bytes'.format(name=fn, length=len(uploaded[fn])))

import os
os.chdir('/content/lib')
from lib.ProjectPrimer import H as Tickle
import importlib.util
# print(len(H.Me(['globx','/content/lib/PyHelpers','*.py'])))
# Tickle.Me(['vdir',[importlib]])
# for mod in range(len(H.Me(['globx','/content/lib/PyHelpers','*.py']))):
#     print(mod)

# Get the specs from the file
spec = importlib.util.spec_from_file_location("module.name", "/content/lib/PyHelpers/ZipUp.py")
# Make a module outof the specs
ZipDrive = importlib.util.module_from_spec(spec)
# Load the new module(same as import...)
spec.loader.exec_module(ZipDrive)
# check the new module
# NOTE(review): Zip2Drive is never defined (the module was bound as ZipDrive) — NameError.
Tickle.Me(['vdir',[Zip2Drive]])

# # item_to = foo.ZipUp('sample_data','/content/drive/My Drive','/content/lib/PyHelpers')
# # print(item_to.ZipUp)
# # foo.ZipUp()
# help(foo)
# print(foo.__dict__)
# foo.DICT=foo.__dict__
# __builtins__.locals()
H.Me
```
github_jupyter
TSG075 - FailedCreatePodSandBox due to NetworkPlugin cni failed to set up pod ============================================================================= Description ----------- > Error: Warning FailedCreatePodSandBox 58m kubelet, > rasha-virtual-machine Failed create pod sandbox: rpc error: code = > Unknown desc = failed to set up sandbox container > โ€œb76dc0446642bf06ef91b331be55814795410d58807eeffddf1fe3b5c9c572c0โ€ > network for pod โ€œmssql-controller-hfvxrโ€: NetworkPlugin cni failed to > set up pod โ€œmssql-controller-hfvxr\_testโ€ network: open > /run/flannel/subnet.env: no such file or directory Normal > SandboxChanged 34m (x325 over 59m) kubelet, virtual-machine Pod > sandbox changed, it will be killed and re-created. Warning > FailedCreatePodSandBox 4m5s (x831 over 58m) kubelet, virtual-machine > (combined from similar events): Failed create pod sandbox: rpc error: > code = Unknown desc = failed to set up sandbox container > โ€œbee7d4eb0a74a4937de687a31676887b0c324e88a528639180a10bdbc33ce008โ€ > network for pod โ€œmssql-controller-hfvxrโ€: NetworkPlugin cni failed to > set up pod โ€œmssql-controller-hfvxr\_testโ€ network: open > /run/flannel/subnet.env: no such file or directory ### Instantiate Kubernetes client ``` # Instantiate the Python Kubernetes client into 'api' variable import os try: from kubernetes import client, config from kubernetes.stream import stream if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ: config.load_incluster_config() else: try: config.load_kube_config() except: display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.')) raise api = client.CoreV1Api() print('Kubernetes client instantiated') except ImportError: from IPython.display import Markdown display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.')) raise 
``` ### Common functions Define helper functions used in this notebook. ``` # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) 
to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportabilty, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) if which_binary == None: if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: return output else: return elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: return output def load_json(filename): """Load a json file from disk and return the contents""" with open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that 
should be applied to the stderr of the running executable""" # Load this notebook as json to get access to the expert rules in the notebook metadata. # try: j = load_json("tsg075-networkplugin-cni-failed-to-setup-pod.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "expanded_rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["expanded_rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. # print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! 
if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']} ``` ### Resolution This issue has been seen on single node kubeadm installations when the host machine has been rebooted. To resolve the issue, delete the kube-flannel and coredns pods. The higher level Kuberenetes objects will re-create these pods. 
The following code cells will do this for you:

### Verify there are flannel and coredns pods in this kubernetes cluster

```
run(f"kubectl get pods -n kube-system")
```

### Delete them, so they can be re-created by the higher level Kubernetes objects

```
# NOTE(review): assumes `api` is a kubernetes.client.CoreV1Api instance created
# in an earlier cell of this notebook -- confirm before running; it is not
# defined in this section.
pod_list = api.list_namespaced_pod("kube-system")

# Delete every flannel and coredns pod in kube-system; their owning
# DaemonSet/Deployment will re-create them automatically.
for pod in pod_list.items:
    if pod.metadata.name.find("kube-flannel-ds") != -1:
        print(f"Deleting pod: {pod.metadata.name}")
        run(f"kubectl delete pod/{pod.metadata.name} -n kube-system")

    if pod.metadata.name.find("coredns-") != -1:
        print(f"Deleting pod: {pod.metadata.name}")
        run(f"kubectl delete pod/{pod.metadata.name} -n kube-system")
```

### Verify the flannel and coredns pods have been re-created

```
run(f"kubectl get pods -n kube-system")

print('Notebook execution complete.')
```
github_jupyter
# 12. ์ง์ ‘ ๋งŒ๋“ค์–ด๋ณด๋Š” OCR **Text recognition ๋ชจ๋ธ์„ ๊ตฌํ˜„, ํ•™์Šตํ•˜๊ณ  Text detection ๋ชจ๋ธ๊ณผ ์—ฐ๊ฒฐํ•˜์—ฌ OCR์„ ๊ตฌํ˜„ํ•œ๋‹ค.** ## 12-1. ๋“ค์–ด๊ฐ€๋ฉฐ ## 12-2. Overall structure of OCR ## 12-3. Dataset for OCR ``` import os path = os.path.join(os.getenv('HOME'),'aiffel/ocr') os.chdir(path) print(path) ``` ## 12-4. Recognition model (1) ``` NUMBERS = "0123456789" ENG_CHAR_UPPER = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" TARGET_CHARACTERS = ENG_CHAR_UPPER + NUMBERS print(f"The total number of characters is {len(TARGET_CHARACTERS)}") import re import six import math import lmdb import os import numpy as np import tensorflow as tf from PIL import Image from tensorflow.keras import layers from tensorflow.keras.models import Model from tensorflow.keras.utils import Sequence from tensorflow.keras import backend as K from tensorflow.keras.models import load_model BATCH_SIZE = 128 HOME_DIR = os.getenv('HOME')+'/aiffel/ocr' TRAIN_DATA_PATH = HOME_DIR+'/data/MJ/MJ_train' VALID_DATA_PATH = HOME_DIR+'/data/MJ/MJ_valid' TEST_DATA_PATH = HOME_DIR+'/data/MJ/MJ_test' print(TRAIN_DATA_PATH) ``` ## 12-5. 
Recognition model (2) Input Image ``` from IPython.display import display # env์— ๋ฐ์ดํ„ฐ๋ฅผ ๋ถˆ๋Ÿฌ์˜ฌ๊ฒŒ์š” # lmdb์—์„œ ๋ฐ์ดํ„ฐ๋ฅผ ๋ถˆ๋Ÿฌ์˜ฌ ๋•Œ env๋ผ๋Š” ๋ณ€์ˆ˜๋ช…์„ ์‚ฌ์šฉํ•˜๋Š”๊ฒŒ ์ผ๋ฐ˜์ ์ด์—์š” env = lmdb.open(TRAIN_DATA_PATH, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) # ๋ถˆ๋Ÿฌ์˜จ ๋ฐ์ดํ„ฐ๋ฅผ txn(transaction)์ด๋ผ๋Š” ๋ณ€์ˆ˜๋ฅผ ํ†ตํ•ด ์—ฝ๋‹ˆ๋‹ค # ์ด์ œ txn๋ณ€์ˆ˜๋ฅผ ํ†ตํ•ด ์ง์ ‘ ๋ฐ์ดํ„ฐ์— ์ ‘๊ทผ ํ•  ์ˆ˜ ์žˆ์–ด์š” with env.begin(write=False) as txn: for index in range(1, 5): # index๋ฅผ ์ด์šฉํ•ด์„œ ๋ผ๋ฒจ ํ‚ค์™€ ์ด๋ฏธ์ง€ ํ‚ค๋ฅผ ๋งŒ๋“ค๋ฉด # txn์—์„œ ๋ผ๋ฒจ๊ณผ ์ด๋ฏธ์ง€๋ฅผ ์ฝ์–ด์˜ฌ ์ˆ˜ ์žˆ์–ด์š” label_key = 'label-%09d'.encode() % index label = txn.get(label_key).decode('utf-8') img_key = 'image-%09d'.encode() % index imgbuf = txn.get(img_key) buf = six.BytesIO() buf.write(imgbuf) buf.seek(0) # ์ด๋ฏธ์ง€๋Š” ๋ฒ„ํผ๋ฅผ ํ†ตํ•ด ์ฝ์–ด์˜ค๊ธฐ ๋•Œ๋ฌธ์— # ๋ฒ„ํผ์—์„œ ์ด๋ฏธ์ง€๋กœ ๋ณ€ํ™˜ํ•˜๋Š” ๊ณผ์ •์ด ๋‹ค์‹œ ํ•„์š”ํ•ด์š” try: img = Image.open(buf).convert('RGB') except IOError: img = Image.new('RGB', (100, 32)) label = '-' # ์›๋ณธ ์ด๋ฏธ์ง€ ํฌ๊ธฐ๋ฅผ ์ถœ๋ ฅํ•ด ๋ด…๋‹ˆ๋‹ค width, height = img.size print('original image width:{}, height:{}'.format(width, height)) # ์ด๋ฏธ์ง€ ๋น„์œจ์„ ์œ ์ง€ํ•˜๋ฉด์„œ ๋†’์ด๋ฅผ 32๋กœ ๋ฐ”๊ฟ€๊ฑฐ์—์š” # ํ•˜์ง€๋งŒ ๋„ˆ๋น„๋ฅผ 100๋ณด๋‹ค๋Š” ์ž‘๊ฒŒํ•˜๊ณ  ์‹ถ์–ด์š” target_width = min(int(width*32/height), 100) target_img_size = (target_width,32) print('target_img_size:{}'.format(target_img_size)) img = np.array(img.resize(target_img_size)).transpose(1,0,2) # ์ด์ œ ๋†’์ด๊ฐ€ 32๋กœ ์ผ์ •ํ•œ ์ด๋ฏธ์ง€์™€ ๋ผ๋ฒจ์„ ํ•จ๊ป˜ ์ถœ๋ ฅํ•  ์ˆ˜ ์žˆ์–ด์š” print('display img shape:{}'.format(img.shape)) print('label:{}'.format(label)) display(Image.fromarray(img.transpose(1,0,2).astype(np.uint8))) class MJDatasetSequence(Sequence): # ๊ฐ์ฒด๋ฅผ ์ดˆ๊ธฐํ™” ํ•  ๋•Œ lmdb๋ฅผ ์—ด์–ด env์— ์ค€๋น„ํ•ด๋‘ก๋‹ˆ๋‹ค # ๋˜, lmdb์— ์žˆ๋Š” ๋ฐ์ดํ„ฐ ์ˆ˜๋ฅผ ๋ฏธ๋ฆฌ ํŒŒ์•…ํ•ด๋‘ก๋‹ˆ๋‹ค def __init__(self, dataset_path, 
label_converter, batch_size=1, img_size=(100,32), max_text_len=22, is_train=False, character='') : self.label_converter = label_converter self.batch_size = batch_size self.img_size = img_size self.max_text_len = max_text_len self.character = character self.is_train = is_train self.divide_length = 100 self.env = lmdb.open(dataset_path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False) with self.env.begin(write=False) as txn: self.num_samples = int(txn.get('num-samples'.encode())) self.index_list = [index + 1 for index in range(self.num_samples)] def __len__(self): return math.ceil(self.num_samples/self.batch_size/self.divide_length) # index์— ํ•ด๋‹นํ•˜๋Š” image์™€ label์„ ์ฝ์–ด์˜ต๋‹ˆ๋‹ค # ์œ„์—์„œ ์‚ฌ์šฉํ•œ ์ฝ”๋“œ์™€ ๋งค์šฐ ์œ ์‚ฌํ•ฉ๋‹ˆ๋‹ค # label์„ ์กฐ๊ธˆ ๋” ๋‹ค๋“ฌ๋Š” ๊ฒƒ์ด ์•ฝ๊ฐ„ ๋‹ค๋ฆ…๋‹ˆ๋‹ค def _get_img_label(self, index): with self.env.begin(write=False) as txn: label_key = 'label-%09d'.encode() % index label = txn.get(label_key).decode('utf-8') img_key = 'image-%09d'.encode() % index imgbuf = txn.get(img_key) buf = six.BytesIO() buf.write(imgbuf) buf.seek(0) try: img = Image.open(buf).convert('RGB') except IOError: img = Image.new('RGB', self.img_size) label = '-' width, height = img.size target_width = min(int(width*self.img_size[1]/height), self.img_size[0]) target_img_size = (target_width, self.img_size[1]) img = np.array(img.resize(target_img_size)).transpose(1,0,2) # label์„ ์•ฝ๊ฐ„ ๋” ๋‹ค๋“ฌ์Šต๋‹ˆ๋‹ค label = label.upper() out_of_char = f'[^{self.character}]' label = re.sub(out_of_char, '', label) label = label[:self.max_text_len] return (img, label) # __getitem__์€ ์•ฝ์†๋˜์–ด์žˆ๋Š” ๋ฉ”์„œ๋“œ์ž…๋‹ˆ๋‹ค # ์ด ๋ถ€๋ถ„์„ ์ž‘์„ฑํ•˜๋ฉด sliceํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค # ์ž์„ธํžˆ ์•Œ๊ณ  ์‹ถ๋‹ค๋ฉด ์•„๋ž˜ ๋ฌธ์„œ๋ฅผ ์ฐธ๊ณ ํ•˜์„ธ์š” # https://docs.python.org/3/reference/datamodel.html#object.__getitem__ # # 1. idx์— ํ•ด๋‹นํ•˜๋Š” index_list๋งŒํผ ๋ฐ์ดํ„ฐ๋ฅผ ๋ถˆ๋Ÿฌ # 2. image์™€ label์„ ๋ถˆ๋Ÿฌ์˜ค๊ณ  # 3. 
์‚ฌ์šฉํ•˜๊ธฐ ์ข‹์€ inputs๊ณผ outputsํ˜•ํƒœ๋กœ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค def __getitem__(self, idx): # 1. batch_indicies = self.index_list[ idx*self.batch_size: (idx+1)*self.batch_size ] input_images = np.zeros([self.batch_size, *self.img_size, 3]) labels = np.zeros([self.batch_size, self.max_text_len], dtype='int64') input_length = np.ones([self.batch_size], dtype='int64') * self.max_text_len label_length = np.ones([self.batch_size], dtype='int64') # 2. for i, index in enumerate(batch_indicies): img, label = self._get_img_label(index) encoded_label = self.label_converter.encode(label) # ์ธ์ฝ”๋”ฉ ๊ณผ์ •์—์„œ '-'์ด ์ถ”๊ฐ€๋˜๋ฉด max_text_len๋ณด๋‹ค ๊ธธ์–ด์งˆ ์ˆ˜ ์žˆ์–ด์š” if len(encoded_label) > self.max_text_len: continue width = img.shape[0] input_images[i,:width,:,:] = img labels[i,0:len(encoded_label)] = encoded_label label_length[i] = len(encoded_label) # 3. inputs = { 'input_image': input_images, 'label': labels, 'input_length': input_length, 'label_length': label_length, } outputs = {'ctc': np.zeros([self.batch_size, 1])} return inputs, outputs print("์Š~") ``` ## 12-6. 
Recognition model (3) Encode

```
class LabelConverter(object):
    """Maps characters to integer CTC labels and back.

    Index 0 is reserved for the CTC blank character '-'; the real characters
    are numbered from 1 in the order they appear in `character`.
    """

    def __init__(self, character):
        self.character = "-" + character
        self.label_map = dict()
        for i, char in enumerate(self.character):
            self.label_map[char] = i

    def encode(self, text):
        """Encode `text` as a label sequence, inserting the blank label (0)
        between repeated characters so CTC can tell them apart."""
        encoded_label = []
        # [[YOUR CODE]]
        for i, char in enumerate(text):
            if i > 0 and char == text[i - 1]:
                encoded_label.append(0)  # insert a blank label between identical characters
            encoded_label.append(self.label_map[char])
        # Fixed: the original had a second, duplicated `return` here; a return
        # inside the loop would truncate the encoding after the first character.
        return np.array(encoded_label)

    def decode(self, encoded_label):
        """Decode a label sequence back into its character string
        (blank labels decode to '-')."""
        decoded_label = ""
        for encode in encoded_label:
            decoded_label += self.character[encode]
        return decoded_label
```

```python
# Answer code
class LabelConverter(object):

     def __init__(self, character):
         self.character = "-" + character
         self.label_map = dict()
         for i, char in enumerate(self.character):
             self.label_map[char] = i

     def encode(self, text):
         encoded_label = []
         for i, char in enumerate(text):
             if i > 0 and char == text[i - 1]:
                 encoded_label.append(0)  # insert a blank label between identical characters
             encoded_label.append(self.label_map[char])
         return np.array(encoded_label)

     def decode(self, encoded_label):
         target_characters = list(self.character)
         decoded_label = ""
         for encode in encoded_label:
             decoded_label += self.character[encode]
         return decoded_label

print("์Š~")
```

```
label_converter = LabelConverter(TARGET_CHARACTERS)

encdoded_text = label_converter.encode('HELLO')
print("Encdoded_text: ", encdoded_text)

decoded_text = label_converter.decode(encdoded_text)
print("Decoded_text: ", decoded_text)
```

## 12-7.
Recognition model (4) Build CRNN model ``` def ctc_lambda_func(args): # CTC loss๋ฅผ ๊ณ„์‚ฐํ•˜๊ธฐ ์œ„ํ•œ Lambda ํ•จ์ˆ˜ labels, y_pred, label_length, input_length = args y_pred = y_pred[:, 2:, :] return K.ctc_batch_cost(labels, y_pred, input_length, label_length) print("์Š~") def build_crnn_model(input_shape=(100,32,3), characters=TARGET_CHARACTERS): num_chars = len(characters)+2 image_input = layers.Input(shape=input_shape, dtype='float32', name='input_image') # Build CRNN model # [[YOUR CODE]] conv = layers.Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(image_input) conv = layers.MaxPooling2D(pool_size=(2, 2))(conv) conv = layers.Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.MaxPooling2D(pool_size=(2, 2))(conv) conv = layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.MaxPooling2D(pool_size=(1, 2))(conv) conv = layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.BatchNormalization()(conv) conv = layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.BatchNormalization()(conv) conv = layers.MaxPooling2D(pool_size=(1, 2))(conv) feature = layers.Conv2D(512, (2, 2), activation='relu', kernel_initializer='he_normal')(conv) sequnce = layers.Reshape(target_shape=(24, 512))(feature) sequnce = layers.Dense(64, activation='relu')(sequnce) sequnce = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(sequnce) sequnce = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(sequnce) y_pred = layers.Dense(num_chars, activation='softmax', name='output')(sequnce) labels = layers.Input(shape=[22], dtype='int64', name='label') input_length = layers.Input(shape=[1], dtype='int64', 
name='input_length') label_length = layers.Input(shape=[1], dtype='int64', name='label_length') loss_out = layers.Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")( [labels, y_pred, label_length, input_length] ) model_input = [image_input, labels, input_length, label_length] model = Model( inputs=model_input, outputs=loss_out ) return model ``` ```python # ์ •๋‹ต ์ฝ”๋“œ def build_crnn_model(input_shape=(100,32,3), characters=TARGET_CHARACTERS): num_chars = len(characters)+2 image_input = layers.Input(shape=input_shape, dtype='float32', name='input_image') # Build CRNN model conv = layers.Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(image_input) conv = layers.MaxPooling2D(pool_size=(2, 2))(conv) conv = layers.Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.MaxPooling2D(pool_size=(2, 2))(conv) conv = layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.MaxPooling2D(pool_size=(1, 2))(conv) conv = layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.BatchNormalization()(conv) conv = layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal')(conv) conv = layers.BatchNormalization()(conv) conv = layers.MaxPooling2D(pool_size=(1, 2))(conv) feature = layers.Conv2D(512, (2, 2), activation='relu', kernel_initializer='he_normal')(conv) sequnce = layers.Reshape(target_shape=(24, 512))(feature) sequnce = layers.Dense(64, activation='relu')(sequnce) sequnce = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(sequnce) sequnce = layers.Bidirectional(layers.LSTM(256, return_sequences=True))(sequnce) y_pred = layers.Dense(num_chars, activation='softmax', name='output')(sequnce) labels = 
layers.Input(shape=[22], dtype='int64', name='label') input_length = layers.Input(shape=[1], dtype='int64', name='input_length') label_length = layers.Input(shape=[1], dtype='int64', name='label_length') loss_out = layers.Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")( [labels, y_pred, label_length, input_length] ) model_input = [image_input, labels, input_length, label_length] model = Model( inputs=model_input, outputs=loss_out ) return model print("์Š~") ``` ## 12-8. Recognition model (5) Train & Inference ``` # ๋ฐ์ดํ„ฐ์…‹๊ณผ ๋ชจ๋ธ์„ ์ค€๋น„ํ•ฉ๋‹ˆ๋‹ค train_set = MJDatasetSequence(TRAIN_DATA_PATH, label_converter, batch_size=BATCH_SIZE, character=TARGET_CHARACTERS, is_train=True) val_set = MJDatasetSequence(VALID_DATA_PATH, label_converter, batch_size=BATCH_SIZE, character=TARGET_CHARACTERS) model = build_crnn_model() # ๋ชจ๋ธ์„ ์ปดํŒŒ์ผ ํ•ฉ๋‹ˆ๋‹ค optimizer = tf.keras.optimizers.Adadelta(lr=0.1, clipnorm=5) model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer) # ํ›ˆ๋ จ์ด ๋นจ๋ฆฌ ๋๋‚  ์ˆ˜ ์žˆ๋„๋ก ModelCheckPoint์™€ EarlyStopping์„ ์‚ฌ์šฉํ•ฉ๋‹ˆ๋‹ค checkpoint_path = HOME_DIR + '/model_checkpoint.hdf5' ckp = tf.keras.callbacks.ModelCheckpoint( checkpoint_path, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True ) earlystop = tf.keras.callbacks.EarlyStopping( monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='min' ) model.fit(train_set, steps_per_epoch=len(train_set), epochs=1, validation_data=val_set, validation_steps=len(val_set), callbacks=[ckp, earlystop]) # ๋‹ค์Œ์€ ํ•™์Šต๋œ ๋ชจ๋ธ์˜ ๊ฐ€์ค‘์น˜๊ฐ€ ์ €์žฅ๋œ ๊ฒฝ๋กœ์ž…๋‹ˆ๋‹ค checkpoint_path = HOME_DIR + '/data/model_checkpoint.hdf5' # ๋ฐ์ดํ„ฐ์…‹๊ณผ ๋ชจ๋ธ์„ ๋ถˆ๋Ÿฌ์˜ต๋‹ˆ๋‹ค test_set = MJDatasetSequence(TEST_DATA_PATH, label_converter, batch_size=BATCH_SIZE, character=TARGET_CHARACTERS) model = build_crnn_model() model.load_weights(checkpoint_path) # crnn ๋ชจ๋ธ์€ ์ž…๋ ฅ์ด ๋ณต์žกํ•œ ๊ตฌ์กฐ์ด๋ฏ€๋กœ ๊ทธ๋Œ€๋กœ ์‚ฌ์šฉํ•  ์ˆ˜๊ฐ€ 
์—†์Šต๋‹ˆ๋‹ค # ๊ทธ๋ž˜์„œ crnn ๋ชจ๋ธ์˜ ์ž…๋ ฅ์ค‘ 'input_image' ๋ถ€๋ถ„๋งŒ ์‚ฌ์šฉํ•œ ๋ชจ๋ธ์„ ์ƒˆ๋กœ ๋งŒ๋“ค๊ฒ๋‹ˆ๋‹ค # inference ์ „์šฉ ๋ชจ๋ธ์ด์—์š” input_data = model.get_layer('input_image').output y_pred = model.get_layer('output').output model_pred = Model(inputs=input_data, outputs=y_pred) from IPython.display import display # ๋ชจ๋ธ์ด inferenceํ•œ ๊ฒฐ๊ณผ๋ฅผ ๊ธ€์ž๋กœ ๋ฐ”๊ฟ”์ฃผ๋Š” ์—ญํ• ์„ ํ•ฉ๋‹ˆ๋‹ค # ์ฝ”๋“œ ํ•˜๋‚˜ํ•˜๋‚˜๋ฅผ ์ดํ•ดํ•˜๊ธฐ๋Š” ์กฐ๊ธˆ ์–ด๋ ค์šธ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค def decode_predict_ctc(out, chars = TARGET_CHARACTERS): results = [] indexes = K.get_value( K.ctc_decode( out, input_length=np.ones(out.shape[0]) * out.shape[1], greedy=False , beam_width=5, top_paths=1 )[0][0] )[0] text = "" for index in indexes: text += chars[index] results.append(text) return results # ๋ชจ๋ธ๊ณผ ๋ฐ์ดํ„ฐ์…‹์ด ์ฃผ์–ด์ง€๋ฉด inference๋ฅผ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค # index๊ฐœ ๋งŒํผ์˜ ๋ฐ์ดํ„ฐ๋ฅผ ์ฝ์–ด ๋ชจ๋ธ๋กœ inference๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ  # ๊ฒฐ๊ณผ๋ฅผ ๋””์ฝ”๋”ฉํ•ด ์ถœ๋ ฅํ•ด์ค๋‹ˆ๋‹ค def check_inference(model, dataset, index = 5): for i in range(index): inputs, outputs = dataset[i] img = dataset[i][0]['input_image'][0:1,:,:,:] output = model.predict(img) result = decode_predict_ctc(output, chars="-"+TARGET_CHARACTERS)[0].replace('-','') print("Result: \t", result) display(Image.fromarray(img[0].transpose(1,0,2).astype(np.uint8))) check_inference(model_pred, test_set, index=10) ``` ## 12-9. ํ”„๋กœ์ ํŠธ: End-to-End OCR ``` import tensorflow as tf import numpy as np import PIL import cv2 import keras_ocr print(tf.__version__) print(np.__version__) print(PIL.__version__) print(cv2.__version__) print(keras_ocr.__version__) from keras_ocr.detection import Detector SAMPLE_IMG_PATH = HOME_DIR + '/data/sample.jpg' detector = Detector() def detect_text(img_path): # TODO # ๋ฐฐ์น˜ ํฌ๊ธฐ๋ฅผ ์œ„ํ•ด์„œ dimension์„ ํ™•์žฅํ•ด์ฃผ๊ณ  kera-ocr์˜ ์ž…๋ ฅ ์ฐจ์›์— ๋งž๊ฒŒ H,W,C๋กœ ๋ณ€๊ฒฝํ•ฉ๋‹ˆ๋‹ค. # ๋ฐฐ์น˜์˜ ์ฒซ ๋ฒˆ์งธ ๊ฒฐ๊ณผ๋งŒ ๊ฐ€์ ธ์˜ต๋‹ˆ๋‹ค. 
# ์‹œ๊ฐํ™”๋ฅผ ์œ„ํ•ด์„œ x์™€ y์ขŒํ‘œ๋ฅผ ๋ณ€๊ฒฝํ•ด์ค๋‹ˆ๋‹ค. (์•ž์„  h dimension์œผ๋กœ ์ธํ•ด y,x๋กœ ํ‘œ๊ธฐ๋จ) cropped_imgs = [] for text_result in ocr_result: img_draw.polygon(text_result, outline='red') x_min = text_result[:,0].min() - 5 x_max = text_result[:,0].max() + 5 y_min = text_result[:,1].min() - 5 y_max = text_result[:,1].max() + 5 word_box = [x_min, y_min, x_max, y_max] cropped_imgs.append(img_pil.crop(word_box)) return result_img, cropped_imgs img_pil, cropped_img = detect_text(SAMPLE_IMG_PATH) display(img_pil) def recognize_img(pil_img, input_img_size=(100,32)): # TODO: ์ž˜๋ ค์ง„ ๋‹จ์–ด ์ด๋ฏธ์ง€๋ฅผ ์ธ์‹ํ•˜๋Š” ์ฝ”๋“œ๋ฅผ ์ž‘์„ฑํ•˜์„ธ์š”! for _img in cropped_img: recognize_img(_img) ``` >## **๋ฃจ๋ธŒ๋ฆญ** > >|๋ฒˆํ˜ธ|ํ‰๊ฐ€๋ฌธํ•ญ|์ƒ์„ธ๊ธฐ์ค€| >|:---:|---|---| >|1|Text recognition์„ ์œ„ํ•ด ํŠนํ™”๋œ ๋ฐ์ดํ„ฐ์…‹ ๊ตฌ์„ฑ์ด ์ฒด๊ณ„์ ์œผ๋กœ ์ง„ํ–‰๋˜์—ˆ๋‹ค.|ํ…์ŠคํŠธ ์ด๋ฏธ์ง€ ๋ฆฌ์‚ฌ์ด์ง•, ctc loss ์ธก์ •์„ ์œ„ํ•œ ๋ผ๋ฒจ ์ธ์ฝ”๋”ฉ, ๋ฐฐ์น˜์ฒ˜๋ฆฌ ๋“ฑ์ด ์ ์ ˆํžˆ ์ˆ˜ํ–‰๋˜์—ˆ๋‹ค.| >|2|CRNN ๊ธฐ๋ฐ˜์˜ recognition ๋ชจ๋ธ์˜ ํ•™์Šต์ด ์ •์ƒ์ ์œผ๋กœ ์ง„ํ–‰๋˜์—ˆ๋‹ค.|ํ•™์Šต๊ฒฐ๊ณผ loss๊ฐ€ ์•ˆ์ •์ ์œผ๋กœ ๊ฐ์†Œํ•˜๊ณ  ๋Œ€๋ถ€๋ถ„์˜ ๋ฌธ์ž์ธ์‹ ์ถ”๋ก  ๊ฒฐ๊ณผ๊ฐ€ ์ •ํ™•ํ•˜๋‹ค.| >|3|keras-ocr detector์™€ CRNN recognizer๋ฅผ ์—ฎ์–ด ์›๋ณธ ์ด๋ฏธ์ง€ ์ž…๋ ฅ์œผ๋กœ๋ถ€ํ„ฐ text๊ฐ€ ์ถœ๋ ฅ๋˜๋Š” OCR์ด End-to-End๋กœ ๊ตฌ์„ฑ๋˜์—ˆ๋‹ค.|์ƒ˜ํ”Œ ์ด๋ฏธ์ง€๋ฅผ ์›๋ณธ์œผ๋กœ ๋ฐ›์•„ OCR ์ˆ˜ํ–‰ ๊ฒฐ๊ณผ๋ฅผ ๋ฆฌํ„ดํ•˜๋Š” 1๊ฐœ์˜ ํ•จ์ˆ˜๊ฐ€ ๋งŒ๋“ค์–ด์กŒ๋‹ค.|
github_jupyter
# Continuous training with TFX and Google Cloud AI Platform

## Learning Objectives

1. Use the TFX CLI to build a TFX pipeline.
2. Deploy a TFX pipeline version with tuning enabled to a hosted AI Platform Pipelines instance.
3. Create and monitor a TFX pipeline run using the TFX CLI and KFP UI.

In this lab, you will utilize the following tools and services to deploy and run a TFX pipeline on Google Cloud that automates the development and deployment of a TensorFlow 2.3 WideDeep Classifier to predict forest cover from cartographic data:

* The [**TFX CLI**](https://www.tensorflow.org/tfx/guide/cli) utility to build and deploy a TFX pipeline.
* A hosted [**AI Platform Pipeline instance (Kubeflow Pipelines)**](https://www.tensorflow.org/tfx/guide/kubeflow) for TFX pipeline orchestration.
* [**Dataflow**](https://cloud.google.com/dataflow) jobs for scalable, distributed data processing for TFX components.
* An [**AI Platform Training**](https://cloud.google.com/ai-platform/) job for model training and flock management of tuning trials.
* [**AI Platform Prediction**](https://cloud.google.com/ai-platform/), a model server destination for blessed pipeline model versions.
* [**CloudTuner**](https://www.tensorflow.org/tfx/guide/tuner#tuning_on_google_cloud_platform_gcp) (KerasTuner implementation) and [**AI Platform Vizier**](https://cloud.google.com/ai-platform/optimizer/docs/overview) for advanced model hyperparameter tuning using the Vizier algorithm.

You will then create and monitor pipeline runs using the TFX CLI as well as the KFP UI.

### Setup

#### Update lab environment PATH to include TFX CLI and skaffold

```
import yaml

# Set `PATH` to include the directory containing TFX CLI and skaffold.
PATH=%env PATH %env PATH=/home/jupyter/.local/bin:{PATH} ``` #### Validate lab package version installation ``` !python -c "import tfx; print('TFX version: {}'.format(tfx.__version__))" !python -c "import kfp; print('KFP version: {}'.format(kfp.__version__))" ``` **Note**: this lab was built and tested with the following package versions: `TFX version: 0.25.0` `KFP version: 1.0.4` (Optional) If running the above command results in different package versions or you receive an import error, upgrade to the correct versions by running the cell below: ``` %pip install --upgrade --user tfx==0.25.0 %pip install --upgrade --user kfp==1.0.4 ``` Note: you may need to restart the kernel to pick up the correct package versions. #### Validate creation of AI Platform Pipelines cluster Navigate to [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page in the Google Cloud Console. Note you may have already deployed an AI Pipelines instance during the Setup for the lab series. If so, you can proceed using that instance. If not: **1. Create or select an existing Kubernetes cluster (GKE) and deploy AI Platform**. Make sure to select `"Allow access to the following Cloud APIs https://www.googleapis.com/auth/cloud-platform"` to allow for programmatic access to your pipeline by the Kubeflow SDK for the rest of the lab. Also, provide an `App instance name` such as "tfx" or "mlops". Validate the deployment of your AI Platform Pipelines instance in the console before proceeding. ## Review: example TFX pipeline design pattern for Google Cloud The pipeline source code can be found in the `pipeline` folder. ``` %cd pipeline !ls -la ``` The `config.py` module configures the default values for the environment specific settings and the default values for the pipeline runtime parameters. The default values can be overwritten at compile time by providing the updated values in a set of environment variables. 
You will set custom environment variables later in this lab.

The `pipeline.py` module contains the TFX DSL defining the workflow implemented by the pipeline.

The `preprocessing.py` module implements the data preprocessing logic for the `Transform` component.

The `model.py` module implements the training, tuning, and model building logic for the `Trainer` and `Tuner` components.

The `runner.py` module configures and executes `KubeflowDagRunner`. At compile time, the `KubeflowDagRunner.run()` method converts the TFX DSL into the pipeline package in the [argo](https://argoproj.github.io/argo/) format for execution on your hosted AI Platform Pipelines instance.

The `features.py` module contains feature definitions common across `preprocessing.py` and `model.py`.

## Exercise: build your pipeline with the TFX CLI

You will use TFX CLI to compile and deploy the pipeline. As explained in the previous section, the environment specific settings can be provided through a set of environment variables and embedded into the pipeline package at compile time.

### Configure your environment resource settings

Update the below constants with the settings reflecting your lab environment.

- `GCP_REGION` - the compute region for AI Platform Training, Vizier, and Prediction.
- `ARTIFACT_STORE` - An existing GCS bucket. You can use any bucket or use the GCS bucket created during installation of AI Platform Pipelines. The default bucket name will contain the `kubeflowpipelines-` prefix.

```
# Use the following command to identify the GCS bucket for metadata and pipeline storage.
!gsutil ls
```

* `CUSTOM_SERVICE_ACCOUNT` - In the GCP console, click on the Navigation Menu. Navigate to `IAM & Admin`, then to `Service Accounts` and use the service account starting with prefix - `'tfx-tuner-caip-service-account'`.
This enables CloudTuner and the Google Cloud AI Platform extensions Tuner component to work together and allows for distributed and parallel tuning backed by AI Platform Vizier's hyperparameter search algorithm. Please refer back to the lab `README` for setup instructions. - `ENDPOINT` - set the `ENDPOINT` constant to the endpoint to your AI Platform Pipelines instance. The endpoint to the AI Platform Pipelines instance can be found on the [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page in the Google Cloud Console. Open the *SETTINGS* for your instance and use the value of the `host` variable in the *Connect to this Kubeflow Pipelines instance from a Python client via Kubeflow Pipelines SKD* section of the *SETTINGS* window. The format is `'...pipelines.googleusercontent.com'`. ``` #TODO: Set your environment resource settings here for GCP_REGION, ARTIFACT_STORE_URI, ENDPOINT, and CUSTOM_SERVICE_ACCOUNT. GCP_REGION = 'us-central1' ARTIFACT_STORE_URI = 'gs://dougkelly-sandbox-kubeflowpipelines-default' ENDPOINT = '70811b42aef62be3-dot-us-central2.pipelines.googleusercontent.com' CUSTOM_SERVICE_ACCOUNT = 'tfx-tuner-caip-service-account@dougkelly-sandbox.iam.gserviceaccount.com' PROJECT_ID = !(gcloud config get-value core/project) PROJECT_ID = PROJECT_ID[0] # Set your resource settings as Python environment variables. These override the default values in pipeline/config.py. %env GCP_REGION={GCP_REGION} %env ARTIFACT_STORE_URI={ARTIFACT_STORE_URI} %env CUSTOM_SERVICE_ACCOUNT={CUSTOM_SERVICE_ACCOUNT} %env PROJECT_ID={PROJECT_ID} ``` ### Create a pipeline version with hyperparameter tuning Incorporating automatic model hyperparameter tuning into a continuous training TFX pipeline workflow enables faster experimentation, development, and deployment of a top performing model. 
Default hyperparameter values in the search space are defined in `_get_hyperparameters()` in `model.py` and used these values to build a TensorFlow WideDeep Classifier model. Let's deploy a pipeline version with the `Tuner` component added to the pipeline that calls out to the AI Platform Vizier service for hyperparameter tuning. The `Tuner` component `"best_hyperparameters"` artifact will be passed directly to your `Trainer` component to deploy the top performing model. Review `pipeline.py` to see how this environment variable changes the pipeline topology. Also, review the tuning function in `model.py` for configuring `CloudTuner`. Note that you might not want to tune the hyperparameters every time you retrain your model due to the computational cost and diminishing performance returns. Once you have used `Tuner` determine a good set of hyperparameters, you can remove `Tuner` from your pipeline and use model hyperparameters defined in your model code or use a `ImporterNode` to import the `Tuner` `"best_hyperparameters"`artifact from a previous `Tuner` run to your model `Trainer`. ### Set the compile time settings Default pipeline runtime environment values are configured in the pipeline folder `config.py`. You will set their values directly below: * `PIPELINE_NAME` - the pipeline's globally unique name. For each subsequent pipeline update, each pipeline version uploaded to KFP will be reflected on the `Pipelines` tab in the `Pipeline name > Version name` dropdown in the format `PIPELINE_NAME_datetime.now()`. * `MODEL_NAME` - the pipeline's unique model output name for AI Platform Prediction. For multiple pipeline runs, each pushed blessed model will create a new version with the format `'v{}'.format(int(time.time()))`. * `DATA_ROOT_URI` - the URI for the raw lab dataset `gs://workshop-datasets/covertype/small`. 
* `CUSTOM_TFX_IMAGE` - the image name of your pipeline container build by skaffold and published by `Cloud Build` to `Cloud Container Registry` in the format `'gcr.io/{}/{}'.format(PROJECT_ID, PIPELINE_NAME)`. * `RUNTIME_VERSION` - the TensorFlow runtime version. This lab was built and tested using TensorFlow `2.3`. * `PYTHON_VERSION` - the Python runtime version. This lab was built and tested using Python `3.7`. * `USE_KFP_SA` - The pipeline can run using a security context of the GKE default node pool's service account or the service account defined in the `user-gcp-sa` secret of the Kubernetes namespace hosting Kubeflow Pipelines. If you want to use the `user-gcp-sa` service account you change the value of `USE_KFP_SA` to `True`. Note that the default AI Platform Pipelines configuration does not define the `user-gcp-sa` secret. * `ENABLE_TUNING` - boolean value indicating whether to add the `Tuner` component to the pipeline or use hyperparameter defaults. See the `model.py` and `pipeline.py` files for details on how this changes the pipeline topology across pipeline versions. You will create pipeline versions without and with tuning enabled in the subsequent lab exercises for comparison. ``` PIPELINE_NAME = 'tfx_covertype_continuous_training' MODEL_NAME = 'tfx_covertype_classifier' DATA_ROOT_URI = 'gs://workshop-datasets/covertype/small' CUSTOM_TFX_IMAGE = 'gcr.io/{}/{}'.format(PROJECT_ID, PIPELINE_NAME) RUNTIME_VERSION = '2.3' PYTHON_VERSION = '3.7' USE_KFP_SA=False ENABLE_TUNING=True %env PIPELINE_NAME={PIPELINE_NAME} %env MODEL_NAME={MODEL_NAME} %env DATA_ROOT_URI={DATA_ROOT_URI} %env KUBEFLOW_TFX_IMAGE={CUSTOM_TFX_IMAGE} %env RUNTIME_VERSION={RUNTIME_VERSION} %env PYTHON_VERIONS={PYTHON_VERSION} %env USE_KFP_SA={USE_KFP_SA} %env ENABLE_TUNING={ENABLE_TUNING} ``` ### Compile your pipeline code You can build and upload the pipeline to the AI Platform Pipelines instance in one step, using the `tfx pipeline create` command. 
The `tfx pipeline create` goes through the following steps: - (Optional) Builds the custom image to that provides a runtime environment for TFX components or uses the latest image of the installed TFX version - Compiles the pipeline code into a pipeline package - Uploads the pipeline package via the `ENDPOINT` to the hosted AI Platform instance. As you debug the pipeline DSL, you may prefer to first use the `tfx pipeline compile` command, which only executes the compilation step. After the DSL compiles successfully you can use `tfx pipeline create` to go through all steps. ``` !tfx pipeline compile --engine kubeflow --pipeline_path runner.py ``` Note: you should see a `{PIPELINE_NAME}.tar.gz` file appear in your current `/pipeline` directory. ## Exercise: deploy your pipeline container to AI Platform Pipelines with TFX CLI After the pipeline code compiles without any errors you can use the `tfx pipeline create` command to perform the full build and deploy the pipeline. You will deploy your compiled pipeline container hosted on Google Container Registry e.g. `gcr.io/[PROJECT_ID]/[PIPELINE_NAME]` to run on AI Platform Pipelines with the TFX CLI. ``` # TODO: Your code here to use the TFX CLI to deploy your pipeline image to AI Platform Pipelines. !tfx pipeline create \ --pipeline_path=runner.py \ --endpoint={ENDPOINT} \ --build_target_image={CUSTOM_TFX_IMAGE} ``` **Hint**: review the [TFX CLI documentation](https://www.tensorflow.org/tfx/guide/cli#create) on the "pipeline group" to create your pipeline. You will need to specify the `--pipeline_path` to point at the pipeline DSL and runner defined locally in `runner.py`, `--endpoint`, and `--build_target_image` arguments using the environment variables specified above. Note: you should see a `build.yaml` file in your pipeline folder created by skaffold. The TFX CLI compile triggers a custom container to be built with skaffold using the instructions in the `Dockerfile`. 
If you need to redeploy the pipeline you can first delete the previous version using `tfx pipeline delete` or you can update the pipeline in-place using `tfx pipeline update`. To delete the pipeline: `tfx pipeline delete --pipeline_name {PIPELINE_NAME} --endpoint {ENDPOINT}` To update the pipeline: `tfx pipeline update --pipeline_path runner.py --endpoint {ENDPOINT}` ``` !tfx pipeline update --pipeline_path runner.py --endpoint {ENDPOINT} ``` ### Exercise: create a pipeline run with the TFX CLI After the pipeline has been deployed, you can trigger and monitor pipeline runs using TFX CLI. *Hint*: review the [TFX CLI documentation](https://www.tensorflow.org/tfx/guide/cli#run_group) on the "run group". ``` # TODO: your code here to trigger a pipeline run with the TFX CLI !tfx run create --pipeline_name={PIPELINE_NAME} --endpoint={ENDPOINT} ``` ### Exercise: monitor your pipeline runs with the TFX CLI To view the status of existing pipeline runs: ``` !tfx run list --pipeline_name {PIPELINE_NAME} --endpoint {ENDPOINT} ``` To retrieve the status of a given run retrieved from the command above: ``` RUN_ID='[YOUR RUN ID]' !tfx run status --pipeline_name {PIPELINE_NAME} --run_id {RUN_ID} --endpoint {ENDPOINT} ``` ### Exercise: monitor your pipeline runs with the Kubeflow Pipelines UI On the [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page, click `OPEN PIPELINES DASHBOARD`. A new browser tab will open. Select the `Pipelines` tab to the left where you see the `PIPELINE_NAME` pipeline you deployed previously. Click on the most recent pipeline version which will open up a window with a visualization of your TFX pipeline directed graph. Pipeline components are represented as named boxes with direct arrows representing artifact dependencies and the execution order of your ML workflow. Next, click the `Experiments` tab. 
You will see your pipeline name under `Experiment name` with a downward arrow that allows you to view all active and previous runs. Click on the pipeline run that you triggered in the step above.
github_jupyter
# Table of Contents <div class="toc" style="margin-top: 1em;"><ul class="toc-item" id="toc-level0"><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Fatiamento-no-ndarray-bidimensional" data-toc-modified-id="Fatiamento-no-ndarray-bidimensional-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Fatiamento no ndarray bidimensional</a></span><ul class="toc-item"><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Inicializando-um-array-e-mudando-o-seu-shape" data-toc-modified-id="Inicializando-um-array-e-mudando-o-seu-shape-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Inicializando um array e mudando o seu shape</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Fatiamento-de-linhas-e-colunas-de-um-array" data-toc-modified-id="Fatiamento-de-linhas-e-colunas-de-um-array-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Fatiamento de linhas e colunas de um array</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Fatiamento-de-elementos-especรญficos-de-um-array" data-toc-modified-id="Fatiamento-de-elementos-especรญficos-de-um-array-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Fatiamento de elementos especรญficos de um array</a></span></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Fatiamento-com-รญndices-invertidos" data-toc-modified-id="Fatiamento-com-รญndices-invertidos-1.4"><span class="toc-item-num">1.4&nbsp;&nbsp;</span>Fatiamento com รญndices invertidos</a></span></li></ul></li><li><span><a href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Documentaรงรฃo-Oficial-Numpy" data-toc-modified-id="Documentaรงรฃo-Oficial-Numpy-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Documentaรงรฃo Oficial Numpy</a></span></li><li><span><a 
href="http://localhost:8888/notebooks/ia898/master/tutorial_numpy_1_3.ipynb#Links-Interessantes" data-toc-modified-id="Links-Interessantes-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Links Interessantes</a></span></li></ul></div> # Fatiamento no ndarray bidimensional Um recurso importante do numpy รฉ o fatiamento no qual รฉ possรญvel acessar partes do array de diversas formas, como pode ser visto abaixo: ## Inicializando um array e mudando o seu shape ``` %matplotlib inline import numpy as np from PIL import Image a = np.arange(20) # a รฉ um vetor unidimensional de 20 elementos print(a) a = a.reshape(4,5) # a agora รฉ um array 4x5 (4 linhas por 5 colunas) print('a.reshape(4,5) = \n', a) ``` ## Fatiamento de linhas e colunas de um array O operador : indica que todos os elementos naquela dimensรฃo devem ser acessados. ``` print('A segunda linha do array: \n', a[1,:]) # A segunda linha รฉ o รญndice 1 print(' A primeira coluna do array: \n', a[:,0]) # A primeira coluna corresponde ao รญndice 0 ``` ## Fatiamento de elementos especรญficos de um array ``` print('Acessando as linhas do array de 2 em 2 comeรงando pelo รญndice 0: \n', a[0::2,:]) print(' Acessando as linhas e colunas do array de 2 em 2 comeรงando pela linha 0 e coluna 1: \n', a[0::2,1::2]) ``` ## Fatiamento com รญndices invertidos ``` print("Acesso as duas รบltimas linhas do array em ordem reversa:\n", a[-1:-3:-1,:]) print("Acesso elemento na รบltima linha e coluna do array:\n", a[-1,-1]) print("Invertendo a ordem das linhas do array:\n", a[::-1,:]) ``` # Documentaรงรฃo Oficial Numpy [Scipy.org Princรญpios bรกsicos de indexaรงรฃo de arrays](https://docs.scipy.org/doc/numpy/user/basics.indexing.html) # Links Interessantes [Scipy-lectures: operaรงรตes avanรงadas com fatiamento](http://scipy-lectures.github.io/intro/numpy/array_object.html#fancy-indexing)
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Dogs vs Cats Image Classification With Image Augmentation <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c02_dogs_vs_cats_with_augmentation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c02_dogs_vs_cats_with_augmentation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> In this tutorial, we will discuss how to classify images into pictures of cats or pictures of dogs. We'll build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`. ## Specific concepts that will be covered: In the process, we will build practical experience and develop intuition around the following concepts * Building _data input pipelines_ using the `tf.keras.preprocessing.image.ImageDataGenerator`class โ€” How can we efficiently work with data on disk to interface with our model? * _Overfitting_ - what is it, how to identify it, and how can we prevent it? 
* _Data Augmentation_ and _Dropout_ - Key techniques to fight overfitting in computer vision tasks that we will incorporate into our data pipeline and image classifier model. ## We will follow the general machine learning workflow: 1. Examine and understand data 2. Build an input pipeline 3. Build our model 4. Train our model 5. Test our model 6. Improve our model/Repeat the process <hr> **Before you begin** Before running the code in this notebook, reset the runtime by going to **Runtime -> Reset all runtimes** in the menu above. If you have been working through several notebooks, this will help you avoid reaching Colab's memory limits. # Importing packages Let's start by importing required packages: * os โ€” to read files and directory structure * numpy โ€” for some matrix math outside of TensorFlow * matplotlib.pyplot โ€” to plot the graph and display images in our training and validation data ``` from __future__ import absolute_import, division, print_function, unicode_literals import os import numpy as np import matplotlib.pyplot as plt ``` For the TensorFlow imports, we directly specify Keras symbols (Sequential, Dense, etc.). This enables us to refer to these names directly in our code without having to qualify their full names (for example, `Dense` instead of `tf.keras.layer.Dense`). ``` import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator ``` # Data Loading To build our image classifier, we begin by downloading the dataset. The dataset we are using is a filtered version of <a href="https://www.kaggle.com/c/dogs-vs-cats/data" target="_blank">Dogs vs. Cats</a> dataset from Kaggle (ultimately, this dataset is provided by Microsoft Research). In previous Colabs, we've used <a href="https://www.tensorflow.org/datasets" target="_blank">TensorFlow Datasets</a>, which is a very easy and convenient way to use datasets. 
In this Colab however, we will make use of the class `tf.keras.preprocessing.image.ImageDataGenerator` which will read data from disk. We therefore need to directly download *Dogs vs. Cats* from a URL and unzip it to the Colab filesystem. ``` _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True) ``` The dataset we have downloaded has following directory structure. <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>cats_and_dogs_filtered</b> |__ <b>train</b> |______ <b>cats</b>: [cat.0.jpg, cat.1.jpg, cat.2.jpg ....] |______ <b>dogs</b>: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...] |__ <b>validation</b> |______ <b>cats</b>: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ....] |______ <b>dogs</b>: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...] </pre> We'll now assign variables with the proper file path for the training and validation sets. 
``` base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered') train_dir = os.path.join(base_dir, 'train') validation_dir = os.path.join(base_dir, 'validation') train_cats_dir = os.path.join(train_dir, 'cats') # directory with our training cat pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # directory with our training dog pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # directory with our validation cat pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') # directory with our validation dog pictures ``` ### Understanding our data Let's look at how many cats and dogs images we have in our training and validation directory ``` num_cats_tr = len(os.listdir(train_cats_dir)) num_dogs_tr = len(os.listdir(train_dogs_dir)) num_cats_val = len(os.listdir(validation_cats_dir)) num_dogs_val = len(os.listdir(validation_dogs_dir)) total_train = num_cats_tr + num_dogs_tr total_val = num_cats_val + num_dogs_val print('total training cat images:', num_cats_tr) print('total training dog images:', num_dogs_tr) print('total validation cat images:', num_cats_val) print('total validation dog images:', num_dogs_val) print("--") print("Total training images:", total_train) print("Total validation images:", total_val) ``` # Setting Model Parameters For convenience, let us set up variables that will be used later while pre-processing our dataset and training our network. ``` BATCH_SIZE = 100 IMG_SHAPE = 150 # Our training data consists of images with width of 150 pixels and height of 150 pixels ``` After defining our generators for training and validation images, **flow_from_directory** method will load images from the disk and will apply rescaling and will resize them into required dimensions using single line of code. # Data Augmentation Overfitting often occurs when we have a small number of training examples. 
# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
def plotImages(images_arr):
    """Render up to five images from `images_arr` side by side in one row."""
    fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    for image, axis in zip(images_arr, axes.flatten()):
        axis.imshow(image)
    plt.tight_layout()
    plt.show()
``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### Rotating the image The rotation augmentation will randomly rotate the image up to a specfied number of degrees. Here, we'll set it to 45. ``` image_gen = ImageDataGenerator(rescale=1./255, rotation_range=45) train_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE)) ``` To see the transformation in action, let's once again take a sample image from our training set and repeat it. The augmentation will be randomly applied (or not) to each repetition. ``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### Applying Zoom We can also apply Zoom augmentation to our dataset, zooming images up to 50% randomly. ``` image_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5) train_data_gen = image_gen.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE)) ``` One more time, take a sample image from our training set and repeat it. The augmentation will be randomly applied (or not) to each repetition. ``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### Putting it all together We can apply all these augmentations, and even others, with just one line of code, by passing the augmentations as arguments with proper values. Here, we have applied rescale, rotation of 45 degrees, width shift, height shift, horizontal flip, and zoom augmentation to our training images. 
``` image_gen_train = ImageDataGenerator( rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest') train_data_gen = image_gen_train.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE,IMG_SHAPE), class_mode='binary') ``` Let's visualize how a single image would look like five different times, when we pass these augmentations randomly to our dataset. ``` augmented_images = [train_data_gen[0][0][0] for i in range(5)] plotImages(augmented_images) ``` ### Creating Validation Data generator Generally, we only apply data augmentation to our training examples, since the original images should be representative of what our model needs to manage. So, in this case we are only rescaling our validation images and converting them into batches using ImageDataGenerator. ``` image_gen_val = ImageDataGenerator(rescale=1./255) val_data_gen = image_gen_val.flow_from_directory(batch_size=BATCH_SIZE, directory=validation_dir, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='binary') ``` # Model Creation ## Define the model The model consists of four convolution blocks with a max pool layer in each of them. Before the final Dense layers, we're also applying a Dropout probability of 0.5. This mean that 50% of the values coming into the Dropout layer will be set to zero. This helps to prevent overfitting. Then we have a fully connected layer with 512 units, with a `relu` activation function. The model will output class probabilities for two classes โ€” dogs and cats โ€” using `softmax`. 
``` model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Conv2D(128, (3,3), activation='relu'), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Dropout(0.5), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(2, activation='softmax') ]) ``` ### Compiling the model As usual, we will use the `adam` optimizer. Since we are output a softmax categorization, we'll use `sparse_categorical_crossentropy` as the loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so we are passing in the metrics argument. ``` model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) ``` ### Model Summary Let's look at all the layers of our network using **summary** method. ``` model.summary() ``` ### Train the model It's time we train our network. Since our batches are coming from a generator (`ImageDataGenerator`), we'll use `fit_generator` instead of `fit`. ``` epochs=100 history = model.fit_generator( train_data_gen, steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))), epochs=epochs, validation_data=val_data_gen, validation_steps=int(np.ceil(total_val / float(BATCH_SIZE))) ) ``` ### Visualizing results of the training We'll now visualize the results we get after training our network. 
# Plot the training/validation accuracy and loss curves recorded by model.fit.
#
# Keras renamed the accuracy history keys from 'acc'/'val_acc' (TF 1.x) to
# 'accuracy'/'val_accuracy' (TF 2.x, matching metrics=['accuracy'] used when
# compiling this model). Look up whichever spelling is present so this cell
# does not raise KeyError under a modern runtime.
hist = history.history
acc = hist.get('accuracy', hist.get('acc'))
val_acc = hist.get('val_accuracy', hist.get('val_acc'))
loss = hist['loss']
val_loss = hist['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
github_jupyter
``` import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np # plot style sns.set_style('whitegrid') sns.set_style({'font.family': 'Times New Roman'}) %matplotlib inline df = pd.read_csv("data-policy-results.csv", sep='\t') df.head() len(df) # Pie chart a = df['Policy type'].value_counts() labels = a.index sizes = a #colors colors1 = plt.cm.Set2(np.linspace(0,1,3)) fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4.5)) axes[0].pie(sizes, colors = colors1, labels=labels, autopct='%1.1f%%', startangle=50, pctdistance=0.85) axes[0].add_patch(plt.Circle((0,0),0.70,fc='white')) axes[0].axis('equal') axes[0].set_title("Data policy strictness of top 30 economics journals") sns.countplot(x='Sharing mode', data=df, palette='Oranges', ax=axes[1]) axes[1].set_title("Recommended sharing mode per data policy") plt.tight_layout() plt.savefig('img/a.png', dpi=100) fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4.5)) sns.countplot(x='Adopt AEA', data=df, palette="Blues", ax=axes[0]) axes[0].set_title("Count of journals that adopt AEA data policy") axes[0].set_xlabel("Adopt AEA data policy") axes[0].set(ylim=(0, 22)) axes[0].set_yticks(np.arange(0, 22, 2)) sns.countplot(x='Adopt AEA', hue='Policy type', data=df, palette=colors1, ax=axes[1]) axes[1].set_title("Count of journals that adopt AEA data policy & strictness") axes[1].set_xlabel("Adopt AEA data policy") axes[1].set(ylim=(0, 22)) axes[1].set_yticks(np.arange(0, 22, 2)) #plt.legend(loc=1) legend = plt.legend(frameon = 1) frame = legend.get_frame() frame.set_facecolor('white') #frame.set_edgecolor('gray') plt.tight_layout() plt.savefig('img/b.png', dpi=100) df = pd.read_csv("econ.csv") a = df.sum() df1 = a[['stata', 'julia', 'python','R','C','C++','matlab','fortran','sas']] df1.values from __future__ import division pom = (df1.values * 100 / df1.values.sum()) l = pd.Series(pom, index=df1.index) u = l.sort_values(ascending=False) u s = df[['stata', 'julia', 
'python','R','C','C++','matlab','fortran','sas']] no_sw = s.sum(axis=1).astype(int).value_counts(sort=False) no_sw tot=len(s) no_ = no_sw.values*100/tot no_ = pd.Series(no_, index=no_sw.index) no_ fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4.5)) sns.barplot(x=no_.index, y=no_.values, palette="Blues", ax=axes[0]) axes[0].set_ylabel("Percentage [%]") axes[0].set_xlabel("Number of software tools") axes[0].set_title("Number of software tools (out of listed) used in publications") sns.barplot(x=u.index, y=u.values, palette="deep", ax=axes[1]) axes[1].set_ylabel("Percentage [%]") axes[1].set_title("Most used software in AER publications from 1999 to 2018") plt.tight_layout() plt.savefig('img/c.png', dpi=100) ```
github_jupyter
# Recommender System: - The last thing to do is to use our saved models to recommend items to users: ### For the requested user: - Calculate the score for every item. - Sort the items based on the score and output the top results. ### Check which users exist on the test set
```
!pip install ipython-autotime #### To measure all running time
# https://github.com/cpcloud/ipython-autotime
%load_ext autotime
import pandas as pd
import pickle
import pandas as pd
import numpy as np
import os
#Keras
from keras.models import load_model
from keras import backend as K
# Tensorflow
import tensorflow as tf
from sklearn.metrics import mean_squared_error
```
### Set and Check GPUs
```
def set_check_gpu():
    """Configure the TensorFlow session used by Keras to see all 8 GPUs,
    enable per-GPU memory growth, and print version/GPU diagnostics.

    Side effects only: installs a tf.Session as the Keras default session
    and sets CUDA_* environment variables. No return value.
    """
    cfg = K.tf.ConfigProto()
    cfg.gpu_options.per_process_gpu_memory_fraction =1 # allow all of the GPU memory to be allocated
    # for 8 GPUs
    cfg.gpu_options.visible_device_list = "0,1,2,3,4,5,6,7" # "0,1"
    # for 1 GPU
    # cfg.gpu_options.visible_device_list = "0"
    #cfg.gpu_options.allow_growth = True # # Don't pre-allocate memory; dynamically allocate the memory used on the GPU as-needed
    #cfg.log_device_placement = True # to log device placement (on which device the operation ran)
    sess = K.tf.Session(config=cfg)
    K.set_session(sess) # set this TensorFlow session as the default session for Keras
    print("* TF version: ", [tf.__version__, tf.test.is_gpu_available()])
    print("* List of GPU(s): ", tf.config.experimental.list_physical_devices() )
    print("* Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
    # set for 8 GPUs
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7";
    # set for 1 GPU
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0";
    # Tf debugging option
    tf.debugging.set_log_device_placement(True)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)

# NOTE(review): cell layout was lost when the notebook was flattened; these
# diagnostic prints appear to be a separate cell run without calling the
# function above — confirm against the original notebook.
# print(tf.config.list_logical_devices('GPU'))
print(tf.config.experimental.list_physical_devices('GPU'))
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# set_check_gpu()
from sklearn.model_selection import train_test_split
review_data = pd.read_csv('../data/amazon_reviews_us_Shoes_v1_00_help_voted_And_cut_lognTail.csv')
review_data.rename(columns={ 'star_rating': 'score','customer_id': 'user_id', 'user': 'user_name'}, inplace=True)
# Build contiguous integer ids for items and users; the embedding model is
# trained on these mapped ids, and item_map/user_map invert them later.
items = review_data.product_id.unique()
item_map = {i:val for i,val in enumerate(items)}
inverse_item_map = {val:i for i,val in enumerate(items)}
review_data["old_item_id"] = review_data["product_id"] # copying for join with metadata
review_data["item_id"] = review_data["product_id"].map(inverse_item_map)
items = review_data.item_id.unique()
print ("We have %d unique items in metadata "%items.shape[0])
users = review_data.user_id.unique()
user_map = {i:val for i,val in enumerate(users)}
inverse_user_map = {val:i for i,val in enumerate(users)}
review_data["old_user_id"] = review_data["user_id"]
review_data["user_id"] = review_data["user_id"].map(inverse_user_map)
items_reviewed = review_data.product_id.unique()
review_data["old_item_id"] = review_data["product_id"] # copying for join with metadata
review_data["item_id"] = review_data["product_id"].map(inverse_item_map)
items_reviewed = review_data.item_id.unique()
users = review_data.user_id.unique()
helpful_votes = review_data.helpful_votes.unique()
# 90/10 train/test split used for choosing which users to build examples for.
ratings_train, ratings_test = train_test_split( review_data, test_size=0.1, random_state=0)
ratings_test.user_id.value_counts().sort_values(ascending=False).head(10)
```
### ** Create a recommendation example dataset of 100 users from the test set and all items for each and predict
recommendations for them ``` items = review_data.product_id.unique() item_map = {i:val for i,val in enumerate(items)} inverse_item_map = {val:i for i,val in enumerate(items)} review_data["old_item_id"] = review_data["product_id"] # copying for join with metadata review_data["item_id"] = review_data["product_id"].map(inverse_item_map) items = review_data.item_id.unique() print ("We have %d unique items in metadata "%items.shape[0]) # all_info['description'] = all_info['description'].fillna(all_info['title'].fillna('no_data')) # all_info['title'] = all_info['title'].fillna(all_info['description'].fillna('no_data').apply(str).str[:20]) # all_info['image'] = all_info['image'].fillna('no_data') # all_info['price'] = pd.to_numeric(all_info['price'],errors="coerce") # all_info['price'] = all_info['price'].fillna(all_info['price'].median()) review_data.head(n=2) type(review_data['product_id'].unique()) # creating metadata mappings titles = review_data['product_title'].unique() titles_map = {i:val for i,val in enumerate(titles)} inverse_titles_map = {val:i for i,val in enumerate(titles)} # price = review_data['price'].unique() # price_map = {i:val for i,val in enumerate(price)} # inverse_price_map = {val:i for i,val in enumerate(price)} # print ("We have %d prices" %price.shape) print ("We have %d titles" %titles.shape) # all_info['price_id'] = all_info['price'].map(inverse_price_map) review_data['title_id'] = review_data['product_title'].map(inverse_titles_map) # creating dict from item2prices = {} # for val in review_data[['item_id','price_id']].dropna().drop_duplicates().iterrows(): # item2prices[val[1]["item_id"]] = val[1]["price_id"] item2titles = {} # for val in all_info[['item_id','title_id']].dropna().drop_duplicates().iterrows(): # item2titles[val[1]["item_id"]] = val[1]["title_id"] # populating the rating dataset with item metadata info # ratings_train["price_id"] = ratings_train["item_id"].map(lambda x : item2prices[x]) # ratings_train["title_id"] = 
ratings_train["item_id"].map(lambda x : item2titles[x]) # populating the test dataset with item metadata info # ratings_test["price_id"] = ratings_test["item_id"].map(lambda x : item2prices[x]) # ratings_test["title_id"] = ratings_test["item_id"].map(lambda x : item2titles[x]) # ratings_test = pd.read_parquet('./data/ratings_test.parquet') # ratings_train = pd.read_parquet('./data/ratings_train.parquet') review_data.columns ``` ### Select products #### - use ALL product now. ``` items = review_data.item_id.unique() df_items = pd.DataFrame(data=items.flatten(),columns=['item_id']) df_items = pd.merge(df_items,review_data,how='left',left_on=('item_id'),right_on=('item_id')) ### use all products # df_items= df_items.sample(100) df_items['key'] = 1 print ("We have %d unique items "%df_items['item_id'].shape[0]) # df_items= df_items[['item_id', 'description', 'category', 'title', 'title_id', 'price', 'price_id', 'brand', 'key']] df_items= df_items[['item_id', 'product_id', 'score', 'product_title', 'helpful_votes', 'old_item_id', 'old_user_id', 'title_id', 'key']] print(df_items.shape) df_items.head(2) ``` ### Select 100 users ``` users = ratings_test.user_id.unique() df_users = pd.DataFrame(data=users.flatten(),columns=['user_id']) df_users = pd.merge(df_users,ratings_test,how='left',left_on=('user_id'),right_on=('user_id')) ### Select 100 users df_users= df_users.sample(100) df_users['key'] = 1 print ("We have %d unique users "%df_users['user_id'].shape[0]) df_users= df_users[['user_id', 'key']] print(df_users.shape) df_users.head(2) ``` ## Merge users and item and items metadata ``` df_unseenData= pd.merge(df_users, df_items, on='key') del df_unseenData['key'] print ("We have %d unique records in the recommendation example dataset "%df_unseenData.shape[0]) print(df_unseenData.shape) df_unseenData.sample(10) df_unseenData.columns df_unseenData from os import listdir from os.path import isfile, join mypath = '../models' onlyfiles = [f.replace('.h5', '') for f in 
listdir(mypath) if isfile(join(mypath, f))]
onlyfiles
```
## Predict the ratings for the items and users in the recommendation example dataset: ### - dense_5_Multiply_50_embeddings_10_epochs_dropout
```
load_path = "../models/"
# models =['dense_1_Multiply_50_embeddings_4_epochs_dropout',
#          'dense_5_Multiply_50_embeddings_10_epochs_dropout',
#          'matrix_facto_10_embeddings_100_epochs',
#          'dense_1_Multiply_50_embeddings_100_epochs_dropout']
# select the best model
models =[ 'dense_5_Multiply_50_embeddings_10_epochs_dropout' ]
# models_meta = [
#     'dense_5_Meta_Multiply_50_embeddings_10_epochs_dropout',
# ]
# Metadata-aware variant (4 model inputs) — disabled for this dataset:
# for mod in models:
#     model = load_model(load_path+mod+'.h5')
#     df_unseenData['preds_' + mod] = model.predict([df_unseenData['user_id'],
#                                                    df_unseenData['item_id'],
#                                                    df_unseenData['price_id'],
#                                                    df_unseenData['title_id']])
# Predict a score for every (user, item) row and store it in a per-model column.
for mod in models:
    model = load_model(load_path+mod+'.h5')
    df_unseenData['preds_' + mod] = model.predict([df_unseenData['user_id'],
                                                   df_unseenData['item_id']])
df_unseenData.head(2)
# df_unseenData.sort_values(by=['preds_dense_5_Multiply_50_embeddings_10_epochs_dropout', 'user_id'], ascending=False)
df_unseenData['user_id'].head(n=2)
df_unseenData.columns
df_unseenData.shape
```
## Check which users exist on the example set
```
# df_unseenData.T
df_unseenData.user_id.value_counts().sort_values(ascending=False).head(5)
df_unseenData[['user_id','preds_dense_5_Multiply_50_embeddings_10_epochs_dropout']].sort_values('preds_dense_5_Multiply_50_embeddings_10_epochs_dropout',ascending=True).head(5)
```
## A function that will return recommendation list for a given user
```
df_unseenData.tail(n=3)
load_path = "../models/"
def get_recommendations(userID , model_scr, df_Data):
    """Return the items of `df_Data` for user `userID`, ranked by model score.

    Parameters: userID — mapped integer user id; model_scr — name of the
    prediction column to rank by; df_Data — (user x item) rows holding the
    predictions. Returns a DataFrame indexed by item_id with the prediction
    column renamed to 'score', sorted descending.
    NOTE(review): when the user id is absent this returns the raw `userID`
    value rather than an empty frame — callers must handle that case.
    """
    if userID not in df_Data['user_id'].values:
        print("\nUser ID not found %d" %userID)
        return userID
    # print("\nRecommendations for user id %d Name: %s is:" % (userID, df_Data.loc[df_Data['user_id'] == userID, 'user_name'].values[0]))
    df_output=df_Data.loc[df_Data['user_id'] == userID][['item_id','product_title','helpful_votes', model_scr, ]].sort_values(model_scr,ascending=False).set_index('item_id')
    # print(df_output)
    df_output.rename(columns={model_scr: 'score'}, inplace=True)
    df_output = df_output.sort_values(by=['score'], ascending=False)
    #add ASIN form item_id
    # df_output['product_id'] = df_Data['item_id'].apply(item_map)
    return df_output
```
### Recommend items to a given user - Using dense_5_Multiply_50_embeddings_10_epochs_dropout
```
df_unseenData.columns
####### User ID: 63008
df_output = get_recommendations(userID=63008, model_scr='preds_dense_5_Multiply_50_embeddings_10_epochs_dropout', df_Data=df_unseenData)
print(df_output.shape)
df_output = df_output.drop_duplicates(subset='product_title')
print(df_output.shape)
df_output.head(10)
```
## Make predictions for another user using another model:
```
####### User ID
user_id = 26406
df_output = get_recommendations(userID=user_id, model_scr='preds_dense_5_Multiply_50_embeddings_10_epochs_dropout', df_Data=df_unseenData)
print(df_output.shape)
df_output = df_output.drop_duplicates(subset='product_title')
print(df_output.shape)
df_output.head(10)
# df_output.columns
# Map the internal item_id back to the Amazon ASIN and a product URL.
df_output = df_output.reset_index()
df_output['user_id'] = user_id
df_output['asin'] = df_output['item_id'].apply(lambda x : item_map[x])
df_output['url'] = df_output['item_id'].apply(lambda x : 'https://www.amazon.com/dp/'+item_map[x])
df_output = df_output[['user_id','item_id', 'score', 'asin', 'url', 'product_title']]
df_output.head()
unseenUser_list = df_unseenData['user_id'].values
print(len(unseenUser_list))
# NOTE(review): `unique_reviewer` is only defined in the next cell below —
# this line raises NameError on a fresh top-to-bottom run of the notebook.
len(unique_reviewer)
```
### select 100 users from unseen data
```
unique_reviewer = list(set(unseenUser_list.tolist()))
print("total number of users: ", len(unique_reviewer))
all_predicted_df = pd.DataFrame()
# Build the top-N recommendation table for every sampled user.
for user_id in unique_reviewer:
    print("selected 100 user_id:", user_id)
    df_output = get_recommendations(userID=user_id
                                    ,model_scr='preds_dense_5_Multiply_50_embeddings_10_epochs_dropout',df_Data=df_unseenData)
    # Attach ASIN + URL metadata and keep the 50 best-scoring distinct titles.
    df_output = df_output.reset_index()
    df_output['user_id'] = user_id
    df_output['asin'] = df_output['item_id'].apply(lambda x : item_map[x])
    df_output['url'] = df_output['item_id'].apply(lambda x : 'https://www.amazon.com/dp/'+item_map[x])
    df_output = df_output[['user_id','item_id', 'score', 'asin', 'url', 'product_title']]
    df_output = df_output.sort_values(by=['score'], ascending=False)
    # print(df_output.shape)
    df_output = df_output.drop_duplicates(subset='product_title')
    # print(df_output.shape)
    ####### select top product pre user
    df_output = df_output.head(n=50)
    #concat
    all_predicted_df = all_predicted_df.append(df_output)
# reset index
all_predicted_df = all_predicted_df.reset_index(drop=True)
# all_predicted_df
all_predicted_df.shape
all_predicted_df.columns
# all_predicted_df = all_predicted_df.drop_duplicates()
# all_predicted_df = all_predicted_df.reset_index()
# all_predicted_df.drop(columns=['index'])
all_predicted_df.shape
# Persist the predictions; the alternative file names below correspond to
# other user/product sampling configurations of this notebook.
#Shoes_for_100_users_per_20_products_prediction_Ver2.csv
# all_predicted_df.to_csv('Shoes_for_100_users_per_20_products_prediction_Ver3.csv', header=True, index=False)
# Shoes_for_100_users_per_100_products_prediction_Ver2
# all_predicted_df.to_csv('Shoes_for_100_users_per_100_products_prediction_Ver3.csv', header=True, index=False)
#Shoes_for_100_users_per_50_products_prediction_Ver2.csv
all_predicted_df.to_csv('Shoes_for_100_users_per_50_products_prediction_Ver3.csv', header=True, index=False)
#Shoes_for_100_users_per_ALL_products_prediction_Ver2.csv
# all_predicted_df.to_csv('Shoes_for_100_users_per_ALL_products_prediction_Ver3.csv', header=True, index=False)
#Shoes_for_ALL_users_per_ALL_products_prediction_Ver2.csv
# all_predicted_df.to_csv('Shoes_for_ALL_users_per_ALL_products_prediction_Ver3.csv', header=True, index=False)
# !aws s3 cp Shoes_for_ALL_users_per_ALL_products_prediction_Ver2.csv s3://dse-cohort5-group1/3-Keras-DeepRecommender-for-Shoes/predictions/Shoes_for_ALL_users_per_ALL_products_prediction_Ver2.csv
```
github_jupyter
# About Welcome to the functionality examples notebook. This notebook is only intended for local use: it's a place to try out and explore the `henchman` api without worrying about what will render in html on github or in the docs. ``` import pandas as pd import featuretools as ft es = ft.demo.load_retail() cutoff_times = pd.read_csv('../../../../Downloads/predict_may_sales.csv')[['customer_id', 'cutoff_time', 'total']] cutoff_times['cutoff_time'] = pd.to_datetime(cutoff_times['cutoff_time']) fm, features = ft.dfs(entityset=es, target_entity='customers', cutoff_time=cutoff_times, verbose=True) es ``` # Diagnostics ``` from henchman.diagnostics import overview, warnings, column_report, profile overview(es['order_products'].df) column_report(es['order_products'].df) warnings(fm) ``` # Plotting ``` from henchman.plotting import show from henchman.plotting import (feature_importances, histogram, piechart, scatter, timeseries) show(piechart(es['orders'].df['cancelled']), title='Cancelled Orders') show(piechart(es['orders'].df['country'], mergepast=10), height=400, width=500) show(timeseries(es['customers'].df['first_orders_time'], es['customers'].df['customer_id'], n_bins=20, aggregate='count'), width=900, height=300) show(timeseries(es['order_products'].df['order_date'], es['order_products'].df['total'], aggregate='sum', n_bins=12), width=900, height=300) show(scatter(es['orders'].df['cancelled'], es['orders'].df['cancelled'], agg=es['orders'].df['country'], hover=True, aggregate='mean'), title='Cancelled by country', x_axis='Cancelled', y_axis='Cancelled', height=300, width=300) ``` # Selection ``` from henchman.selection import RandomSelect, Dendrogram from henchman.learning import inplace_encoder X = inplace_encoder(fm.copy()) y = X.pop('total') y = y > 1000 selector_1 = RandomSelect(n_feats=10) selector_1.fit(X) selector_1.transform(X).head() selector_2 = Dendrogram(X, max_threshes=500) from henchman.plotting import dendrogram show(dendrogram(selector_2)) 
selector_2._shuffle_all_representatives() X_p = selector_2.transform(X, n_feats=80) X_p.head() warnings(X_p) from henchman.learning import inplace_encoder, create_holdout, create_model from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import roc_auc_score import numpy as np splits = 5 scores, fit_model = create_model(X, y, RandomForestClassifier(), roc_auc_score, n_splits=splits) print('Average score of {:.2f} over {} splits (stdev {:.3f})'.format(np.mean(scores), splits, np.std(scores))) scores, fit_model2 = create_model(X_p, y, RandomForestClassifier(), roc_auc_score, n_splits=splits) print('Average score of {:.2f} over {} splits (stdev {:.3f})'.format(np.mean(scores), splits, np.std(scores))) show(feature_importances(X_p, fit_model2, n_feats=10), height=300) show(histogram(X['MAX(orders.SUM(order_products.total))'], y, col_max=5000)) from henchman.plotting import roc_auc show(roc_auc(X_p, y, RandomForestClassifier(), n_splits=splits), height=400, width=400) from henchman.plotting import f1 show(f1(X_p, y, RandomForestClassifier(), n_splits=splits), height=400, width=400) import numpy as np np.__version__ pd.__version__ ```
github_jupyter
```
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
DEVICE = "cpu"
# if torch.cuda.is_available():
#     DEVICE = "cuda"
DEVICE
class Memory(nn.Module):
    """External memory bank of a Neural Turing Machine.

    Holds an N x M matrix per batch element; supports content+location
    addressing (`address`) and weighted read/erase/add operations.
    """
    def __init__(self, N, M):
        super().__init__()
        self.N = N
        self.M = M
        self.size = [self.N, self.M]
        # Learned-ish initial memory state, replicated per batch in reset().
        self.register_buffer("memory_bias", torch.Tensor(N, M))
        stdev = 1 / (np.sqrt(N + M))
        nn.init.uniform_(self.memory_bias, -stdev, stdev)
    def reset(self, batch_size=1):
        # Restore memory to its initial contents for a new sequence.
        self.batch_size = batch_size
        self.memory = self.memory_bias.clone().repeat(batch_size, 1, 1)
    def read(self, w):
        # Attention-weighted sum of memory rows; w: (batch, N) -> (batch, M).
        data = torch.matmul(w.unsqueeze(1), self.memory).squeeze(1)
        return data
    def write(self, w, e_gate, a_gate):
        # Standard NTM write: erase then add, both weighted by w.
        self.flashback = self.memory
        self.memory = torch.Tensor(self.batch_size, self.N, self.M)
        erase = torch.matmul(w.unsqueeze(-1), e_gate.unsqueeze(1))
        add = torch.matmul(w.unsqueeze(-1), a_gate.unsqueeze(1))
        self.memory = self.flashback * (1 - erase) + add
    def address(self, k, b, g, s, y, w_prev):
        # Content similarity -> interpolation with previous weights ->
        # rotational shift -> sharpening (NTM addressing pipeline).
        wc = self._similarity(k, b)
        wg = self._interpolate(w_prev, wc, g)
        w_ = self._shift(wg, s)
        w = self._sharpen(w_, y)
        return w
    def _similarity(self, k, b):
        # Cosine similarity of key k against each memory row, softmaxed with
        # key strength b; 1e-16 guards against zero vectors.
        k = k.view(self.batch_size, 1, -1)
        similarity = F.cosine_similarity(self.memory + 1e-16, k + 1e-16, dim=-1)
        content_weight = F.softmax(b * similarity, dim=1)
        return content_weight
    def _interpolate(self, w_prev, wc, g):
        # Blend content weights with previous weights via gate g in [0, 1].
        focus = g * wc + (1 - g) * w_prev
        return focus
    def _shift(self, wg, s):
        # Circular convolution of each batch row with its shift kernel s.
        shift = torch.zeros(wg.size())
        for batch in range(self.batch_size):
            shift[batch] = _convolve(wg[batch], s[batch])
        return shift
    def _sharpen(self, w_, y):
        # Raise to power y >= 1 and renormalize to concentrate the weighting.
        w = w_ ** y
        w = torch.div(w, torch.sum(w, dim=1).view(-1, 1) + 1e-16)
        return w
def _convolve(w, s):
    """Circular convolution of weighting w with 3-element shift kernel s."""
    t = torch.cat([w[-1:], w, w[:1]])
    c = F.conv1d(t.view(1, 1, -1), s.view(1, 1, -1)).view(-1)
    return c
class ReadHead(nn.Module):
    """NTM read head: maps controller state to addressing params, reads memory."""
    def __init__(self, memory, controller_size):
        super().__init__()
        self.memory = memory
        self.N, self.M = self.memory.size
        self.controller_size = controller_size
        # One linear projection per addressing parameter.
        self.key = nn.Linear(self.controller_size, self.M)
        self.key_strength = nn.Linear(self.controller_size, 1)
        self.interpolation_gate = nn.Linear(self.controller_size, 1)
        self.shift_weighting = nn.Linear(self.controller_size, 3)
        self.sharpen_factor = nn.Linear(self.controller_size, 1)
        self.is_read_head = True
        self.reset()
    def _address(self, k, b, g, s, y, w_prev):
        # Squash raw projections into their valid ranges before addressing.
        k = k.clone()
        b = F.softplus(b)
        g = torch.sigmoid(g)
        s = torch.softmax(s, dim=1)
        y = 1 + F.softplus(y)
        w = self.memory.address(k, b, g, s, y, w_prev)
        return w
    def forward(self, controller_state, w_prev):
        k = self.key(controller_state)
        b = self.key_strength(controller_state)
        g = self.interpolation_gate(controller_state)
        s = self.shift_weighting(controller_state)
        y = self.sharpen_factor(controller_state)
        w = self._address(k, b, g, s, y, w_prev)
        data = self.memory.read(w)
        return data, w
    def create_new_state(self, batch_size):
        return torch.zeros(batch_size, self.N)
    def reset(self):
        nn.init.xavier_uniform_(self.key.weight, gain=1.4)
        nn.init.xavier_uniform_(self.key_strength.weight, gain=1.4)
        nn.init.xavier_uniform_(self.interpolation_gate.weight, gain=1.4)
        nn.init.xavier_uniform_(self.shift_weighting.weight, gain=1.4)
        nn.init.xavier_uniform_(self.sharpen_factor.weight, gain=1.4)
        nn.init.normal_(self.key.bias, std=0.01)
        nn.init.normal_(self.key_strength.bias, std=0.01)
        nn.init.normal_(self.interpolation_gate.bias, std=0.01)
        nn.init.normal_(self.shift_weighting.bias, std=0.01)
        nn.init.normal_(self.sharpen_factor.bias, std=0.01)
class WriteHead(nn.Module):
    """NTM write head: like ReadHead, plus erase/add vectors to mutate memory."""
    def __init__(self, memory, controller_size):
        super().__init__()
        self.memory = memory
        self.N, self.M = self.memory.size
        self.controller_size = controller_size
        self.key = nn.Linear(self.controller_size, self.M)
        self.key_strength = nn.Linear(self.controller_size, 1)
        self.interpolation_gate = nn.Linear(self.controller_size, 1)
        self.shift_weighting = nn.Linear(self.controller_size, 3)
        self.sharpen_factor = nn.Linear(self.controller_size, 1)
        self.erase = nn.Linear(self.controller_size, self.M)
        self.add = nn.Linear(self.controller_size, self.M)
        self.is_read_head = False
        self.reset()
    def _address(self, k, b, g, s, y, w_prev):
        k = k.clone()
        b = F.softplus(b)
        g = torch.sigmoid(g)
        s = torch.softmax(s, dim=1)
        y = 1 + F.softplus(y)
        w = self.memory.address(k, b, g, s, y, w_prev)
        return w
    def forward(self, controller_state, w_prev):
        k = self.key(controller_state)
        b = self.key_strength(controller_state)
        g = self.interpolation_gate(controller_state)
        s = self.shift_weighting(controller_state)
        y = self.sharpen_factor(controller_state)
        e = self.erase(controller_state)
        a = self.add(controller_state)
        e = torch.sigmoid(e)  # erase gate must lie in [0, 1]
        w = self._address(k, b, g, s, y, w_prev)
        self.memory.write(w, e, a)
        return w
    def create_new_state(self, batch_size):
        return torch.zeros(batch_size, self.N)
    def reset(self):
        nn.init.xavier_uniform_(self.key.weight, gain=1.4)
        nn.init.xavier_uniform_(self.key_strength.weight, gain=1.4)
        nn.init.xavier_uniform_(self.interpolation_gate.weight, gain=1.4)
        nn.init.xavier_uniform_(self.shift_weighting.weight, gain=1.4)
        nn.init.xavier_uniform_(self.sharpen_factor.weight, gain=1.4)
        nn.init.xavier_uniform_(self.erase.weight, gain=1.4)
        nn.init.xavier_uniform_(self.add.weight, gain=1.4)
        nn.init.normal_(self.key.bias, std=0.01)
        nn.init.normal_(self.key_strength.bias, std=0.01)
        nn.init.normal_(self.interpolation_gate.bias, std=0.01)
        nn.init.normal_(self.shift_weighting.bias, std=0.01)
        nn.init.normal_(self.sharpen_factor.bias, std=0.01)
        nn.init.normal_(self.erase.bias, std=0.01)
        nn.init.normal_(self.add.bias, std=0.01)
class Controller(nn.Module):
    """LSTM controller of the NTM, with learned initial hidden/cell state."""
    def __init__(self, no_input, no_output, no_layer):
        super().__init__()
        self.no_input = no_input
        self.no_output = no_output
        self.no_layer = no_layer
        self.size = [self.no_input, self.no_output]
        self.lstm = nn.LSTM(input_size =self.no_input, hidden_size=self.no_output, num_layers = self.no_layer)
        self.h_bias = nn.Parameter(torch.randn(self.no_layer, 1, self.no_output) * 0.05)
        self.c_bias = nn.Parameter(torch.randn(self.no_layer, 1, self.no_output) * 0.05)
        self.reset()
    def forward(self, data, prev_state):
        # Single time step: add the seq-len dim, run LSTM, strip it again.
        data = data.unsqueeze(0)
        output, state = self.lstm(data, prev_state)
        return output.squeeze(0), state
    def create_new_state(self, batch_size):
        h = self.h_bias.clone().repeat(1, batch_size, 1)
        c = self.c_bias.clone().repeat(1, batch_size, 1)
        return h, c
    def reset(self):
        for param in self.lstm.parameters():
            if param.dim()==1:
                nn.init.constant_(param, 0)
            else:
                stdev = 1 / (np.sqrt(self.no_input + self.no_output))
                nn.init.uniform_(param, -stdev, stdev)
class NTM(nn.Module):
    """Neural Turing Machine: LSTM controller + paired read/write heads + memory."""
    def __init__(self, no_input, no_output, controller_size, controller_layer, no_head, N, M):
        super().__init__()
        self.no_input = no_input
        self.no_output = no_output
        self.controller_size = controller_size
        self.controller_layer = controller_layer
        self.no_head = no_head
        self.N = N
        self.M = M
        self.memory = Memory(self.N, self.M)
        # Controller input = external input + one read vector per read head.
        self.controller = Controller(self.no_input + (self.M * self.no_head), self.controller_size, self.controller_layer)
        self.head = nn.ModuleList([])
        _, self.controller_size = self.controller.size
        for head_no in range(self.no_head):
            self.head += [
                ReadHead(self.memory, self.controller_size),
                WriteHead(self.memory, self.controller_size)
            ]
        # Learned initial read vectors (one per read head).
        self.no_read_head = 0
        self.read = []
        for head in self.head:
            if head.is_read_head:
                read_bias = torch.randn(1, self.M) * 0.01
                self.register_buffer("read{}_bias".format(self.no_read_head), read_bias.data)
                self.read += [read_bias]
                self.no_read_head += 1
        self.fc = nn.Linear(self.controller_size + self.no_read_head * self.M, self.no_output)
        self.reset()
    def create_new_state(self, batch_size):
        read = [r.clone().repeat(batch_size, 1) for r in self.read]
        controller_state = self.controller.create_new_state(batch_size)
        head_state = [head.create_new_state(batch_size) for head in self.head]
        return read, controller_state, head_state
    def init_sequence(self, batch_size):
        # Must be called before each new sequence to reset memory and state.
        self.batch_size = batch_size
        self.memory.reset(batch_size)
        self.previous_state = self.create_new_state(batch_size)
    def forward(self, x=None):
        # x=None feeds zeros: used during the output phase of the copy task.
        if x is None:
            x = torch.zeros(self.batch_size, self.no_input)
        prev_read, prev_controller_state, prev_head_state = self.previous_state
        inp = torch.cat([x] + prev_read, dim=1)
        controller_output, controller_state = self.controller(inp, prev_controller_state)
        reads = []
        head_state = []
        for head, prev_head_state in zip(self.head, prev_head_state):
            if head.is_read_head:
                r, h_state = head(controller_output, prev_head_state)
                reads += [r]
            else:
                h_state = head(controller_output, prev_head_state)
            head_state += [h_state]
        out = torch.cat([controller_output] + reads, dim=1)
        out = torch.sigmoid(self.fc(out))
        self.previous_state = (reads, controller_state, head_state)
        return out, self.previous_state
    def reset(self):
        nn.init.xavier_uniform_(self.fc.weight, gain=1)
        nn.init.normal_(self.fc.bias, std=0.01)
    def no_param(self):
        # Total number of trainable scalars in the model.
        no_param = 0
        for param in self.parameters():
            no_param += param.data.view(-1).size(0)
        return no_param
def dataloader(no_batch, batch_size, seq_width, min_len, max_len):
    """Yield (batch_no, input, target) for the copy task.

    Input is the random binary sequence plus a delimiter channel/time step;
    the target is the sequence itself.
    """
    for batch_no in range(no_batch):
        seq_len = np.random.randint(min_len, max_len)
        seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))
        seq = torch.from_numpy(seq)
        inp = torch.zeros(seq_len+1, batch_size, seq_width+1)
        inp[:seq_len, :, :seq_width] = seq
        inp[seq_len, :, seq_width] = 1  # delimiter marks end of input
        out = seq.clone()
        yield batch_no+1, inp.float(), out.float()
no_input = 9
no_output = 8
controller_size = 100
controller_layer = 1
no_head = 1
N = 128
M = 20
COPIER = NTM(no_input=no_input, no_output=no_output, controller_size=controller_size, controller_layer=controller_layer, no_head=no_head, N=N, M=M).to(DEVICE)
def progress_clean():
    """Clean the progress bar."""
    print("\r{}".format(" " * 80), end='\r')
def progress_bar(batch_num, report_interval, last_loss):
    """Prints the progress until the next report."""
    progress = (((batch_num-1) % report_interval) + 1) / report_interval
    fill = int(progress * 40)
    print("\r[{}{}]: {} (Loss: {:.4f})".format( "=" * fill, " " * (40 - fill), batch_num, last_loss), end='')
def save_checkpoint(net, name, args, batch_num, losses, costs, seq_lengths):
    """Persist model weights and training history to the checkpoint path."""
    progress_clean()
    basename = "{}/{}-{}-batch-{}".format(args.checkpoint_path, name, args.seed, batch_num)
    model_fname = basename + ".model"
    LOGGER.info("Saving model checkpoint to: '%s'", model_fname)
    torch.save(net.state_dict(), model_fname)
    # Save the training history
    train_fname = basename + ".json"
    LOGGER.info("Saving model training history to '%s'", train_fname)
    content = { "loss": losses, "cost": costs, "seq_lengths": seq_lengths }
    open(train_fname, 'wt').write(json.dumps(content))
def clip_grads(net):
    """Gradient clipping to the range [-10, 10]."""
    parameters = list(filter(lambda p: p.grad is not None, net.parameters()))
    for p in parameters:
        p.grad.data.clamp_(-10, 10)
import logging
import time
LOGGER = logging.getLogger(__name__)
def get_ms():
    """Returns the current time in miliseconds."""
    return time.time() * 1000
# Training loop: feed the sequence, then query with zero inputs and compare
# the emitted outputs against the original sequence (copy task).
no_batch = 50000
batch_size = 1
loss = nn.BCELoss()
optimizer = torch.optim.RMSprop(COPIER.parameters(), momentum=0.9, alpha=0.95, lr=1e-4)
errors = []
costs = []
seq_length = []
start_ms = get_ms()
for batch_no, x, y in dataloader(no_batch=no_batch, batch_size=batch_size, seq_width=8, min_len=1, max_len=20):
    optimizer.zero_grad()
    inp_seq_len = x.size(0)
    out_seq_len = y.size(0)
    LOGGER.info("Training model for %d batches (batch_size=%d)...", no_batch, batch_size)
    COPIER.init_sequence(batch_size)
    for i in range(inp_seq_len):
        COPIER(x[i])
    y_ = torch.zeros(y.size())
    for i in range(out_seq_len):
        y_[i], _ = COPIER()
    error = loss(y_, y)
    error.backward()
    clip_grads(COPIER)
    optimizer.step()
    # Cost = number of wrongly reproduced bits after thresholding at 0.5.
    y_binarized = y_.clone().data
    y_binarized.apply_(lambda x: 0 if x < 0.5 else 1)
    cost = torch.sum(torch.abs(y_binarized - y.data))
    errors.append(error.item())
    costs.append(cost.item()/batch_size)
    seq_length += [y.size(0)]
    progress_bar(batch_no, 200, error)
    # Report
    if batch_no % 200 == 0:
        mean_loss = np.array(errors[-200:]).mean()
        mean_cost = np.array(costs[-200:]).mean()
        mean_time = int(((get_ms() - start_ms) / 200) / batch_size)
        progress_clean()
        print("Mean Time: {} ms".format(mean_time))
        print("Mean Cost: {}".format(mean_cost))
        print("Mean Loss: {}".format(mean_loss))
        print("=====================================")
        LOGGER.info("Batch %d Loss: %.6f Cost: %.2f Time: %d ms/sequence", batch_no, mean_loss, mean_cost, mean_time)
        start_ms = get_ms()
    # # Checkpoint
    # if (1000 != 0) and (batch_no % 1000 == 0):
    #     save_checkpoint(copier, "copier"+str(batch_no), args,
    #                     batch_0, losses, costs, seq_lengths)
```
# Network has successfully learnt to copy memory elements
github_jupyter
``` import numpy as np from os.path import isfile from scipy.io import loadmat from collections import OrderedDict from config import DATASET from train_classifiers import train_classifier from utils import compute_kernel, compute_precrec from utils import get_labels, _n_classes, _set_sizes # EXP_NAME = 'FK' EXP_NAME = 'imagenet-caffe-alex' DIR_DATA = './feature_extraction/' + EXP_NAME + '/codes/' DIR_SAVE = './feature_extraction/' + EXP_NAME + '/compdata/' TrainList = loadmat(DIR_DATA + EXP_NAME + '_train_files.mat') TrainList = TrainList['train_chunks'] TrainList = np.squeeze(TrainList) TrainList = np.concatenate(TrainList, axis=0) ValList = loadmat(DIR_DATA + EXP_NAME + '_val_files.mat') ValList = ValList['val_chunks'] ValList = np.squeeze(ValList) ValList = np.concatenate(ValList, axis=0) TestList = loadmat(DIR_DATA + EXP_NAME + '_test_files.mat') TestList = TestList['test_chunks'] TestList = np.squeeze(TestList) TestList = np.concatenate(TestList, axis=0) DataList = OrderedDict() DataList['train'] = TrainList DataList['val'] = ValList DataList['test'] = TestList if isfile(DIR_SAVE + 'Kernel.npy'): print('Loading the kernel matrix ...') K = np.load(DIR_SAVE + 'Kernel.npy') print('Kernel matrix is loaded.') else: K = compute_kernel(DataList) np.save(DIR_SAVE + 'Kernel.npy', K) def train_one_vs_all(K, train_set, all_epsilon, all_kappa): n_classes = _n_classes() set_sizes = _set_sizes() tr_size = 0 for ind, data in enumerate(DATASET): if data in train_set: tr_size += set_sizes[ind] K_tr = np.zeros((tr_size, tr_size)) idx = 0 for ind1, tr1 in enumerate(DATASET): if tr1 not in train_set: continue idy = 0 for ind2, tr2 in enumerate(DATASET): if tr2 not in train_set: continue K_tr[idx:set_sizes[ind1]+idx, idy:set_sizes[ind2]+idy] = K[ sum(set_sizes[:ind1]):sum(set_sizes[:ind1+1]), sum(set_sizes[:ind2]):sum(set_sizes[:ind2+1])] idy = set_sizes[ind2] idx = set_sizes[ind1] labels_raw = get_labels(train_set) alpha = np.array([train_classifier(K_tr, labels_raw, 
all_epsilon, all_kappa, nc) for nc in range(n_classes)]) return alpha def compute_score(K, alpha, train_set, test_set): n_classes = _n_classes() set_sizes = _set_sizes() tr_size = 0 ts_size = 0 for ind, data in enumerate(DATASET): if data in train_set: tr_size += set_sizes[ind] if data in test_set: ts_size += set_sizes[ind] K_tr_ts = np.zeros((tr_size, ts_size)) idx = 0 for ind1, tr1 in enumerate(DATASET): if tr1 not in train_set: continue idy = 0 for ind2, tr2 in enumerate(DATASET): if tr2 not in test_set: continue K_tr_ts[idx:set_sizes[ind1]+idx, idy:set_sizes[ind2]+idy] = K[ sum(set_sizes[:ind1]):sum(set_sizes[:ind1+1]), sum(set_sizes[:ind2]):sum(set_sizes[:ind2+1])] idy = set_sizes[ind2] idx = set_sizes[ind1] scores = np.zeros((ts_size, n_classes)) for ci in range(n_classes): scores[:,ci] = alpha[ci,:].dot(K_tr_ts) return scores train_set = ['train'] test_set = ['val'] all_epsilon = np.hstack([np.arange(1, 10) * 1e-4, np.arange(1, 10) * 1e-3, np.arange(1, 11) * 1e-2]) all_kappa = [np.inf] alpha_train = train_one_vs_all(K, train_set, all_epsilon, all_kappa) train_set = ['train'] test_set = ['val'] all_epsilon = np.hstack([np.arange(1, 10) * 1e-4, np.arange(1, 10) * 1e-3, np.arange(1, 11) * 1e-2]) all_kappa = [0.1, 0.2, 0.3, 0.4, 0.5, np.inf] if isfile(DIR_SAVE + 'alpha_train.npy'): print('Loading the trained classifiers ...') alpha_train = np.load(DIR_SAVE + 'alpha_train.npy') print('Classifiers are loaded.') else: alpha_train = train_one_vs_all(K, train_set, all_epsilon, all_kappa) np.save(DIR_SAVE + 'alpha_train.npy', alpha_train) AP = np.zeros((len(all_kappa), len(all_epsilon), _n_classes())) for ind_k in range(len(all_kappa)): for ind_e in range(len(all_epsilon)): scores = compute_score( K, alpha_train[:,:,ind_k,ind_e], train_set, test_set) labels = get_labels(test_set) AP[ind_k,ind_e,:] = compute_precrec(scores, labels) mAP = np.mean(AP, axis=2) mAP k_ind, e_ind = np.where(mAP == np.max(mAP[:-1,:])) c_ind, = np.where(mAP[-1,:] == np.max(mAP[-1,:])) 
train_set = ['train', 'val'] test_set = ['test'] if isfile(DIR_SAVE + 'alpha_rob.npy'): print('Loading the robust classifier ...') alpha_rob = np.load(DIR_SAVE + 'alpha_rob.npy') print('Classifier is loaded.') else: c_opt = [all_epsilon[c_ind[0]]] alpha_rob = train_one_vs_all(K, train_set, c_opt, [np.inf]).squeeze() np.save(DIR_SAVE + 'alpha_rob.npy', alpha_rob) if isfile(DIR_SAVE + 'alpha_dro.npy'): print('Loading the robust classifier ...') alpha_dro = np.load(DIR_SAVE + 'alpha_dro.npy') print('Classifier is loaded.') else: epsilon_opt = [all_epsilon[e_ind[0]]] kappa_opt = [all_kappa[k_ind[0]]] alpha_dro = train_one_vs_all(K, train_set, epsilon_opt, kappa_opt).squeeze() np.save(DIR_SAVE + 'alpha_dro.npy', alpha_dro) scores_dro = compute_score(K, alpha_dro, train_set, test_set) AP_dro = compute_precrec(scores_dro, get_labels(test_set)) scores_rob = compute_score(K, alpha_rob, train_set, test_set) AP_rob = compute_precrec(scores_rob, get_labels(test_set)) AP_rob.mean() AP_dro.mean() ```
github_jupyter
# DC Resistivity: 1D parametric inversion _Inverting for Resistivities and Layers_ Here we use the module *SimPEG.electromangetics.static.resistivity* to invert DC resistivity sounding data and recover the resistivities and layer thicknesses for a 1D layered Earth. In this tutorial, we focus on the following: - How to define sources and receivers from a survey file - How to define the survey - Defining a model that consists of resistivities and layer thicknesses For this tutorial, we will invert sounding data collected over a layered Earth using a Wenner array. The end product is layered Earth model which explains the data. ## Import modules ``` import os import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import pandas as pd from discretize import TensorMesh from SimPEG import ( maps, data, data_misfit, regularization, optimization, inverse_problem, inversion, directives ) from SimPEG.electromagnetics.static import resistivity as dc from SimPEG.electromagnetics.static.utils.StaticUtils import plot_layer mpl.rcParams.update({'font.size': 14}) ``` ## Define Parameters for the Inversion ``` # Define the file path to the data file. Also define the AB/2, MN/2 and apparent resistivity columns. # Recall that python counts starting at 0 data_filename = './sounding_data/Aung_San_Location_1_raw.csv' half_AB_column = 'AB/2 (m)' half_MN_column = 'MN/2 (m)' apparent_resistivity_column = 'App. Res. (Ohm m)' # Define the floor and percent uncertainty you would like to apply to apparent resistivity data uncertainty_floor = 5 uncertainty_percent = 10. # Define layer thicknesses and resistivities for the starting model. The thickness # of the bottom layer is not used, as we assume it extends downward to infinity. layer_thicknesses = np.r_[10, 10] halfspace_resistivity = 300. ``` ## Load Data, Define Survey and Plot Here we load the observed data, define the DC survey geometry and plot the data values. 
``` # Load data df = pd.read_csv(data_filename) # Extract source and receiver electrode locations and the observed data half_AB_separations = df[half_AB_column] half_MN_separations = df[half_MN_column] dobs = df[apparent_resistivity_column].values resistivities = halfspace_resistivity*np.ones(layer_thicknesses.size+1) # Define survey unique_tx, k = np.unique(half_AB_separations, return_index=True) n_sources = len(k) k = np.sort(k) k = np.r_[k, len(dobs)+1] source_list = [] for ii in range(0, n_sources): # MN electrode locations for receivers. Each is an (N, 3) numpy array M_locations = -half_MN_separations[k[ii]:k[ii+1]] M_locations = np.c_[M_locations, np.zeros((np.shape(M_locations)[0], 2))] N_locations = half_MN_separations[k[ii]:k[ii+1]] N_locations = np.c_[N_locations, np.zeros((np.shape(N_locations)[0], 2))] receiver_list = [dc.receivers.Dipole(M_locations, N_locations)] # AB electrode locations for source. Each is a (1, 3) numpy array A_location = np.r_[-half_AB_separations[k[ii]], 0., 0.] B_location = np.r_[half_AB_separations[k[ii]], 0., 0.] source_list.append(dc.sources.Dipole(receiver_list, A_location, B_location)) # Define survey survey = dc.Survey(source_list) # Compute the A, B, M and N electrode locations. survey.getABMN_locations() # Plot apparent resistivities on sounding curve as a function of Wenner separation # parameter. electrode_separations = np.sqrt( np.sum((survey.m_locations - survey.n_locations)**2, axis=1) ) fig, ax = plt.subplots(1, 1, figsize=(11, 5)) ax.loglog(half_AB_separations, dobs, 'b', lw=2) ax.grid(True, which='both', ls="--", c='gray') ax.set_xlabel("AB/2 (m)") ax.set_ylabel("Apparent Resistivity ($\Omega m$)") ``` ## Assign Uncertainties Inversion with SimPEG requires that we define uncertainties on our data. The uncertainty represents our estimate of the standard deviation of the noise on our data. 
``` uncertainties = uncertainty_floor + 0.01*uncertainty_percent*np.abs(dobs) ``` ## Define Data Here is where we define the data that are inverted. The data are defined by the survey, the observation values and the uncertainties. ``` data_object = data.Data(survey, dobs=dobs, standard_deviation=uncertainties) ``` ## Defining the Starting Model and Mapping ``` # Define the layers as a mesh mesh = TensorMesh([layer_thicknesses], '0') print(mesh) # Define model. We are inverting for the layer resistivities and layer thicknesses. # Since the bottom layer extends to infinity, it is not a model parameter for # which we need to invert. For a 3 layer model, there is a total of 5 parameters. # For stability, our model is the log-resistivity and log-thickness. starting_model = np.r_[np.log(resistivities), np.log(layer_thicknesses)] # Since the model contains two different properties for each layer, we use # wire maps to distinguish the properties. wire_map = maps.Wires(('rho', mesh.nC+1), ('t', mesh.nC)) resistivity_map = maps.ExpMap(nP=mesh.nC+1) * wire_map.rho layer_map = maps.ExpMap(nP=mesh.nC) * wire_map.t ``` ## Define the Physics Here we define the physics of the problem using the DCSimulation_1D class. ``` simulation = dc.simulation_1d.Simulation1DLayers( survey=survey, rhoMap=resistivity_map, thicknessesMap=layer_map, data_type="apparent_resistivity" ) ``` ## Define Inverse Problem The inverse problem is defined by 3 things: 1) Data Misfit: a measure of how well our recovered model explains the field data 2) Regularization: constraints placed on the recovered model and a priori information 3) Optimization: the numerical approach used to solve the inverse problem ``` # Define the data misfit. Here the data misfit is the L2 norm of the weighted # residual between the observed data and the data predicted for a given model. # The weighting is defined by the reciprocal of the uncertainties. 
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object) # Define the regularization on the parameters related to resistivity mesh_rho = TensorMesh([mesh.hx.size+1]) reg_rho = regularization.Simple( mesh_rho, alpha_s=1., alpha_x=1, mapping=wire_map.rho ) # Define the regularization on the parameters related to layer thickness mesh_t = TensorMesh([mesh.hx.size]) reg_t = regularization.Simple( mesh_t, alpha_s=1., alpha_x=1, mapping=wire_map.t ) # Combine to make regularization for the inversion problem reg = reg_rho + reg_t # Define how the optimization problem is solved. Here we will use an inexact # Gauss-Newton approach that employs the conjugate gradient solver. opt = optimization.InexactGaussNewton( maxIter=20, maxIterCG=30, print_type='ubc' ) # Define the inverse problem inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) ``` ## Define Inversion Directives Here we define any directives that are carried out during the inversion. This includes the cooling schedule for the trade-off parameter (beta), stopping criteria for the inversion and saving inversion results at each iteration. ``` # Defining a starting value for the trade-off parameter (beta) between the data # misfit and the regularization. starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1.) # Set the rate of reduction in trade-off parameter (beta) each time the # the inverse problem is solved. And set the number of Gauss-Newton iterations # for each trade-off paramter value. beta_schedule = directives.BetaSchedule(coolingFactor=2., coolingRate=1.) # Apply and update sensitivity weighting as the model updates update_sensitivity_weights = directives.UpdateSensitivityWeights() # Options for outputting recovered models and predicted data for each beta. save_iteration = directives.SaveOutputEveryIteration(save_txt=False) # Setting a stopping criteria for the inversion. 
target_misfit = directives.TargetMisfit(chifact=1) # The directives are defined in a list directives_list = [ starting_beta, beta_schedule, target_misfit ] ``` ## Running the Inversion To define the inversion object, we need to define the inversion problem and the set of directives. We can then run the inversion. ``` # Here we combine the inverse problem and the set of directives inv = inversion.BaseInversion(inv_prob, directives_list) # Run the inversion recovered_model = inv.run(starting_model) # Inversion result from Mon DRD Mawlamyine location 3 res_tmp = np.array([348.4, 722.9, 282, 100.8, 51.4, 170.8, 31.1, 184.3]) thick_tmp = np.array([1.4, 1.6, 1.4, 12.1, 11.4, 25.1, 54.2]) plotting_mesh_tmp = TensorMesh([np.r_[thick_tmp, layer_thicknesses[-1]]], '0') ``` ## Examining the Results ``` # Plot true model and recovered model fig, ax = plt.subplots(1, 1, figsize=(5, 5)) plotting_mesh = TensorMesh([np.r_[layer_map*recovered_model, layer_thicknesses[-1]]], '0') x_min = np.min(resistivity_map*recovered_model) x_max = np.max(resistivity_map*recovered_model) plot_layer(resistivity_map*recovered_model, plotting_mesh, ax=ax, depth_axis=False, color='k') #plot_layer(res_tmp, plotting_mesh_tmp, ax=ax, depth_axis=False, color='r') #ax.set_xlim(10, 5000) #ax.set_ylim(-300, 0) #ax.legend(("SimPEG", "Mon State DRD")) ax.grid(True, which='both', ls="--", c='gray') # Plot the true and apparent resistivities on a sounding curve fig, ax = plt.subplots(1, 1, figsize=(7, 5)) ax.loglog(half_AB_separations, dobs, 'kx', lw=2, ms=10, mew=2) ax.loglog(half_AB_separations, inv_prob.dpred, 'k', lw=2) ax.set_xlabel("AB/2 (m)") ax.set_ylabel("Apparent Resistivity ($\Omega m$)") ax.legend(['Observed data','Predicted data']) #ax.set_ylim(50, 1000) ax.grid(True, which='both') ```
github_jupyter
# **DIVE INTO CODE COURSE**

## **Graduation Assignment**

**Student Name**: Doan Anh Tien<br> **Student ID**: 1852789<br> **Email**: tien.doan.g0pr0@hcmut.edu.vn

## Introduction

The graduation assignment is based on one of the challenges from the Vietnamese competition **Zalo AI Challenge**. The challenge is described as follows:

> During the Covid-19 outbreak, the Vietnamese government pushed the "5K" public health safety message. In the message, masking and keeping a safe distance are two key rules that have been shown to be extremely successful in preventing people from contracting or spreading the virus. Enforcing these principles on a large scale is where technology may help. In this challenge, you will create an algorithm to detect whether or not a person or group of individuals in a picture adheres to the "mask" and "distance" standards.

**Basic rules**

We are given a dataset containing images of people who are either wearing a mask or not, and who are standing either close to or far from each other. Our mission is to predict whether the formation of these people adheres to the 5k standard. The 5k standard is based on two conditions: mask (0 == not wearing, 1 == wearing) and distancing (0 == too close, 1 == far enough). People who adhere to the 5k standard are unlikely to expose each other to the virus in case they have caught it, and the goal is to prevent the spread of the COVID-19 pandemic through person-to-person interactions.
--- ``` import tensorflow as tf tf.data.experimental.enable_debug_mode() print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU'))) #@title !pip install cloud_tpu_client from cloud_tpu_client import Client c = Client(tpu='') # For TPU runtime print(c.runtime_version()) #@title c.configure_tpu_version(tf.__version__, restart_type='ifNeeded') #@title print(c.runtime_version()) !nvidia-smi # For GPU runtime # For when the TPU is used tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.TPUStrategy(tpu) !pip install wandb from google.colab import drive drive.mount('/content/drive') %cd /content/drive/MyDrive/Colab Notebooks/DIVE INTO CODE/Graduation !ls ``` ## **1. Resources preparation** ### Libraries ``` import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import wandb from wandb.keras import WandbCallback from tensorflow.data import AUTOTUNE from tensorflow import keras from tensorflow.keras import layers from PIL import Image # Some libraries will be imported later throughout the notebook print('Tensorflow version:', tf.__version__) print('Keras version:', keras.__version__) ``` ### W&B login and init project ``` !wandb login 88c91a7dc6dd5574f423e38f852c6fe640a7fcd0 wandb.init(project="diveintocode-grad-1st-approach", entity="atien228") ``` ### Hyperparamaters ``` standard = 'mask' #@param ['mask', 'distancing'] SEED = 42 #@param {type:'integer'} wandb.config = { "learning_rate": 0.001, "epochs": 15, "batch_size": 16, "momentum": 0.85, "smoothing": 0.1 } ``` ### Preprocessing data-set ``` data_path = '/content/drive/MyDrive/Colab Notebooks/DIVE INTO CODE/Graduation/data' img_dir = os.path.join(data_path, 'images') os.listdir(img_dir)[:10] meta = pd.read_csv(os.path.join(data_path, 'train_meta.csv')) meta img1 = meta.iloc[0] print(r'Image ID: {}, Mask: {}, Distancing: 
{}'.format(img1['image_id'], img1['mask'], img1['distancing'])) img = Image.open(os.path.join(img_dir, img1['fname'])) img dataset = [] label = [] for idx, row in meta.iterrows(): if pd.notna(row[standard]): dataset.append(os.path.join(img_dir, row['fname'])) # Mask or distancing label.append(row[standard]) for i in range(5): print(f'img: {dataset[i]} label: {label[i]}') len(label_val) ``` Create a small portion of test set since the competition won't let me submit a new entry to check my score ``` df_test = df_train[1200:1500] label_test = label_train[1200:1500] df_train = df_train[:1200] df_val = df_val[:300] label_train = label_train[:1200] label_val = label_val[:300] df_train[0] label_train[0] meta.iloc[3713] ``` Create tuple of train and validation set for further process ``` df_train = tuple(zip(df_train, label_train)) df_val = tuple(zip(df_val, label_val)) df_train = tuple(zip(*df_train)) df_val = tuple(zip(*df_val)) ``` ### Tensorflow Hub for a variety of CNN models EfficientNet models and ckpts (and other image classifer models too) ``` import tensorflow_hub as hub print("Hub version:", hub.__version__) print("GPU is", "available" if tf.config.list_physical_devices('GPU') else "NOT AVAILABLE") def get_hub_url_and_isize(model_name): model_handle_map = { "efficientnetv2-s": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_s/feature_vector/2", "efficientnetv2-m": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_m/feature_vector/2", "efficientnetv2-l": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_l/feature_vector/2", "efficientnetv2-s-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_s/feature_vector/2", "efficientnetv2-m-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_m/feature_vector/2", "efficientnetv2-l-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_l/feature_vector/2", "efficientnetv2-xl-21k": 
"https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_xl/feature_vector/2", "efficientnetv2-b0-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b0/feature_vector/2", "efficientnetv2-b1-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b1/feature_vector/2", "efficientnetv2-b2-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b2/feature_vector/2", "efficientnetv2-b3-21k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_b3/feature_vector/2", "efficientnetv2-s-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_s/feature_vector/2", "efficientnetv2-m-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_m/feature_vector/2", "efficientnetv2-l-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_l/feature_vector/2", "efficientnetv2-xl-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_xl/feature_vector/2", "efficientnetv2-b0-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b0/feature_vector/2", "efficientnetv2-b1-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b1/feature_vector/2", "efficientnetv2-b2-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b2/feature_vector/2", "efficientnetv2-b3-21k-ft1k": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet21k_ft1k_b3/feature_vector/2", "efficientnetv2-b0": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b0/feature_vector/2", "efficientnetv2-b1": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b1/feature_vector/2", "efficientnetv2-b2": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b2/feature_vector/2", "efficientnetv2-b3": "https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_b3/feature_vector/2", "efficientnet_b0": "https://tfhub.dev/tensorflow/efficientnet/b0/feature-vector/1", 
"efficientnet_b1": "https://tfhub.dev/tensorflow/efficientnet/b1/feature-vector/1", "efficientnet_b2": "https://tfhub.dev/tensorflow/efficientnet/b2/feature-vector/1", "efficientnet_b3": "https://tfhub.dev/tensorflow/efficientnet/b3/feature-vector/1", "efficientnet_b4": "https://tfhub.dev/tensorflow/efficientnet/b4/feature-vector/1", "efficientnet_b5": "https://tfhub.dev/tensorflow/efficientnet/b5/feature-vector/1", "efficientnet_b6": "https://tfhub.dev/tensorflow/efficientnet/b6/feature-vector/1", "efficientnet_b7": "https://tfhub.dev/tensorflow/efficientnet/b7/feature-vector/1", "bit_s-r50x1": "https://tfhub.dev/google/bit/s-r50x1/1", "inception_v3": "https://tfhub.dev/google/imagenet/inception_v3/feature-vector/4", "inception_resnet_v2": "https://tfhub.dev/google/imagenet/inception_resnet_v2/feature-vector/4", "resnet_v1_50": "https://tfhub.dev/google/imagenet/resnet_v1_50/feature-vector/4", "resnet_v1_101": "https://tfhub.dev/google/imagenet/resnet_v1_101/feature-vector/4", "resnet_v1_152": "https://tfhub.dev/google/imagenet/resnet_v1_152/feature-vector/4", "resnet_v2_50": "https://tfhub.dev/google/imagenet/resnet_v2_50/feature-vector/4", "resnet_v2_101": "https://tfhub.dev/google/imagenet/resnet_v2_101/feature-vector/4", "resnet_v2_152": "https://tfhub.dev/google/imagenet/resnet_v2_152/feature-vector/4", "nasnet_large": "https://tfhub.dev/google/imagenet/nasnet_large/feature_vector/4", "nasnet_mobile": "https://tfhub.dev/google/imagenet/nasnet_mobile/feature_vector/4", "pnasnet_large": "https://tfhub.dev/google/imagenet/pnasnet_large/feature_vector/4", "mobilenet_v2_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4", "mobilenet_v2_130_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_130_224/feature_vector/4", "mobilenet_v2_140_224": "https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/4", "mobilenet_v3_small_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/feature_vector/5", 
"mobilenet_v3_small_075_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_small_075_224/feature_vector/5", "mobilenet_v3_large_100_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5", "mobilenet_v3_large_075_224": "https://tfhub.dev/google/imagenet/mobilenet_v3_large_075_224/feature_vector/5", } model_image_size_map = { "efficientnetv2-s": 384, "efficientnetv2-m": 480, "efficientnetv2-l": 480, "efficientnetv2-b0": 224, "efficientnetv2-b1": 240, "efficientnetv2-b2": 260, "efficientnetv2-b3": 300, "efficientnetv2-s-21k": 384, "efficientnetv2-m-21k": 480, "efficientnetv2-l-21k": 480, "efficientnetv2-xl-21k": 512, "efficientnetv2-b0-21k": 224, "efficientnetv2-b1-21k": 240, "efficientnetv2-b2-21k": 260, "efficientnetv2-b3-21k": 300, "efficientnetv2-s-21k-ft1k": 384, "efficientnetv2-m-21k-ft1k": 480, "efficientnetv2-l-21k-ft1k": 480, "efficientnetv2-xl-21k-ft1k": 512, "efficientnetv2-b0-21k-ft1k": 224, "efficientnetv2-b1-21k-ft1k": 240, "efficientnetv2-b2-21k-ft1k": 260, "efficientnetv2-b3-21k-ft1k": 300, "efficientnet_b0": 224, "efficientnet_b1": 240, "efficientnet_b2": 260, "efficientnet_b3": 300, "efficientnet_b4": 380, "efficientnet_b5": 456, "efficientnet_b6": 528, "efficientnet_b7": 600, "inception_v3": 299, "inception_resnet_v2": 299, "nasnet_large": 331, "pnasnet_large": 331, } model_type = model_handle_map.get(model_name) pixels = model_image_size_map.get(model_name) print(f"Selected model: {model_name} : {model_type}") IMAGE_SIZE = (pixels, pixels) print(f"Input size {IMAGE_SIZE}") return model_type, IMAGE_SIZE, pixels model_name = "efficientnetv2-b3-21k-ft1k" # @param ['efficientnetv2-s', 'efficientnetv2-m', 'efficientnetv2-l', 'efficientnetv2-s-21k', 'efficientnetv2-m-21k', 'efficientnetv2-l-21k', 'efficientnetv2-xl-21k', 'efficientnetv2-b0-21k', 'efficientnetv2-b1-21k', 'efficientnetv2-b2-21k', 'efficientnetv2-b3-21k', 'efficientnetv2-s-21k-ft1k', 'efficientnetv2-m-21k-ft1k', 'efficientnetv2-l-21k-ft1k', 
'efficientnetv2-xl-21k-ft1k', 'efficientnetv2-b0-21k-ft1k', 'efficientnetv2-b1-21k-ft1k', 'efficientnetv2-b2-21k-ft1k', 'efficientnetv2-b3-21k-ft1k', 'efficientnetv2-b0', 'efficientnetv2-b1', 'efficientnetv2-b2', 'efficientnetv2-b3', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'bit_s-r50x1', 'inception_v3', 'inception_resnet_v2', 'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v2_50', 'resnet_v2_101', 'resnet_v2_152', 'nasnet_large', 'nasnet_mobile', 'pnasnet_large', 'mobilenet_v2_100_224', 'mobilenet_v2_130_224', 'mobilenet_v2_140_224', 'mobilenet_v3_small_100_224', 'mobilenet_v3_small_075_224', 'mobilenet_v3_large_100_224', 'mobilenet_v3_large_075_224'] # num_epochs = 5 #@param {type: "integer"} trainable = True #@param {type: "boolean"} model_url, img_size, pixels = get_hub_url_and_isize(model_name) IMG_HEIGHT = IMG_WIDTH = pixels ``` ### Data-set interpretion #### Load Image function for W&B ``` def load_img(path, label): img = tf.io.read_file(path) # <= For non-TPU # with open(path, "rb") as local_file: # <= For TPU # img = local_file.read() img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, (IMG_HEIGHT, IMG_WIDTH)) onehot_label = tf.argmax(label == [0.0, 1.0]) # img = np.load(img.numpy(), allow_pickle=True) # onehot_label = np.load(onehot_label.numpy(), allow_pickle=True) return img, onehot_label # ,img.shape(), onehot_label.shape() ``` #### Tensorflow Data-set ``` ds_train = tf.data.Dataset.from_tensor_slices((list(df_train[0]), list(df_train[1]))) # Configure with W&B settings ds_train = (ds_train .shuffle(buffer_size=1024) .map(load_img, num_parallel_calls=AUTOTUNE) .batch(wandb.config['batch_size']) .cache() .prefetch(AUTOTUNE)) ds_val = tf.data.Dataset.from_tensor_slices((list(df_val[0]), list(df_val[1]))) # Configure with W&B settings ds_val = (ds_val .shuffle(buffer_size=1024) .map(load_img, 
num_parallel_calls=AUTOTUNE) .batch(wandb.config['batch_size']) .cache() .prefetch(AUTOTUNE)) ds_train ``` ## **2. Modeling** ### Define model structure and metrics ``` from sklearn.metrics import f1_score tf.config.run_functions_eagerly(True) @tf.autograph.experimental.do_not_convert def f1(y_true, y_pred): return f1_score(y_true, tf.math.argmax(y_pred, 1)) # Data augmentation layer for image tf.keras.backend.clear_session() # =============== TPU ================== # with strategy.scope(): # data_augmentation = tf.keras.Sequential([ # keras.layers.InputLayer(input_shape=img_size + (3,)), # layers.RandomFlip("horizontal_and_vertical", seed=SEED), # layers.RandomRotation(0.2, seed=SEED), # layers.RandomZoom(0.1, seed=SEED) # ]) # model = tf.keras.Sequential([ # data_augmentation, # hub.KerasLayer(model_url, trainable=trainable), # Trainable: Fine tuning # layers.Dropout(rate=0.2, seed=SEED), # layers.Dense(units=2, # Binary classifcation # activation='softmax') # ]) # model.build((None,) + img_size + (3,)) # (IMG_SIZE, IMG_SIZE, 3) # model.summary() # # Update formula rule # # velocity = momentum * velocity - learning_rate * g # # w = w + momentum * velocity - learning_rate * g # model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=wandb.config['learning_rate'], momentum=wandb.config['momentum'], nesterov=True), # #loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=wandb.config['label_smoothing']) # loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), # metrics=['accuracy', f1]) # =============== GPU ================== data_augmentation = tf.keras.Sequential([ keras.layers.InputLayer(input_shape=[IMG_HEIGHT, IMG_WIDTH, 3]), layers.RandomFlip("horizontal_and_vertical", seed=SEED), # layers.RandomRotation(0.2, seed=SEED), layers.RandomZoom(0.1, seed=SEED), layers.experimental.preprocessing.RandomWidth(0.1, seed=SEED), ]) model = tf.keras.Sequential([ data_augmentation, hub.KerasLayer(model_url, 
trainable=trainable), # Trainable: Fine tuning layers.Dropout(rate=0.2, seed=SEED), layers.Dense(units=2, # Binary classifcation activation='softmax', kernel_regularizer=tf.keras.regularizers.l2(0.0001)) ]) model.build((None,) + img_size + (3,)) # (IMG_SIZE, IMG_SIZE, 3) model.summary() # Update formula rule (when nesterov=True) # velocity = momentum * velocity - learning_rate * g # w = w + momentum * velocity - learning_rate * g model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=wandb.config['learning_rate'], momentum=wandb.config['momentum'], nesterov=False), #loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=wandb.config['label_smoothing']) loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy', f1]) ``` ### Train model with W&B monitoring ``` hist = model.fit(ds_train, validation_data=ds_val, epochs=wandb.config['epochs'], callbacks=[WandbCallback()], verbose=1).history ``` ### Save model and weights ``` model.save(data_path + f'/{standard}.keras') model.save_weights( data_path + f'/{standard}_weight.h5', overwrite=True, save_format=None, options=None ) ``` ## **3. 
Evaluation** ### Self-made test dataset We will evaluate the model performance with the small proportion of the test data-set that we have created #### Mask detection Predict trial for one image ``` x_test = df_test[0] # Path to 655.jpg y_test = label_test[0] # Mask label of 655.jpg image = tf.io.read_file(x_test) image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH)) true_label = 'No mask' if (np.argmax(y_test) == 0) else 'Mask' plt.imshow(image/255.0) plt.axis('off') plt.show() prediction_scores = model.predict(np.expand_dims(image, axis=0)) predicted_label = 'No mask' if (np.argmax(prediction_scores) == 0) else 'Mask' print("True label: " + true_label) print("Predicted label: " + predicted_label) ``` Evaluate the test dataset ``` from sklearn.metrics import accuracy_score prediction_list = [] for i in range(len(df_test)): image = tf.io.read_file(df_test[i]) image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH)) prediction_scores = model.predict(np.expand_dims(image, axis=0)) prediction_list.append(np.argmax(prediction_scores)) if (i % 10 == 0): print(f"Predicted {i} images.") acc = accuracy_score(label_test, prediction_list) print(f"Test accuracy: {acc}") ``` The test dataset was originally cut down from the train dataset and have not even interfere the training process of the model. So this accuracy is quite reasonable. Currently we have trained the model for detecting mask on people and predict whether they have adhered the 5K standards. From here, we can change the `standard` variable from `'mask'` to `'distancing'` to train the second model that specifically serves for the distance detection purpose. After finished all requirements, we can use the results from both models to conclude the `5k attribute` and export the final submission. 
The 5k attribute can be evaluated as the pseudo code below: ``` 5k = 1 if (mask == 1 and distancing == 1) else 0 ``` #### Distancing detection Predict trial for one image ``` x_test = df_test[10] # Path to 1995.jpg y_test = label_test[10] # Mask label of 1995.jpg image = tf.io.read_file(x_test) image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH)) true_label = 'Too close' if (np.argmax(y_test) == 0) else 'Good distance' plt.imshow(image/255.0) plt.axis('off') plt.show() prediction_scores = model.predict(np.expand_dims(image, axis=0)) predicted_label = 'Too close' if (np.argmax(prediction_scores) == 0) else 'Good distance' print("True label: " + true_label) print("Predicted label: " + predicted_label) ``` Because there are many images missing either mask, distancing or 5k labels (even all of them), the model cannot determine so well and hence the accuracy is reduced. Evaluate the test dataset ``` from sklearn.metrics import accuracy_score prediction_list = [] for i in range(len(df_test)): image = tf.io.read_file(df_test[i]) image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH)) prediction_scores = model.predict(np.expand_dims(image, axis=0)) prediction_list.append(np.argmax(prediction_scores)) if (i % 10 == 0): print(f"Predicted {i} images.") acc = accuracy_score(label_test, prediction_list) print(f"Test accuracy: {acc}") ``` Apparently, the **dataset** is missing a lot of distancing attribute compared to the **mask**. As said, the accuracy for detecting the distance is quite lower than the model of mask detection. 
### Public Test set ``` meta_test = pd.read_csv(data_path + '/test/public_test_meta.csv') df_public_test = meta_test['fname'] test_img_path = data_path + '/test/images/' ``` #### Mask prediction Load Model ``` dependencies = { 'f1': f1, 'KerasLayer': hub.KerasLayer(model_url, trainable=trainable) } model_mask = keras.models.load_model(data_path + f'/{standard}.keras', custom_objects=dependencies) ``` Predict ``` def predict_public_test(model, img_path): prediction_list = [] for i, row in enumerate(df_public_test): image = tf.io.read_file(img_path + row) image = tf.image.decode_jpeg(image, channels=3) image = tf.image.resize(image, (IMG_HEIGHT, IMG_WIDTH)) prediction_scores = model.predict(np.expand_dims(image, axis=0)) prediction_list.append(np.argmax(prediction_scores)) if (i % 10 == 0): print(f"Predicted {i} images.") return prediction_list # Mask prediction prediction_mask_list = predict_public_test(model_mask, test_img_path) ``` #### Distancing prediction ``` # Switch standards standard = 'distancing' #@param ['mask', 'distancing'] ``` Load model ``` dependencies = { 'f1': f1, 'KerasLayer': hub.KerasLayer(model_url, trainable=trainable) } model_distancing = keras.models.load_model(data_path + f'/{standard}.keras', custom_objects=dependencies) ``` Predict ``` # Distancing prediction prediction_distancing_list = predict_public_test(model_distancing, test_img_path) meta_test_results = meta_test.copy() meta_test['5k'] = [1 if prediction_mask_list[i] == 1 and prediction_distancing_list[i] == 1 else 0 for i in range(len(meta_test))] meta_test_results[:10] import os os.makedirs(data_path + '/submission', exist_ok=True) meta_test_results.to_csv(data_path + '/submission/5k-compliance-submission.csv') ``` ## **4. Recreate the pipeline** Since making the process of detecting mask and distancing to be seperated procedures, evaluate new models or changing hyperparameters would be exhausted. 
In this section, I will create a pipeline that can be run once to train, predict and monitor the metrics. But before heading to that part, we can re-examine our problem to find a better way to achieve better results. One remaining problem is that the dataset contains many missing values, which can in fact affect our model predictions and hence reduce accuracy.

Missing values

```
#@title
plt.figure(figsize=(10,6))
sns.heatmap(meta.isnull(), cbar=False)

#@title
print('Num. missing mask',\
      len(meta[meta['mask'].isna()]))
print('Num. missing distancing',\
      len(meta[meta['distancing'].isna()]))
print('Num. missing 5k',\
      len(meta[meta['5k'].isna()]))
print('Num. missing mask and distancing:',\
      len(meta[(meta['mask'].isna()) & (meta['distancing'].isna())]))
print('Num. missing mask and 5k:',\
      len(meta[(meta['mask'].isna()) & (meta['5k'].isna())]))
print('Num. missing distancing and 5k:',\
      len(meta[(meta['distancing'].isna()) & (meta['5k'].isna())]))
print('Num. missing all three attributes:',\
      len(meta[(meta['mask'].isna()) & (meta['distancing'].isna()) & (meta['5k'].isna())]))
```

Apparently, the missing values occur as either one missing attribute out of the three, or a missing pair of attributes (except for the mask-and-distancing pair). No row is missing all three attributes. To get the 5k value, we need to know the mask and distancing values first. Luckily, no row is missing both of these two variables. Therefore, we can fill in the missing values with our own logic (though not in all cases). The original rule for the 5k evaluation can be described as follows:

```
5k = 1 if (mask == 1 and distancing == 1) else 0
```

Based on this, we can design a pipeline that fills in the missing values and produces better results:

> 1.
Model mask detection → use it to predict the missing `mask` values, then continue from there to fill the missing `distancing` values:

```
if (mask == 1) and (5k == 1):
    distancing = 1
elif (mask == 1) and (5k == 0):
    distancing = 0
elif (mask == 0) and (5k == 0):
    distancing = 0
```

In the case where `mask` is 0, we can skip it, since `mask == 0 and 5k == 0` is the only case we can interpret. I ran the check `meta[(meta['mask'] == 0) & (meta['5k'] == 0) & (meta['distancing'].isna())]` and it returned nothing, so it is safe to assume this part has no missing values and is skippable. > 2. Model distancing → use it to predict the missing `5k` values:

```
if (distancing == 1) and (mask == 1):
    5k = 1
elif (distancing == 0) or (mask == 0):
    5k = 0
```

> 3. Model 5k → use it to predict the final output `5k`. In conclusion, the difference between the previous section and this one is that we build three models instead of two. This is doable because we fill in the missing `5k` values, so we can use that attribute for the final prediction. For the new approach, please switch to `new_approach_kaggle.ipynb`. **Note 1**: After a bad experience with Google Colab, I switched the later approach to Kaggle, which offers a longer session period and a stronger GPU. But since Kaggle does not handle data storage/retrieval as well as Google Drive, I had some trouble using it; some of the output files had to be downloaded to my PC in order to save progress. **Note 2**: The approach and procedures applied in this notebook are the initial ones I came up with. In summary, I trained two models, one for mask detection and one for distancing detection. Once trained, they predict the `mask` and `distancing` labels, and I use conditional rules on those labels to derive the final `5k` label. This approach is unreliable because it skips all the missing values.
For the new approach, I trained the mask model, used it to predict over the original training data-set, and filled in all missing `mask` values. I then used the updated data-set to train the distancing model and predicted again to fill in all missing `distancing` values. After this step, I applied the conditional rules once more to fill the missing `5k` values. In the final step, I trained a `5k` model to predict the `5k` label directly — unlike the initial approach, where the `5k` label was never used for training and evaluation but was instead generated immediately from the `mask` and `distancing` labels. Comparing the two approaches, I think the later one is better because it relies on all of the data. Deriving the result from the mask and distancing predictions alone is risky, since errors can occur in either prediction. Therefore, if we want `5k` results for submission, we should train a model on the `5k` values as well — and to make that possible, we first had to investigate and interpret the missing values.
github_jupyter
# ะะตะฟะฐั€ะฐะผะตั‚ั€ะธั‡ะตัะบะธะต ะบั€ะธะตั‚ั€ะธะธ ะšั€ะธั‚ะตั€ะธะน | ะžะดะฝะพะฒั‹ะฑะพั€ะพั‡ะฝั‹ะน | ะ”ะฒัƒั…ะฒั‹ะฑะพั€ะพั‡ะฝั‹ะน | ะ”ะฒัƒั…ะฒั‹ะฑะพั€ะพั‡ะฝั‹ะน (ัะฒัะทะฐะฝะฝั‹ะต ะฒั‹ะฑะพั€ะบะธ) ------------- | -------------| **ะ—ะฝะฐะบะพะฒ** | $\times$ | | $\times$ **ะ ะฐะฝะณะพะฒั‹ะน** | $\times$ | $\times$ | $\times$ **ะŸะตั€ะตัั‚ะฐะฝะพะฒะพั‡ะฝั‹ะน** | $\times$ | $\times$ | $\times$ ## ะะตะดะฒะธะถะธะผะพัั‚ัŒ ะฒ ะกะธัั‚ั‚ะปะต ะ˜ะผะตัŽั‚ัั ะดะฐะฝะฝั‹ะต ะพ ะฟั€ะพะดะฐะถะฝะพะน ัั‚ะพะธะผะพัั‚ะธ ะฝะตะดะฒะธะถะธะผะพัั‚ะธ ะฒ ะกะธัั‚ะปะต ะดะปั 50 ัะดะตะปะพะบ ะฒ 2001 ะณะพะดัƒ ะธ 50 ะฒ 2002. ะ˜ะทะผะตะฝะธะปะธััŒ ะปะธ ะฒ ัั€ะตะดะฝะตะผ ั†ะตะฝั‹? ``` import numpy as np import pandas as pd import itertools from scipy import stats from statsmodels.stats.descriptivestats import sign_test from statsmodels.stats.weightstats import zconfint from statsmodels.stats.weightstats import * %pylab inline ``` ### ะ—ะฐะณั€ัƒะทะบะฐ ะดะฐะฝะฝั‹ั… ``` seattle_data = pd.read_csv('seattle.txt', sep = '\t', header = 0) seattle_data.shape seattle_data.head() price2001 = seattle_data[seattle_data['Year'] == 2001].Price price2002 = seattle_data[seattle_data['Year'] == 2002].Price pylab.figure(figsize=(12,4)) pylab.subplot(1,2,1) pylab.grid() pylab.hist(price2001, color = 'r') pylab.xlabel('2001') pylab.subplot(1,2,2) pylab.grid() pylab.hist(price2002, color = 'b') pylab.xlabel('2002') pylab.show() ``` ## ะ”ะฒัƒั…ะฒั‹ะฑะพั€ะพั‡ะฝั‹ะต ะบั€ะธั‚ะตั€ะธะธ ะดะปั ะฝะตะทะฐะฒะธัะธะผั‹ั… ะฒั‹ะฑะพั€ะพะบ ``` print('95%% confidence interval for the mean: [%f, %f]' % zconfint(price2001)) print('95%% confidence interval for the mean: [%f, %f]' % zconfint(price2002)) ``` ### ะ ะฐะฝะณะพะฒั‹ะน ะบั€ะธั‚ะตั€ะธะน ะœะฐะฝะฝะฐ-ะฃะธั‚ะฝะธ $H_0\colon F_{X_1}(x) = F_{X_2}(x)$ $H_1\colon F_{X_1}(x) = F_{X_2}(x + \Delta), \Delta\neq 0$ ``` stats.mannwhitneyu(price2001, price2002) ``` ### ะŸะตั€ะตัั‚ะฐะฝะพะฒะพั‡ะฝั‹ะน ะบั€ะธั‚ะตั€ะธะน $H_0\colon F_{X_1}(x) = F_{X_2}(x)$ $H_1\colon F_{X_1}(x) = F_{X_2}(x + 
# (markdown, continued from the previous chunk)
# $H_1\colon F_{X_1}(x) = F_{X_2}(x + \Delta), \Delta\neq 0$


def permutation_t_stat_ind(sample1, sample2):
    """Test statistic for two independent samples: difference of the means."""
    return np.mean(sample1) - np.mean(sample2)


def get_random_combinations(n1, n2, max_combinations):
    """Draw up to `max_combinations` distinct random splits of range(n1 + n2).

    The identity (observed) labelling is always included.  Because duplicate
    shuffles are dropped via the set, slightly fewer than `max_combinations`
    splits may be returned.
    """
    index = list(range(n1 + n2))
    indices = {tuple(index)}
    for _ in range(max_combinations - 1):
        np.random.shuffle(index)
        indices.add(tuple(index))
    return [(idx[:n1], idx[n1:]) for idx in indices]


def permutation_zero_dist_ind(sample1, sample2, max_combinations=None):
    """Null distribution of the mean difference under random relabelling.

    With `max_combinations` set, a Monte-Carlo sample of splits is used;
    otherwise all C(n, n1) splits are enumerated exhaustively.
    """
    joined_sample = np.hstack((sample1, sample2))
    n1 = len(sample1)
    n = len(joined_sample)
    if max_combinations:
        indices = get_random_combinations(n1, len(sample2), max_combinations)
    else:
        # PERF FIX: membership test against a set makes building each
        # complement O(n) instead of the original O(n * n1)
        # `filter(lambda i: i not in index, ...)` over a tuple.
        indices = []
        for index in itertools.combinations(range(n), n1):
            chosen = set(index)
            indices.append((list(index),
                            [i for i in range(n) if i not in chosen]))
    return [joined_sample[list(first)].mean() - joined_sample[list(second)].mean()
            for first, second in indices]


def permutation_test(sample, mean, max_permutations=None, alternative='two-sided'):
    """Permutation p-value for the difference of means of two independent samples.

    NOTE(review): despite its name, `mean` is the *second sample*, not a
    scalar -- the parameter name is kept for backward compatibility with
    existing calls.
    """
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")
    t_stat = permutation_t_stat_ind(sample, mean)
    zero_distr = permutation_zero_dist_ind(sample, mean, max_permutations)
    if alternative == 'two-sided':
        return sum(1. for x in zero_distr if abs(x) >= abs(t_stat)) / len(zero_distr)
    if alternative == 'less':
        return sum(1. for x in zero_distr if x <= t_stat) / len(zero_distr)
    return sum(1. for x in zero_distr if x >= t_stat) / len(zero_distr)


if __name__ == '__main__':
    # Notebook driver cells (require price2001/price2002 from earlier cells).
    pylab.hist(permutation_zero_dist_ind(price2001, price2002,
                                         max_combinations=1000))
    pylab.show()
    print("p-value: %f" % permutation_test(price2001, price2002,
                                           max_permutations=10000))
    print("p-value: %f" % permutation_test(price2001, price2002,
                                           max_permutations=50000))
github_jupyter
``` import exmp import qiime2 import tempfile import os.path import pandas as pd from qiime2.plugins.feature_table.methods import filter_samples from qiime2.plugins.taxa.methods import collapse ``` # EXMP 1 ``` taxonomy = exmp.load_taxonomy() sample_metadata = exmp.load_sample_metadata() data_dir = exmp.cm_path rarefied_table = qiime2.Artifact.load(os.path.join(data_dir, "rarefied_table.qza")) uu_dm = qiime2.Artifact.load(os.path.join(data_dir, "unweighted_unifrac_distance_matrix.qza")) wu_dm = qiime2.Artifact.load(os.path.join(data_dir, "weighted_unifrac_distance_matrix.qza")) faith_pd = qiime2.Artifact.load(os.path.join(data_dir, "faith_pd_vector.qza")) shannon = qiime2.Artifact.load(os.path.join(data_dir, "shannon_vector.qza")) evenness = qiime2.Artifact.load(os.path.join(data_dir, "evenness_vector.qza")) with tempfile.TemporaryDirectory() as output_dir: _, _, _, sample_metadata = exmp.ols_and_anova('RER_change', 'exmp1', '1.0', output_dir, 'week', sample_metadata, uu_dm, wu_dm, faith_pd, shannon, evenness) rarefied_table = filter_samples(table=rarefied_table, metadata=sample_metadata).filtered_table taxa_table = collapse(table=rarefied_table, taxonomy=taxonomy, level=6).collapsed_table.view(pd.DataFrame) sample_metadata = sample_metadata.to_dataframe() sorted_wu_pc3_correlations = pd.DataFrame(taxa_table.corrwith(sample_metadata['Weighted_UniFrac_PC3'], method='spearman').sort_values(), columns=['Spearman rho']) sorted_wu_pc3_correlations['25th percentile rarefied count'] = taxa_table[sorted_wu_pc3_correlations.index].quantile(0.25) sorted_wu_pc3_correlations['Median rarefied count'] = taxa_table[sorted_wu_pc3_correlations.index].quantile(0.50) sorted_wu_pc3_correlations['75th percentile rarefied count'] = taxa_table[sorted_wu_pc3_correlations.index].quantile(0.75) ``` The data are most easily interpreted if the ordination axes are positively correlated with the RER change. 
Since the direction of the PCs are arbitrary, I generally just run this a few times till I get a positive correlation. ``` sample_metadata['Weighted_UniFrac_PC3'].corr(sample_metadata['RER_change']) output_dir = os.path.join(exmp.cm_path, 'ols-and-anova', 'exmp1-RER_change-week1.0') sorted_wu_pc3_correlations.to_csv(open(os.path.join(output_dir, 'wu-pcoa3-genus-correlations.csv'), 'w')) ``` # EXMP 2 ``` taxonomy = exmp.load_taxonomy() sample_metadata = exmp.load_sample_metadata() data_dir = exmp.cm_path rarefied_table = qiime2.Artifact.load(os.path.join(data_dir, "rarefied_table.qza")) uu_dm = qiime2.Artifact.load(os.path.join(data_dir, "unweighted_unifrac_distance_matrix.qza")) wu_dm = qiime2.Artifact.load(os.path.join(data_dir, "weighted_unifrac_distance_matrix.qza")) faith_pd = qiime2.Artifact.load(os.path.join(data_dir, "faith_pd_vector.qza")) shannon = qiime2.Artifact.load(os.path.join(data_dir, "shannon_vector.qza")) evenness = qiime2.Artifact.load(os.path.join(data_dir, "evenness_vector.qza")) with tempfile.TemporaryDirectory() as output_dir: _, _, _, sample_metadata = exmp.ols_and_anova('three_rep_max_squat_change', 'exmp2', '1.0', output_dir, 'week', sample_metadata, uu_dm, wu_dm, faith_pd, shannon, evenness) rarefied_table = filter_samples(table=rarefied_table, metadata=sample_metadata).filtered_table taxa_table = collapse(table=rarefied_table, taxonomy=taxonomy, level=6).collapsed_table.view(pd.DataFrame) sample_metadata = sample_metadata.to_dataframe() sorted_wu_pc2_correlations = pd.DataFrame(taxa_table.corrwith(sample_metadata['Weighted_UniFrac_PC2'], method='spearman').sort_values(), columns=['Spearman rho']) sorted_wu_pc2_correlations['25th percentile rarefied count'] = taxa_table[sorted_wu_pc2_correlations.index].quantile(0.25) sorted_wu_pc2_correlations['Median rarefied count'] = taxa_table[sorted_wu_pc2_correlations.index].quantile(0.50) sorted_wu_pc2_correlations['75th percentile rarefied count'] = 
taxa_table[sorted_wu_pc2_correlations.index].quantile(0.75) sorted_wu_pc3_correlations = pd.DataFrame(taxa_table.corrwith(sample_metadata['Weighted_UniFrac_PC3'], method='spearman').sort_values(), columns=['Spearman rho']) sorted_wu_pc3_correlations['25th percentile rarefied count'] = taxa_table[sorted_wu_pc3_correlations.index].quantile(0.25) sorted_wu_pc3_correlations['Median rarefied count'] = taxa_table[sorted_wu_pc3_correlations.index].quantile(0.50) sorted_wu_pc3_correlations['75th percentile rarefied count'] = taxa_table[sorted_wu_pc3_correlations.index].quantile(0.75) sample_metadata['Weighted_UniFrac_PC2'].corr(sample_metadata['three_rep_max_squat_change']) sample_metadata['Weighted_UniFrac_PC3'].corr(sample_metadata['three_rep_max_squat_change']) output_dir = os.path.join(exmp.cm_path, 'ols-and-anova', 'exmp2-three_rep_max_squat_change-week1.0') sorted_wu_pc2_correlations.to_csv(open(os.path.join(output_dir, 'wu-pcoa2-genus-correlations.csv'), 'w')) sorted_wu_pc3_correlations.to_csv(open(os.path.join(output_dir, 'wu-pcoa3-genus-correlations.csv'), 'w')) ```
github_jupyter
# (Jupyter notebook cells, flattened; reformatted and ported from Python 2 to
#  Python 3: print(), urllib.request, true division.)
import numpy
import urllib.request
import scipy.optimize
import random
from collections import Counter
from math import *

# Flavour keywords counted by feature(), in column order after the bias term.
KEYWORDS = ['lactic', 'tart', 'sour', 'citric', 'sweet',
            'acid', 'hop', 'fruit', 'salt', 'spicy']
# Translation table mapping review punctuation to spaces before splitting.
_PUNCT = str.maketrans({c: ' ' for c in ',?!:".()'})


def parseData(fname):
    """Yield one parsed review per line of `fname`.

    SECURITY NOTE: each line is parsed with eval(); only use on trusted data
    (kept for compatibility with the original dataset format).
    """
    for line in urllib.request.urlopen(fname):
        yield eval(line)


def feature(datum):
    """Bag-of-keywords feature vector for one review.

    Returns [1, count(lactic), count(tart), ..., count(spicy)] where counts
    are taken over the lower-cased, punctuation-stripped review text.
    """
    words = datum['review/text'].lower().translate(_PUNCT).split()
    counts = Counter(words)
    return [1] + [counts[w] for w in KEYWORDS]


def inner(x, y):
    """Dot product of two equal-length vectors."""
    return sum(a * b for a, b in zip(x, y))


def sigmoid(x):
    """Logistic function 1 / (1 + e^-x)."""
    return 1.0 / (1 + exp(-x))


def f(theta, X, y, lam, class_weights=None):
    """NEGATIVE log-likelihood of L2-regularised logistic regression.

    class_weights: optional (w0, w1) multipliers applied to the negative and
    positive examples respectively (class balancing); None means unweighted,
    which reproduces the original plain algorithm.
    """
    w0, w1 = class_weights if class_weights is not None else (1.0, 1.0)
    loglikelihood = 0.0
    for xi, yi in zip(X, y):
        logit = inner(xi, theta)
        if yi:
            loglikelihood -= log(1 + exp(-logit)) * w1
        else:
            loglikelihood -= (log(1 + exp(-logit)) + logit) * w0
    loglikelihood -= lam * sum(t * t for t in theta)
    return -loglikelihood


def fprime(theta, X, y, lam, class_weights=None):
    """NEGATIVE gradient of the log-likelihood in `f`.

    BUG FIX: the original class-balanced gradient used
    `dl[k] -= X[i][k] * (1 - sigmoid(logit)) * w0` for negative examples,
    which is not the derivative of the weighted objective; the correct term
    is `-X[i][k] * sigmoid(logit) * w0` (the unweighted branch already had
    the equivalent form).  An inconsistent gradient cripples L-BFGS.
    """
    w0, w1 = class_weights if class_weights is not None else (1.0, 1.0)
    dl = [0.0] * len(theta)
    for xi, yi in zip(X, y):
        s = sigmoid(inner(xi, theta))
        if yi:
            for k in range(len(theta)):
                dl[k] += xi[k] * (1 - s) * w1
        else:
            for k in range(len(theta)):
                dl[k] -= xi[k] * s * w0
    for k in range(len(theta)):
        dl[k] -= lam * 2 * theta[k]
    return numpy.array([-x for x in dl])


def train(lam, class_weights=None):
    """Fit theta on the global X_train/y_train with regularisation `lam`."""
    theta, _, _ = scipy.optimize.fmin_l_bfgs_b(
        f, [0] * len(X_train[0]), fprime, pgtol=10,
        args=(X_train, y_train, lam, class_weights))
    return theta


def evaluate(theta, X, y):
    """Return (tp, fp, tn, fn, accuracy, BER) for the rule inner(theta, x) > 0.

    BER is the balanced error rate 1 - (TPR + TNR) / 2.  Assumes both classes
    are present in `y` (otherwise TPR or TNR divides by zero, exactly as the
    original notebook code did).  Replaces the four near-identical TP/TN/FP/FN
    helpers that were redefined inside every loop iteration, twice.
    """
    preds = [inner(theta, x) > 0 for x in X]
    tp = sum(1.0 for p, t in zip(preds, y) if p and t)
    fp = sum(1.0 for p, t in zip(preds, y) if p and not t)
    tn = sum(1.0 for p, t in zip(preds, y) if not p and not t)
    fn = sum(1.0 for p, t in zip(preds, y) if not p and t)
    tpr = tp / (tp + fn)
    tnr = tn / (tn + fp)
    ber = 1 - 0.5 * (tpr + tnr)
    accuracy = (tp + tn) / (tp + tn + fp + fn)
    return tp, fp, tn, fn, accuracy, ber


if __name__ == '__main__':
    print("Reading data...")
    data = list(parseData("file:beer_50000.json"))
    print("done")

    X = [feature(d) for d in data]
    y = [d['beer/ABV'] >= 6.5 for d in data]

    length = len(data) // 3
    X_train, y_train = X[:length], y[:length]
    X_validation, y_validation = X[length:2 * length], y[length:2 * length]
    X_test, y_test = X[2 * length:], y[2 * length:]

    # Class-balancing weights: total / (2 * class size), as in the original.
    num_total = len(y_train)
    num_y1 = sum(1 for t in y_train if t)
    num_y0 = num_total - num_y1
    balanced = (num_total / (2.0 * num_y0), num_total / (2.0 * num_y1))

    lam = 1.0
    datasets = [('train', X_train, y_train),
                ('valid', X_validation, y_validation),
                ('test', X_test, y_test)]
    header = ('λ\tDataset\t\tTruePositive\tFalsePositive\tTrueNegative'
              '\tFalseNegative\tAccuracy\tBER')

    # First the class-balanced model, then the original unweighted algorithm
    # (this replaces the two near-identical copies of f/fprime/evaluation).
    for weights in (balanced, None):
        theta = train(lam, class_weights=weights)
        print(theta)
        print(header)
        for name, Xd, yd in datasets:
            tp, fp, tn, fn, accuracy, ber = evaluate(theta, Xd, yd)
            print(str(lam) + '\t' + name + '\t\t' + str(tp) + '\t\t' + str(fp)
                  + '\t\t' + str(tn) + '\t\t' + str(fn) + '\t\t'
                  + str(accuracy) + '\t' + str(ber))
github_jupyter
# Strategies High-performance solvers, such as Z3, contain many tightly integrated, handcrafted heuristic combinations of algorithmic proof methods. While these heuristic combinations tend to be highly tuned for known classes of problems, they may easily perform very badly on new classes of problems. This issue is becoming increasingly pressing as solvers begin to gain the attention of practitioners in diverse areas of science and engineering. In many cases, changes to the solver heuristics can make a tremendous difference. More information on Z3 is available from https://github.com/z3prover/z3.git ## Introduction Z3 implements a methodology for orchestrating reasoning engines where "big" symbolic reasoning steps are represented as functions known as tactics, and tactics are composed using combinators known as tacticals. Tactics process sets of formulas called Goals. When a tactic is applied to some goal G, four different outcomes are possible. The tactic succeeds in showing G to be satisfiable (i.e., feasible); succeeds in showing G to be unsatisfiable (i.e., infeasible); produces a sequence of subgoals; or fails. When reducing a goal G to a sequence of subgoals G1, ..., Gn, we face the problem of model conversion. A model converter construct a model for G using a model for some subgoal Gi. In the following example, we create a goal g consisting of three formulas, and a tactic t composed of two built-in tactics: simplify and solve-eqs. The tactic simplify apply transformations equivalent to the ones found in the command simplify. The tactic solver-eqs eliminate variables using Gaussian elimination. Actually, solve-eqs is not restricted only to linear arithmetic. It can also eliminate arbitrary variables. Then, combinator Then applies simplify to the input goal and solve-eqs to each subgoal produced by simplify. In this example, only one subgoal is produced. 
``` !pip install "z3-solver" from z3 import * x, y = Reals('x y') g = Goal() g.add(x > 0, y > 0, x == y + 2) print(g) t1 = Tactic('simplify') t2 = Tactic('solve-eqs') t = Then(t1, t2) print(t(g)) ``` In the example above, variable x is eliminated, and is not present the resultant goal. In Z3, we say a clause is any constraint of the form Or(f_1, ..., f_n). The tactic split-clause will select a clause Or(f_1, ..., f_n) in the input goal, and split it n subgoals. One for each subformula f_i. ``` x, y = Reals('x y') g = Goal() g.add(Or(x < 0, x > 0), x == y + 1, y < 0) t = Tactic('split-clause') r = t(g) for g in r: print(g) ``` Tactics Z3 comes equipped with many built-in tactics. The command describe_tactics() provides a short description of all built-in tactics. describe_tactics() Z3Py comes equipped with the following tactic combinators (aka tacticals): * Then(t, s) applies t to the input goal and s to every subgoal produced by t. * OrElse(t, s) first applies t to the given goal, if it fails then returns the result of s applied to the given goal. * Repeat(t) Keep applying the given tactic until no subgoal is modified by it. * Repeat(t, n) Keep applying the given tactic until no subgoal is modified by it, or the number of iterations is greater than n. * TryFor(t, ms) Apply tactic t to the input goal, if it does not return in ms milliseconds, it fails. * With(t, params) Apply the given tactic using the given parameters. The following example demonstrate how to use these combinators. 
``` x, y, z = Reals('x y z') g = Goal() g.add(Or(x == 0, x == 1), Or(y == 0, y == 1), Or(z == 0, z == 1), x + y + z > 2) # Split all clauses" split_all = Repeat(OrElse(Tactic('split-clause'), Tactic('skip'))) print(split_all(g)) split_at_most_2 = Repeat(OrElse(Tactic('split-clause'), Tactic('skip')), 1) print(split_at_most_2(g)) # Split all clauses and solve equations split_solve = Then(Repeat(OrElse(Tactic('split-clause'), Tactic('skip'))), Tactic('solve-eqs')) print(split_solve(g)) ``` In the tactic split_solver, the tactic solve-eqs discharges all but one goal. Note that, this tactic generates one goal: the empty goal which is trivially satisfiable (i.e., feasible) The list of subgoals can be easily traversed using the Python for statement. ``` x, y, z = Reals('x y z') g = Goal() g.add(Or(x == 0, x == 1), Or(y == 0, y == 1), Or(z == 0, z == 1), x + y + z > 2) # Split all clauses" split_all = Repeat(OrElse(Tactic('split-clause'), Tactic('skip'))) for s in split_all(g): print(s) ``` A tactic can be converted into a solver object using the method solver(). If the tactic produces the empty goal, then the associated solver returns sat. If the tactic produces a single goal containing False, then the solver returns unsat. Otherwise, it returns unknown. ``` bv_solver = Then('simplify', 'solve-eqs', 'bit-blast', 'sat').solver() x, y = BitVecs('x y', 16) solve_using(bv_solver, x | y == 13, x > y) ``` In the example above, the tactic bv_solver implements a basic bit-vector solver using equation solving, bit-blasting, and a propositional SAT solver. Note that, the command Tactic is suppressed. All Z3Py combinators automatically invoke Tactic command if the argument is a string. Finally, the command solve_using is a variant of the solve command where the first argument specifies the solver to be used. In the following example, we use the solver API directly instead of the command solve_using. We use the combinator With to configure our little solver. 
We also include the tactic aig which tries to compress Boolean formulas using And-Inverted Graphs. ``` bv_solver = Then(With('simplify', mul2concat=True), 'solve-eqs', 'bit-blast', 'aig', 'sat').solver() x, y = BitVecs('x y', 16) bv_solver.add(x*32 + y == 13, x & y < 10, y > -100) print(bv_solver.check()) m = bv_solver.model() print(m) print(x*32 + y, "==", m.evaluate(x*32 + y)) print(x & y, "==", m.evaluate(x & y)) ``` The tactic smt wraps the main solver in Z3 as a tactic. ``` x, y = Ints('x y') s = Tactic('smt').solver() s.add(x > y + 1) print(s.check()) print(s.model()) ``` Now, we show how to implement a solver for integer arithmetic using SAT. The solver is complete only for problems where every variable has a lower and upper bound. ``` s = Then(With('simplify', arith_lhs=True, som=True), 'normalize-bounds', 'lia2pb', 'pb2bv', 'bit-blast', 'sat').solver() x, y, z = Ints('x y z') solve_using(s, x > 0, x < 10, y > 0, y < 10, z > 0, z < 10, 3*y + 2*x == z) # It fails on the next example (it is unbounded) s.reset() solve_using(s, 3*y + 2*x == z) ``` Tactics can be combined with solvers. For example, we can apply a tactic to a goal, produced a set of subgoals, then select one of the subgoals and solve it using a solver. The next example demonstrates how to do that, and how to use model converters to convert a model for a subgoal into a model for the original goal. ``` t = Then('simplify', 'normalize-bounds', 'solve-eqs') x, y, z = Ints('x y z') g = Goal() g.add(x > 10, y == x + 3, z > y) r = t(g) # r contains only one subgoal print(r) s = Solver() s.add(r[0]) print(s.check()) # Model for the subgoal print(s.model()) # Model for the original goal print(r[0].convert_model(s.model())) ``` ## Probes Probes (aka formula measures) are evaluated over goals. Boolean expressions over them can be built using relational operators and Boolean connectives. The tactic FailIf(cond) fails if the given goal does not satisfy the condition cond. 
Many numeric and Boolean measures are available in Z3Py. The command describe_probes() provides the list of all built-in probes. ``` describe_probes() ``` In the following example, we build a simple tactic using FailIf. It also shows that a probe can be applied directly to a goal. ``` x, y, z = Reals('x y z') g = Goal() g.add(x + y + z > 0) p = Probe('num-consts') print("num-consts:", p(g)) t = FailIf(p > 2) try: t(g) except Z3Exception: print("tactic failed") print("trying again...") g = Goal() g.add(x + y > 0) print(t(g)) ``` Z3Py also provides the combinator (tactical) If(p, t1, t2) which is a shorthand for: OrElse(Then(FailIf(Not(p)), t1), t2) The combinator When(p, t) is a shorthand for: If(p, t, 'skip') The tactic skip just returns the input goal. The following example demonstrates how to use the If combinator. ``` x, y, z = Reals('x y z') g = Goal() g.add(x**2 - y**2 >= 0) p = Probe('num-consts') t = If(p > 2, 'simplify', 'factor') print(t(g)) g = Goal() g.add(x + x + y + z >= 0, x**2 - y**2 >= 0) print(t(g)) ```
github_jupyter
### Specify a text string to examine with NEMO ``` # specify query string payload = 'The World Health Organization on Sunday reported the largest single-day increase in coronavirus cases by its count, at more than 183,000 new cases in the latest 24 hours. The UN health agency said Brazil led the way with 54,771 cases tallied and the U.S. next at 36,617. Over 15,400 came in in India.' payload = 'is strongly affected by large ground-water withdrawals at or near Tupelo, Aberdeen, and West Point.' # payload = 'Overall design: Teliospores of pathogenic races T-1, T-5 and T-16 of T. caries provided by a collection in Aberdeen, ID, USA' payload = 'The results provide evidence of substantial population structure in C. posadasii and demonstrate presence of distinct geographic clades in Central and Southern Arizona as well as dispersed populations in Texas, Mexico and South and Central America' payload = 'Most frequent numerical abnormalities in B-NHL were gains of chromosomes 3 and 18, although gains of chromosome 3 were less prominent in FL.' ``` ### Load functions ``` # import credentials file import yaml with open("config.yml", 'r') as ymlfile: cfg = yaml.safe_load(ymlfile) # general way to extract values for a given key. Returns an array. 
# (Jupyter notebook cells, flattened and reformatted.)
# Used to parse the Nemo response and extract the Wikipedia id.
# Adapted from https://hackersandslackers.com/extract-data-from-complex-json-python/


def extract_values(obj, key):
    """Pull all values of the specified key from a nested JSON-like structure.

    Recursively walks dicts and lists; scalar values stored under `key` are
    collected in document order.  (Quirk preserved from the original: a dict
    or list stored under `key` is recursed into, not appended.)
    """
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in the JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if isinstance(v, (dict, list)):
                    extract(v, arr, key)
                elif k == key:
                    arr.append(v)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    return extract(obj, arr, key)


def get_WPID(name):
    """Return the Wikidata item id(s) for a Wikipedia article title.

    See the API at https://www.mediawiki.org/wiki/API:Query#Example_5:_Batchcomplete
    and https://stackoverflow.com/questions/37024807/ for background.

    BUG FIX: the title is now URL-encoded with urllib.parse.quote; titles
    containing spaces, '&' or '#' previously produced malformed query URLs.
    """
    import requests  # local import: only needed when the API is queried
    from urllib.parse import quote
    url = ('https://en.wikipedia.org/w/api.php?action=query&prop=pageprops'
           '&ppprop=wikibase_item&redirects=1&format=json&titles=' + quote(name))
    r = requests.get(url).json()
    return extract_values(r, 'wikibase_item')


# ### Send a request to NEMO, and get a response
if __name__ == '__main__':
    # Network call: requires the `payload` text and `cfg` credentials defined
    # in the earlier notebook cells, so it only runs as a script.
    import requests
    url = ("https://nemoservice.azurewebsites.net/nemo?appid="
           + cfg['api_creds']['nmo1'])
    newHeaders = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    response = requests.post(url, data='"{' + payload + '}"', headers=newHeaders)
    # display the results as a string (remove the JSON braces)
    a = response.content.decode()
    resp_full = a[a.find('{') + 1:a.find('}')]
    resp_full

# ### Parse the response and load all found elements into a dataframe
# create a dataframe with entities, remove duplicates, then add
# wikipedia/wikidata concept IDs
import pandas as pd
import re
import xml.etree.ElementTree as ET

df = pd.DataFrame(columns=["Type", "Ref", "EntityType", "Name", "Form",
                           "WP", "Value", "Alt", "WP_ID"])
# note that the last column is to be populated later, via the Wikipedia API;
# all previous columns are from Nemo: based on "e" (entity) and "d"
(data) elements. "c" (concept) to be explored # get starting and ending positions of xml fragments in the Nemo output pattern_start = "<(e|d|c)\s" iter = re.finditer(pattern_start,resp_full) indices1 = [m.start(0) for m in iter] pattern_end = "</(e|d|c)>" iter = re.finditer(pattern_end,resp_full) indices2 = [m.start(0) for m in iter] # iterate over xml fragments returned by Nemo, extracting attributes from each and adding to dataframe for i, entity in enumerate(indices1): a = resp_full[indices1[i] : indices2[i]+4] root = ET.fromstring(a) tag = root.tag attributes = root.attrib df = df.append({"Type":root.tag, "Ref":attributes.get('ref'), "EntityType":attributes.get('type'), "Name":attributes.get('name'), "Form":attributes.get('form'), "WP":attributes.get('wp'), "Value":attributes.get('value'), "Alt":attributes.get('alt')}, ignore_index=True) ``` E stands for entity; the attribute ref gives you the title of the corresponding Wikipedia page when the attribute wp has the value โ€œyโ€; the attribute type gives you the type of entity for known entities; the types of interest for you are G, which is geo-political entity, L โ€“ geographic form/location (such as a mountain), and F, which is facility (such as an airport). D stands for datafield, which comprises dates, NUMEX, email addresses and URLs, tracking numbers, and so on. C stands for concept; these appear in Wikipedia and are deemed as relevant for the input text, but they do not get disambiguated ``` # remove duplicate records from the df df = df.drop_duplicates(keep='first') # for each found entity, add wikidata unique identifiers to the dataframe for index, row in df.iterrows(): if (row['WP']=='y'): row['WP_ID'] = get_WPID(row['Name'])[0] df ```
github_jupyter
This notebook will cover the assumed knowledge of pandas. Here's a few questions to check if you already know the material in this notebook. 1. Does a NumPy array have a single dtype or multiple dtypes? 2. Why is broadcasting useful? 3. How do you slice a DataFrame by row label? 4. How do you select a column of a DataFrame? 5. Is the Index a column in the DataFrame? If you feel pretty comfortable with those, go ahead and skip this notebook. [Answers](#Answers) are at the end. We'll meet up at the next notebook. # Aside: IPython Notebook - two modes command and edit - command -> edit: `Enter` - edit -> command: `Esc` - `h` : Keyboard Shortcuts: (from command mode) - `j` / `k` : navigate cells - `shift+Enter` executes a cell Outline: - [NumPy Foundation](#NumPy-Foundation) - [Pandas](#Pandas) - [Data Structures](#Data-Structures) ## Numpy Foundation pandas is built atop NumPy, historically and in the actual library. It's helpful to have a good understanding of some NumPyisms. [Speak the vernacular](https://www.youtube.com/watch?v=u2yvNw49AX4). ### ndarray The core of numpy is the `ndarray`, N-dimensional array. These are homogenously-typed, fixed-length data containers. NumPy also provides many convenient and fast methods implemented on the `ndarray`. ``` from __future__ import print_function import numpy as np import pandas as pd x = np.array([1, 2, 3]) x x.dtype y = np.array([[True, False], [False, True]]) y y.shape ``` ### dtypes Unlike python lists, NumPy arrays care about the type of data stored within. The full list of NumPy dtypes can be found in the [NumPy documentation](http://docs.scipy.org/doc/numpy/user/basics.types.html). ![dtypes](http://docs.scipy.org/doc/numpy/_images/dtype-hierarchy.png) We sacrifice the convinience of mixing bools and ints and floats within an array for much better performance. However, an unexpected `dtype` change will probably bite you at some point in the future. 
The two biggest things to remember are - Missing values (NaN) cast integer or boolean arrays to floats - the object dtype is the fallback You'll want to avoid object dtypes. It's typically slow. ### Broadcasting It's super cool and super useful. The one-line explanation is that when doing elementwise operations, things expand to the "correct" shape. ``` # add a scalar to a 1-d array x = np.arange(5) print('x: ', x) print('x+1:', x + 1, end='\n\n') y = np.random.uniform(size=(2, 5)) print('y: ', y, sep='\n') print('y+1:', y + 1, sep='\n') ``` Since `x` is shaped `(5,)` and `y` is shaped `(2,5)` we can do operations between them. ``` x * y ``` Without broadcasting we'd have to manually reshape our arrays, which quickly gets annoying. ``` x.reshape(1, 5).repeat(2, axis=0) * y ``` # Pandas We'll breeze through the basics here, and get onto some interesting applications in a bit. I want to provide the *barest* of intuition so things stick down the road. ## Why pandas NumPy is great. But it lacks a few things that are conducive to doing statisitcal analysis. By building on top of NumPy, pandas provides - labeled arrays - heterogenous data types within a table - better missing data handling - convenient methods - more data types (Categorical, Datetime) ## Data Structures This is the typical starting point for any intro to pandas. We'll follow suit. ### The DataFrame Here we have the workhorse data structure for pandas. It's an in-memory table holding your data, and provides a few conviniences over lists of lists or NumPy arrays. ``` import numpy as np import pandas as pd # Many ways to construct a DataFrame # We pass a dict of {column name: column values} np.random.seed(42) df = pd.DataFrame({'A': [1, 2, 3], 'B': [True, True, False], 'C': np.random.randn(3)}, index=['a', 'b', 'c']) # also this weird index thing df from IPython.display import Image Image('dataframe.png') ``` ### Selecting Our first improvement over numpy arrays is labeled indexing. 
We can select subsets by column, row, or both. Column selection uses the regular python `__getitem__` machinery. Pass in a single column label `'A'` or a list of labels `['A', 'C']` to select subsets of the original `DataFrame`. ``` # Single column, reduces to a Series df['A'] cols = ['A', 'C'] df[cols] ``` For row-wise selection, use the special `.loc` accessor. ``` df.loc[['a', 'b']] ``` When your index labels are ordered, you can use ranges to select rows or columns. ``` df.loc['a':'b'] ``` Notice that the slice is *inclusive* on both sides, unlike your typical slicing of a list. Sometimes, you'd rather slice by *position* instead of label. `.iloc` has you covered: ``` df.iloc[0:2] ``` This follows the usual python slicing rules: closed on the left, open on the right. As I mentioned, you can slice both rows and columns. Use `.loc` for label or `.iloc` for position indexing. ``` df.loc['a', 'B'] ``` Pandas, like NumPy, will reduce dimensions when possible. Select a single column and you get back `Series` (see below). Select a single row and single column, you get a scalar. You can get pretty fancy: ``` df.loc['a':'b', ['A', 'C']] ``` #### Summary - Use `[]` for selecting columns - Use `.loc[row_lables, column_labels]` for label-based indexing - Use `.iloc[row_positions, column_positions]` for positional index I've left out boolean and hierarchical indexing, which we'll see later. ## Series You've already seen some `Series` up above. It's the 1-dimensional analog of the DataFrame. Each column in a `DataFrame` is in some sense a `Series`. You can select a `Series` from a DataFrame in a few ways: ``` # __getitem__ like before df['A'] # .loc, like before df.loc[:, 'A'] # using `.` attribute lookup df.A ``` You'll have to be careful with the last one. It won't work if you're column name isn't a valid python identifier (say it has a space) or if it conflicts with one of the (many) methods on `DataFrame`. 
The `.` accessor is extremely convient for interactive use though. You should never *assign* a column with `.` e.g. don't do ```python # bad df.A = [1, 2, 3] ``` It's unclear whether your attaching the list `[1, 2, 3]` as an attirbute of `df`, or whether you want it as a column. It's better to just say ```python df['A'] = [1, 2, 3] # or df.loc[:, 'A'] = [1, 2, 3] ``` `Series` share many of the same methods as `DataFrame`s. ## Index `Index`es are something of a peculiarity to pandas. First off, they are not the kind of indexes you'll find in SQL, which are used to help the engine speed up certain queries. In pandas, `Index`es are about lables. This helps with selection (like we did above) and automatic alignment when performing operations between two `DataFrame`s or `Series`. R does have row labels, but they're nowhere near as powerful (or complicated) as in pandas. You can access the index of a `DataFrame` or `Series` with the `.index` attribute. ``` df.index ``` There are special kinds of `Index`es that you'll come across. Some of these are - `MultiIndex` for multidimensional (Hierarchical) labels - `DatetimeIndex` for datetimes - `Float64Index` for floats - `CategoricalIndex` for, you guessed it, `Categorical`s We'll talk *a lot* more about indexes. They're a complex topic and can introduce headaches. <blockquote class="twitter-tweet" lang="en"><p lang="en" dir="ltr"><a href="https://twitter.com/gjreda">@gjreda</a> <a href="https://twitter.com/treycausey">@treycausey</a> in some cases row indexes are the best thing since sliced bread, in others they simply get in the way. Hard problem</p>&mdash; Wes McKinney (@wesmckinn) <a href="https://twitter.com/wesmckinn/status/547177248768659457">December 22, 2014</a></blockquote> Pandas, for better or for worse, does usually provide ways around row indexes being obstacles. The problem is knowing *when* they are just getting in the way, which mostly comes by experience. Sorry. # Answers 1. 
Does a NumPy array have a single dtype or multiple dtypes? - NumPy arrays are homogenous: they only have a single dtype (unlike DataFrames). You can have an array that holds mixed types, e.g. `np.array(['a', 1])`, but the dtype of that array is `object`, which you probably want to avoid. 2. Why is broadcasting useful? - It lets you perform operations between arrays that are compatable, but not nescessarily identical, in shape. This makes your code cleaner. 3. How do you slice a DataFrame by row label? - Use `.loc[label]`. For position based use `.iloc[integer]`. 4. How do you select a column of a DataFrame? - Standard `__getitem__`: `df[column_name]` 5. Is the Index a column in the DataFrame? - No. It isn't included in any operations (`mean`, etc). It can be inserted as a regular column with `df.reset_index()`.
github_jupyter
# Testing performance of different 2D Feature detectors in OpenCV Imports... ``` import cv2 import matplotlib.pyplot as plt import numpy as np import seaborn as sn import time sn.set() # Utilities r2b = lambda x: cv2.cvtColor(x, cv2.COLOR_BGR2RGB) r2ba = lambda x: cv2.cvtColor(x, cv2.COLOR_BGRA2RGBA) ``` ## Create an artificial split image Read image ``` im = cv2.imread('full.jpg') ``` Find an overlapping horizontal (width) split at: 5/8 and 3/8. E.g. the left image will end at 5/8 the width, and the right image will start at 3/8 the width. This was the images will overlap in 2/8 or 1/4 of the width. ``` im.shape[1] * (5./8.), im.shape[1] * (3./8.) ``` Using the above information we split the image horizontally. ``` im_left = im[:,:1617] im_right = im[:,970:] plt.subplot(131),plt.imshow(r2b(im)),plt.axis('off'),plt.title('Original') plt.subplot(132),plt.imshow(r2b(im_left)),plt.axis('off'),plt.title('Left') plt.subplot(133),plt.imshow(r2b(im_right)),plt.axis('off'),plt.title('Right'); h_orig,w_orig = im.shape[:2] h,w = im_right.shape[:2] ``` ## A feature-type-oblivious test To measure the success of features we will need to change the feature types while keeping the same API. Luckily this is easily possible in OpenCV. The following function will provide us with a `cv2.Feature2D` feature detector for each algorithm as well as a `cv2.DetectorMatcher` to match the features. 
``` # adapted from: https://github.com/opencv/opencv/blob/master/samples/python/find_obj.py def init_feature(name): chunks = name.split('-') if chunks[0] == 'sift': detector = cv2.xfeatures2d.SIFT_create(2500) norm = cv2.NORM_L2 elif chunks[0] == 'surf': detector = cv2.xfeatures2d.SURF_create(6500) norm = cv2.NORM_L2 elif chunks[0] == 'orb': detector = cv2.ORB_create(2500) norm = cv2.NORM_HAMMING elif chunks[0] == 'akaze': detector = cv2.AKAZE_create(threshold=0.0065) norm = cv2.NORM_HAMMING elif chunks[0] == 'brisk': detector = cv2.BRISK_create(100) norm = cv2.NORM_HAMMING else: return None, None if 'flann' in chunks: FLANN_INDEX_KDTREE = 1 FLANN_INDEX_LSH = 6 if norm == cv2.NORM_L2: flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) else: flann_params= dict(algorithm = FLANN_INDEX_LSH, table_number = 6, # 12 key_size = 12, # 20 multi_probe_level = 1) #2 matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) else: matcher = cv2.BFMatcher(norm) return detector, matcher # A utility function to align feature points using the given matching, also apply the 2-NN ratio test def filter_matches(kp1, kp2, matches, ratio = 0.75): mkp = [(kp1[m[0].queryIdx], kp2[m[0].trainIdx]) for m in matches if len(m) == 2 and m[0].distance < m[1].distance * ratio] mkp1,mkp2 = zip(*mkp) p1 = np.float32([kp.pt for kp in mkp1]) p2 = np.float32([kp.pt for kp in mkp2]) return p1, p2, mkp ``` Visualize some features ``` out = im_left.copy() # get keypoints detector, matcher = init_feature('surf-flann') im_kpts, im_desc = detector.detectAndCompute(im_left, None) cv2.drawKeypoints(out, im_kpts, out, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS+cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG) plt.figure(figsize=(6,6)) plt.imshow(r2b(out)), plt.grid(False); ``` ### Pre-cache the rotated images In order to save time in the main loop, we will create the rotated images ``` num_stops = 19 w_r = int(np.sqrt(w*w+h*h)) # the diagonal of the image can serve as 
the width of the new images h_r = int(np.sqrt(w*w+h*h)) # since a 45deg rotation will have the diagonal as width. s_y,s_x = int(h_r/2-h/2),int(w_r/2-w/2) # offset for central-rotation ims, masks = [],[] for i,a in enumerate(np.linspace(-90,90,num_stops)): im_right_ext = np.ones((h_r,w_r,3), np.uint8) * 255 # white border... im_right_ext[s_y:s_y+h, s_x:s_x+w] = im_right # offset the image to the center im_right_mask = np.zeros((h_r,w_r,3), np.uint8) # a mask is also needed im_right_mask[s_y:s_y+h, s_x:s_x+w] = 255 M = cv2.getRotationMatrix2D((w_r/2.0+0.5,h_r/2.0+0.5), a, 1.0) # rotate about the center ims += [cv2.warpAffine(im_right_ext, M, (w_r,h_r), borderValue=(255,255,255))] masks += [cv2.warpAffine(im_right_mask, M, (w_r,h_r))] # also rotate the mask... cv2.imwrite('rotations/%03d.jpg'%i, ims[-1]) ``` The major test loop follows. We will go through all feature types, extract the features for the left image and then for each of the rotated images. Then we match and align. ``` results = {} for feature_name in ['akaze','surf','sift','orb','brisk']: # extract features for left image detector, matcher = init_feature(feature_name + '-flann') im_kpts, im_desc = detector.detectAndCompute(im_left, None) matcher.add([im_desc]) # cache the left features results[feature_name] = {'MSE': [], 'time': []} # loop the right image rotations for i,a in enumerate(np.linspace(-90,90,num_stops)): t = time.process_time() # extract features kpts,desc = detector.detectAndCompute(ims[i],masks[i][:,:,0]) # match with the left image raw_matches = matcher.knnMatch(desc,k=2) #2 p1, p2, _ = filter_matches(kpts, im_kpts, raw_matches) # align and filter results[feature_name]['time'].append(time.process_time() - t) # record execution time if len(p1) < 4: print('%d matches found, not enough for transform estimation' % len(p1)) continue # recover the transformation (rotation + translation) H, status = cv2.estimateAffine2D(p1, p2, method=cv2.RANSAC, ransacReprojThreshold=5.0) p1, p2 = p1[status], 
p2[status] warped = cv2.warpAffine(ims[i], H, (w_orig,h_orig)) # undo th rotation on the right image warped_mask = cv2.warpAffine(masks[i], H, (w_orig,h_orig)) / 255.0 # stitch the images using the mask stitched_out = np.zeros((h_orig,w_orig,3),np.uint8) stitched_out[:,:w-1] = im_left stitched_out = np.uint8(stitched_out * (1 - warped_mask)) + np.uint8(warped * warped_mask) # calculate the metrics MSE = np.mean((stitched_out-im)**2) results[feature_name]['MSE'].append(MSE) print('%s, angle= %d, %d / %d inliers/matched, MSE = %.3f' % (feature_name, int(a), np.sum(status), len(status), MSE)) from scipy.interpolate import make_interp_spline, BSpline ``` ## Charting the results ``` xold = np.linspace(-90,90,num_stops) for f in results: plt.plot(xold,results[f]['MSE'],label=f) plt.legend(loc='upper center',ncol=3,fancybox=True, shadow=True) plt.ylim(0,65),plt.xticks(np.linspace(-90,90,13).astype(np.int)) plt.ylabel('MSE'),plt.xlabel('Rotation Angle') plt.savefig('rotation_perf.svg') times = [np.mean(results[r]['time']) for r in results] mses = [np.mean(results[r]['MSE']) for r in results] fig = plt.figure() ax = fig.add_subplot(111) # Create matplotlib axes ax2 = ax.twinx() # Create another axes that shares the same x-axis as ax. indices = np.arange(len(results)) width = 0.35 ax.bar(indices - width/2, times, width = width, color='b', label='Mean Time') ax.set_label('Maan Time') ax.set_ylabel('Mean Time (seconds)') ax.set_xlabel('Feature Type') ax.set_ylim(0,7.7) ax.axes.set_xticklabels([0]+list(results.keys())) ax2.bar(indices + width/2, mses, width = width, color='r', label='Mean MSE') ax2.set_ylabel('Mean MSE') ax2.grid(None) ax2.set_ylim(0,49) fig.legend(loc='upper center', ncol=2,fancybox=True, shadow=True, bbox_to_anchor=(0.5,0.9)) plt.savefig('time_vs_mse.svg') ``` At this point we can decide on the best feature type for the job. In this case it seems AKAZE is the best performer.
github_jupyter
# Logistic regression model ## Setup ``` !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null %cd -q /pyprobml/scripts !pip install -q optax !pip install -q blackjax !pip install -q sgmcmcjax %matplotlib inline import matplotlib.pyplot as plt import numpy as np import itertools import warnings from functools import partial import jax import jax.numpy as jnp from jax.random import uniform, normal, bernoulli, split from jax import jit, grad, value_and_grad, vmap from jax.experimental import optimizers from jax.scipy.special import logsumexp from blackjax import nuts, stan_warmup import optax import sgmcmc_subspace_lib as sub from sgmcmc_utils import build_optax_optimizer, build_nuts_sampler from sgmcmcjax.samplers import * ``` ## Generate Data This part is based on https://github.com/jeremiecoullon/SGMCMCJax/blob/master/docs/nbs/models/logistic_regression.py ``` #ignore by GPU/TPU message (generated by jax module) warnings.filterwarnings("ignore", message='No GPU/TPU found, falling back to CPU.') # Sample initial beta values from random normal def init_params(rng_key, d): return normal(rng_key, (d,)) def gen_cov_mat(key, d, rho): Sigma0 = np.diag(np.ones(d)) for i in range(1,d): for j in range(0, i): Sigma0[i,j] = (uniform(key)*2*rho - rho)**(i-j) Sigma0[j,i] = Sigma0[i,j] return jnp.array(Sigma0) def logistic(theta, x): return 1/(1+jnp.exp(-jnp.dot(theta, x))) def gen_data(key, dim, N): """ Generate data with dimension `dim` and `N` data points Parameters ---------- key: uint32 random key dim: int dimension of data N: int Size of dataset Returns ------- theta_true: ndarray Theta array used to generate data X: ndarray Input data, shape=(N,dim) y: ndarray Output data: 0 or 1s. 
shape=(N,) """ key, theta_key, cov_key, x_key = split(key, 4) rho = 0.4 print(f"Generating data, with N={N} and dim={dim}") theta_true = normal(theta_key, shape=(dim, ))*jnp.sqrt(10) covX = gen_cov_mat(cov_key, dim, rho) X = jnp.dot(normal(x_key, shape=(N,dim)), jnp.linalg.cholesky(covX)) p_array = batch_logistic(theta_true, X) keys = split(key, N) y = batch_benoulli(keys, p_array).astype(jnp.int32) return theta_true, X, y @jit def predict(params, inputs): return batch_logistic(params, inputs) > 0.5 @jit def accuracy(params, batch): inputs, targets = batch predicted_class = predict(params, inputs) return jnp.mean(predicted_class == targets) @jit def loglikelihood(theta, x_val, y_val): return -logsumexp(jnp.array([0., (1.-2.*y_val)*jnp.dot(theta, x_val)])) @jit def logprior(theta): return -(0.5/10)*jnp.dot(theta,theta) batch_logistic = jit(vmap(logistic, in_axes=(None, 0))) batch_benoulli = vmap(bernoulli, in_axes=(0, 0)) batch_loglikelihood = vmap(loglikelihood, in_axes=(None, 0, 0)) dim = 10 # Choose a dimension for the parameters (10, 50,100 in the paper) subspace_dim = 2 # Choose a dimension for the subspace parameters ndata = 10000 # Number of data points nwarmup = 1000 # Number of iterations during warmup phase nsamples = 10000 # Number of SGMCMC iterations nsamplesCV = nsamples // 2 key = jax.random.PRNGKey(42) theta_true, X, y = gen_data(key, dim, ndata) batch_size = int(0.01*X.shape[0]) data = (X, y) init_key, key = split(key) theta_init = init_params(init_key, dim) ``` ## SGD ``` niters = 5000 learning_rate = 6e-5 opt = optax.sgd(learning_rate=learning_rate) optimizer = build_optax_optimizer(opt, loglikelihood, logprior, data, batch_size, pbar=False) opt_key, key = split(key) sgd_params, logpost_array = optimizer(opt_key, niters, theta_init) train_accuracy = accuracy(sgd_params, data) print("Training set accuracy {}".format(train_accuracy)) plt.plot(-logpost_array, color='tab:orange') plt.show() ``` ### Subspace Model ``` sub_opt_key, key = split(key) 
sgd_sub_params, _, opt_log_post_trace, _ = sub.subspace_optimizer( sub_opt_key, loglikelihood, logprior, theta_init, data, batch_size, subspace_dim, nwarmup, nsamples, opt, pbar=False) train_accuracy = accuracy(sgd_sub_params, data) print("Training set accuracy {}".format(train_accuracy)) plt.plot(-opt_log_post_trace, color='tab:pink') plt.show() ``` ## NUTS ``` nuts_sampler = build_nuts_sampler(nwarmup, loglikelihood, logprior, data, batch_size=ndata, pbar=False) nuts_key, key = split(key) nuts_params = nuts_sampler(nuts_key, nsamples//10, theta_init) train_accuracy = accuracy(jnp.mean(nuts_params, axis=0), data) print("Training set accuracy {}".format(train_accuracy)) ``` ### Subspace Model ``` build_nuts_sampler_partial = partial(build_nuts_sampler, nwarmup=nwarmup) nuts_key, key = split(key) nuts_sub_params = sub.subspace_sampler(nuts_key, loglikelihood, logprior, theta_init, build_nuts_sampler_partial, data, batch_size, subspace_dim, nsamples, nsteps_full=0, nsteps_sub=0, use_cv=False, opt=None, pbar=False) ``` ## SGLD ``` dt = 1e-5 # Run sampler sgld_sampler = build_sgld_sampler(dt, loglikelihood, logprior, data, batch_size) sgld_key, key = split(key) sgld_output = sgld_sampler(sgld_key, nsamples, theta_init) ``` ### Subspace Sampler ``` build_sgld_sampler_partial = partial(build_sgld_sampler, dt=dt) sgld_key, key = split(key) sgld_sub_output = sub.subspace_sampler(sgld_key, loglikelihood, logprior, theta_init, build_sgld_sampler_partial, data, batch_size, subspace_dim, nsamples, nsteps_full=0, nsteps_sub=0, use_cv=False, opt=None, pbar=False) ``` ## SGLDCV ``` dt = 1e-5 sgldCV_sampler = build_sgldCV_sampler(dt, loglikelihood, logprior, data, batch_size, sgd_params) sgldCV_key, key = split(key) sgldCV_output = sgldCV_sampler(sgldCV_key, nsamplesCV, sgd_params) ``` ### Subspace Sampler ``` build_sgldCV_sampler_partial = partial(build_sgldCV_sampler, dt=dt) sgldCV_key, key = split(key) sgldCV_sub_output = sub.subspace_sampler(sgldCV_key, loglikelihood, 
logprior, theta_init, build_sgldCV_sampler_partial, data, batch_size, subspace_dim, nsamples, nsteps_full=niters//2, nsteps_sub=niters//2, use_cv=True, opt=opt, pbar=False) ``` ## SGHMC ``` L = 5 dt = 1e-6 sghmc_sampler = build_sghmc_sampler(dt, L, loglikelihood, logprior, data, batch_size) sghmc_key, key = split(key) sghmc_output = sghmc_sampler(sghmc_key, nsamples, theta_init) ``` ### Subspace Sampler ``` build_sghmc_sampler_partial = partial(build_sghmc_sampler, dt=dt, L=L) sghmc_key, key = split(key) sghmc_sub_output = sub.subspace_sampler(sghmc_key, loglikelihood, logprior, theta_init, build_sghmc_sampler_partial, data, batch_size, subspace_dim, nsamples, nsteps_full=0, nsteps_sub=0, use_cv=False, opt=None, pbar=False) ``` ## SGHMCCV ``` dt = 1e-7 # step size parameter L = 5 sghmcCV_sampler = build_sghmcCV_sampler(dt, L, loglikelihood, logprior, data, batch_size, sgd_params) sghmcCV_key, key = split(key) sghmcCV_output = sghmcCV_sampler(sghmcCV_key, nsamplesCV, sgd_params) ``` ### Subspace Sampler ``` build_sghmcCV_sampler_partial = partial(build_sghmcCV_sampler, dt=dt, L=L) sghmcCV_key, key = split(key) sghmcCV_sub_output = sub.subspace_sampler(sghmcCV_key, loglikelihood, logprior, theta_init, build_sghmcCV_sampler_partial, data, batch_size, subspace_dim, nsamples, nsteps_full=niters//2, nsteps_sub=niters//2, use_cv=True, opt=opt, pbar=False) ``` ## SGNHT ``` dt = 1e-6 # step size parameter a = 0.02 sgnht_sampler = build_sgnht_sampler(dt, loglikelihood, logprior, data, batch_size, a=a) sgnht_key, key = split(key) sgnht_output = sgnht_sampler(sgnht_key, nsamples, theta_init) ``` ### Subspace Sampler ``` build_sgnht_sampler_partial = partial(build_sgnht_sampler, dt=dt, a=a) sgnht_key, key = split(key) sgnht_sub_output = sub.subspace_sampler(sgnht_key, loglikelihood, logprior, theta_init, build_sgnht_sampler_partial, data, batch_size, subspace_dim, nsamples, nsteps_full=0, nsteps_sub=0, use_cv=False, opt=opt, pbar=False) ``` ## SGHNTCV ``` dt = 1e-6 # step size 
parameter a = 0.02 sgnhtCV_sampler = build_sgnhtCV_sampler(dt, loglikelihood, logprior, data, batch_size, sgd_params, a=a) sgnhtCV_key, key = split(key) sgnhtCV_output = sgnhtCV_sampler(sgnhtCV_key, nsamplesCV, sgd_params) ``` ### Subspace Sampler ``` build_sgnhtCV_sampler_partial = partial(build_sgnhtCV_sampler, dt=dt, a=a) sgnhtCV_key, key = split(key) sgnhtCV_sub_output = sub.subspace_sampler(sgnhtCV_key, loglikelihood, logprior, theta_init, build_sgnhtCV_sampler_partial, data, batch_size, subspace_dim, nsamples, nsteps_full=niters//2, nsteps_sub=niters//2, use_cv=True, opt=opt, pbar=False) ``` ## ULA - SGLD with the full dataset ``` dt = 4e-5 # step size parameter ula_sampler = build_sgld_sampler(dt, loglikelihood, logprior, data, batch_size=ndata) ula_key, key = split(key) ula_output = ula_sampler(ula_key, nsamples, theta_init) ``` ### Subspace Sampler ``` build_sgld_sampler_partial = partial(build_sgld_sampler, dt=dt) ula_key, key = split(key) ula_sub_output = sub.subspace_sampler(ula_key, loglikelihood, logprior, theta_init, build_sgld_sampler_partial, data, ndata, subspace_dim, nsamples, nsteps_full=0, nsteps_sub=0, use_cv=False, opt=None, pbar=False) ``` ## Trace plots ``` def trace_plot(outs): nrows, ncols = 2, 4 fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(16, 12)) for ax, (title, out) in zip(axes.flatten(), outs.items()): ax.plot(out) ax.set_title(title) ax.set_xlabel("Iteration") ax.grid(color='white', linestyle='-', linewidth=2) ax.set_axisbelow(True) ax.set_facecolor("#EAEBF0") plt.tight_layout() plt.savefig("traceplot.pdf", dpi=300) plt.show() outs = {"STAN": nuts_params, "SGLD": sgld_output, "SGLDCV": sgldCV_output, "ULA": ula_output, "SGHMC": sghmc_output, "SGHMCCV":sghmcCV_output, "SGNHT": sgnht_output, "SGNHTCV": sgnhtCV_output} trace_plot(outs) subspace_outs = {"STAN": nuts_sub_params, "SGLD": sgld_sub_output, "SGLDCV": sgldCV_sub_output, "ULA": ula_sub_output, "SGHMC": sghmc_sub_output, "SGHMCCV":sghmcCV_sub_output, "SGNHT": 
sgnht_sub_output, "SGNHTCV": sgnhtCV_sub_output} trace_plot(subspace_outs) ```
github_jupyter
# Finding locations to establish temporary emergency facilities Run this notebook to create a Decision Optimization model with Decision Optimization for Watson Studio and deploy the model using Watson Machine Learning. The deployed model can later be accessed using the [Watson Machine Learning client library](https://wml-api-pyclient-dev-v4.mybluemix.net/) to find optimal location based on given constraints. The model created here is a basic Decision Optimization model. The main purpose is to demonstrate creating a model and deploying using Watson Machine Learning. This model can and should be improved upon to include better constraints that can provide a more optimal solution. ## Steps **Build and deploy model** 1. [Provision a Watson Machine Learning service](#provision-a-watson-machine-learning-service) 1. [Set up the Watson Machine Learning client library](#set-up-the-watson-machine-learning-client-library) 1. [Build the Decision Optimization model](#build-the-decision-optimization-model) 1. [Deploy the Decision Optimization model](#deploy-the-decision-optimization-model) **Test the deployed model** 1. [Generate an API Key from the HERE Developer Portal](#generate-an-api-key-from-the-here-developer-portal) 1. [Query HERE API for Places](#query-here-api-for-places) 1. [Create and monitor a job to test the deployed model](#create-and-monitor-a-job-to-test-the-deployed-model) 1. [Extract and display solution](#extract-and-display-solution) <br> ### Provision a Watson Machine Learning service - If you do not have an IBM Cloud account, [register for a free trial account](https://cloud.ibm.com/registration). - Log into [IBM Cloud](https://cloud.ibm.com/login) - Create a [create a Watson Machine Learning instance](https://cloud.ibm.com/catalog/services/machine-learning) <br> ### Set up the Watson Machine Learning client library Install the [Watson Machine Learning client library](https://wml-api-pyclient-dev-v4.mybluemix.net/). 
This notebook uses the preview Python client based on v4 of Watson Machine Learning APIs. > **Important** Do not load both (V3 and V4) WML API client libraries into a notebook. ``` # Uninstall the Watson Machine Learning client Python client based on v3 APIs !pip uninstall watson-machine-learning-client -y # Install the WML client API v4 !pip install watson-machine-learning-client-V4 ``` <br> #### Create a client instance Use your [Watson Machine Learning service credentials](https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/ml-get-wml-credentials.html) and update the next cell. ``` # @hidden_cell WML_API_KEY = '...' WML_INSTANCE_ID = '...' WML_URL = 'https://us-south.ml.cloud.ibm.com' from watson_machine_learning_client import WatsonMachineLearningAPIClient # Instantiate a client using credentials wml_credentials = { 'apikey': WML_API_KEY, 'instance_id': WML_INSTANCE_ID, 'url': WML_URL } client = WatsonMachineLearningAPIClient(wml_credentials) client.version ``` <br> ### Build the Decision Optimization model - The Decision Optimization model will be saved to a `model.py` file in a subdirectory (i.e., `model/`) of the current working directory. - The model will be placed in a tar archive and uploaded to Watson Machine Learning. Set up variables for model and deployment ``` import os model_dir = 'model' model_file = 'model.py' model_path = '{}/{}'.format(model_dir, model_file) model_tar = 'model.tar.gz' model_tar_path = '{}/{}'.format(os.getcwd(), model_tar) model_name = 'DO_HERE_DEMO' model_desc = 'Finding locations for short-term emergency facilities' deployment_name = 'DO_HERE_DEMO Deployment' deployment_desc = 'Deployment of DO_HERE_DEMO model' print(model_path) print(model_tar_path) ``` <br> #### Create the model.py in a model subdirectory Use the `mkdir` and `write_file` commands to create the subdirectory and write the model code to a file. 
``` %mkdir $model_dir %%writefile $model_path from docplex.util.environment import get_environment from os.path import splitext import pandas from six import iteritems import json def get_all_inputs(): '''Utility method to read a list of files and return a tuple with all read data frames. Returns: a map { datasetname: data frame } ''' result = {} env = get_environment() for iname in [f for f in os.listdir('.') if splitext(f)[1] == '.csv']: with env.get_input_stream(iname) as in_stream: df = pandas.read_csv(in_stream) datasetname, _ = splitext(iname) result[datasetname] = df return result def write_all_outputs(outputs): '''Write all dataframes in ``outputs`` as .csv. Args: outputs: The map of outputs 'outputname' -> 'output df' ''' for (name, df) in iteritems(outputs): if isinstance(df, pandas.DataFrame): csv_file = '%s.csv' % name print(csv_file) with get_environment().get_output_stream(csv_file) as fp: if sys.version_info[0] < 3: fp.write(df.to_csv(index=False, encoding='utf8')) else: fp.write(df.to_csv(index=False).encode(encoding='utf8')) elif isinstance(df, str): txt_file = '%s.txt' % name with get_environment().get_output_stream(txt_file) as fp: fp.write(df.encode(encoding='utf8')) if len(outputs) == 0: print('Warning: no outputs written') %%writefile -a $model_path from docplex.mp.model import Model from statistics import mean def get_distance(routes_df, start, destination): s = getattr(start, 'geocode', start) d = getattr(destination, 'geocode', destination) row = routes_df.loc[ (routes_df['start'] == s) & (routes_df['destination'] == d) ] return row['distance'].values[0] def build_and_solve(places_df, routes_df, number_sites=3): print('Building and solving model') mean_dist = mean(routes_df['distance'].unique()) p_only = places_df.loc[places_df['is_medical'] == False] h_only = places_df.loc[places_df['is_medical'] == True] places = list(p_only.itertuples(name='Place', index=False)) postal_codes = p_only['postal_code'].unique() hospital_geocodes = 
h_only['geocode'].unique() mdl = Model(name='temporary emergency sites') ## decision variables places_vars = mdl.binary_var_dict(places, name='is_place') postal_link_vars = mdl.binary_var_matrix(postal_codes, places, 'link') hosp_link_vars = mdl.binary_var_matrix(hospital_geocodes, places, 'link') ## objective function # minimize hospital distances h_total_distance = mdl.sum(hosp_link_vars[h, p] * abs(mean_dist - get_distance(routes_df, h, p)) for h in hospital_geocodes for p in places) mdl.minimize(h_total_distance) ## constraints # match places with their correct postal_code for p in places: for c in postal_codes: if p.postal_code != c: mdl.add_constraint(postal_link_vars[c, p] == 0, 'ct_forbid_{0!s}_{1!s}'.format(c, p)) # # each postal_code should have one only place # mdl.add_constraints( # mdl.sum(postal_link_vars[c, p] for p in places) == 1 for c in postal_codes # ) # # each postal_code must be associated with a place # mdl.add_constraints( # postal_link_vars[c, p] <= places_vars[p] for p in places for c in postal_codes # ) # solve for 'number_sites' places mdl.add_constraint(mdl.sum(places_vars[p] for p in places) == number_sites) ## model info mdl.print_information() stats = mdl.get_statistics() ## model solve mdl.solve(log_output=True) details = mdl.solve_details status = ''' Model stats number of variables: {} number of constraints: {} Model solve time (s): {} status: {} '''.format( stats.number_of_variables, stats.number_of_constraints, details.time, details.status ) possible_sites = [p for p in places if places_vars[p].solution_value == 1] return possible_sites, status %%writefile -a $model_path import pandas def run(): # Load CSV files into inputs dictionary inputs = get_all_inputs() places_df = inputs['places'] routes_df = inputs['routes'] site_suggestions, status = build_and_solve(places_df, routes_df) solution_df = pandas.DataFrame(site_suggestions) outputs = { 'solution': solution_df, 'status': status } # Generate output files 
write_all_outputs(outputs) run() ``` <br> #### Create the model tar archive Use the `tar` command to create a tar archive with the model file. ``` import tarfile def reset(tarinfo): tarinfo.uid = tarinfo.gid = 0 tarinfo.uname = tarinfo.gname = 'root' return tarinfo tar = tarfile.open(model_tar, 'w:gz') tar.add(model_path, arcname=model_file, filter=reset) tar.close() ``` <br> ### Deploy the Decision Optimization model Store model in Watson Machine Learning with: - the tar archive previously created, - metadata including the model type and runtime ``` # All available meta data properties client.repository.ModelMetaNames.show() # All available runtimes client.runtimes.list(pre_defined=True) ``` <br> #### Upload the model to Watson Machine Learning Configure the model metadata and set the model type (i.e., `do-docplex_12.9`) and runtime (i.e., `do_12.9`) ``` import os model_metadata = { client.repository.ModelMetaNames.NAME: model_name, client.repository.ModelMetaNames.DESCRIPTION: model_desc, client.repository.ModelMetaNames.TYPE: 'do-docplex_12.9', client.repository.ModelMetaNames.RUNTIME_UID: 'do_12.9' } model_details = client.repository.store_model(model=model_tar_path, meta_props=model_metadata) model_uid = client.repository.get_model_uid(model_details) print('Model GUID: {}'.format(model_uid)) ``` <br> #### Create a deployment Create a batch deployment for the model, providing deployment metadata and model UID. 
```
deployment_metadata = {
    client.deployments.ConfigurationMetaNames.NAME: deployment_name,
    client.deployments.ConfigurationMetaNames.DESCRIPTION: deployment_desc,
    client.deployments.ConfigurationMetaNames.BATCH: {},
    client.deployments.ConfigurationMetaNames.COMPUTE: {'name': 'S', 'nodes': 1}
}

deployment_details = client.deployments.create(model_uid, meta_props=deployment_metadata)
deployment_uid = client.deployments.get_uid(deployment_details)

print('Deployment GUID: {}'.format(deployment_uid))
```

<br>

**Congratulations!** The model has been successfully deployed. Please make a note of the deployment UID.

<br>

## Test the deployed model

### Generate an API Key from the HERE Developer Portal

To test your deployed model using actual data from HERE Location services, you'll need an API key. Follow the instructions outlined in the [HERE Developer Portal](https://developer.here.com/sign-up) to [generate an API key](https://developer.here.com/documentation/authentication/dev_guide/topics/api-key-credentials.html).

Use your [HERE.com API key](https://developer.here.com/sign-up) and update the next cell.

```
# @hidden_cell
HERE_APIKEY = '...'
```

<br>

Set up helper functions to query HERE APIs

```
import re
import requests

geocode_endpoint = 'https://geocode.search.hereapi.com/v1/geocode?q={address}&apiKey={api_key}'
browse_endpoint = 'https://browse.search.hereapi.com/v1/browse?categories=%s&at=%s&apiKey=%s'
matrix_routing_endpoint = 'https://matrix.route.ls.hereapi.com/routing/7.2/calculatematrix.json?mode=%s&summaryAttributes=%s&apiKey=%s'

# Raw string: the pattern uses \d / \s / \. escapes, which emit a
# DeprecationWarning in a plain string literal. Matches "lat,lng" with
# lat in [-90, 90] and lng in [-180, 180].
coordinates_regex = r'^[-+]?([1-8]?\d(\.\d+)?|90(\.0+)?),\s*[-+]?(180(\.0+)?|((1[0-7]\d)|([1-9]?\d))(\.\d+)?)$'

def is_geocode(location):
    '''Validate a "lat,lng" string or [lat, lng] pair.

    Returns [lat, lng] as floats when the input is a valid coordinate
    pair, False otherwise.
    '''
    geocode = None
    if isinstance(location, str):
        l = location.split(',')
        if len(l) == 2:
            geocode = '{},{}'.format(l[0].strip(), l[1].strip())
    elif isinstance(location, list) and len(location) == 2:
        geocode = ','.join(str(l) for l in location)
    if geocode is not None and re.match(coordinates_regex, geocode):
        return [float(l) for l in geocode.split(',')]
    else:
        return False

def get_geocode(address):
    '''Resolve an address (or pass through a coordinate pair) to [lat, lng].

    Free-text addresses are resolved with the HERE Geocoding API; the first
    result is used. Returns False when the address cannot be resolved
    (the error body is printed).
    '''
    g = is_geocode(address)
    if not g:
        url = geocode_endpoint.format(address=address, api_key=HERE_APIKEY)
        response = requests.get(url)
        if response.ok:
            jsonResponse = response.json()
            position = jsonResponse['items'][0]['position']
            g = [position['lat'], position['lng']]
        else:
            print(response.text)
    return g

def get_browse_url(location, categories, limit=25):
    '''Build a HERE Browse API URL for the given location and category ids.

    A non-positive limit omits the &limit= parameter.
    '''
    categories = ','.join(c for c in categories)
    geocode = get_geocode(location)
    coordinates = ','.join(str(g) for g in geocode)
    browse_url = browse_endpoint % (categories, coordinates, HERE_APIKEY)
    if limit > 0:
        browse_url = '{}&limit={}'.format(browse_url, limit)
    return browse_url

def browse_places(location, categories=None, results_limit=100):
    '''Query the HERE Browse API and return the raw list of place dicts.

    Returns an empty list on an HTTP error (the error body is printed).
    '''
    # None default instead of a mutable [] default argument.
    categories = categories or []
    places_list = []
    browse_url = get_browse_url(location, categories, limit=results_limit)
    response = requests.get(browse_url)
    if response.ok:
        json_response = response.json()
        places_list = json_response['items']
    else:
        print(response.text)
    return places_list

def get_places_nearby(location, categories=None, results_limit=100, max_distance_km=50):
    '''Return Place objects near location, keeping only results within
    max_distance_km (the API reports distance in meters).'''
    categories = categories or []
    places_list = browse_places(location, categories=categories, results_limit=results_limit)
    filtered_places = []
    for p in places_list:
        if p['distance'] <= max_distance_km * 1000:
            filtered_places.append(Place(p))
    return filtered_places

def get_hospitals_nearby(location, results_limit=100, max_distance_km=50):
    '''Return hospital Places (HERE category 800-8000-0159) near location,
    flagged is_medical=True, within max_distance_km.'''
    h_cat = ['800-8000-0159']
    hospitals_list = browse_places(location, categories=h_cat, results_limit=results_limit)
    filtered_hospitals = []
    for h in hospitals_list:
        if h['distance'] <= max_distance_km * 1000:
            filtered_hospitals.append(Place(h, is_medical=True))
    return filtered_hospitals

def get_matrix_routing_url():
    '''Build the HERE Matrix Routing 7.2 URL (shortest car route, traffic
    disabled, summaries with routeId and distance).'''
    route_mode = 'shortest;car;traffic:disabled;'
    summary_attributes = 'routeId,distance'
    matrix_routing_url = matrix_routing_endpoint % (route_mode, summary_attributes, HERE_APIKEY)
    return matrix_routing_url

def get_route_summaries(current_geocode, places, hospitals):
    '''Compute route summaries from every start (current position, each
    distinct postal code, each hospital) to every place.

    Returns a list of dicts with keys start/destination/distance/route_id.
    For starts that correspond to a postal code, an extra row keyed by the
    postal code is emitted in addition to the geocode-keyed row, since the
    optimization model looks up distances both by postal code and by
    hospital geocode.
    '''
    # Request should not contain more than 15 start positions
    num_starts = 15
    postal_codes_set = set()
    postal_codes_geocodes = []
    places_waypoints = {}
    for i, p in enumerate(places):
        if p.postal_code:
            postal_codes_set.add('{}:{}'.format(p.postal_code, p.country))
        places_waypoints['destination{}'.format(i)] = p.geocode
    for p in postal_codes_set:
        geocode = get_geocode(p)
        postal_codes_geocodes.append({
            'postal_code': p.split(':')[0],
            'geocode': ','.join(str(g) for g in geocode)
        })
    current = { 'geocode': ','.join(str(g) for g in current_geocode) }
    start_geocodes = [current] + postal_codes_geocodes + [h.to_dict() for h in hospitals]
    # Chunk the starts so each Matrix Routing request stays within the
    # 15-start limit.
    start_coords = [
        start_geocodes[i:i+num_starts] for i in range(0, len(start_geocodes), num_starts)
    ]
    route_summaries = []
    matrix_routing_url = get_matrix_routing_url()
    for sc in start_coords:
        start_waypoints = {}
        for i, s in enumerate(sc):
            start_waypoints['start{}'.format(i)] = s['geocode']
        coords = {**start_waypoints, **places_waypoints}
        response = requests.post(matrix_routing_url, data = coords)
        if not response.ok:
            print(response.text)
        else:
            json_response = response.json()
            for entry in json_response['response']['matrixEntry']:
                start_geocode = start_waypoints['start{}'.format(entry['startIndex'])]
                dest_geocode = places_waypoints[
                    'destination{}'.format(entry['destinationIndex'])
                ]
                # Emit a postal-code-keyed row for postal-code starts
                # (entries with an 'address' key are hospitals, not
                # postal codes), then the geocode-keyed row for all starts.
                for s in sc:
                    if 'address' not in s and 'postal_code' in s and s['geocode'] == start_geocode:
                        route_summaries.append({
                            'start': s['postal_code'],
                            'destination': dest_geocode,
                            'distance': entry['summary']['distance'],
                            'route_id': entry['summary']['routeId']
                        })
                        break
                route_summaries.append({
                    'start': start_geocode,
                    'destination': dest_geocode,
                    'distance': entry['summary']['distance'],
                    'route_id': entry['summary']['routeId']
                })
    return route_summaries
```

<br>

Define a Place class

```
class Place(object):
    '''Normalized view of a place, built either from a raw HERE API result
    (nested 'address'/'position'/'categories' keys) or from a flat dict
    previously produced by to_dict() (e.g. a CSV row).'''

    def __init__(self, p, is_medical=False):
        self.id = p['id']
        self.title = p['title']
        self.address = p['address']['label'] if 'label' in p['address'] else p['address']
        self.postal_code = p['address']['postalCode'] if 'postalCode' in p['address'] else p['postal_code']
        self.distance = p['distance']
        self.primary_category = p['categories'][0]['id'] if 'categories' in p else p['primary_category']
        self.geocode = '{},{}'.format(p['position']['lat'], p['position']['lng']) if 'position' in p else p['geocode']
        self.country = p['address']['countryCode'] if 'countryCode' in p['address'] else p['country']
        self.is_medical = p['is_medical'] if 'is_medical' in p else is_medical
        # Round-tripping through CSV turns the flag into a string.
        if isinstance(self.is_medical, str):
            self.is_medical = self.is_medical.lower() in ['true', '1']

    def to_dict(self):
        '''Return a flat dict representation suitable for DataFrame/CSV use.'''
        return({
            'id': self.id,
            'title': self.title,
            'address': self.address,
            'postal_code': self.postal_code,
            'distance': self.distance,
            'primary_category': self.primary_category,
            'geocode': self.geocode,
            'country': self.country,
            'is_medical': self.is_medical
        })

    def __str__(self):
        return self.address
```

<br>

### Query HERE API for Places

Use the HERE API to get a list of Places in the vicinity of an address

Example of `Place` entity returned by HERE API:

```json
{
'title': 'Duane Street Hotel', 'id': 'here:pds:place:840dr5re-fba2a2b91f944ee4a699eea7556896bd', 'resultType': 'place', 'address': { 'label': 'Duane Street Hotel, 130 Duane St, New York, NY 10013, United States', 'countryCode': 'USA', 'countryName': 'United States', 'state': 'New York', 'county': 'New York', 'city': 'New York', 'district': 'Tribeca', 'street': 'Duane St', 'postalCode': '10013', 'houseNumber': '130' }, 'position': { 'lat': 40.71599, 'lng': -74.00735 }, 'access': [ { 'lat': 40.71608, 'lng': -74.00728 } ], 'distance': 161, 'categories': [ { 'id': '100-1000-0000' }, { 'id': '200-2000-0000' }, { 'id': '500-5000-0000' }, { 'id': '500-5000-0053' }, { 'id': '500-5100-0000' }, { 'id': '700-7400-0145' } ], 'foodTypes': [ { 'id': '101-000' } ], 'contacts': [ ], 'openingHours': [ { 'text': [ 'Mon-Sun: 00:00 - 24:00' ], 'isOpen': true, 'structured': [ { 'start': 'T000000', 'duration': 'PT24H00M', 'recurrence': 'FREQ:DAILY;BYDAY:MO,TU,WE,TH,FR,SA,SU' } ] } ] } ``` ``` address = 'New York, NY' max_results = 20 # HERE Place Category System # https://developer.here.com/documentation/geocoding-search-api/dev_guide/topics-places/places-category-system-full.html places_categories = ['500-5000'] # Hotel-Motel current_geocode = get_geocode(address) places = get_places_nearby( current_geocode, categories=places_categories, results_limit=max_results ) hospitals = get_hospitals_nearby( current_geocode, results_limit=3 ) print('Places:') for p in places: print(p) print('\nHospitals:') for h in hospitals: print(h) ``` <br> ### Create and monitor a job to test the deployed model Create a payload containing places data received from HERE ``` import pandas as pd places_df = pd.DataFrame.from_records([p.to_dict() for p in (places + hospitals)]) places_df.head() route_summaries = get_route_summaries(current_geocode, places, hospitals) routes_df = pd.DataFrame.from_records(route_summaries) routes_df.drop_duplicates(keep='last', inplace=True) routes_df.head() solve_payload = { 
client.deployments.DecisionOptimizationMetaNames.INPUT_DATA: [ { 'id': 'places.csv', 'values' : places_df }, { 'id': 'routes.csv', 'values' : routes_df } ], client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA: [ { 'id': '.*\.csv' }, { 'id': '.*\.txt' } ] } ``` <br> Submit a new job with the payload and deployment. Set the UID of the deployed model. ``` # deployment_uid = '...' job_details = client.deployments.create_job(deployment_uid, solve_payload) job_uid = client.deployments.get_job_uid(job_details) print('Job UID: {}'.format(job_uid)) ``` Display job status until it is completed. The first job of a new deployment might take some time as a compute node must be started. ``` from time import sleep while job_details['entity']['decision_optimization']['status']['state'] not in ['completed', 'failed', 'canceled']: print(job_details['entity']['decision_optimization']['status']['state'] + '...') sleep(3) job_details=client.deployments.get_job_details(job_uid) print(job_details['entity']['decision_optimization']['status']['state']) # job_details job_details['entity']['decision_optimization']['status'] ``` <br> ### Extract and display solution Display the output solution. ``` import base64 output_data = job_details['entity']['decision_optimization']['output_data'] solution = None stats = None for i, d in enumerate(output_data): if d['id'] == 'solution.csv': solution = pd.DataFrame(output_data[i]['values'], columns = job_details['entity']['decision_optimization']['output_data'][0]['fields']) else: stats = base64.b64decode(output_data[i]['values'][0][0]).decode('utf-8') print(stats) solution.head() ``` <br> Check out the online documentation at <a href="https://dataplatform.cloud.ibm.com/docs" target="_blank" rel="noopener noreferrer">https://dataplatform.cloud.ibm.com/docs</a> for more samples, tutorials and documentation. 
<br>

## Helper functions

See `watson-machine-learning-client(V4)` Python library documentation for more info on the API: https://wml-api-pyclient-dev-v4.mybluemix.net/

```
## List models
def list_models(wml_client):
    '''Print a table of all models stored in the WML repository.'''
    wml_client.repository.list_models()

## List deployments
def list_deployments(wml_client):
    '''Print a table of all existing deployments.'''
    wml_client.deployments.list()

## Delete a model
def delete_model(wml_client, model_uid):
    '''Remove the model with the given UID from the WML repository.'''
    wml_client.repository.delete(model_uid)

## Delete a deployment
def delete_deployment(wml_client, deployment_uid):
    '''Remove the deployment with the given UID.'''
    wml_client.deployments.delete(deployment_uid)

## Get details of all models
def details_all_models(wml_client):
    '''Return the metadata records of every stored model.'''
    return wml_client.repository.get_model_details()['resources']

## Get details of all deployments
def details_all_deployments(wml_client):
    '''Return the metadata records of every deployment.'''
    return wml_client.deployments.get_details()['resources']

# Find model using model name
def get_models_by_name(wml_client, model_name):
    '''Return all model metadata records whose name equals model_name.'''
    # Reuse the details helper instead of duplicating the API call.
    all_models = details_all_models(wml_client)
    models = [m for m in all_models if m['entity']['name'] == model_name]
    return models

# Find deployment using deployment name
def get_deployments_by_name(wml_client, deployment_name):
    '''Return the first deployment metadata record whose name equals
    deployment_name.

    Raises ValueError with a clear message when no deployment matches,
    instead of an opaque IndexError from indexing an empty list.
    '''
    # Reuse the details helper instead of duplicating the API call.
    all_deployments = details_all_deployments(wml_client)
    deployments = [d for d in all_deployments if d['entity']['name'] == deployment_name]
    if not deployments:
        raise ValueError('No deployment found with name: {}'.format(deployment_name))
    return deployments[0]

delete_deployment(client, deployment_uid)
delete_model(client, model_uid)
list_deployments(client)
list_models(client)
```
github_jupyter
``` %matplotlib inline import os import time import pickle import numpy as np import pandas as pd import matplotlib.pyplot as plt from scipy import ndimage import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from IPython.display import clear_output from datetime import datetime from lib.utils import SamplePool, make_seed, make_circle_masks, get_rand_avail from lib.utils import get_sobel, softmax from lib.NCCAModel2 import NCCAModel2 with open('anchor_loc.pickle', 'rb') as handle: anchor_loc = pickle.load(handle) root = "_maps/" full_size = (100,100) map_size = (80,80) color_map = [(0.5,0.5,0.5), (0.5,1.0,0.5), (1.0,1.0,0.5), (1.0,0.7,0.2), (1.0,0.5,0.5), (1.0,0.5,1.0)] ################################################################ d_trains = [] d_tests = [] alive_maps = [] for d_i, obj_name in enumerate(list(anchor_loc.keys())[:10]): filenames = [] common_index = {} for filename in os.listdir(root): if filename[:len(obj_name)]==obj_name: filenames.append(root+filename) for filename in filenames: with open(filename, 'rb') as handle: map_dict = pickle.load(handle) for index in map_dict: try: tmp = int(map_dict[index]['status']) if index in common_index: common_index[index]+= 1 else: common_index[index] = 1 except (TypeError, KeyError): continue common_index = [x for x in common_index.keys() if common_index[x]==len(filenames)] d_train = np.zeros([64, full_size[0], full_size[1], 4]) d_test = np.zeros([len(filenames)-d_train.shape[0], full_size[0], full_size[1], d_train.shape[-1]]) for i,filename in enumerate(filenames[:d_train.shape[0]]): with open(filename, 'rb') as handle: map_dict = pickle.load(handle) for index in common_index: try: status = min(int(map_dict[index]['status'])-1, 3) d_train[i, index[0], index[1]] = np.zeros(d_train.shape[-1]) d_train[i, index[0], index[1], status] = 1 except (TypeError, KeyError): continue for i,filename in enumerate(filenames[d_train.shape[0]:]): with open(filename, 'rb') as handle: 
map_dict = pickle.load(handle) for index in common_index: try: status = min(int(map_dict[index]['status'])-1, 3) d_test[i, index[0], index[1]] = np.zeros(d_test.shape[-1]) d_test[i, index[0], index[1], status] = 1 except (TypeError, KeyError): continue alive_map = np.expand_dims(np.expand_dims(np.sum(d_train[0, ...], -1)>0.001, 0), -1) cut_off = ((full_size[0]-map_size[0])//2, (full_size[1]-map_size[1])//2) d_train = d_train[:, cut_off[0]:(cut_off[0]+map_size[0]), cut_off[1]:(cut_off[1]+map_size[1]), :] d_test = d_test[:, cut_off[0]:(cut_off[0]+map_size[0]), cut_off[1]:(cut_off[1]+map_size[1]), :] alive_map = alive_map[:, cut_off[0]:(cut_off[0]+map_size[0]), cut_off[1]:(cut_off[1]+map_size[1]), :] print(d_train.shape, d_test.shape, alive_map.shape) d_trains.append(d_train) d_tests.append(d_test) alive_maps.append(alive_map) DEVICE = torch.device("cuda:0") model_path = "models/ncca_softmax_multi_traffic.pth" CHANNEL_N = 16 ALPHA_CHANNEL = 4 lr = 8e-4 lr_gamma = 0.99997 betas = (0.8, 0.9) n_epoch = 60000 BATCH_SIZE = 8 N_STEPS = 128 POOL_SIZE = 16 CELL_FIRE_RATE = 0.5 CALIBRATION = 1.0 eps = 1e-3 USE_PATTERN_POOL = 1 DAMAGE_N = 4 TRANS_N = 2 valid_masks = [] for alive_map in alive_maps: valid_masks.append(alive_map.astype(bool)) valid_masks = np.concatenate(valid_masks, 0) pools_list = [] for d_i, d_train in enumerate(d_trains): pools = [] for _ in range(d_train.shape[0]): init_coord = get_rand_avail(valid_masks[d_i:(d_i+1)]) seed = make_seed(map_size, CHANNEL_N, np.arange(CHANNEL_N-ALPHA_CHANNEL)+ALPHA_CHANNEL, init_coord) pools.append(SamplePool(x=np.repeat(seed[None, ...], POOL_SIZE, 0))) pools_list.append(pools) my_model = NCCAModel2(CHANNEL_N, ALPHA_CHANNEL, CELL_FIRE_RATE, DEVICE).to(DEVICE) # my_model.load_state_dict(torch.load(model_path)) optimizer = optim.Adam(my_model.parameters(), lr=lr, betas=betas) scheduler = optim.lr_scheduler.ExponentialLR(optimizer, lr_gamma) # torch.autograd.set_detect_anomaly(True) loss_log = [] def plot_loss(loss_log): 
plt.figure(figsize=(10, 4)) plt.title('Loss history (log10)') plt.plot(np.log10(loss_log), '.', alpha=0.1) plt.show() return def train(x, target, valid_mask_t, calibration_map, steps, optimizer, scheduler): for _ in range(steps): x = my_model(x, valid_mask_t, 1) h = torch.softmax(x[..., :ALPHA_CHANNEL], -1) t = target[..., :ALPHA_CHANNEL] _delta = t*(h-1) delta = _delta * calibration_map * CALIBRATION y1 = x[..., :ALPHA_CHANNEL]-delta alpha_h = x[..., ALPHA_CHANNEL:(ALPHA_CHANNEL+1)] y2 = alpha_h - 2 * (alpha_h-valid_mask_t) * calibration_map * CALIBRATION x = torch.cat((y1,y2,x[..., (ALPHA_CHANNEL+1):]), -1) loss_kldiv, loss_alpha = loss_f(x, target) loss_kldiv, loss_alpha = torch.mean(loss_kldiv), torch.mean(loss_alpha) loss = loss_kldiv+loss_alpha print(loss_kldiv.item(), loss_alpha.item()) optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() return x, loss def loss_f(x, target, epsilon=1e-8): h = torch.softmax(x[..., :ALPHA_CHANNEL], -1) t = target[..., :ALPHA_CHANNEL] loss_kldiv = torch.mean(torch.sum(h*torch.log(h/t+epsilon), -1), [-1,-2]) loss_alpha = torch.mean(torch.pow(x[..., ALPHA_CHANNEL]-target[..., ALPHA_CHANNEL], 2), [-1,-2]) return loss_kldiv, loss_alpha starting_time = time.time() for i_epoch in range(n_epoch+1): targets = [] target_is = [] for _ in range(BATCH_SIZE): d_i = np.random.randint(len(d_trains)) target_i = np.random.randint(d_trains[d_i].shape[0]) target_is.append((d_i, target_i)) target = np.concatenate((d_trains[d_i][target_i:target_i+1], valid_masks[d_i:(d_i+1)]), -1) targets.append(target) targets = np.concatenate(targets, 0).astype(np.float32) targets[..., :-1] += eps targets[..., :-1] /= np.sum(targets[..., :-1], axis=-1, keepdims=True) _target = torch.from_numpy(targets).to(DEVICE) calibration_map = make_circle_masks(_target.size(0), map_size[0], map_size[1], rmin=0.5, rmax=0.5)[..., None] calibration_map = torch.from_numpy(calibration_map.astype(np.float32)).to(DEVICE) if USE_PATTERN_POOL: batches = [] batch_x 
= [] train_x = [] for target_index, (d_i, target_i) in enumerate(target_is): batch = pools_list[d_i][target_i].sample(1) batches.append(batch) batch_x.append(batch.x) if target_index<(len(target_is)-TRANS_N): train_x.append(batch.x) else: new_target_i = np.random.randint(d_trains[d_i].shape[0]) batch = pools_list[d_i][new_target_i].sample(1) train_x.append(batch.x) x0 = np.concatenate(train_x, 0) init_coord = get_rand_avail(valid_masks[:1]) seed = make_seed(map_size, CHANNEL_N, np.arange(CHANNEL_N-ALPHA_CHANNEL)+ALPHA_CHANNEL, init_coord) x0[:1] = seed if DAMAGE_N: damage = 1.0-make_circle_masks(DAMAGE_N, map_size[0], map_size[1])[..., None] x0[1:1+DAMAGE_N] *= damage else: x0 = np.repeat(seed[None, ...], BATCH_SIZE, 0) x0 = torch.from_numpy(x0.astype(np.float32)).to(DEVICE) valid_mask_t = valid_masks[[tmp[0] for tmp in target_is]] valid_mask_t = torch.from_numpy(valid_mask_t.astype(np.float32)).to(DEVICE) x, loss = train(x0, _target, valid_mask_t, calibration_map, N_STEPS, optimizer, scheduler) if USE_PATTERN_POOL: for batch_i, batch in enumerate(batches): batch.x[:] = x.detach().cpu().numpy()[batch_i:(batch_i+1)] batch.commit() loss_log.append(loss.item()) if (i_epoch)%50 == 0: clear_output() x0 = x0.detach().cpu().numpy() x0 = softmax(x0, -1) hyp = x.detach().cpu().numpy() hyp = softmax(hyp, -1) cali_map_numpy = calibration_map.detach().cpu().numpy() for i in range(targets.shape[0]): plt.figure(figsize=(18,4)) for j in range(4): plt.subplot(1,15,j+1) rotated_img = ndimage.rotate(targets[i, ..., j], 90) plt.imshow(rotated_img, cmap=plt.cm.gray, vmin=0, vmax=1) plt.axis('off') # all white plt.subplot(1,15,5) plt.imshow(np.ones(map_size), cmap='binary', vmin=1, vmax=1) plt.axis('off') for j in range(4): plt.subplot(1,15,j+6) rotated_img = ndimage.rotate(x0[i, ..., j], 90) plt.imshow(rotated_img, cmap=plt.cm.gray, vmin=0, vmax=1) plt.axis('off') # all white plt.subplot(1,15,10) plt.imshow(np.ones(map_size), cmap='binary', vmin=1, vmax=1) plt.axis('off') # 
calibration_map plt.subplot(1,15,11) rotated_img = ndimage.rotate(cali_map_numpy[i, ..., 0], 90) plt.imshow(rotated_img, cmap=plt.cm.gray, vmin=0, vmax=1) plt.axis('off') for j in range(4): plt.subplot(1,15,j+12) rotated_img = ndimage.rotate(hyp[i, ..., j], 90) plt.imshow(rotated_img, cmap=plt.cm.gray, vmin=0, vmax=1) plt.axis('off') plt.show() plot_loss(loss_log) total_time_cost = np.round((time.time()-starting_time)/60, 4) ave_time_cost = np.round((time.time()-starting_time)/60/(i_epoch+1), 4) print(i_epoch, "loss =", loss.item(), "ave_log_loss", np.log(np.mean(loss_log[-100:]))/np.log(10)) print("Toal Time Cost:", total_time_cost, "min") print("Ave Time Cost:", ave_time_cost, "min/epoch") torch.save(my_model.state_dict(), model_path) np.save("loss_logs/loss_log_train_2_hidden_16_pool", loss_log) ```
github_jupyter
# Data Analysis This is the main notebook performing all feature engineering, model selection, training, evaluation etc. The different steps are: - Step1 - import dependencies - Step2 - load payloads into memory - Step3A - Feature engineering custom features - Step3B - Feature engineering bag-of-words - Step3C - Feature space visualization - Step4 - Model selection - (Step4B - Load pre-trained classifiers) - Step5 - Visualization - Step6 - Website integration extract # Step1 import dependencies ``` %matplotlib inline import pandas as pd import numpy as np import pickle import matplotlib.pyplot as plt import seaborn import string from IPython.display import display from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.model_selection import train_test_split from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import learning_curve from sklearn.decomposition import TruncatedSVD from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.neural_network import MLPClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.ensemble import AdaBoostClassifier from sklearn.linear_model import SGDClassifier from sklearn.neighbors import NearestNeighbors from sklearn.neighbors.nearest_centroid import NearestCentroid from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.tree import DecisionTreeClassifier import sklearn.gaussian_process.kernels as kernels from sklearn.cross_validation import ShuffleSplit from sklearn.cross_validation import KFold from sklearn.pipeline import Pipeline from sklearn.metrics import 
confusion_matrix from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from scipy.stats import expon ``` # Step2 load the payloads into memory ``` payloads = pd.read_csv("data/payloads.csv",index_col='index') display(payloads.head(30)) ``` # Step3A - feature engineering custom features We will create our own feature space with features that might be important for this task, this includes: - length of payload - number of non-printable characters in payload - number of punctuation characters in payload - the minimum byte value of payload - the maximum byte value of payload - the mean byte value of payload - the standard deviation of payload byte values - number of distinct bytes in payload - number of SQL keywords in payload - number of javascript keywords in payload ``` def plot_feature_distribution(features): print('Properties of feature: ' + features.name) print(features.describe()) f, ax = plt.subplots(1, figsize=(10, 6)) ax.hist(features, bins=features.max()-features.min()+1, normed=1) ax.set_xlabel('value') ax.set_ylabel('fraction') plt.show() def create_feature_length(payloads): ''' Feature describing the lengh of the input ''' payloads['length'] = [len(str(row)) for row in payloads['payload']] return payloads payloads = create_feature_length(payloads) display(payloads.head()) plot_feature_distribution(payloads['length']) def create_feature_non_printable_characters(payloads): ''' Feature Number of non printable characthers within payload ''' payloads['non-printable'] = [ len([1 for letter in str(row) if letter not in string.printable]) for row in payloads['payload']] return payloads create_feature_non_printable_characters(payloads) display(payloads.head()) plot_feature_distribution(payloads['non-printable']) def create_feature_punctuation_characters(payloads): ''' Feature Number of punctuation characthers within payload ''' payloads['punctuation'] = [ len([1 for letter in str(row) if letter in string.punctuation]) for row in 
payloads['payload']] return payloads create_feature_punctuation_characters(payloads) display(payloads.head()) plot_feature_distribution(payloads['punctuation']) def create_feature_min_byte_value(payloads): ''' Feature Minimum byte value in payload ''' payloads['min-byte'] = [ min(bytearray(str(row), 'utf8')) for row in payloads['payload']] return payloads create_feature_min_byte_value(payloads) display(payloads.head()) plot_feature_distribution(payloads['min-byte']) def create_feature_max_byte_value(payloads): ''' Feature Maximum byte value in payload ''' payloads['max-byte'] = [ max(bytearray(str(row), 'utf8')) for row in payloads['payload']] return payloads create_feature_max_byte_value(payloads) display(payloads.head()) plot_feature_distribution(payloads['max-byte']) def create_feature_mean_byte_value(payloads): ''' Feature Maximum byte value in payload ''' payloads['mean-byte'] = [ np.mean(bytearray(str(row), 'utf8')) for row in payloads['payload']] return payloads create_feature_mean_byte_value(payloads) display(payloads.head()) plot_feature_distribution(payloads['mean-byte'].astype(int)) def create_feature_std_byte_value(payloads): ''' Feature Standard deviation byte value in payload ''' payloads['std-byte'] = [ np.std(bytearray(str(row), 'utf8')) for row in payloads['payload']] return payloads create_feature_std_byte_value(payloads) display(payloads.head()) plot_feature_distribution(payloads['std-byte'].astype(int)) def create_feature_distinct_bytes(payloads): ''' Feature Number of distinct bytes in payload ''' payloads['distinct-bytes'] = [ len(list(set(bytearray(str(row), 'utf8')))) for row in payloads['payload']] return payloads create_feature_distinct_bytes(payloads) display(payloads.head()) plot_feature_distribution(payloads['distinct-bytes']) sql_keywords = pd.read_csv('data/SQLKeywords.txt', index_col=False) def create_feature_sql_keywords(payloads): ''' Feature Number of SQL keywords within payload ''' payloads['sql-keywords'] = [ len([1 for keyword 
in sql_keywords['Keyword'] if str(keyword).lower() in str(row).lower()]) for row in payloads['payload']] return payloads create_feature_sql_keywords(payloads) display(type(sql_keywords)) display(payloads.head()) plot_feature_distribution(payloads['sql-keywords']) js_keywords = pd.read_csv('data/JavascriptKeywords.txt', index_col=False) def create_feature_javascript_keywords(payloads): ''' Feature Number of Javascript keywords within payload ''' payloads['js-keywords'] = [len([1 for keyword in js_keywords['Keyword'] if str(keyword).lower() in str(row).lower()]) for row in payloads['payload']] return payloads create_feature_javascript_keywords(payloads) display(payloads.head()) plot_feature_distribution(payloads['js-keywords']) ``` define a function that makes a feature vector from the payload using the custom features ``` def create_features(payloads): features = create_feature_length(payloads) features = create_feature_non_printable_characters(features) features = create_feature_punctuation_characters(features) features = create_feature_max_byte_value(features) features = create_feature_min_byte_value(features) features = create_feature_mean_byte_value(features) features = create_feature_std_byte_value(features) features = create_feature_distinct_bytes(features) features = create_feature_sql_keywords(features) features = create_feature_javascript_keywords(features) del features['payload'] return features ``` ### Scoring custom features Score the custom features using the SelectKBest function, then visualize the scores in a graph to see which features are less significant ``` Y = payloads['is_malicious'] X = create_features(pd.DataFrame(payloads['payload'].copy())) test = SelectKBest(score_func=chi2, k='all') fit = test.fit(X, Y) # summarize scores print(fit.scores_) features = fit.transform(X) # summarize selected features # summarize scores np.set_printoptions(precision=2) print(fit.scores_) # Get the indices sorted by most important to least important indices = 
np.argsort(fit.scores_) # To get your top 10 feature names featuress = [] for i in range(10): featuress.append(X.columns[indices[i]]) display(featuress) display([featuress[i] + ' ' + str(fit.scores_[i]) for i in indices[range(10)]]) plt.rcdefaults() fig, ax = plt.subplots() y_pos = np.arange(len(featuress)) performance = 3 + 10 * np.random.rand(len(featuress)) error = np.random.rand(len(featuress)) ax.barh(y_pos, fit.scores_[indices[range(10)]], align='center', color='green', ecolor='black') ax.set_yticks(y_pos) ax.set_yticklabels(featuress) ax.set_xscale('log') #ax.invert_yaxis() # labels read top-to-bottom ax.set_xlabel('Points') ax.set_title('SelectKBest()') plt.show() ``` # Step3B - Feature engineering using bag of words techniques. Additional to our custom feature space, we will create 6 more feature spaces using bag-of-words techniques The following vectorizers below is another way of creating features for text input. We will test the performance of these techniques independently from our custom features in Step 3A. We will create vectorizers of these combinations: - 1-grams CountVectorizer - 2-grams CountVectorizer - 3-grams CountVectorizer - 1-grams TfidfVectorizer - 2-grams TfidfVectorizer - 3-grams TfidfVectorizer The type of N-gram function determines how the actual "words" should be created from the payload string Each vectorizer is used later in Step4 in Pipeline objects before training See report for further explanation ### 1-Grams features create a Countvectorizer and TF-IDFvectorizer that uses 1-grams. 
1-grams equals one feature for each letter/symbol recorded ``` def get1Grams(payload_obj): '''Divides a string into 1-grams Example: input - payload: "<script>" output- ["<","s","c","r","i","p","t",">"] ''' payload = str(payload_obj) ngrams = [] for i in range(0,len(payload)-1): ngrams.append(payload[i:i+1]) return ngrams tfidf_vectorizer_1grams = TfidfVectorizer(tokenizer=get1Grams) count_vectorizer_1grams = CountVectorizer(min_df=1, tokenizer=get1Grams) ``` ### 2-Grams features create a Countvectorizer and TF-IDFvectorizer that uses 2-grams. ``` def get2Grams(payload_obj): '''Divides a string into 2-grams Example: input - payload: "<script>" output- ["<s","sc","cr","ri","ip","pt","t>"] ''' payload = str(payload_obj) ngrams = [] for i in range(0,len(payload)-2): ngrams.append(payload[i:i+2]) return ngrams tfidf_vectorizer_2grams = TfidfVectorizer(tokenizer=get2Grams) count_vectorizer_2grams = CountVectorizer(min_df=1, tokenizer=get2Grams) ``` ### 3-Grams features Create a Countvectorizer and TF-IDFvectorizer that uses 3-grams ``` def get3Grams(payload_obj): '''Divides a string into 3-grams Example: input - payload: "<script>" output- ["<sc","scr","cri","rip","ipt","pt>"] ''' payload = str(payload_obj) ngrams = [] for i in range(0,len(payload)-3): ngrams.append(payload[i:i+3]) return ngrams tfidf_vectorizer_3grams = TfidfVectorizer(tokenizer=get3Grams) count_vectorizer_3grams = CountVectorizer(min_df=1, tokenizer=get3Grams) ``` ## Step3C - Feature space visualization After creating our different feature spaces to later train each classifier on, we first examine them visually by projecting the feature spaces into two dimensions using Principle Component Analysis Graphs are shown below displaying the data in 3 out of 7 of our feature spaces ``` def visualize_feature_space_by_projection(X,Y,title='PCA'): '''Plot a two-dimensional projection of the dataset in the specified feature space input: X - data Y - labels title - title of plot ''' pca = 
TruncatedSVD(n_components=2) X_r = pca.fit(X).transform(X) # Percentage of variance explained for each components print('explained variance ratio (first two components): %s' % str(pca.explained_variance_ratio_)) plt.figure() colors = ['blue', 'darkorange'] lw = 2 #Plot malicious and non-malicious separately with different colors for color, i, y in zip(colors, [0, 1], Y): plt.scatter(X_r[Y == i, 0], X_r[Y == i, 1], color=color, alpha=.3, lw=lw, label=i) plt.legend(loc='best', shadow=False, scatterpoints=1) plt.title(title) plt.show() ``` ### 1-Grams CountVectorizer feature space visualization ``` X = count_vectorizer_1grams.fit_transform(payloads['payload']) Y = payloads['is_malicious'] visualize_feature_space_by_projection(X,Y,title='PCA visualization of 1-grams CountVectorizer feature space') ``` ### 3-Grams TFIDFVectorizer feature space visualization ``` X = tfidf_vectorizer_3grams.fit_transform(payloads['payload']) Y = payloads['is_malicious'] visualize_feature_space_by_projection(X,Y,title='PCA visualization of 3-grams TFIDFVectorizer feature space') ``` ### Custom feature space visualization ``` X = create_features(pd.DataFrame(payloads['payload'].copy())) Y = payloads['is_malicious'] visualize_feature_space_by_projection(X,Y,title='PCA visualization of custom feature space') ``` # Step4 - Model selection and evaluation First, we will automate hyperparameter tuning and out of sample testing using train_model below ``` def train_model(clf, param_grid, X, Y): '''Trains and evaluates the model clf from input The function selects the best model of clf by optimizing for the validation data, then evaluates its performance using the out of sample test data. 
input - clf: the model to train param_grid: a dict of hyperparameters to use for optimization X: features Y: labels output - the best estimator (trained model) the confusion matrix from classifying the test data ''' #First, partition into train and test data X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42) n_iter = 5 #If number of possible iterations are less than prefered number of iterations, #set it to the number of possible iterations #number of possible iterations are not less than prefered number of iterations if any argument is expon() #because expon() is continous (writing 100 instead, could be any large number) n_iter = min(n_iter,np.prod([ 100 if type(xs) == type(expon()) else len(xs) for xs in param_grid.values() ])) #perform a grid search for the best parameters on the training data. #Cross validation is made to select the parameters, so the training data is actually split into #a new train data set and a validation data set, K number of times cv = ShuffleSplit(n=len(X_train), n_iter=5, test_size=0.2, random_state=0) #DEBUG: n_iter=10 #cv = KFold(n=len(X), n_folds=10) random_grid_search = RandomizedSearchCV( clf, param_distributions=param_grid, cv=cv, scoring='f1', n_iter=n_iter, #DEBUG 1 random_state=5, refit=True, verbose=10 ) '''Randomized search used instead. We have limited computing power grid_search = GridSearchCV( clf, param_grid=param_grid, cv=cv, scoring='f1', #accuracy/f1/f1_weighted all give same result? 
verbose=10, n_jobs=-1 ) grid_search.fit(X_train, Y_train) ''' random_grid_search.fit(X_train, Y_train) #Evaluate the best model on the test data Y_test_predicted = random_grid_search.best_estimator_.predict(X_test) Y_test_predicted_prob = random_grid_search.best_estimator_.predict_proba(X_test)[:, 1] confusion = confusion_matrix(Y_test, Y_test_predicted) TP = confusion[1, 1] TN = confusion[0, 0] FP = confusion[0, 1] FN = confusion[1, 0] #Calculate recall (sensitivity) from confusion matrix sensitivity = TP / float(TP + FN) #Calculate specificity from confusion matrix specificity = TN / float(TN + FP) #Calculate accuracy accuracy = (confusion[0][0] + confusion[1][1]) / (confusion.sum().sum()) #Calculate axes of ROC curve fpr, tpr, thresholds = roc_curve(Y_test, Y_test_predicted_prob) #Area under the ROC curve auc = roc_auc_score(Y_test, Y_test_predicted_prob) return { 'conf_matrix':confusion, 'accuracy':accuracy, 'sensitivity':sensitivity, 'specificity':specificity, 'auc':auc, 'params':random_grid_search.best_params_, 'model':random_grid_search.best_estimator_, 'roc':{'fpr':fpr,'tpr':tpr,'thresholds':thresholds} } ``` Then, we will use the train_model function to train, optimize and retrieve out of sample testing results from a range of classifiers. 
Classifiers tested using our custom feature space: - AdaBoost - SGD classifier - MultiLayerPerceptron classifier - Logistic Regression - Support Vector Machine - Random forest - Decision Tree - Multinomial Naive Bayes Classifiers tested using bag-of-words feature spaces: - MultiLayerPerceptron classifier - Logistic Regression - Support Vector Machine - Random forest - Multinomial Naive Bayes Some classifiers were unable to train using a bag-of-words feature space because they couldn't handle sparse graphs All their best parameters with their performance is stored in a dataframe called classifier_results Make dictionary of models with parameters to optimize using bag-of-words feature spaces ``` def create_classifier_inputs_using_vectorizers(vectorizer, subscript): '''make pipelines of the specified vectorizer with the classifiers to train input - vectorizer: the vectorizer to add to the pipelines subscript: subscript name for the dictionary key output - A dict of inputs to use for train_model(); a pipeline and a dict of params to optimize ''' classifier_inputs = {} classifier_inputs[subscript + ' MLPClassifier'] = { 'pipeline':Pipeline([('vect', vectorizer),('clf',MLPClassifier( activation='relu', solver='adam', early_stopping=False, verbose=True ))]), 'dict_params': { 'vect__min_df':[1,2,5,10,20,40], 'clf__hidden_layer_sizes':[(500,250,125,62)], 'clf__alpha':[0.0005,0.001,0.01,0.1,1], 'clf__learning_rate':['constant','invscaling'], 'clf__learning_rate_init':[0.001,0.01,0.1,1], 'clf__momentum':[0,0.9], } } ''' classifier_inputs[subscript + ' MultinomialNB'] = { 'pipeline':Pipeline([('vect', vectorizer),('clf',MultinomialNB())]), 'dict_params': { 'vect__min_df':[1,2,5,10,20,40] } } classifier_inputs[subscript + ' RandomForest'] = { 'pipeline':Pipeline([('vect', vectorizer),('clf',RandomForestClassifier( max_depth=None,min_samples_split=2, random_state=0))]), 'dict_params': { 'vect__min_df':[1,2,5,10,20,40], 'clf__n_estimators':[10,20,40,60] } } 
classifier_inputs[subscript + ' Logistic'] = { 'pipeline':Pipeline([('vect', vectorizer), ('clf',LogisticRegression())]), 'dict_params': { 'vect__min_df':[1,2,5,10,20,40], 'clf__C':[0.001, 0.01, 0.1, 1, 10, 100, 1000] } } classifier_inputs[subscript + ' SVM'] = { 'pipeline':Pipeline([('vect', vectorizer), ('clf',SVC(probability=True))]), 'dict_params': { 'vect__min_df':[1,2,5,10,20,40], 'clf__C':[0.001, 0.01, 0.1, 1, 10, 100, 1000], 'clf__gamma':[0.001, 0.0001,'auto'], 'clf__kernel':['rbf'] } } ''' return classifier_inputs ``` Make dictionary of models with parameters to optimize using custom feature spaces ``` def create_classifier_inputs(subscript): classifier_inputs = {} '''classifier_inputs[subscript + ' GPC'] = { 'pipeline':GaussianProcessClassifier(), 'dict_params': { 'kernel':[ 1.0*kernels.RBF(1.0), 1.0*kernels.Matern(), 1.0*kernels.RationalQuadratic(), 1.0*kernels.DotProduct() ] } }''' classifier_inputs[subscript + ' AdaBoostClassifier'] = { 'pipeline':AdaBoostClassifier(n_estimators=100), 'dict_params': { 'n_estimators':[10,20,50, 100], 'learning_rate':[0.1, 0.5, 1.0, 2.0] } } classifier_inputs[subscript + ' SGD'] = { 'pipeline':SGDClassifier(loss="log", penalty="l2"), 'dict_params': { 'learning_rate': ['optimal'] } } classifier_inputs[subscript + ' RandomForest'] = { 'pipeline':RandomForestClassifier( max_depth=None,min_samples_split=2, random_state=0), 'dict_params': { 'n_estimators':[10,20,40,60] } } classifier_inputs[subscript + ' DecisionTree'] = { 'pipeline': DecisionTreeClassifier(max_depth=5), 'dict_params': { 'min_samples_split': [2] } } '''classifier_inputs[subscript + ' MLPClassifier'] = { 'pipeline':MLPClassifier( activation='relu', solver='adam', early_stopping=False, verbose=True ), 'dict_params': { 'hidden_layer_sizes':[(300, 200, 150, 150), (30, 30, 30), (150, 30, 30, 150), (400, 250, 100, 100) , (150, 200, 300)], 'alpha':[0.0005,0.001,0.01,0.1,1], 'learning_rate':['constant','invscaling'], 'learning_rate_init':[0.0005,0.001,0.01,0.1,1], 
'momentum':[0,0.9], } }''' classifier_inputs[subscript + ' Logistic'] = { 'pipeline':LogisticRegression(), 'dict_params': { 'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000] } } classifier_inputs[subscript + ' MultinomialNB'] = { 'pipeline':MultinomialNB(), 'dict_params': { 'alpha': [1.0] } } '''classifier_inputs[subscript + ' SVM'] = { 'pipeline':SVC(probability=True), 'dict_params': { 'C':[0.001, 0.01, 0.1, 1, 10, 100, 1000], 'gamma':[0.001, 0.0001,'auto'], 'kernel':['rbf'] } }''' return classifier_inputs ``` Create a new result table ``` classifier_results = pd.DataFrame(columns=['accuracy','sensitivity','specificity','auc','conf_matrix','params','model','roc'])#,index=classifier_inputs.keys()) ``` Use the 6 different feature spaces generated from the vectorizers previously above, and train every classifier in classifier_inputs in every feature space ### P.S! Don't try to run this, it will take several days to complete ### Instead skip to Step4B ``` classifier_inputs = {} classifier_inputs.update(create_classifier_inputs_using_vectorizers(count_vectorizer_1grams,'count 1grams')) classifier_inputs.update(create_classifier_inputs_using_vectorizers(count_vectorizer_2grams,'count 2grams')) classifier_inputs.update(create_classifier_inputs_using_vectorizers(count_vectorizer_3grams,'count 3grams')) classifier_inputs.update(create_classifier_inputs_using_vectorizers(tfidf_vectorizer_1grams,'tfidf 1grams')) classifier_inputs.update(create_classifier_inputs_using_vectorizers(tfidf_vectorizer_2grams,'tfidf 2grams')) classifier_inputs.update(create_classifier_inputs_using_vectorizers(tfidf_vectorizer_3grams,'tfidf 3grams')) X = payloads['payload'] Y = payloads['is_malicious'] for classifier_name, inputs in classifier_inputs.items(): display(inputs['dict_params']) if classifier_name in classifier_results.index.values.tolist(): print('Skipping ' + classifier_name + ', already trained') else: result_dict = train_model(inputs['pipeline'],inputs['dict_params'],X,Y) 
classifier_results.loc[classifier_name] = result_dict display(classifier_results) display(pd.DataFrame(payloads['payload'].copy())) ``` Use our custom feature space, and train every classifier in classifier_inputs_custom with ### P.S! Don't try to run this, it will take many hours to complete ### Instead skip to Step4B ``` classifier_inputs_custom = {} #Get classifiers and parameters to optimize classifier_inputs_custom.update(create_classifier_inputs('custom')) #Extract payloads and labels Y = payloads['is_malicious'] X = create_features(pd.DataFrame(payloads['payload'].copy())) #Select the best features X_new = SelectKBest(score_func=chi2, k=4).fit_transform(X,Y) #Call train_model for every classifier and save results to classifier_results for classifier_name, inputs in classifier_inputs_custom.items(): if classifier_name in classifier_results.index.values.tolist(): print('Skipping ' + classifier_name + ', already trained') else: result_dict = train_model(inputs['pipeline'],inputs['dict_params'],X,Y) classifier_results.loc[classifier_name] = result_dict display(classifier_results) #pickle.dump( classifier_results, open( "data/trained_classifiers_custom_all_features.p", "wb" ) ) #Save classifiers in a pickle file to be able to re-use them without re-training pickle.dump( classifier_results, open( "data/trained_classifiers.p", "wb" ) ) ``` ### Classifier results ``` #Display the results for the classifiers that were trained using our custom feature space custom_features_classifiers = pickle.load( open("data/trained_classifier_custom_all_features.p", "rb")) display(custom_features_classifiers) #Display the results for the classifiers that were using bag of words feature spaces classifier_results = pickle.load( open( "data/trained_classifiers.p", "rb" ) ) display(classifier_results) #Combine the two tables into one table classifier_results = classifier_results.append(custom_features_classifiers) classifier_results = 
classifier_results.sort_values(['sensitivity','accuracy'], ascending=[False,False]) display(classifier_results) ``` ### F1-score Calculate F1-score of each classifier and add to classifiers table (We didn't implement this in the train_model function as with the other performance metrics because we've already done a 82 hour training session before this and didn't want to re-run the entire training just to add F1-score from inside train_model) ``` def f1_score(conf_matrix): precision = conf_matrix[0][0] / (conf_matrix[0][0] + conf_matrix[0][1] ) recall = conf_matrix[0][0] / (conf_matrix[0][0] + conf_matrix[1][0] ) return (2 * precision * recall) / (precision + recall) #load classifier table if not yet loaded classifier_results = pickle.load( open( "data/trained_classifiers.p", "rb" ) ) #Calculate F1-scores classifier_results['F1-score'] = [ f1_score(conf_matrix) for conf_matrix in classifier_results['conf_matrix']] #Re-arrange columns classifier_results = classifier_results[['F1-score','accuracy','sensitivity','specificity','auc','conf_matrix','params','model','roc']] #re-sort on F1-score classifier_results = classifier_results.sort_values(['F1-score','accuracy'], ascending=[False,False]) display(classifier_results) ``` Final formating Convert numeric columns to float Round numeric columns to 4 decimals ``` classifier_results[['F1-score','accuracy','sensitivity','specificity','auc']] = classifier_results[['F1-score','accuracy','sensitivity','specificity','auc']].apply(pd.to_numeric) classifier_results = classifier_results.round({'F1-score':4,'accuracy':4,'sensitivity':4,'specificity':4,'auc':4}) #classifier_results[['F1-score','accuracy','sensitivity','specificity','auc','conf_matrix','params']].to_csv('data/classifiers_result_table.csv') display(classifier_results.dtypes) ``` ### Export classifiers First, export full list of trained classifiers for later use Second, pick one classifier to save in a separate pickle, used later to implement in a dummy server ``` #save 
complete list of classifiers to 'trained_classifiers' pickle.dump( classifier_results, open( "data/trained_classifiers.p", "wb" ) ) #In this case, we are going to implement tfidf 2grams RandomForest in our dummy server classifier = (custom_features_classifiers['model'].iloc[0]) print(classifier) #Save classifiers in a pickle file to be able to re-use them without re-training pickle.dump( classifier, open( "data/tfidf_2grams_randomforest.p", "wb" ) ) ``` ## Step4B - load pre-trained classifiers Instead of re-training all classifiers, load the classifiers from disk that we have already trained ``` classifier_results = pickle.load( open( "data/trained_classifiers.p", "rb" ) ) ``` ## Step5 - Visualization In this section we will visualize: - Histogram of classifier performances - Learning curves - ROC curves ### Performance histogram First, make a histogram of classifier performance measured by F1-score. Same classifier using different feature spaces are clustered together in the graph Also, print the table of F1-scores and computes the averages along the x-axis and y-axis, e.g. 
the average F1-score for each classifier, and the average F1-score for each feature space ``` def get_classifier_name(index): ''' Returns the name of the classifier at the given index name ''' return index.split()[len(index.split())-1] #Group rows together using same classifier grouped = classifier_results.groupby(get_classifier_name) hist_df = pd.DataFrame(columns=['custom','count 1grams','count 2grams','count 3grams','tfidf 1grams','tfidf 2grams','tfidf 3grams']) for classifier, indices in grouped.groups.items(): #Make a list of feature spaces feature_spaces = indices.tolist() feature_spaces = [feature_space.replace(classifier,'') for feature_space in feature_spaces] feature_spaces = [feature_space.strip() for feature_space in feature_spaces] #If no result exists, it will stay as 0 hist_df.loc[classifier] = { 'custom':0, 'count 1grams':0, 'count 2grams':0, 'count 3grams':0, 'tfidf 1grams':0, 'tfidf 2grams':0, 'tfidf 3grams':0 } #Extract F1-score from classifier_results to corrensponding entry in hist_df for fs in feature_spaces: hist_df[fs].loc[classifier] = classifier_results['F1-score'].loc[fs + ' ' + classifier] #Plot the bar plot f, ax = plt.subplots() ax.set_ylim([0.989,1]) hist_df.plot(kind='bar', figsize=(12,7), title='F1-score of all models grouped by classifiers', ax=ax, width=0.8) #Make Avgerage F1-score row and cols for the table and print the table hist_df_nonzero = hist_df.copy() hist_df_nonzero[hist_df > 0] = True hist_df['Avg Feature'] = (hist_df.sum(axis=1) / np.array(hist_df_nonzero.sum(axis=1))) hist_df_nonzero = hist_df.copy() hist_df_nonzero[hist_df > 0] = True hist_df.loc['Avg Classifier'] = (hist_df.sum(axis=0) / np.array(hist_df_nonzero.sum(axis=0))) hist_df = hist_df.round(4) display(hist_df) ``` ### Learning curves Create learning curves for a sample of classifiers. 
This is to visualize how the dataset size impacts the performance ``` def plot_learning_curve(df_row,X,Y): '''Plots the learning curve of a classifier with its parameters input - df_row: row of classifier_result X: payload data Y: labels ''' #The classifier to plot learning curve for estimator = df_row['model'] title = 'Learning curves for classifier ' + df_row.name train_sizes = np.linspace(0.1,1.0,5) cv = ShuffleSplit(n=len(X), n_iter=3, test_size=0.2, random_state=0) #plot settings plt.figure() plt.title(title) plt.xlabel("Training examples") plt.ylabel("Score") print('learning curve in process...') train_sizes, train_scores, test_scores = learning_curve( estimator, X, Y, cv=cv, n_jobs=-1, train_sizes=train_sizes, verbose=0) #Change verbose=10 to print progress print('Learning curve done!') train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score") plt.legend(loc="best") plt.show() ``` Three examples of learning curves from the trained classifiers. 
All learning curves have upsloping cross-validation score at the end, which means that adding more data would potentially increase the accuracy ``` #plot learning curve for tfidf 1grams RandomForest X = payloads['payload'] Y = payloads['is_malicious'] plot_learning_curve(classifier_results.iloc[0],X,Y) #plot learning curve for count 3grams MultinomialNB X = payloads['payload'] Y = payloads['is_malicious'] plot_learning_curve(classifier_results.iloc[6],X,Y) #plot learning curve for custom svm X = create_features(pd.DataFrame(payloads['payload'].copy())) Y = payloads['is_malicious'] plot_learning_curve(classifier_results.iloc[5],X,Y) ``` ### ROC curves Plot ROC curves for a range of classifiers to visualize the sensitivity/specificity trade-off and the AUC ``` def visualize_result(classifier_list): '''Plot the ROC curve for a list of classifiers in the same graph input - classifier_list: a subset of classifier_results ''' f, (ax1, ax2) = plt.subplots(1,2) f.set_figheight(6) f.set_figwidth(15) #Subplot 1, ROC curve for classifier in classifier_list: ax1.plot(classifier['roc']['fpr'], classifier['roc']['tpr']) ax1.scatter(1-classifier['specificity'],classifier['sensitivity'], edgecolor='k') ax1.set_xlim([0, 1]) ax1.set_ylim([0, 1.0]) ax1.set_title('ROC curve for top3 and bottom3 classifiers') ax1.set_xlabel('False Positive Rate (1 - Specificity)') ax1.set_ylabel('True Positive Rate (Sensitivity)') ax1.grid(True) #subplot 2, ROC curve zoomed for classifier in classifier_list: ax2.plot(classifier['roc']['fpr'], classifier['roc']['tpr']) ax2.scatter(1-classifier['specificity'],classifier['sensitivity'], edgecolor='k') ax2.set_xlim([0, 0.3]) ax2.set_ylim([0.85, 1.0]) ax2.set_title('ROC curve for top3 and bottom3 classifiers (Zoomed)') ax2.set_xlabel('False Positive Rate (1 - Specificity)') ax2.set_ylabel('True Positive Rate (Sensitivity)') ax2.grid(True) #Add further zoom left, bottom, width, height = [0.7, 0.27, 0.15, 0.15] ax3 = f.add_axes([left, bottom, width, height]) 
for classifier in classifier_list: ax3.plot(classifier['roc']['fpr'], classifier['roc']['tpr']) ax3.scatter(1-classifier['specificity'],classifier['sensitivity'], edgecolor='k') ax3.set_xlim([0, 0.002]) ax3.set_ylim([0.983, 1.0]) ax3.set_title('Zoomed even further') ax3.grid(True) plt.show() ``` Plot ROC curves for the top3 classifiers and the bottom 3 classifiers, sorted by F1-score Left: standard scale ROC curve Right: zoomed in version of same graph, to easier see in the upper right corner ``` indices = [0,1,2, len(classifier_results)-1,len(classifier_results)-2,len(classifier_results)-3] visualize_result([classifier_results.iloc[index] for index in indices]) ``` ## Step6 - Website integration extract This is the code needed when implementing the saved classifier in tfidf_2grams_randomforest.p on a server ``` import pickle from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.ensemble import RandomForestClassifier def get2Grams(payload_obj): '''Divides a string into 2-grams Example: input - payload: "<script>" output- ["<s","sc","cr","ri","ip","pt","t>"] ''' payload = str(payload_obj) ngrams = [] for i in range(0,len(payload)-2): ngrams.append(payload[i:i+2]) return ngrams classifier = pickle.load( open("data/tfidf_2grams_randomforest.p", "rb")) def injection_test(inputs): variables = inputs.split('&') values = [ variable.split('=')[1] for variable in variables] print(values) return 'MALICIOUS' if classifier.predict(values).sum() > 0 else 'NOT_MALICIOUS' #test injection_test display(injection_test("val1=%3Cscript%3Ekiddie")) ``` # (Step7) we can display which types of queries the classifiers failed to classify. 
These are interesting to examine for further work on how to improve the classifiers and the quality of the data set ``` pipe = Pipeline([('vect', vectorizer), ('clf',LogisticRegression(C=10))]) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42) cv = ShuffleSplit(n=len(X_train), n_iter=1, test_size=0.2, random_state=0) #DEBUG: n_iter=10 random_grid_search = RandomizedSearchCV( pipe, param_distributions={ 'clf__C':[10] }, cv=cv, scoring='roc_auc', n_iter=1, random_state=5, refit=True ) random_grid_search.fit(X_train, Y_train) #Evaluate the best model on the test data Y_test_predicted = random_grid_search.best_estimator_.predict(X_test) #Payloads classified incorrectly pd.options.display.max_colwidth = 200 print('False positives') print(X_test[(Y_test == 0) & (Y_test_predicted == 1)]) print('False negatives') print(X_test[(Y_test == 1) & (Y_test_predicted == 0)]) ```
github_jupyter
Handling files belongs also to the basic skills in programming, that's why this chapter was added as a completion by me (Kinga Sipos). <!--NAVIGATION--> < [Strings and Regular Expressions](13-Strings-and-Regular-Expressions.ipynb) | [Contents](Index.ipynb) | [Modules and Packages](15-Modules-and-Packages.ipynb) > # File Input and Output ## Filesystem operations Filesystem operations can be carried out by executing a normal shell command preceded by exclamation mark: ``` !ls ``` Another alternative to operate files is to use the ``os`` module: * ``os.getcwd()``- Returns the path to the current working directory. * ``os.chdir(path)`` - Changes the current working directory to path. * ``os.listdir(dir)`` - Returns the list of entries in directory dir (omitting โ€˜.โ€™ and โ€˜..โ€™) * ``os.makedirs(path)`` - Creates a directory; nothing happens if the directory already exists. Creates all the intermediate-level directories needed to contain the leaf. * ``os.rename(old,new)`` - Renames a file or directory from old to new. Specific path related functions are methods of ``os.path``: * ``os.path.exists(path)`` - Returns True if path exists. * ``os.path.isdir(path)`` - Returns True if path is a directory. * ``os.path.isfile(path)`` - Returns True if path is a regular file. * ``os.path.basename(path)`` - Returns the base name (the part after the last โ€˜/โ€™ character) * ``os.path.dirname(path)`` - Returns the directory name (the part before the last / character). * ``os.path.abspath(path)`` - Make path absolute (i.e., start with a /). ## Read from a file and write to a file ### Reading from and writing to textfiles 1. The **first line** of code for processing a text file usually looks like this: `with open(filename, mode) as stream:` - which prepares the file for processing. Mode is one of ``'r'``, ``'w'`` or ``'a'`` for reading, writing, appending. You can add a โ€˜+โ€™ character to enable read+write (other effects being the same). 
`stream = open(filename, mode)` is equivalent to the first line of the previous code, the difference is that `with` ensures that the file is closed after the desired operation is carried out, otherwise one should close the file explicitely by the `stream.close()` command. 2. a) If the chosen processing mode is read, the **second line** can be something like `content = stream.read()` - which returns the whole content of a file as a multiline string or `content = stream.readlines()` - which returns the content of the file as a list of one line strings or `for line in stream:` - which reads the lines of file line by line. 2. b) If the chosen processing is write, the **second line** can be `stream.write(text)` ### Exercise As an exercise we will create a file with the haikus from the previous chapter. ``` mytext = """WORKSHOP HAIKU translated by ร‰va Antal Perhaps do not even touch it. Just look at it, look at it, until it becomes beautiful. TEST QUESTION FOR EVERY DAY translated by ร‰va Antal Do you still see what you look at, or you only know: "there" "it" "is"? FROM THE BEST OF INTENTIONS translated by Gรกbor G. Gyukics and Michael Castro fall asleep; die the same way a child bites into an apple. MEETING translated by Gรกbor G. Gyukics and Michael Castro I plan it as a farewell THE HAIKU translated by Tamรกs Rรฉvbรญrรณ in front of my feet a bird sat, and then took flight. Now I'm heavier. AXIOM translated by Tamรกs Rรฉvbรญrรณ You should try and help everything to be the way it is anyway. ECHO ON EPICTETUS translated by Tamรกs Rรฉvbรญrรณ Don't say, "I lost it", about anything. Rather say, "I gave it back". AXIOM translated by Tamรกs Rรฉvbรญrรณ Parents and killers: almost-innocent servants. They just execute. ZENsation translated by Tamรกs Rรฉvbรญrรณ Look, the snow gives body to the wind! 
DISILLUSIONIST translated by Tamรกs Rรฉvbรญrรณ Why should I travel when I can be a stranger right here, standing still?""" with open('Haikus.txt', 'w') as outstream: outstream.write(mytext) ``` One can check whether the file is closed. ``` outstream.closed ``` Now let's read the first two lines from the created file. ``` with open('Haikus.txt', 'r') as instream: print(instream.readline()) print(instream.readline()) ``` This time let's read all the lines of the file into a list and print the first 6 lines. ``` with open('Haikus.txt', 'r') as instream: textlines = instream.readlines() for i in range(6): print(textlines[i]) ``` ### Reading from and writing to Comma Separated Values files Reading and writing can be performed in the same way as above. For example one can create a CSV file by the following code: ``` import csv with open('employee_file.csv', mode='w') as employee_file: employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) employee_writer.writerow(['John Smith', 'Accounting', 'November', '27']) employee_writer.writerow(['Erica Meyers', 'IT', 'March', '31']) ``` or with the following code: ``` import csv with open('employee_file2.csv', mode='w') as csv_file: fieldnames = ['emp_name', 'dept', 'birth_month', 'age'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() writer.writerow({'emp_name': 'John Smith', 'dept': 'Accounting', 'birth_month': 'November', 'age': '27'}) writer.writerow({'emp_name': 'Erica Meyers', 'dept': 'IT', 'birth_month': 'March', 'age': 31}) ``` Reading can be performed in the following way: ``` with open('employee_file2.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: if line_count == 0: print(f'Column names are {", ".join(row)}') line_count += 1 else: print(f'\t{row[0]} works at {row[1]}, has birthday in {row[2]} and is {row[3]} years old.') line_count += 1 print(f'Processed {line_count} lines.') with 
open('employee_file2.csv', mode='r') as csv_file: csv_reader = csv.DictReader(csv_file) line_count = 0 for row in csv_reader: if line_count == 0: print(f'Column names are {", ".join(row)}') line_count += 1 print(f'\t{row["emp_name"]} works in the {row["dept"]} department, and was born in {row["birth_month"]}.') line_count += 1 print(f'Processed {line_count} lines.') ``` One can import a CSV file directly as dataframe. ``` import pandas as pd df = pd.read_csv('employee_file2.csv') print(df) ``` Experiment with possible methods and attributes of dataframes! ``` df.head() df.describe() df.dtypes ``` <!--NAVIGATION--> < [Strings and Regular Expressions](13-Strings-and-Regular-Expressions.ipynb) | [Contents](Index.ipynb) | [Modules and Packages](15-Modules-and-Packages.ipynb) >
github_jupyter
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Dynamic-Schedule" data-toc-modified-id="Dynamic-Schedule-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Dynamic Schedule</a></span><ul class="toc-item"><li><span><a href="#Homogeneous-Exponential-Case" data-toc-modified-id="Homogeneous-Exponential-Case-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Homogeneous Exponential Case</a></span></li><li><span><a href="#Heterogeneous-Exponential-Case" data-toc-modified-id="Heterogeneous-Exponential-Case-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Heterogeneous Exponential Case</a></span></li><li><span><a href="#Phase-Type-Case" data-toc-modified-id="Phase-Type-Case-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Phase-Type Case</a></span><ul class="toc-item"><li><span><a href="#Phase-Type-Fit" data-toc-modified-id="Phase-Type-Fit-1.3.1"><span class="toc-item-num">1.3.1&nbsp;&nbsp;</span>Phase-Type Fit</a></span></li><li><span><a href="#Weighted-Erlang-Distribution" data-toc-modified-id="Weighted-Erlang-Distribution-1.3.2"><span class="toc-item-num">1.3.2&nbsp;&nbsp;</span>Weighted Erlang Distribution</a></span></li><li><span><a href="#Hyperexponential-Distribution" data-toc-modified-id="Hyperexponential-Distribution-1.3.3"><span class="toc-item-num">1.3.3&nbsp;&nbsp;</span>Hyperexponential Distribution</a></span></li></ul></li></ul></li></ul></div> # Dynamic Schedule _Roshan Mahes, Michel Mandjes, Marko Boon_ In this notebook we determine dynamic schedules that minimize the following cost function: \begin{align*} \omega \sum_{i=1}^{n}\mathbb{E}I_i + (1 - \omega)\sum_{i=1}^{n}\mathbb{E}W_i,\quad \omega\in(0,1), \end{align*} where $I_i$ and $W_i$ are the expected idle and waiting time associated to client $i$, respectively. We assume that the service tasks $B_1,\dots,B_n$ are independent and solve the problem assuming different types of distributions. 
The following packages are required: ``` # math import numpy as np import scipy import math from scipy.stats import binom, erlang, poisson from scipy.optimize import minimize # web scraping from urllib.request import urlopen from bs4 import BeautifulSoup as soup import pandas as pd # plotting import plotly.graph_objects as go import plotly.express as px from itertools import cycle # caching from functools import cache ``` ## Homogeneous Exponential Case In the first case, we assume $B_1,\dots,B_n \stackrel{i.i.d.}{\sim} B \stackrel{d}{=} \text{Exp}(\mu)$ for some $\mu > 0$. In our thesis, we have determined a recursive procedure. We state the result. <div class="alert alert-warning"> <b>Corollary 2.5.</b> For arrival time $t$ we have, with $X_t \sim \text{Pois}(\mu t)$ and $\ell = 2,\dots,k+1$, \begin{align*} p_{k1}(t) = \mathbb{P}(X_t\geq k),\quad p_{k\ell}(t) = \mathbb{P}(X_t = k-\ell+1). \end{align*} </div> <div class="alert alert-warning"> <b>Proposition 2.7.</b> Let $X_t \sim \text{Pois}(\mu t)$. Then \begin{align*} f_k(t) &= t\mathbb{P}(X_t\geq k) - \frac{k}{\mu}\mathbb{P}(X_t\geq k+1), \\ g_k(t) &= \frac{k(k-1)}{2\mu}\mathbb{P}(X_t\geq k+1) + (k-1)t\mathbb{P}(X_t\leq k-1) - \frac{\mu t^2}{2}\mathbb{P}(X_t\leq k-2). \end{align*} </div> <div class="alert alert-warning"> <b>Theorem 3.5.</b> Let $p_{k\ell}(t)$, $f_k(t)$ and $g_k(t)$ be given by Corollary 2.5 and Proposition 2.7. The following recursion holds: for $i=1,\dots,n-1$ and $k=1,\dots,i$, \[ C_i^{\star}(k) = \inf_{t\geq 0}\left(\omega f_k(t) + (1 - \omega)g_k(t) + \sum_{\ell=1}^{k+1}p_{k\ell}(t)C_{i+1}^{\star}(\ell)\right), \] whereas, for $k=1,\dots,n$, \[ C_n^{\star}(k) = (1-\omega)g_{k}(\infty) = (1-\omega)\frac{k(k-1)}{2\mu}. \] </div> We have implemented the formulas as follows. ``` def cost(t,i,k,mu,omega,n,C_matrix,use_h=True): """ Computes the cost of the (remaining) schedule when t is the next interarrival time. 
""" Fk = [poisson.cdf(k,mu*t), poisson.cdf(k-2,mu*t), poisson.cdf(k-1,mu*t)] f = (1 - Fk[-1]) * t - (1 - Fk[0]) * k / mu if use_h: g = (k - 1) / mu else: g = Fk[-1] * (k - 1) * t - Fk[-2] * mu * t**2 / 2 + (1 - Fk[0]) * k * (k - 1) / (2 * mu) cost = omega * f + (1 - omega) * g cost += (1 - Fk[-1]) * Cstar_homexp(i+1,1,mu,omega,n,C_matrix,use_h) for l in range(2,k+2): cost += poisson.pmf(k-l+1,mu*t) * Cstar_homexp(i+1,l,mu,omega,n,C_matrix,use_h) return cost def Cstar_homexp(i,k,mu=1,omega=1/2,n=15,C_matrix=None,use_h=True): """ Computes C*_i(k) in the homogeneous exponential case. """ if C_matrix[i-1][k-1] != None: # retrieve stored value pass elif i == n: # initial condition if use_h: C_matrix[i-1][k-1] = (1 - omega) * (k - 1) / mu else: C_matrix[i-1][k-1] = (1 - omega) * k * (k - 1) / (2 * mu) else: optimization = minimize(cost,0,args=(i,k,mu,omega,n,C_matrix,use_h),method='Nelder-Mead') C_matrix[i-1][k-1] = optimization.fun minima[i-1][k-1] = optimization.x[0] return C_matrix[i-1][k-1] ``` Now we plot our dynamic schedule for $n = 15$ and $\omega = 0.5$: ``` omega = 0.5 n = 15 # compute schedule C_matrix = [[None for k in range(n+1)] for i in range(n)] minima = [[None for k in range(n+1)] for i in range(n)] for i in range(1,n+1): for k in range(1,i+1): Cstar_homexp(i,k,mu=1,omega=omega,n=n,C_matrix=C_matrix,use_h=True) # plot schedule palette = cycle(px.colors.cyclical.mrybm[2:]) fig = go.Figure() for k in range(1,n): fig.add_trace(go.Scatter(x=np.arange(1,n+2), y=[minima[i][k-1] for i in range(n)], name=k, marker_color=next(palette))) fig.update_layout( template='plotly_white', title='$\\text{Dynamic Schedule}\ (n=' + f'{n},\ \omega={omega})$', legend_title='$\\text{Clients in System}\ (k)$', xaxis = {'title': '$\\text{Client Position}\ (i)$', 'range': [0.7, n - 0.7], 'dtick': 1}, yaxis = {'title': '$\\text{Interarrival Time}\ (\\tau_{i}(k))$', 'dtick': 1} ) fig.show() print(f'Cost: {C_matrix[0][0]}') minima ``` ## Heterogeneous Exponential Case Now we consider 
the case that the service tasks $B_i$ are independent and _heterogeneous exponentially_ distributed, i.e. $B_i \sim \text{Exp}(\mu_i)$, $i=1,\dots,n$. For ease we assume that all $\mu_i$ are distinct, i.e., $\mu_i \neq \mu_j$ for $i,j = 1,\dots,n$, $i\neq j$, but the case that some of the $\mu_i$ coincide can be considered analogously. We obtain the following result. <div class="alert alert-warning"> <b>Lemma 2.12.</b> For $k=1,\dots,n$ and $\ell=0,\dots,n-k$, we can write the density $\varphi_{k\ell}$ as \[ \varphi_{k\ell}(s) := \mathbb{P}\left(\sum_{j=k}^{k+\ell}B_j \in\mathrm{d}s\right) = \sum_{j=k}^{k+\ell}c_{k\ell j}e^{-\mu_j s},\quad s \geq 0. \] The coefficients $c_{k\ell j}$ are given recursively through $c_{k0k} = \mu_k$ and \[ c_{k,\ell+1,j} = c_{k\ell j}\frac{\mu_{k+\ell+1}}{\mu_{k+\ell+1} - \mu_j}\quad \text{for}\ j = k,\dots,k+\ell,\quad c_{k,\ell+1,k+\ell+1} = \sum_{j=k}^{k+\ell}c_{k\ell j}\frac{\mu_{k+\ell+1}}{\mu_j - \mu_{k+\ell+1}}. \] </div> <div class="alert alert-warning"> <b>Proposition 2.16.</b> For $i=1,\dots,n-1$, $k=1,\dots,i$, $\ell = 2,\dots,k+1$ and $t\geq 0$, \[ p_{k1,i}(t) = 1 - \sum_{\ell=2}^{k+1}p_{k\ell,i}(t),\quad p_{k\ell,i}(t) = \frac{\varphi_{i-k+1,k-\ell+1}(t)}{\mu_{i-\ell+2}}. \] </div> <div class="alert alert-warning"> <b>Proposition 2.17.</b> For $i=1,\dots,n-1$ and $k=1,\dots,i$, \begin{align*} f_{k,i}(t) = t - \sum_{j=i-k+1}^{i}\frac{c_{i-k+1,k-1,j}}{\mu_j}\psi_{j}(t), \quad g_{k,i}(t) = \sum_{\ell=0}^{k-1}(k-\ell-1)\sum_{j=i-k+1}^{i-k+\ell+1}\frac{c_{i-k+1,\ell,j}}{\mu_{i-k+\ell+1}}\psi_{j}(t), \end{align*} with $\psi_{j}(t) = (1 - e^{-\mu_j t})/\mu_j$. 
</div> <div class="alert alert-warning"> <b>Theorem 3.9.</b> We can determine the $C^{\star}_i(k)$ recursively: for $i=1,\dots,n-1$ and $k=1,\dots,i$, \[ C^{\star}_i(k) = \inf_{t\ge 0}\left(\omega f_{k,i}(t) + (1-\omega)g_{k,i}(t) + \sum_{\ell=1}^{k+1}p_{k\ell,i}(t)C^{\star}_{i+1}(\ell)\right), \] whereas, for $k=1,\dots,n$, \[ C^{\star}_n(k) = (1 - \omega)g_{k,n}(\infty) = (1 - \omega)\sum_{\ell=0}^{k-1}(k-\ell-1)\frac{1}{\mu_{n-k+\ell+1}}. \] </div> These formulas lead to the following implementation. ``` # helper functions def c(k,l,j,mu): """Computes the weights c of phi recursively (Lemma 2.23).""" # storage indices k_, l_, j_ = k - 1, l, j - 1 if c_stored[k_][l_][j_] != None: pass elif k == j and not l: c_stored[k_][l_][j_] = mu[k_] elif l: if j >= k and j < k + l: c_stored[k_][l_][j_] = c(k,l-1,j,mu) * mu[k_+l_] / (mu[k_+l_] - mu[j-1]) elif k + l == j: c_stored[k_][l_][j_] = sum([c(k,l-1,m,mu) * mu[j-1] / (mu[m-1] - mu[j-1]) for m in range(k,k+l)]) return c_stored[k_][l_][j_] def phi(k,l,s,mu): return sum([c(k,l,j,mu) * math.exp(-mu[j-1] * s) for j in range(k,k+l+1)]) def psi(j,t,mu): return (1 - math.exp(-mu[j-1] * t)) / mu[j-1] # transition probabilities def trans_prob_het(t,i,k,mu): """Computes the transition probabilities (Prop. 
2.25).""" p = [phi(i-k+1,k-l+1,t,mu) / mu[i-l+1] for l in range(2,k+2)] return [1 - sum(p)] + p # cost function def cost_het(t,i,k,mu,omega,n,C_matrix,use_h=True): """Computes the cost of the (remaining) schedule when t is the next interarrival time.""" f = t - sum([c(i-k+1,k-1,j,mu) * psi(j,t,mu) / mu[j-1] for j in range(i-k+1,i+1)]) if use_h: g = sum(1 / mu[i-k:i-1]) else: g = 0 for l in range(k-1): g += (k - l - 1) * sum([c(i-k+1,l,j,mu) * psi(j,t,mu) / mu[i-k+l] for j in range(i-k+1,i-k+l+2)]) p = trans_prob_het(t,i,k,mu) cost = omega * f + (1 - omega) * g cost += sum([Cstar_het(i+1,l,mu,omega,n,C_matrix,use_h) * p[l-1] for l in range(1,k+2)]) return cost def Cstar_het(i,k,mu,omega,n,C_matrix,use_h=True): """Computes C*_i(k) in the heterogeneous exponential case.""" if C_matrix[i-1][k-1] != None: # retrieve stored value pass elif i == n: # initial condition if use_h: C_matrix[i-1][k-1] = (1 - omega) * sum(1 / mu[i-k:i-1]) else: C_matrix[i-1][k-1] = (1 - omega) * sum([(k - l - 1) / mu[n-k+l] for l in range(k)]) else: optimization = minimize(cost_het,0,args=(i,k,mu,omega,n,C_matrix,use_h))#,bounds=((0,500),)) C_matrix[i-1][k-1] = optimization.fun minima[i-1][k-1] = optimization.x[0] return C_matrix[i-1][k-1] ``` Again we can plot our dynamic schedule: ``` omega = 0.5 n = 11 mus = np.linspace(0.5,1.5,n) # plot schedule palette = cycle(px.colors.cyclical.mrybm[2:]) fig = go.Figure() print(f'omega = {omega}\nmu = {mus}\n') C_matrix = [[None for k in range(n)] for i in range(n)] minima = [[None for k in range(n)] for i in range(n)] c_stored = [[[None for j in range(n)] for l in range(n)] for k in range(n)] # compute values for i in range(1,n+1): for k in range(1,i+1): Cstar_het(i,k,mus,omega=omega,n=n,C_matrix=C_matrix,use_h=True) # cost print(f'Cost: {C_matrix[0][0]}') for k in range(1,n): fig.add_trace(go.Scatter(x=np.arange(1,n+2), y=[minima[i][k-1] for i in range(n)], name=k, marker_color=next(palette))) fig.update_layout( template='plotly_white', 
title='$\\text{Dynamic Schedule}\ (n=' + f'{n},\ \omega={omega})$', legend_title='$\\text{Clients in System}\ (k)$', xaxis = {'title': '$\\text{Client Position}\ (i)$', 'range': [0.7, n - 0.7], 'dtick': 1}, yaxis = {'title': '$\\text{Interarrival Time}\ (\\tau_{i}(k))$', 'dtick': 1}, width=800, height=600 ) fig.show() ``` ## Phase-Type Case Our most general case consists of service time distributions constructed by convolutions and mixtures of exponential distributions, the so-called _phase-type distributions_. ### Phase-Type Fit There are two special cases of phase-type distributions that are of particular interest: the weighted Erlang distribution and the hyperexponential distribution. The idea is to fit the first two moments of the real service-time distribution. The former distribution can be used to approximate any non-negative distribution with coefficient of variation below 1, whereas the latter can be used if this coefficient of variation is larger than 1. The parameters of the weighted Erlang and hyperexponential distribution are obtained with the following function. ``` def SCV_to_params(SCV, mean=1): # weighted Erlang case if SCV <= 1: K = math.floor(1/SCV) p = ((K + 1) * SCV - math.sqrt((K + 1) * (1 - K * SCV))) / (SCV + 1) mu = (K + 1 - p) / mean return K, p, mu # hyperexponential case else: p = 0.5 * (1 + np.sqrt((SCV - 1) / (SCV + 1))) mu = 1 / mean mu1 = 2 * p * mu mu2 = 2 * (1 - p) * mu return p, mu1, mu2 ``` In the following subsections we develop procedures for finding the optimal static schedule in the weighted Erlang case and the hyperexponential case, respectively. ### Weighted Erlang Distribution In this case, we assume that the service time $B$ equals w.p. 
$p\in[0,1]$ an Erlang-distributed random variable with $K$ exponentially distributed phases, each of them having mean $\mu^{-1}$, and with probability $1-p$ an Erlang-distributed random variable with $K+1$ exponentially distributed phases, again with mean $\mu^{-1}$: \begin{align*} B \stackrel{\text{d}}{=} \sum_{i=1}^{K}X_i + X_{K+1}\mathbb{1}_{\{U > p\}}, \end{align*} where $X_i \stackrel{iid}{\sim} \text{Exp}(\mu)$ and $U\sim\text{Unif}[0,1]$. The following recursion can be found in the thesis. <div class="alert alert-warning"> <b>Theorem 3.16 (discrete version).</b> For $i=1,\dots,n-1$, $k=1,\dots,i$, and $m\in\mathbb{N}_0$, \[ \xi_i(k,m) = \inf_{t\in \mathbb{N}_0}\Bigg(\omega \bar{f}^{\circ}_{k,m\Delta}(t\Delta) + (1 - \omega)\bar{h}^{\circ}_{k,m\Delta} + \sum_{\ell=2}^{k}\sum_{j=0}^{t}\bar{q}_{k\ell,mj}(t)\xi_{i+1}(\ell,j) + P^{\downarrow}_{k,m\Delta}(t\Delta)\xi_{i+1}(1,0) + P^{\uparrow}_{k,m\Delta}(t\Delta)\xi_{i+1}(k+1,m+t) \Bigg), \] whereas, for $k=1,\dots,n$ and $m \in \mathbb{N}_0$, \[ \xi_n(k,m) = (1 - \omega)\bar{h}^{\circ}_{k,m\Delta}. \] </div> Below is our implementation. ``` ### helper functions @cache def gamma(z, u): gamma_circ = poisson.pmf(z-1, mu*u) if z == K + 1: gamma_circ *= (1 - p) return gamma_circ / B_sf(u) @cache def B_sf(t): """The survival function P(B > t).""" return poisson.cdf(K-1, mu*t) + (1 - p) * poisson.pmf(K, mu*t) @cache def P_k0(k, z, t): """Computes P(N_t- = 0 | N_0 = k, Z_0 = z).""" if z <= K: return sum([binom.pmf(m, k, 1-p) * erlang.cdf(t, k*K-z+1+m, scale=1/mu) for m in range(k+1)]) elif z == K + 1: return sum([binom.pmf(m, k-1, 1-p) * erlang.cdf(t, (k-1)*K+1+m, scale=1/mu) for m in range(k)]) @cache def psi(v, t, k, l): """ Computes P(t-v < Erl(k,mu) < t, Erl(k,mu) + Erl(l-k,mu) > t), where Erl(k,mu) and Erl(l-k,mu) are independent. 
""" return sum([poisson.pmf(j, mu*t) * binom.sf(j-k, j, v/t) for j in range(k, l)]) @cache def f(k, t): return poisson.sf(k-1, mu*t) * t - poisson.sf(k, mu*t) * k / mu @cache def f_bar(k, z, t): """Computes the mean idle time given (N_0, Z_0) = (k,z).""" if z <= K: return sum([binom.pmf(m, k, 1 - p) * f(k*K+1-z+m, t) for m in range(k+1)]) elif z == K + 1: return sum([binom.pmf(m, k-1, 1 - p) * f((k-1)*K+1+m, t) for m in range(k)]) @cache def f_circ(k, u, t): """Computes the mean idle time given (N_0, B_0) = (k,u).""" return sum([gamma(z, u) * f_bar(k, z, t) for z in range(1, K+2)]) @cache def h_bar(k, z): """Computes the mean waiting time given (N_0, Z_0) = (k,z).""" if k == 1: return 0 elif z <= K: return ((k - 1) * (K + 1 - p) + 1 - z) / mu elif z == K + 1: return ((k - 2) * (K + 1 - p) + 1) / mu @cache def h_circ(k, u): """Computes the mean waiting time given (N_0, B_0) = (k,u).""" return sum([gamma(z, u) * h_bar(k, z) for z in range(1, K+2)]) ### transition probabilities # 1. No client has been served before time t. @cache def P_up(k, u, t): """Computes P(N_t- = k | N_0 = k, B_0 = u).""" return B_sf(u+t) / B_sf(u) # 2. All clients have been served before time t. @cache def P_down(k, u, t): """Computes P(N_t- = 0 | N_0 = k, B_0 = u).""" return sum([gamma(z, u) * P_k0(k, z, t) for z in range(1, K+2)]) # 3. Some (but not all) clients have been served before time t. @cache def q(diff, z, v, t): """ Computes P(N_t = l, B_t < v | N_0 = k, Z_0 = z). Note: diff = k-l. """ q = 0 if z <= K: for m in range(diff+2): I_klmz = (diff + 1) * K - z + m + 1 E = p * psi(v, t, I_klmz, I_klmz+K) + (1 - p) * psi(v, t, I_klmz, I_klmz+K+1) q += binom.pmf(m, diff+1, 1-p) * E elif z == K + 1: for m in range(diff+1): I_klm = diff * K + m + 1 E = p * psi(v, t, I_klm, I_klm+K) + (1 - p) * psi(v, t, I_klm, I_klm+K+1) q += binom.pmf(m, diff, 1-p) * E return q @cache def q_bar(diff, m, j, t): """ Approximates P(N_{t*Delta} = l, B_{t*Delta} in d(j*Delta) | N_0 = k, B_0 = m * Delta). 
Note: diff = k-l. """ lower = min(max(0, (j - 0.5) * Delta), t*Delta) upper = min(max(0, (j + 0.5) * Delta), t*Delta) q_bar = sum([gamma(z, m*Delta) * (q(diff, z, upper, t*Delta) - q(diff, z, lower, t*Delta)) for z in range(1, K+2)]) return q_bar ### cost function @cache def cost_we(t, i, k, m): """Computes (approximately) the cost when t/Delta is the next interarrival time.""" cost = omega * f_circ(k, m*Delta, t*Delta) + (1 - omega) * h_circ(k, m*Delta) cost += P_down(k, m*Delta, t*Delta) * xi_we(i+1, 1, 0) + P_up(k, m*Delta, t*Delta) * xi_we(i+1, k+1, m+t) #### # print('f_circ(k, m*Delta, t*Delta)', f_circ(k, m*Delta, t*Delta)) # print('h_circ(k, m*Delta)', h_circ(k, m*Delta)) # print('P_down(k, m*Delta, t*Delta)', P_down(k, m*Delta, t*Delta)) # print('xi_we(i+1, 1, 0)', xi_we(i+1, 1, 0)) # print('P_up(k, m*Delta, t*Delta', P_up(k, m*Delta, t*Delta)) # print('xi_we(i+1, k+1, m+t)', xi_we(i+1, k+1, m+t)) for l in range(2, k+1): for j in range(t+1): cost += q_bar(k-l, m, j, t) * xi_we(i+1, l, j) return cost k, u = 3, 4 h_circ(k, u) i = 2 k = 1 m = 1 t = 9 # cost_we(t, i, k, m) # for t in range(1,21): # print(t, cost_we(t, i, k, m) - cost_we(t-1, i, k, m)) (1 - 0.5) * h_circ(2, 1) xi_we(3,2,10) #### 0.4362059564857282 i = 3 k = 2 m = 1 t = 9 (1 - omega) * h_circ(k, (m+t)*Delta) # def xi_we(i, k, m): # """Implements the Weighted Erlang Case.""" # # truncate time in service m # if m >= t_MAX: # m_new = t_MAX-1 # else: # m_new = m # if xi_matrix[i-1][k-1][m]: # retrieve stored value # pass # elif i == n: # initial condition # xi_matrix[i-1][k-1][m] = (1 - omega) * h_circ(k, m*Delta) # else: # # initial guess # if m > 0 and minima[i-1][k-1][m-1]: # t_guess = minima[i-1][k-1][m-1] # else: # t_guess = eval(old_minima[i-1][k-1])[m] # cost_guess = cost_we(t_guess, i, k, m) # t_new = t_guess # # walk to the left # while True: # t_new -= 1 # cost_new = cost_we(t_new, i, k, m) # if cost_new < cost_guess: # t_guess = t_new # cost_guess = cost_new # elif cost_new > cost_guess: # 
break # # walk to the right # while True: # t_new += 1 # cost_new = cost_we(t_new, i, k, m) # if cost_new < cost_guess: # t_guess = t_new # cost_guess = cost_new # elif cost_new > cost_guess: # break # xi_matrix[i-1][k-1][m] = cost_guess # minima[i-1][k-1][m] = t_guess # print("end",i,k,m,t_guess,cost_guess) # return xi_matrix[i-1][k-1][m] def xi_we(i, k, m): """Implements the Weighted Erlang Case.""" if m <= t_MAX and xi_matrix[i-1][k-1][m]: # retrieve stored value pass elif i == n: # initial condition if m <= t_MAX: xi_matrix[i-1][k-1][m] = (1 - omega) * h_circ(k, m*Delta) else: return (1 - omega) * h_circ(k, m*Delta) else: if m <= t_MAX: # initial guess if m > 0 and minima[i-1][k-1][m-1]: t_guess = minima[i-1][k-1][m-1] else: t_guess = eval(old_minima[i-1][k-1])[m] else: if minima[i-1][k-1][t_MAX]: t_guess = minima[i-1][k-1][t_MAX] else: t_guess = old_minima[i-1][k-1][t_MAX] cost_guess = cost_we(t_guess, i, k, m) t_new = t_guess # walk to the left while True: t_new -= 1 cost_new = cost_we(t_new, i, k, m) if cost_new < cost_guess: t_guess = t_new cost_guess = cost_new elif cost_new > cost_guess: break # walk to the right while True: t_new += 1 cost_new = cost_we(t_new, i, k, m) if cost_new < cost_guess: t_guess = t_new cost_guess = cost_new elif cost_new > cost_guess: break if m <= t_MAX: xi_matrix[i-1][k-1][m] = cost_guess minima[i-1][k-1][m] = t_guess else: return cost_guess if m <= 2: print("end",i,k,m,t_guess,cost_guess) return xi_matrix[i-1][k-1][m] SCV = 0.6 K, p, mu = SCV_to_params(SCV) Delta = 0.01 # epsilon = 0.005 t_MAX = int(5/Delta) # int(5/Delta) n = 5 omega = 0.5 import csv C_matrix = [[None for k in range(n)] for i in range(n)] minima = [[None for k in range(n-1)] for i in range(n-1)] # compute values for i in range(1,n+1): for k in range(1,i+1): Cstar_homexp(i,k,mu=1,omega=omega,n=n,C_matrix=C_matrix) # # cost print("\nCost:", C_matrix[0][0]) new_minima = [[[None for m in range(t_MAX+1)] for k in range(n-1)] for i in range(n-1)] for i in 
range(n-1): for k in range(i+1): new_minima[i][k] = [int(round(minima[i][k],2) / Delta)] * t_MAX * 2 with open(f'SCV_1.00_omega_{omega}_minima.csv','w', newline='') as myfile: out = csv.writer(myfile) out.writerows(new_minima) with open(f'SCV_1.00_omega_{omega:.1f}_minima.csv','r') as csvfile: reader = csv.reader(csvfile) old_minima = list(reader) xi_matrix = [[[None for m in range(t_MAX+1)] for k in range(i+1)] for i in range(n)] minima = [[[None for m in range(t_MAX+1)] for k in range(i+1)] for i in range(n)] for i in np.arange(n,0,-1): for k in range(1,i+1): print("i =",i,"k =",k) for m in range(t_MAX+1): xi_we(i,k,m) i, k, m = 5, 4, 2 print(xi_we(i,k,m)) print(minima[i-1][k-1][m]) ``` We proceed by analyzing the second case, i.e., the hyperexponential case. ### Hyperexponential Distribution In this case the service times $B_i$ are independent and distributed as $B$, where $B$ equals with probability $p\in [0,1]$ an exponentially distributed random variable with mean $\mu_1^{-1}$, and with probability $1-p$ an exponentially distributed random variable with mean $\mu_{2}^{-1}$. The following recursion can be derived from the thesis. <div class="alert alert-warning"> <b>Theorem 3.19 (discrete version).</b> For $i=1,\dots,n-1$, $k=1,\dots,i$, and $m\in\mathbb{N}_0$, \[ \xi_i(k,m) = \inf_{t\in \mathbb{N}_0}\Bigg(\omega \bar{f}^{\circ}_{k,m\Delta}(t\Delta) + (1 - \omega)\bar{h}^{\circ}_{k,m\Delta} + \sum_{\ell=2}^{k}\sum_{j=0}^{t}\bar{q}_{k\ell,mj}(t)\xi_{i+1}(\ell,j) + P^{\downarrow}_{k,m\Delta}(t\Delta)\xi_{i+1}(1,0) + P^{\uparrow}_{k,m\Delta}(t\Delta)\xi_{i+1}(k+1,m+t) \Bigg), \] whereas, for $k=1,\dots,n$ and $m \in \mathbb{N}_0$, \[ \xi_n(k,m) = (1 - \omega)\bar{h}^{\circ}_{k,m\Delta}. \] </div> Below is our implementation. 
``` ### helper functions # @cache def gamma(z, u): if z == 1: return p * np.exp(-mu1 * u) / B_sf(u) elif z == 2: return (1 - p) * np.exp(-mu2 * u) / B_sf(u) # @cache def B_sf(t): return p * np.exp(-mu1 * t) + (1 - p) * np.exp(-mu2 * t) ### gamma_circ # @cache def zeta(alpha, t, k): if not k: return (np.exp(alpha * t) - 1) / alpha else: return ((t ** k) * np.exp(alpha * t) - k * zeta(alpha, t, k-1)) / alpha # @cache def rho(t,m,k): if not k: return np.exp(-mu2 * t) * (mu1 ** m) / ((mu1 - mu2) ** (m + 1)) * erlang.cdf(t, m+1, scale=1/(mu1 - mu2)) elif not m: return np.exp(-mu1 * t) * (mu2 ** k) / math.factorial(k) * zeta(mu1-mu2, t, k) else: return (mu1 * rho(t, m-1, k) - mu2 * rho(t, m, k-1)) / (mu1 - mu2) # @cache def Psi(t,m,k): if not m: return erlang.cdf(t, k, scale=1/mu2) else: return erlang.cdf(t, m, scale=1/mu1) - mu1 * sum([rho(t, m-1, i) for i in range(k)]) # @cache def chi(v, t, z, k, l): """ Computes P(t-v < Erl(k,mu1) + Erl(l,mu2) < t, Erl(k,mu1) + Erl(l,mu2) + E(1,mu_z) > t), where Erl(k,mu1) and Erl(l,mu2) are independent. 
""" if z == 1: if not k and l: return np.exp(-mu1 * t) * ((mu2) ** l) \ * (zeta(mu1-mu2, t, l-1) - zeta(mu1-mu2, t-v, l-1)) / math.factorial(l-1) elif k and not l: return poisson.pmf(k, mu1*t) * binom.sf(0, k, v/t) else: return mu2 * (rho(t, k, l-1) - np.exp(-mu1 * v) * rho(t-v, k, l-1)) elif z == 2: if not k and l: return poisson.pmf(l, mu2*t) * binom.sf(0, l, v/t) elif k and not l: return np.exp(-mu2 * t) * (erlang.cdf(t, k, scale=1/(mu1-mu2)) - erlang.cdf(t-v, k, scale=1/(mu1-mu2))) \ * (mu1 / (mu1 - mu2)) ** k else: return mu1 * (rho(t, k-1, l) - np.exp(-mu2 * v) * rho(t-v, k-1, l)) # @cache def sigma(t, m, k): if not k: return t * erlang.cdf(t, m, scale=1/mu1) - (m / mu1) * erlang.cdf(t, m+1, scale=1/mu1) elif not m: return t * erlang.cdf(t, k, scale=1/mu2) - (k / mu2) * erlang.cdf(t, k+1, scale=1/mu2) else: return (t - k / mu2) * erlang.cdf(t, m, scale=1/mu1) - (m / mu1) * erlang.cdf(t, m+1, scale=1/mu1) \ + (mu1 / mu2) * sum([(k - i) * rho(t, m-1, i) for i in range(k)]) # @cache def f_bar(k, z, t): """Computes the mean idle time given (N_0, Z_0) = (k,z).""" if z == 1: return sum([binom.pmf(m, k-1, p) * sigma(t, m+1, k-1-m) for m in range(k)]) elif z == 2: return sum([binom.pmf(m, k-1, p) * sigma(t, m, k-m) for m in range(k)]) # @cache def h_bar(k, z): """Computes the mean waiting time given (N_0, Z_0) = (k,z).""" if k == 1: return 0 else: if z == 1: return (k-2) + (1/mu1) elif z == 2: return (k-2) + (1/mu2) # @cache def f_circ(k, u, t): """Computes the mean idle time given (N_0, B_0) = (k,u).""" return gamma(1, u) * f_bar(k, 1, t) + gamma(2, u) * f_bar(k, 2, t) # @cache def h_circ(k, u): """Computes the mean waiting time given (N_0, B_0) = (k,u).""" return gamma(1, u) * h_bar(k, 1) + gamma(2, u) * h_bar(k, 2) ### transition probabilities # 1. No client has been served before time t. # @cache def P_up(k, u, t): """Computes P(N_t- = k | N_0 = k, B_0 = u).""" return B_sf(u + t) / B_sf(u) # 2. All clients have been served before time t. 
# @cache def P_down(k, u, t): """Computes P(N_t- = 0 | N_0 = k, B_0 = u).""" return sum([binom.pmf(m, k-1, p) * (Psi(t, m+1, k-1-m) * gamma(1, u) \ + Psi(t, m, k-m) * gamma(2, u)) for m in range(k)]) # 3. Some (but not all) clients have been served before time t. # @cache def q(diff, z, v, t): """ Computes P(N_t = l, B_t < v | N_0 = k, Z_0 = z). Note: diff = k-l. """ if z == 1: return sum([binom.pmf(m, diff, p) * (p * chi(v, t, 1, m+1, diff-m) \ + (1 - p) * chi(v, t, 2, m+1, diff-m)) for m in range(diff+1)]) elif z == 2: return sum([binom.pmf(m, diff, p) * (p * chi(v, t, 1, m, diff-m+1) \ + (1 - p) * chi(v, t, 2, m, diff-m+1)) for m in range(diff+1)]) # @cache def q_bar(diff, m, j, t): """ Approximates P(N_{t*Delta} = l, B_{t*Delta} in d(j*Delta) | N_0 = k, B_0 = m * Delta). Note: diff = k-l. """ lower = min(max(0, (j - 0.5) * Delta), t*Delta) upper = min(max(0, (j + 0.5) * Delta), t*Delta) q1_low = q(diff, 1, lower, t*Delta) q1_upp = q(diff, 1, upper, t*Delta) q2_low = q(diff, 2, lower, t*Delta) q2_upp = q(diff, 2, upper, t*Delta) return gamma(1, m*Delta) * (q1_upp - q1_low) + gamma(2, m*Delta) * (q2_upp - q2_low) ### cost function # @cache def cost_he(t, i, k, m): """ Computes (approximately) the cost when t/Delta is the next interarrival time. 
""" cost = omega * f_circ(k, m*Delta, t*Delta) + (1 - omega) * h_circ(k, m*Delta) cost += P_down(k, m*Delta, t*Delta) * xi_he(i+1, 1, 0) + P_up(k, m*Delta, t*Delta) * xi_he(i+1, k+1, m+t) for l in range(2, k+1): for j in range(t+1): cost_diff = q_bar(k-l, m, j, t) * xi_he(i+1, l, j) # if cost_diff > 1e-10: cost += cost_diff return cost # k = 2 # np.exp(-mu1 * t) * (mu2 ** k) / math.factorial(k) * zeta(mu1-mu2, t, k) # (np.exp(-mu1 * t) * (mu2 ** k) / (mu2 - mu1) ** (k+1)) * \ # (1 - sum([np.exp((mu1 - mu2) * t) * ((((mu2 - mu1) * t) ** i) / math.factorial(i)) for i in range(k+1)])) l = 2 # chi_1[0,l] np.exp(-mu1 * t) * ((mu2) ** l) \ * (zeta(mu1-mu2, t, l-1) - zeta(mu1-mu2, t-v, l-1)) / math.factorial(l-1) (np.exp(-mu1 * t) * ((mu2 / (mu2 - mu1)) ** l)) * \ (sum([np.exp(-(mu2-mu1)*(t-v)) * (((mu2 - mu1) * (t - v)) ** i) / math.factorial(i) for i in range(l)]) - \ sum([np.exp(-(mu2-mu1)*t) * (((mu2 - mu1) * t) ** i) / math.factorial(i) for i in range(l)])) f_circ(k, m*Delta, t*Delta) h_circ(k, m*Delta) P_down(k, m*Delta, t*Delta) xi_he(i+1, 1, 0) P_up(k, m*Delta, t*Delta) xi_he(i+1, k+1, m+t) t = 2 i = 4 k = 2 ### k > 1 m = 0 cost_he(t,i,k,m) v = 1.3 t = 2.8 z = 2 k = 4 l = 0 q(k-l,z,v,t) ### q hangt alleen af van k-l q_bar(k-l, v, v, t) np.exp(-mu2 * t) * ((mu1 ** k) / math.factorial(k-1)) * (zeta(mu2 - mu1, t, k-1) - zeta(mu2 - mu1, t-v, k-1)) SCV = 2 p, mu1, mu2 = SCV_to_params(SCV) n = 5 v = 0.05 t = 0.10 print(chi(v,t,1,1,0)) ## 0.00776 (klopt) print(chi(v,t,1,0,1)) ## 0.02081 (FOUT) bij mij 0???? 
print(chi(v,t,2,0,1)) ## 0.0021 (klopt) print(chi(v,t,2,1,0)) ## 0.0077 (klopt) mu2-mu1 l = 1 np.exp(-mu1 * t) * ((mu2 / (mu1 - mu2)) ** l) * \ ( sum([np.exp(-(mu1-mu2)*(t-v)) * (((mu2 - mu1) * (t - v)) ** i) / math.factorial(i) for i in range(l)])) - \ sum([np.exp(-(mu1-mu2)*t) * (((mu2 - mu1) * t) ** i) / math.factorial(i) for i in range(l)] ) l = 1 np.exp(-mu1 * t) * ((mu2 / (mu2 - mu1)) ** l) * \ (1 - sum([np.exp(-(mu2-mu1)*t) * (((mu2 - mu1) * t) ** i) / math.factorial(i) for i in range(l)])) \ - np.exp(-mu1*(t-v)) * ((mu2 / (mu2 - mu1)) ** l) * \ (1 - sum([np.exp(-(mu2-mu1)*(t-v)) * (((mu2 - mu1) * (t - v)) ** i) / math.factorial(i) for i in range(l)])) def xi_he(i, k, m): """Implements the Hyperexponential Case.""" # truncate time in service m if m >= t_MAX: m = t_MAX-1 if xi_matrix[i-1][k-1][m]: # retrieve stored value pass elif i == n: # initial condition xi_matrix[i-1][k-1][m] = (1 - omega) * h_circ(k, m*Delta) else: # if m >= 2 and xi_matrix[i-1][k-1][m-1] and xi_matrix[i-1][k-1][m-2]: # # fill all coming values with current cost & minimum # if abs(xi_matrix[i-1][k-1][m-1] - xi_matrix[i-1][k-1][m-2]) < epsilon: # xi_matrix[i-1][k-1][m:] = [xi_matrix[i-1][k-1][m-1]] * (t_MAX - (m - 1)) # minima[i-1][k-1][m:] = [minima[i-1][k-1][m-1]] * (t_MAX - (m - 1)) # print(i,k,m,"break") # return xi_matrix[i-1][k-1][m] # initial guess if m > 0 and minima[i-1][k-1][m-1]: t_guess = minima[i-1][k-1][m-1] else: t_guess = eval(old_minima[i-1][k-1])[m] cost_guess = cost_he(t_guess, i, k, m) t_new = t_guess # walk to the left while True: t_new -= 1 cost_new = cost_he(t_new, i, k, m) if cost_new < cost_guess: t_guess = t_new cost_guess = cost_new elif cost_new > cost_guess: break # walk to the right while True: t_new += 1 cost_new = cost_he(t_new, i, k, m) if cost_new < cost_guess: t_guess = t_new cost_guess = cost_new elif cost_new > cost_guess: break xi_matrix[i-1][k-1][m] = cost_guess minima[i-1][k-1][m] = t_guess if m <= 20: print("end",i,k,m,t_guess,cost_guess) return 
xi_matrix[i-1][k-1][m] ``` With this program, we can obtain dynamic schedules in the hyperexponential case: ``` SCV = 2.5 p, mu1, mu2 = SCV_to_params(SCV) Delta = 0.01 epsilon = 0.005 t_MAX = int(5/Delta) n = 5 omega = 0.5 import csv C_matrix = [[None for k in range(n)] for i in range(n)] minima = [[None for k in range(n-1)] for i in range(n-1)] # compute values for i in range(1,n+1): for k in range(1,i+1): Cstar_homexp(i,k,mu=1,omega=omega,n=n,C_matrix=C_matrix) # # cost print("\nCost:", C_matrix[0][0]) new_minima = [[[None for m in range(t_MAX)] for k in range(n-1)] for i in range(n-1)] for i in range(n-1): for k in range(i+1): new_minima[i][k] = [int(round(minima[i][k],2) / Delta)] * t_MAX * 2 with open(f'SCV_1.00_omega_{omega}_minima.csv','w', newline='') as myfile: out = csv.writer(myfile) out.writerows(new_minima) with open(f'SCV_1.00_omega_{omega:.1f}_minima.csv','r') as csvfile: reader = csv.reader(csvfile) old_minima = list(reader) xi_matrix = [[[None for m in range(t_MAX)] for k in range(i+1)] for i in range(n)] minima = [[[None for m in range(t_MAX)] for k in range(i+1)] for i in range(n)] # i = 3 # k = 1 # # m = 0 # # for k in np.arange(1,5): # for m in np.arange(3): # print(i,k,m,xi_he(i,k,m)) xi_matrix = [[[None for m in range(t_MAX)] for k in range(i+1)] for i in range(n)] minima = [[[None for m in range(t_MAX)] for k in range(i+1)] for i in range(n)] for i in np.arange(n,0,-1): for k in range(1,i+1): print("i =",i,"k =",k) for m in range(101): xi_he(i,k,m) xi_he(1,1,0) print('Function Summary') functions = ['gamma', 'B_sf', 'zeta', 'rho', 'Psi', 'chi', 'sigma', 'f_bar', 'h_bar', 'f_circ', 'h_circ', 'P_up', 'P_down', 'q', 'q_bar', 'cost_he'] for function in functions: info = eval(function).cache_info() print(f'{str(function):8s}: {info.hits:8d} hits\ {info.misses:8d} misses\ {info.hits/(info.hits + info.misses):.2%} gain') ```
github_jupyter
# Circuit Simulation This tutorial demonstrates how to compute (simulate) the outcome probabilities of circuits in pyGSTi. There are currently two basic ways to to this - but constructing and simulating a `Circuit` object, or by constructing and propagating a state. ## Method 1: `Circuit` simulation This is the primary way circuit simulation is done in pyGSTi. `Model` objects are statistical models that predict the outcome probabilities of events, and (at least for all current model types) "events" are circuits, described by `Circuit` objects. Thus, the three steps to simulating a circuit using this approach are: 1. create a `Model` 2. create a `Circuit` 3. call `model.probs(circuit)` Building models and circuits (steps 1 and 2) are largely covered in other tutorials (see the [essential objects tutorial](../01-EssentialObjects.ipynb), [circuits tutorial](../objects/Circuit.ipynb), and [explicit-op model](../objects/ExplicitModel.ipynb) and [implicit-op model](../objects/ImplicitModel.ipynb) tutorials). This section focuses on step 3 and `Model` options which impact the way in which a model computes probabilities. This approach to circuit simulation is most convenient when you have a large number of circuits which are known (and fixed) beforehand. Let's begin with a simple example, essentially the same as the one in the [using-essential-objects tutorial](../02-Using-Essential-Objects.ipynb): ``` import pygsti mdl = pygsti.construction.build_explicit_model((0,1), [(), ('Gxpi2',0), ('Gypi2',0), ('Gxpi2',1), ('Gypi2',1), ('Gcnot',0,1)], ["I(0,1)","X(pi/2,0)", "Y(pi/2,0)", "X(pi/2,1)", "Y(pi/2,1)", "CNOT(0,1)"]) c = pygsti.objects.Circuit([('Gxpi2',0),('Gcnot',0,1),('Gypi2',1)] , line_labels=[0,1]) print(c) mdl.probs(c) # Compute the outcome probabilities of circuit `c` ``` This example builds an `ExplicitOpModel` (best for 1-2 qubits) on 2 qubits with $X(\pi/2)$ and $Y(\pi/2)$ rotation gates on each qubit and a CNOT gate between them. 
This model is able to simulate any circuit *layer* (a.k.a. "time-step" or "clock-cycle") that contains any *one* of these gates (this is what it means to be an explicit-op model: the operation for every simulate-able circuit layer must be explicitly supplied to the `Model`). For example, this model cannot simulate a circuit layer where two `Gxpi2` gates occur in parallel: ``` c2 = pygsti.objects.Circuit([ [('Gxpi2',0), ('Gxpi2',1)],('Gcnot',0,1) ] , line_labels=[0,1]) print(c2) try: mdl.probs(c2) except KeyError as e: print("KEY ERROR (can't simulate this layer): " + str(e)) ``` As is detailed in the [implicit-op model tutorial](../objects/ImplicitModel.ipynb), an "implicit-operation" model *is* able to implicitly create layer operations from constituent gates, and thus perform the simulation of `c2`: ``` implicit_mdl = pygsti.construction.build_localnoise_model(2, ('Gxpi2', 'Gypi2', 'Gcnot')) print(c2) implicit_mdl.probs(c2) ``` ## Method 2: state propagation In this method of circuit simulation, a state object (a `SPAMVec` in pyGSTi) is propagated circuit-layer by circuit-layer. This method is convenient when a there are few (or just one!) circuit that involves substantial classical logic or needs to be probed at various points in time. It is slower to simulate circuits in this way, as it requires calls more calls between pyGSTi's Python and C routines than method 1 does. The two cells below show how to perform the same two circuits above using the state-propagation method. ``` #Simulating circuit `c` above using `mdl`: [('Gxpi2',0),('Gcnot',0,1),('Gypi2',1)] rho = mdl['rho0'] rho = mdl[('Gxpi2',0)].acton(rho) rho = mdl[('Gcnot',0,1)].acton(rho) rho = mdl[('Gypi2',1)].acton(rho) probs = mdl['Mdefault'].acton(rho) print(probs) ``` Note that, especially for implicit models, the interface is a bit clunky. <font style="color:red">Simulation by state propagation is a work in progress in pyGSTi, and users should expect that this interface may change (improve!) 
in the future</font>. ``` #Simulating circuit `c2` above using `implicit_mdl`: [ [('Gxpi2',0), ('Gxpi2',1)], ('Gcnot',0,1) ] from pygsti.objects import Label as L liz = implicit_mdl._layer_lizard() rho = liz.get_prep( L('rho0') ) rho = liz.get_operation( L((('Gxpi2',0),('Gxpi2',1))) ).acton(rho) rho = liz.get_operation( L('Gcnot',(0,1)) ).acton(rho) probs = liz.povm_blks['layers']['Mdefault'].acton(rho) print(probs) ``` ## Method 3: hybrid (an addition planned in future releases of pyGSTi) ## Forward-simulation types PyGSTi refers to the process of computing circuit-outcome probabilities as *forward simulation*, and there are several methods of forward simulation currently available. The default method for 1- and 2-qubit models multiplies together dense process matrices, and is named `"matrix"` (because operations are *matrices*). The default method for 3+ qubit models performs sparse matrix-vector products, and is named `"map"` (because operations are abstract *maps*). A `Model` is constructed for a single type of forward simulation, and it stores this within its `.simtype` member. For more information on using different types of forward simulation see the [forward simulation types tutorial](algorithms/advanced/ForwardSimulationTypes.ipynb). Here are some examples showing which method is being used and how to switch between them. Usually you don't need to worry about the forward-simulation type, but in the future pyGSTi may have more options for specialized purposes. ``` c3 = pygsti.objects.Circuit([('Gxpi2',0),('Gcnot',0,1)] , line_labels=[0,1]) explicit_mdl = pygsti.construction.build_explicit_model((0,1), [(), ('Gxpi2',0), ('Gypi2',0), ('Gxpi2',1), ('Gypi2',1), ('Gcnot',0,1)], ["I(0,1)","X(pi/2,0)", "Y(pi/2,0)", "X(pi/2,1)", "Y(pi/2,1)", "CNOT(0,1)"]) print("2Q explicit_mdl will simulate probabilities using the '%s' forward-simulation method." 
% explicit_mdl.simtype) explicit_mdl.probs(c3) implicit_mdl = pygsti.construction.build_localnoise_model(3, ('Gxpi2', 'Gypi2', 'Gcnot')) print("3Q implicit_mdl will simulate probabilities using the '%s' forward-simulation method." % implicit_mdl.simtype) implicit_mdl.probs(c) implicit_mdl.set_simtype('matrix') print("3Q implicit_mdl will simulate probabilities using the '%s' forward-simulation method." % implicit_mdl.simtype) implicit_mdl.probs(c) ```
github_jupyter
``` # fundamentals import os, glob import numpy as np import pandas as pd from calendar import monthrange, month_name import scipy.stats as stats import funcs as funcs import datetime import imp # plotting libraries and setup from matplotlib.colors import BoundaryNorm import matplotlib.pyplot as plt %matplotlib inline plt.rc('font', family='serif') plt.rc('font', size=12) plt.rc('facecolor', ) # met mast functions and utilities import met_funcs as MET import vis as vis import utils as utils # paths (must mount volume smb://nrel.gov/shared/wind/WindWeb/MetData/135mData/) towerID = 'M5' metPathLoHz = '/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/{}/txt/'.format(towerID) figPath = '../../figs/{}'.format(towerID) # time range years = [ int(a) for a in np.arange(2012,2018,1) ] # months = [ int(a) for a in np.arange(1,12.1,1) ] # or just get all? inputfiles = [] # list of files to be read into metdata object filecount = 0 for year in years: for month in months: fName = glob.glob(os.path.join(metPathLoHz,'{0}_{1}.txt'.format(year,month_name[month]))) if len(fName)>0: fName = fName[0] inputfiles.append(fName) print('{} files to be read into MetData'.format(len(inputfiles))) ## load data from list of input data files metdat = MET.load_met_data(inputfiles, verbose=False) ## remove columns that are all nans MET.drop_nan_cols(metdat) ## use qc columns to mask data (qc != 1 --> questionable data) metdat = MET.qc_mask(metdat) ## flag data by stability class stabconds, stabcat = MET.flag_stability(metdat) ## group columns based on category, assign units, labels, savenames varcats, varunits, varlabels, varsave = MET.categorize_fields(metdat, keeplist=True) ## drop columns not in any of the categories, filter TI, temperature, stability parameters MET.groom_data(metdat, varcats) filtcols = [col for col in metdat.columns if 'air' not in col.lower() and 'humidity' not in col.lower()] ## Finally, reject outliers more than 5 standard deviations from the mean for col in 
metdat.columns: try: metdat[col] = MET.reject_outliers(metdat[col], m=6) except: continue catinfo = {} catinfo['columns'] = varcats catinfo['units'] = varunits catinfo['labels'] = varlabels catinfo['save'] = varsave # M5 excluded angles, # Obstruction, start ang. end ang. # GE 46 146 # Alstom 123 203 # CART-3 172 213 # CART-2 177 212 # Siemens 165 210 # Gamesa 189 228 exclude_angles = [(46,146),(123,203),(172,213),(177,212),(165,210),(189,228)] tempcol,_,_ = utils.get_vertical_locations(catinfo['columns']['air temperature']) temperaturedata = metdat[tempcol] presscol,_,_ = utils.get_vertical_locations(catinfo['columns']['air pressure']) pressuredata = metdat[presscol] tempcol[::2] fig, ax = plt.subplots(2,1, figsize = (8,5), sharex=True) colors = utils.get_colors(len(presscol), basecolor='blue') pressuredata.plot.line(ax=ax.flatten()[0], color=colors, legend=False, alpha=0.75) leg = ax.flatten()[0].legend(presscol, frameon=False, loc=6, bbox_to_anchor=(1,0.5)) ax.flatten()[0].set_ylabel(catinfo['labels']['air pressure']) colors = utils.get_colors(len(tempcol[::2]), basecolor='red') temperaturedata[tempcol[::2]].plot.line(ax=ax.flatten()[1], color=colors, legend=False, alpha=0.75) leg = ax.flatten()[1].legend(tempcol[::2], frameon=False, loc=6, bbox_to_anchor=(1,0.5)) ax.flatten()[1].set_ylabel(catinfo['labels']['air temperature']) fig.tight_layout() fig.savefig(os.path.join(figPath,'M5_pressure_v_temperature_timeseries.png'), dpi=200, bbox_inches='tight') ``` # Cut by TI ``` turbclasses = np.linspace(0,50,6) turbcol, probe_heights, _ = utils.get_vertical_locations(catinfo['columns']['ti'], 87) metdat['turbclass'] = pd.cut(metdat[turbcol], turbclasses, include_lowest=False).astype(str) metdat['turbclass'].value_counts() temp = metdat.groupby('turbclass') turbclasses = list(temp.groups.keys())[:-1] lowTI = temp.get_group(turbclasses[0]) plotcats = ['air density', 'air pressure', 'air temperature', 'direction', 'relative humidity', 'speed', 'wind shear', 'wind veer'] 
lowtifigpath = '../../figs/lowTI' try: os.makedirs(lowtifigpath) except: pass ``` # Full data histograms ``` catinfo['labels']['direction'] nrelcolors = utils.get_nrelcolors() for cat in ['direction']:#plotcats: height = 87 if 'shear' in cat.lower(): height = 110 plotvar, probe_height, _ = utils.get_vertical_locations(catinfo['columns'][cat], height) fulldat = metdat[plotvar].dropna().sort_values() fulldat = MET.reject_outliers(fulldat,m=4) fig, ax = plt.subplots(figsize=(5,3)) binwidth = np.round((fulldat.max()-fulldat.min())/35.0,decimals=3) bins = np.arange(fulldat.min(), fulldat.max(), binwidth) fulldat.plot.hist(bins = bins, color=nrelcolors['blue'][0], edgecolor='k', alpha=1, ax=ax, weights=np.ones(len(fulldat))/len(fulldat)*100, legend=False) ax.set_xlabel(catinfo['labels'][cat]) ax.set_ylabel('Frequency [%]') fig.savefig(os.path.join(figPath,'M5_{}_hist_comp_{}m.png'.format(catinfo['save'][cat],probe_height)),dpi=200,bbox_inches='tight') plt.clf() ``` # Low TI histograms comparisons ``` nrelcolors = utils.get_nrelcolors() for cat in ['direction']:#plotcats: height = 87 if 'shear' in cat.lower(): height = 110 plotvar, _, _ = utils.get_vertical_locations(catinfo['columns'][cat], height) fulldat = metdat[plotvar].dropna().sort_values() fulldat = MET.reject_outliers(fulldat,m=4) lowtidat = lowTI[plotvar].dropna().sort_values() lowtidat = MET.reject_outliers(lowtidat,m=4) result = pd.concat([fulldat, lowtidat], axis=1) fig, ax = plt.subplots(figsize=(5,3)) binwidth = np.round((fulldat.max()-fulldat.min())/35.0,decimals=3) bins = np.arange(fulldat.min(), fulldat.max(), binwidth) fulldat.plot.hist(bins = bins, color=nrelcolors['blue'][0], edgecolor='k', alpha=0.5, ax=ax, weights=np.ones(len(fulldat))/len(fulldat)*100, legend=False) lowtidat.plot.hist(bins = bins, color=nrelcolors['red'][0], edgecolor='k', alpha=0.5, ax=ax, weights=np.ones(len(lowtidat))/len(lowtidat)*100, legend=False) ax.set_xlabel(catinfo['labels'][cat]) ax.set_ylabel('Frequency [%]') leg = 
ax.legend(['Full Data', 'Low TI'], frameon=False) fig.savefig(os.path.join(lowtifigpath,'LOWTI_{}_hist_comp.png'.format(catinfo['save'][cat])),dpi=200,bbox_inches='tight') plt.clf() turbcol,_,_ = utils.get_vertical_locations(catinfo['columns']['ti'], 87) nrelcolors= utils.get_nrelcolors() colors = utils.get_colors(5, basecolor='span') # test = metdat.groupby([metdat.index.weekofyear,'turbclass']) test = metdat.groupby([metdat.index.dayofyear,'turbclass']) test2 = test[turbcol].count().unstack().drop('nan',axis=1).transpose() test2 = test2/test2.sum() test2 = test2.transpose() test2.mean()*100 test2.std()*100 test3 = test2[test2.columns[-1::-1]] colors = utils.get_colors(5, basecolor='span',reverse=True) fig, ax = plt.subplots(figsize=(5,3)) for ii,turb in enumerate(turbclasses[-1::-1]): data = test3[turb].dropna() plt.hist(data, bins=np.arange(data.min(),data.max(),0.01), color=colors[ii], edgecolor='k', alpha=0.9, weights= np.ones(len(data))/len(data), density=False) ax.set_xlabel('Daily Contribution [%]') ax.set_ylabel('Frequency [%]') leg = ax.legend(turbclasses[-1::-1], loc=6, bbox_to_anchor = (1,0.5), frameon=False) leg.set_title(catinfo['labels']['ti']) # fig.savefig(os.path.join(lowtifigpath,'TI_frequency_hist.png'),dpi=200,bbox_inches='tight') test3 = test2[test2.columns[-1::-1]] colors = utils.get_colors(5, basecolor='span',reverse=True) fig, ax = plt.subplots(figsize=(5,3)) for ii,turb in enumerate(turbclasses[-1::-1]): data = test3[turb] ax.hist(data, bins=np.arange(data.min(),data.max(),0.02), color=colors[ii], edgecolor='k', alpha=0.85, weights= 100*np.ones(len(data))/len(data), density=False) # data.plot.kde(color=colors[ii], ax=ax) ax.set_xlim(0,0.65) ax.set_xlabel('Daily Contribution [%]') ax.set_ylabel('Frequency [%]') leg = ax.legend(turbclasses[-1::-1], loc=6, bbox_to_anchor = (1,0.5), frameon=False) leg.set_title(catinfo['labels']['ti']) fig.savefig(os.path.join(lowtifigpath,'LOWTI_frequency_hist.png'),dpi=200,bbox_inches='tight') test3 = 
test2[test2.columns[-1::-1]] times = pd.to_datetime(test3.index, format='%j') pd.DatetimeIndex(times, format='%m-%d') import matplotlib.dates as mdates test3 = test2[test2.columns[-1::-1]] times = pd.to_datetime(test3.index, format='%j') test3 = test3.set_index(times.format('%m')) fig, ax = plt.subplots(figsize=(5,3)) ax = test3.plot(x=test3.index, color=colors, ax=ax) leg = ax.legend(turbclasses[-1::-1], loc=6, bbox_to_anchor=(1,0.5), frameon=False) leg.set_title(catinfo['labels']['ti']) ax.set_ylabel('Daily Contribution [%]') ax.set_xlabel('Day of Year') ax.format_xdata = mdates.DateFormatter('%m') # fig.savefig(os.path.join(lowtifigpath,'LOWTI_plot_by_day.png'),dpi=200,bbox_inches='tight') ``` ## Low TI figs ``` categories = list(catinfo['columns'].keys()) for cat in ['speed']:#categories: if 'stability flag' in cat.lower(): continue # # savepath for new figs # savecat = catinfo['save'][cat] # catfigpath = os.makedirs(os.path.join(figPath,savecat), mode=0o777, exist_ok=True) # catfigpath = os.path.join(figPath,savecat) # Profiles ## cumulative profile fig, ax = vis.cumulative_profile(lowTI, catinfo, cat) # fig.savefig(os.path.join(catfigpath,'{}_{}_profile.png'.format(towerID, savecat)), dpi=200, bbox_inches='tight') ## monthly profile fig, ax = vis.monthly_profile(lowTI, catinfo, cat) # fig.savefig(os.path.join(catfigpath,'{}_{}_profile_monthly.png'.format(towerID, savecat)), dpi=200, bbox_inches='tight') ## stability profile fig,ax = vis.stability_profile(lowTI, catinfo, cat) # fig.savefig(os.path.join(catfigpath,'{}_{}_profile_stability.png'.format(towerID, savecat)), dpi=200, bbox_inches='tight') ## monthly stability profile fig,ax = vis.monthly_stability_profiles(lowTI, catinfo, cat) # fig.savefig(os.path.join(catfigpath,'{}_{}_profile_monthly_stability.png'.format(towerID, savecat)), dpi=200, bbox_inches='tight') # Diurnal cycle ## cumulative hourly plot fig,ax = vis.hourlyplot(lowTI, catinfo, cat) # 
fig.savefig(os.path.join(catfigpath,'{}_{}_hourly.png'.format(towerID, savecat)), dpi=200, bbox_inches='tight') ## monthly hourly plot fig,ax = vis.monthlyhourlyplot(lowTI, catinfo, cat) # fig.savefig(os.path.join(catfigpath,'{}_{}_hourly_monthly.png'.format(towerID, savecat)), dpi=200, bbox_inches='tight') plt.close('all') temp = lowTI.copy() temp = temp.groupby(temp.index.month) fig,ax = vis.monthlyhourlyplot(lowTI,catinfo,'direction') fig.savefig(os.path.join(lowtifigpath,'TI_hourly_monthly.png'),dpi=200,bbox_inches='tight') fig, ax, leg = vis.monthly_rose_fig(lowTI,catinfo,'speed',vertloc=90, bins=[0,3,5,7,12], ylim=12) fig.savefig(os.path.join(lowtifigpath,'TI_monthly_wind_rose.png'),dpi=200,bbox_inches='tight') dircol, probe_heights, _ = utils.get_vertical_locations(catinfo['columns']['direction'], 87) spdcol, probe_heights, _ = utils.get_vertical_locations(catinfo['columns']['speed'], 87) colors = utils.get_colors(5,basecolor='span') fig,ax = plt.subplots(figsize=(8,3)) for ii, tclass in enumerate(turbclasses): test.get_group((1,tclass)).plot.scatter(dircol, spdcol, color=colors[ii], alpha = 0.35, edgecolor='k', ax=ax) ax.legend(turbclasses) dircol, probe_heights, _ = utils.get_vertical_locations(catinfo['columns']['direction'], 87) fig, ax = plt.subplots(figsize=(8,3)) cat = 'gradient richardson' stabcol, probe_heights, _ = utils.get_vertical_locations(catinfo['columns'][cat], 87) test.get_group((1,turbclasses[0])).plot.scatter(dircol, stabcol, color=colors[0], alpha = 0.35, edgecolor='k', ax=ax) ax.set_title(catinfo['labels'][cat]) fig, ax = plt.subplots(figsize=(8,3)) cat = 'stability parameter z/l' stabcol, probe_heights, _ = utils.get_vertical_locations(catinfo['columns'][cat], 87) test.get_group((1,turbclasses[0])).plot.scatter(dircol, stabcol, color=colors[0], alpha = 0.35, edgecolor='k', ax=ax) ax.set_title(catinfo['labels'][cat]) fig, ax = plt.subplots(figsize=(8,3)) cat = 'monin-obukhov length' stabcol, probe_heights, _ = 
utils.get_vertical_locations(catinfo['columns'][cat], 87) test.get_group((1,turbclasses[0])).plot.scatter(dircol, stabcol, color=colors[0], alpha = 0.35, edgecolor='k', ax=ax) ax.set_title(catinfo['labels'][cat]) ``` # Weibull distribution ``` import windrose import scipy.stats as stats speedcols, _, _ = utils.get_vertical_locations(catinfo['columns']['speed']) for plotvar in speedcols[0:1]: fulldat = metdat[plotvar].dropna() fulldat = MET.reject_outliers(fulldat,m=4) lowtidat = lowTI[plotvar].dropna() lowtidat = MET.reject_outliers(lowtidat,m=4) binwidth = np.round((lowtidat.max()-lowtidat.min())/35.0,decimals=3) bins = np.arange(lowtidat.min(), lowtidat.max(), binwidth) nrecolors = utils.get_nrelcolors() binwidth = np.round((lowtidat.max()-lowtidat.min())/35.0,decimals=3) bins = np.arange(lowtidat.min(), lowtidat.max(), binwidth) fig, ax = plt.subplots(figsize = (5,3)) lowtiparams = stats.exponweib.fit(lowtidat, fc=1) ax.plot(bins, stats.exponweib.pdf(bins, *lowtiparams), color=nrecolors['red'][0]) # thing,stuff = output = ax.hist(lowtidat, bins = bins, facecolor=nrecolors['red'][0], edgecolor='k', alpha=0.3, normed=True) fullparams = stats.exponweib.fit(fulldat, fc=1) pdf = stats.exponweib.pdf(bins, *fullparams) ax.plot(bins, pdf, color=nrecolors['blue'][0]) fullparams = stats.weibull_min.fit(fulldat) pdf = stats.weibull_min.pdf(bins, *fullparams) ax.plot(bins, pdf) # thing,stuff = output = ax.hist(fulldat, bins = bins, facecolor=nrecolors['blue'][0], edgecolor='k', alpha=0.3, normed=True) leg = fig.legend() # fig.savefig(os.path.join(lowtifigpath,'TI_monthly_wind_rose.png'),dpi=200,bbox_inches='tight') plotvar, _, _ = utils.get_vertical_locations(catinfo['columns']['speed'], height) fulldat = metdat[plotvar].dropna().sort_values() fulldat = MET.reject_outliers(fulldat,m=4) binwidth = np.round((fulldat.max()-fulldat.min())/35.0,decimals=3) bins = np.arange(fulldat.min(), fulldat.max(), binwidth) fullparams = stats.rayleigh.fit(fulldat) plt.plot(bins, 
stats.rayleigh.pdf(bins, *fullparams), color=nrecolors['blue'][0]) output = plt.hist(fulldat, bins = bins, facecolor=nrecolors['blue'][0], edgecolor='k', alpha=0.3, normed=True) fullparams = stats.weibull_min.fit(lowtidat, floc=1) pdf = stats.weibull_min.pdf(bins, *fullparams) plt.plot(bins, pdf, color=nrecolors['blue'][0]) # thing,stuff = output = plt.hist(lowtidat, bins = bins, facecolor=nrecolors['blue'][0], edgecolor='k', alpha=0.3, normed=True) fullparams ```
github_jupyter
# Create a circuit to generate any two-qubit quantum state in Qiskit Build a general 2-qubit circuit that could output all Hilbert space of states by tuning its parameters. ``` from qiskit import * import numpy as np def state_maker(theta, ang0, ang1): circ = QuantumCircuit(2,2) circ.u3(theta, 0, 0, 0) circ.cx(0, 1) circ.u3(*ang1, 1) circ.u3(*ang0, 0) return circ def get_ensemble(theta0, theta1, theta2, N=1024): circuit = state_maker(theta0, [theta1,0,0], [theta2,0,0]) circuit.measure(0,0) circuit.measure(1,1) simulator = Aer.get_backend('qasm_simulator') result = execute(circuit, backend = simulator, shots = N).result() counts = result.get_counts() return counts from qiskit.tools.visualization import plot_histogram #angi = [theta, phi, lam] ang0 = [0,0,0] ang1 = [0,0,0] theta = 0 circ = state_maker(theta, ang0, ang1) %matplotlib inline #circ.draw(output='mpl') circ.measure(0,0) circ.measure(1,1) circ.draw(output='mpl') ``` Example of the count result for some parameters $\theta_0$, $\theta_1$ and $\theta_2$. ``` from ttq.optimizer import optimize pi = np.pi _EXPECTED_VALUES = { '00' : 0.5, '01' : 0.2, '10' : 0.2, '11' : 0.1 } _MAX_ERROR = 0.05 _N = 1024 _STEP = 0.1 _PARAMS = 3 theta0, theta1, theta2 = optimize(conf = { 'bound': [0, 2 * pi], 'expected_values': _EXPECTED_VALUES, 'max_error': _MAX_ERROR, 'max_iter': None, 'n_states': _N, 'step': _STEP, 'x0': [0] * _PARAMS }) counts = get_ensemble(theta0, theta1, theta2) print(counts) plot_histogram(counts) ``` Example of the generated state result for some parameters $\theta_0$, $\theta_1$ and $\theta_2$. ``` circ = state_maker(theta0, [theta1, 0, 0], [theta2, 0, 0]) simulator = Aer.get_backend('statevector_simulator') result = execute(circ, backend = simulator).result() statevector = result.get_statevector() print(statevector) ``` # For some $\theta$'s Plot the probability of measuring each state for a given set of parameters. 
```
ntheta = 100
N = 1024
theta = np.linspace(0, 2*np.pi, ntheta)
prob00, prob01, prob10, prob11 = [], [], [], []
for t in theta:
    # to check it we only change one parameter
    counts = get_ensemble(t, t, t, N)
    # counts may omit outcomes with zero hits, so default to 0
    prob00.append(counts['00']/N if '00' in counts.keys() else 0)
    prob01.append(counts['01']/N if '01' in counts.keys() else 0)
    prob10.append(counts['10']/N if '10' in counts.keys() else 0)
    prob11.append(counts['11']/N if '11' in counts.keys() else 0)

import matplotlib.pyplot as plt
plt.plot(theta, prob00, label='| 00 >')
plt.plot(theta, prob01, label='| 01 >')
plt.plot(theta, prob10, label='| 10 >')
plt.plot(theta, prob11, label='| 11 >')
plt.legend(loc = 'upper right')
plt.show()
```

# Measuring the 'entanglement'

We measure the relation between the amplitudes of states $| 00 >$ and $| 11 >$ (statevector entries 0 and 3) for different $\theta_0$'s.

```
import matplotlib.pyplot as plt

ang0 = [0,0,0]
ang1 = [0,0,0]
entang = []
e00 = []
e11 = []
thetas = np.linspace(0, 2*np.pi, 10)
# drop the endpoints of the sweep where the |11> amplitude vanishes
thetas = thetas[2:-2]
for theta in thetas:
    circ = state_maker(theta, ang0, ang1)
    simulator = Aer.get_backend('statevector_simulator')
    result = execute(circ, backend = simulator).result()
    statevector = result.get_statevector()
    print('theta = {:2.2f}pi '.format(theta/np.pi) )
    print('state = ', statevector)
    print()
    # ratio |amp(00)| / |amp(11)|; +.0001 guards against division by zero
    entang.append(abs(statevector[0])/(abs(statevector[3])+.0001))

plt.plot(thetas, entang)
```
github_jupyter
# First fitting from amalgams In this phase, we are not considering sequences, leave alone syntax trees, in prediction. Instead we are using the frequency of (shallow) occurence of names in types to predict the (shallow) occurence in definitions. Here we consider the first two models. The second has some depth and shows overfitting. ## Structure of the models Both the models have a similar structure. * there is a common representation of the input data. * a prediction is made from this of a component the output name distribution (we call this the _low rank prediction_). * the other component is the input scaled, i.e., it is assumed that elements in the statement are in the proofs. - this should be rectified, currently the scaling is uniform, depending on the amalgams. It should depend on the specific elements. * the scaling is also determined from the representation (not too good as mentioned) * the components are put together. ``` import amalgam_predict as pred import keras from matplotlib import pyplot as plt ``` We fit the first model. * The fit is reasonable. * More importantly, the validation data fits almost as well as the training data. ``` hist1 = pred.fit(1024, pred.model1) plt.rcParams['figure.figsize'] = [20, 15] plt.plot(hist1.history['kullback_leibler_divergence']) plt.plot(hist1.history['val_kullback_leibler_divergence']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() ``` As we see, the final KL-divergence is `2.7425` for the training data, and `2.8078` for the validation data. We now fit the second model. As mentioned, this fits much better, but that is clearly a case of overfitting. 
``` hist2 = pred.fit(1024, pred.model2) plt.plot(hist2.history['kullback_leibler_divergence']) plt.plot(hist2.history['val_kullback_leibler_divergence']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() ``` We see the fit keeps improving (this was before the early stop), reaching `1.2060`, but the validation error flattens, ending at `2.4163` To do: * use better model for persistence After adding the dropout layer, we get a similar validation fit without the overfitting. ``` hist3 = pred.fit(1024, pred.model3) plt.plot(hist3.history['kullback_leibler_divergence']) plt.plot(hist3.history['val_kullback_leibler_divergence']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() ``` The above shows really no overfitting. So one can try to improve the model. ### Better persistence model (TODO) * Have a variable which is a row by which to scale each term by pointwise multiplication. * Initialize from data. * Multiply and then apply sigmoid to get a probability distribution on terms. * Use this instead of the input when mixing in. ``` pred.data.keys() pred.data['types'] import numpy as np def count_matrix(pairs, dim): vec = np.zeros((dim, ), np.float32) for d in pairs: name = d['name'] count = d['count'] vec[pred.indices[name]] = count return vec term_count = count_matrix(pred.data['terms'], pred.dim) term_count np.sum(term_count) ```
github_jupyter
### Deutsch Jozsa Algorithm! We are given an oracle that implements either a constant function or a balanced function. With just one query, we can find out which one it is. --- Done as part of the NPTEL Course - Introduction to Quantum Computing: Quantum Algorithms and Qiskit https://onlinecourses.nptel.ac.in/noc21_cs103/preview ``` # Importingstandard Qiskit libraries from qiskit import QuantumCircuit, execute, Aer, IBMQ from qiskit.compiler import transpile, assemble from qiskit.tools.jupyter import * from qiskit.visualization import plot_histogram from ibm_quantum_widgets import * from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit from numpy import pi # Loading the IBM Q Account provider = IBMQ.load_account() print("Process Complete!") """ f is constant, f(x) = 0 There's only 1 way of making a constant function such that f(x) = 0 Important! While giving an arbitrary input like 011, apply the NOT gate to to q1 and q2. Then, before taking the final measurement, apply the NOT gate to q1 and q2 again for it again. 
""" qreg_q = QuantumRegister(4, 'q') creg_c = ClassicalRegister(4, 'c') circuit = QuantumCircuit(qreg_q, creg_c) """ Making the input |0001> """ circuit.x(qreg_q[3]) circuit.barrier(qreg_q[3], qreg_q[0], qreg_q[1], qreg_q[2]) """ Step 1 - Hadamard Transform Setting the state to |+++-> """ circuit.h(qreg_q[0]) circuit.h(qreg_q[1]) circuit.h(qreg_q[2]) circuit.h(qreg_q[3]) circuit.barrier(qreg_q[3], qreg_q[0], qreg_q[1], qreg_q[2]) """ Step 2 - Implementation of the Oracle Constant function, gives 0 """ None circuit.barrier(qreg_q[3], qreg_q[0], qreg_q[1], qreg_q[2]) """ Step 3 - Hadamard on Input Qubits """ circuit.h(qreg_q[0]) circuit.h(qreg_q[1]) circuit.h(qreg_q[2]) circuit.barrier() """ Step 4 - Measure the first n qubits """ circuit.measure(qreg_q[0], creg_c[0]) circuit.measure(qreg_q[1], creg_c[1]) circuit.measure(qreg_q[2], creg_c[2]) circuit.draw() backend = Aer.get_backend('qasm_simulator') job_simulator = execute(circuit, backend, shots = 1024) results_simulator = job_simulator.result() counts = results_simulator.get_counts(circuit) print("Counts:", counts) plot_histogram(counts) """ f is constant, f(x) = 1 There's only 1 way of making a constant function such that f(x) = 1 """ qreg_q = QuantumRegister(4, 'q') creg_c = ClassicalRegister(4, 'c') circuit = QuantumCircuit(qreg_q, creg_c) """ Making the input |0001> """ circuit.x(qreg_q[3]) circuit.barrier() """ Step 1 - Hadamard Transform Setting the state to |+++-> """ circuit.h(qreg_q[0]) circuit.h(qreg_q[1]) circuit.h(qreg_q[2]) circuit.h(qreg_q[3]) circuit.barrier() """ Step 2 - Implementation of the Oracle Constant function, gives 1 """ circuit.x(qreg_q[3]) circuit.barrier() """ Step 3 - Hadamard on Input Qubits """ circuit.h(qreg_q[0]) circuit.h(qreg_q[1]) circuit.h(qreg_q[2]) circuit.barrier() """ Step 4 - Measure the first n qubits """ circuit.measure(qreg_q[0], creg_c[0]) circuit.measure(qreg_q[1], creg_c[1]) circuit.measure(qreg_q[2], creg_c[2]) circuit.draw() backend = 
Aer.get_backend('qasm_simulator') job_simulator = execute(circuit, backend, shots = 1024) results_simulator = job_simulator.result() counts = results_simulator.get_counts(circuit) print("Counts:", counts) plot_histogram(counts) """ f is balanced There are many ways of implementing a balanced function. One way is by using consecutive CNOTs """ qreg_q = QuantumRegister(4, 'q') creg_c = ClassicalRegister(4, 'c') circuit = QuantumCircuit(qreg_q, creg_c) """ Making the input |0001> """ circuit.x(qreg_q[3]) circuit.barrier() """ Step 1 - Hadamard Transform Setting the state to |+++-> """ circuit.h(qreg_q[0]) circuit.h(qreg_q[1]) circuit.h(qreg_q[2]) circuit.h(qreg_q[3]) circuit.barrier() """ Step 2 - Implementation of the Oracle Balanced function with CNOT """ circuit.cx(qreg_q[0], qreg_q[3]) circuit.cx(qreg_q[1], qreg_q[3]) circuit.cx(qreg_q[2], qreg_q[3]) circuit.barrier() """ Step 3 - Hadamard on Input Qubits """ circuit.h(qreg_q[0]) circuit.h(qreg_q[1]) circuit.h(qreg_q[2]) circuit.barrier() """ Step 4 - Measure the first n qubits """ circuit.measure(qreg_q[0], creg_c[0]) circuit.measure(qreg_q[1], creg_c[1]) circuit.measure(qreg_q[2], creg_c[2]) circuit.draw() backend = Aer.get_backend('qasm_simulator') job_simulator = execute(circuit, backend, shots = 1024) results_simulator = job_simulator.result() counts = results_simulator.get_counts(circuit) print("Counts:", counts) plot_histogram(counts) """ f is balanced There are many ways of implementing a balanced function. One way is by using consecutive CNOTs. 
We can use any number of NOTs as well in this oracle """ qreg_q = QuantumRegister(4, 'q') creg_c = ClassicalRegister(4, 'c') circuit = QuantumCircuit(qreg_q, creg_c) """ Making the input |0001> """ circuit.x(qreg_q[3]) circuit.barrier() """ Step 1 - Hadamard Transform Setting the state to |+++-> """ circuit.h(qreg_q[0]) circuit.h(qreg_q[1]) circuit.h(qreg_q[2]) circuit.h(qreg_q[3]) circuit.barrier() """ Step 2 - Implementation of the Oracle Balanced function with CNOTs and NOTs """ circuit.x(qreg_q[0]) circuit.cx(qreg_q[0], qreg_q[3]) circuit.x(qreg_q[1]) circuit.cx(qreg_q[1], qreg_q[3]) circuit.x(qreg_q[2]) circuit.cx(qreg_q[2], qreg_q[3]) circuit.barrier() """ Step 3 - Hadamard on Input Qubits """ circuit.h(qreg_q[0]) circuit.h(qreg_q[1]) circuit.h(qreg_q[2]) circuit.barrier() """ Step 4 - Measure the first n qubits """ circuit.measure(qreg_q[0], creg_c[0]) circuit.measure(qreg_q[1], creg_c[1]) circuit.measure(qreg_q[2], creg_c[2]) circuit.draw() backend = Aer.get_backend('qasm_simulator') job_simulator = execute(circuit, backend, shots = 1024) results_simulator = job_simulator.result() counts = results_simulator.get_counts(circuit) print("Counts:", counts) plot_histogram(counts) ```
github_jupyter
![](https://i.pinimg.com/564x/79/7b/06/797b06f0efa5afa161add7abaac817dd.jpg) # Magnetometer Calibration Kevin Walchko, Phd 30 May 2020 --- To calibrate a magnetometer, you need to get readings from all directions in 3D space. Ideally, when you plot the readings out, you should get a perfect sphere centered at (0,0,0). However, due to misalignments, offset, etc ... you end up with ellipsoids centered at some biased location. Here we are going to try and get enough readings to estimate these errors and properly calibrate the sensor. We will load in a pre-recorded data set, where the sensor was tumbled around and calibrate it. ## Errors ![](soft-and-hard.png) - **Soft iron errors:** caused by distortion of the Earth's magnetic field due to materials in the environment. Think of it like electricity - the magnetic field is looking for the easiest path to get to where it is going. Since magnetic fields can flow more easily through ferromagnetic materials than air, more of the field will flow through the ferromagnetic material than you would expect if it were just air. This distortion effect causes the magnetic field lines to be bent sometimes quite a bit. Note that unlike hard iron interference which is the result of materials which actually have a magnetic field of their own, soft iron interference is caused by non-magnetic materials distorting the Earth's magnetic field. This type of interference has a squishing effect on the magnetic data circle turning it into more of an ellipsoid shape. The distortion in this case depends on the direction that the compass is facing. Because of this, the distortion cannot be calibrated out with a simple offset - **Hard iron errors:** caused by static magnetic fields associated with the enviornment. For example, this could include any minor (or major) magnetism in the metal chassis or frame of a vehicle, any actual magnets such as speakers, etc... This interference pattern is unique to the environment but is constant. 
If you have your compass in an enclosure that is held together with metal screws, these relatively small amounts of ferromagnetic material can cause issues. If we consider the magnetic data circle, hard iron interference has the effect of shifting the entire circle away from the origin by some amount. The amount is dependent on any number of different factors and can be very large. ## References - Ozyagcilar, T. ["Calibrating an eCompass in the Presence of Hard and Soft-iron Interference."](AN4246.pdf) Freescale Semiconductor Ltd. 1992, pp. 1-17. - Teslabs: [Magnetometer Calibration](https://teslabs.com/articles/magnetometer-calibration/) - ThePoorEngineer: [Calibrating the Magnetometer](https://www.thepoorengineer.com/en/calibrating-the-magnetometer/) - Mathworks: [magcal](https://www.mathworks.com/help/fusion/ref/magcal.html#mw_34252c54-1f78-46b9-8c30-1a2b7351b0ce) ``` import numpy as np np.set_printoptions(precision=3) np.set_printoptions(suppress=True) from scipy import linalg import sys from squaternion import Quaternion import pandas as pd %matplotlib inline from matplotlib import pyplot as plt # from math import sqrt, atan2, asin, pi from math import radians as deg2rad from math import degrees as rad2deg from slurm import storage from datetime import datetime import os import pickle def loadPickle(filename): with open(filename, 'rb') as fd: d = pickle.load(fd) return d # let's load in some data and have a look at what we have def bag_info(bag): print('Bag keys:') print('-'*50) for k in bag.keys(): print(f' {k:>10}: {len(bag[k]):<7}') # fname = "../../software/python/data.pickle" fname = "../../software/python/dddd.pickle" data = loadPickle(fname) accel = [] gyro = [] mag = [] pres = [] temp = [] stamp = [] # bnoq = [] # bnoe = [] bno = { "euler": { "roll": [], "pitch": [], "yaw": [], "time": [] }, "q": { "w": [], "x": [], "y": [], "z": [], "time": [] } } tstart = data[0][-1] for d in data: a,g,m,p,t,q,e,dt = d accel.append(a) gyro.append(g) mag.append(m) 
def plotMagnetometer(data):
    """Plot raw magnetometer samples projected on the xy, xz and zy planes,
    overlaid with the best-fit axis-aligned ellipses.

    data: iterable of (x, y, z) field samples in uT.
    """
    # Per-axis half-range (ellipse radius) and center of the data cloud.
    x = [v[0] for v in data]
    rx = (max(x)-min(x))/2
    cx = min(x)+rx
    y = [v[1] for v in data]
    ry = (max(y)-min(y))/2
    cy = min(y)+ry
    z = [v[2] for v in data]
    rz = (max(z)-min(z))/2
    cz = min(z)+rz
    alpha = 0.1
    u = np.linspace(0, 2 * np.pi, 100)
    # Parametric ellipses plus the raw scatter for each projection.
    plt.plot(rx*np.cos(u)+cx, ry*np.sin(u)+cy,'-r',label='xy')
    plt.plot(x,y,'.r',alpha=alpha)
    plt.plot(rx*np.cos(u)+cx, rz*np.sin(u)+cz,'-g',label='xz')
    plt.plot(x,z,'.g',alpha=alpha)
    plt.plot(rz*np.cos(u)+cz, ry*np.sin(u)+cy,'-b',label='zy')
    plt.plot(z,y, '.b',alpha=alpha)
    plt.title(f"CM:({cx:.1f}, {cy:.1f}, {cz:.1f}) uT R:({rx:.1f}, {ry:.1f}, {rz:.1f}) uT")
    # Raw strings: '\m' is not a valid escape sequence.
    plt.xlabel(r'$\mu$T')
    plt.ylabel(r'$\mu$T')
    plt.grid(True)
    plt.axis('equal')
    plt.legend()

def magcal(Bp, uT=None):
    """Magnetometer calibration, modelled after the MATLAB function
    magcal(D) -> A, b, expmfs.

    Fits the sphere model |B|^2 = 2*b.B + (expmfs^2 - |b|^2) by linear
    least squares to estimate the hard-iron offset, then scales each axis
    by its half-range for a diagonal soft-iron correction.

    inputs:
        Bp: (N, 3) array-like of raw field samples [uT]
        uT: expected field strength for longitude/altitude.
            If None is given, then automatically calculated and used
    returns:
        A: soft-iron 3x3 matrix of scaling
        b: hard-iron offsets
        expmfs: expected field strength
    """
    Bp = np.asarray(Bp, dtype=float)
    Y = np.sum(Bp**2, axis=1)
    X = np.hstack((Bp, np.ones((Bp.shape[0], 1))))
    # lstsq is numerically more stable than forming inv(X.T @ X) explicitly.
    beta, *_ = np.linalg.lstsq(X, Y, rcond=None)
    b = 0.5 * beta[:3]
    # Expected magnetic field strength implied by the fit.
    expmfs = np.sqrt(beta[3] + np.sum(b**2))
    if uT is None:
        uT = expmfs
    # Diagonal soft-iron estimate: scale each axis so its half-range maps to uT.
    half_ranges = (Bp.max(axis=0) - Bp.min(axis=0)) / 2
    A = np.diag(uT / half_ranges)
    return A, b, expmfs
github_jupyter
#### New to Plotly? Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! ### Imports This tutorial imports [Plotly](https://plot.ly/python/getting-started/), [Numpy](http://www.numpy.org/), and [Pandas](https://plot.ly/pandas/intro-to-pandas-tutorial/). ``` import plotly.plotly as py from plotly.tools import FigureFactory as FF import numpy as np import pandas as pd ``` #### Import Data For this histogram example, we will import some real data. ``` import plotly.plotly as py from plotly.tools import FigureFactory as FF data = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/wind_speed_laurel_nebraska.csv') df = data[0:10] table = FF.create_table(df) py.iplot(table, filename='wind-data-sample') ``` #### Histogram Using `np.histogram()` we can compute histogram data from a data array. This function returns the values of the histogram (i.e. the number for each bin) and the bin endpoints as well, which denote the intervals for which the histogram values correspond to. 
``` import plotly.plotly as py import plotly.graph_objs as go data_array = np.array((data['10 Min Std Dev'])) hist_data = np.histogram(data_array) binsize = hist_data[1][1] - hist_data[1][0] trace1 = go.Histogram( x=data_array, histnorm='count', name='Histogram of Wind Speed', autobinx=False, xbins=dict( start=hist_data[1][0], end=hist_data[1][-1], size=binsize ) ) trace_data = [trace1] layout = go.Layout( bargroupgap=0.3 ) fig = go.Figure(data=trace_data, layout=layout) py.iplot(fig) hist_data help(np.histogram) from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'Histogram.ipynb', 'numpy/histogram/', 'Histogram | plotly', 'A histogram is a chart which divides data into bins with a numeric range, and each bin gets a bar corresponding to the number of data points in that bin.', title = 'Numpy Histogram | plotly', name = 'Histogram', has_thumbnail='true', thumbnail='thumbnail/histogram.jpg', language='numpy', page_type='example_index', display_as='numpy-statistics', order=2) ```
github_jupyter
[[source]](../api/alibi.explainers.cem.rst) # Contrastive Explanation Method ## Overview The *Contrastive Explanation Method* (CEM) is based on the paper [Explanations based on the Missing: Towards Constrastive Explanations with Pertinent Negatives](https://arxiv.org/abs/1802.07623) and extends the [code](https://github.com/IBM/Contrastive-Explanation-Method) open sourced by the authors. CEM generates instance based local black box explanations for classification models in terms of Pertinent Positives (PP) and Pertinent Negatives (PN). For a PP, the method finds the features that should be minimally and sufficiently present (e.g. important pixels in an image) to predict the same class as on the original instance. PN's on the other hand identify what features should be minimally and necessarily absent from the instance to be explained in order to maintain the original prediction class. The aim of PN's is not to provide a full set of characteristics that should be absent in the explained instance, but to provide a minimal set that differentiates it from the closest different class. Intuitively, the Pertinent Positives could be compared to Anchors while Pertinent Negatives are similar to Counterfactuals. As the authors of the paper state, CEM can generate clear explanations of the form: "An input x is classified in class y because features $f_{i}$, ..., $f_{k}$ are present and because features $f_{m}$, ..., $f_{p}$ are absent." The current implementation is most suitable for images and tabular data without categorical features. In order to create interpretable PP's and PN's, feature-wise perturbation needs to be done in a meaningful way. To keep the perturbations sparse and close to the original instance, the objective function contains an elastic net ($\beta$$L_{1}$ + $L_{2}$) regularizer. Optionally, an auto-encoder can be trained to reconstruct instances of the training set. 
We can then introduce the $L_{2}$ reconstruction error of the perturbed instance as an additional loss term in our objective function. As a result, the perturbed instance lies close to the training data manifold. The ability to add or remove features to arrive at respectively PN's or PP's implies that there are feature values that contain no information with regards to the model's predictions. Consider for instance the MNIST image below where the pixels are scaled between 0 and 1. The pixels with values close to 1 define the number in the image while the background pixels have value 0. We assume that perturbations towards the background value 0 are equivalent to removing features, while perturbations towards 1 imply adding features. ![mnist4](mnist_orig.png) It is intuitive to understand that adding features to get a PN means changing 0's into 1's until a different number is formed, in this case changing a 4 into a 9. ![mnist4pn](mnist_pn.png) To find the PP, we do the opposite and change 1's from the original instance into 0's, the background value, and only keep a vague outline of the original 4. ![mnist4pp](mnist_pp.png) It is however often not trivial to find these non-informative feature values and domain knowledge becomes very important. For more details, we refer the reader to the original [paper](https://arxiv.org/abs/1802.07623). ## Usage ### Initialization The optimizer is defined in TensorFlow (TF) internally. We first load our MNIST classifier and the (optional) auto-encoder. The example below uses Keras or TF models. This allows optimization of the objective function to run entirely with automatic differentiation because the TF graph has access to the underlying model architecture. For models built in different frameworks (e.g. scikit-learn), the gradients of part of the loss function with respect to the input features need to be evaluated numerically. We'll handle this case later. 
```python # define models cnn = load_model('mnist_cnn.h5') ae = load_model('mnist_ae.h5') ``` We can now initialize the CEM explainer: ```python # initialize CEM explainer shape = (1,) + x_train.shape[1:] mode = 'PN' cem = CEM(cnn, mode, shape, kappa=0., beta=.1, feature_range=(x_train.min(), x_train.max()), gamma=100, ae_model=ae, max_iterations=1000, c_init=1., c_steps=10, learning_rate_init=1e-2, clip=(-1000.,1000.), no_info_val=-1.) ``` Besides passing the the predictive and auto-encoder models, we set a number of **hyperparameters** ... ... **general**: * `mode`: 'PN' or 'PP'. * `shape`: shape of the instance to be explained, starting with batch dimension. Currently only single explanations are supported, so the batch dimension should be equal to 1. * `feature_range`: global or feature-wise min and max values for the perturbed instance. ... related to the **optimizer**: * `max_iterations`: number of loss optimization steps for each value of *c*; the multiplier of the first loss term. * `learning_rate_init`: initial learning rate, follows polynomial decay. * `clip`: min and max gradient values. ... related to the **non-informative value**: * `no_info_val`: as explained in the previous section, it is important to define which feature values are considered background and not crucial for the class predictions. For MNIST images scaled between 0 and 1 or -0.5 and 0.5 as in the notebooks, pixel perturbations in the direction of the (low) background pixel value can be seen as removing features, moving towards the non-informative value. As a result, the `no_info_val` parameter is set at a low value like -1. `no_info_val` can be defined globally or feature-wise. For most applications, domain knowledge becomes very important here. If a representative sample of the training set is available, we can always (naively) infer a `no_info_val` by taking the feature-wise median or mean: ```python cem.fit(x_train, no_info_type='median') ``` ... 
related to the **objective function**: * `c_init` and `c_steps`: the multiplier $c$ of the first loss term is updated for `c_steps` iterations, starting at `c_init`. The first loss term encourages the perturbed instance to be predicted as a different class for a PN and the same class for a PP. If we find a candidate PN or PP for the current value of $c$, we reduce the value of $c$ for the next optimization cycle to put more emphasis on the regularization terms and improve the solution. If we cannot find a solution, $c$ is increased to put more weight on the prediction class restrictions of the PN and PP before focusing on the regularization. * `kappa`: the first term in the loss function is defined by a difference between the predicted probabilities for the perturbed instance of the original class and the max of the other classes. $\kappa \geq 0$ defines a cap for this difference, limiting its impact on the overall loss to be optimized. Similar to the original paper, we set $\kappa$ to 0. in the examples. * `beta`: $\beta$ is the $L_{1}$ loss term multiplier. A higher value for $\beta$ means more weight on the sparsity restrictions of the perturbations. Similar to the paper, we set $\beta$ to 0.1 for the MNIST and Iris datasets. * `gamma`: multiplier for the optional $L_{2}$ reconstruction error. A higher value for $\gamma$ means more emphasis on the reconstruction error penalty defined by the auto-encoder. Similar to the paper, we set $\gamma$ to 100 when we have an auto-encoder available. While the paper's default values for the loss term coefficients worked well for the simple examples provided in the notebooks, it is recommended to test their robustness for your own applications. 
### Explanation We can finally explain the instance: ```python explanation = cem.explain(X) ``` The ```explain``` method returns a dictionary with the following *key: value* pairs: * *X*: original instance * *X_pred*: predicted class of original instance * *PN* or *PP*: Pertinent Negative or Pertinant Positive * *PN_pred* or *PP_pred*: predicted class of PN or PP * *grads_graph*: gradient values computed from the TF graph with respect to the input features at the PN or PP * *grads_num*: numerical gradient values with respect to the input features at the PN or PP ### Numerical Gradients So far, the whole optimization problem could be defined within the internal TF graph, making autodiff possible. It is however possible that we do not have access to the model architecture and weights, and are only provided with a ```predict``` function returning probabilities for each class. We initialize the CEM in the same way as before: ```python # define model lr = load_model('iris_lr.h5') predict_fn = lambda x: lr.predict(x) # initialize CEM explainer shape = (1,) + x_train.shape[1:] mode = 'PP' cem = CEM(predict_fn, mode, shape, kappa=0., beta=.1, feature_range=(x_train.min(), x_train.max()), eps=(1e-2, 1e-2), update_num_grad=100) ``` In this case, we need to evaluate the gradients of the loss function with respect to the input features numerically: \begin{equation*} \frac{\partial L}{\partial x} = \frac{\partial L}{\partial p} \frac{\partial p}{\partial x} \end{equation*} where $L$ is the loss function, $p$ the predict function and $x$ the input features to optimize. There are now 2 additional hyperparameters to consider: * `eps`: a tuple to define the perturbation size used to compute the numerical gradients. `eps[0]` and `eps[1]` are used respectively for $^{\delta L}/_{\delta p}$ and $^{\delta p}/_{\delta x}$. `eps[0]` and `eps[1]` can be a combination of float values or numpy arrays. 
For `eps[0]`, the array dimension should be *(1 x nb of prediction categories)* and for `eps[1]` it should be *(1 x nb of features)*. For the Iris dataset, `eps` could look as follows: ```python eps0 = np.array([[1e-2, 1e-2, 1e-2]]) # 3 prediction categories, equivalent to 1e-2 eps1 = np.array([[1e-2, 1e-2, 1e-2, 1e-2]]) # 4 features, also equivalent to 1e-2 eps = (eps0, eps1) ``` - `update_num_grad`: for complex models with a high number of parameters and a high dimensional feature space (e.g. Inception on ImageNet), evaluating numerical gradients can be expensive as they involve prediction calls for each perturbed instance. The `update_num_grad` parameter allows you to set a batch size on which to evaluate the numerical gradients, reducing the number of prediction calls required. ## Examples [Contrastive Explanations Method (CEM) applied to MNIST](../examples/cem_mnist.nblink) [Contrastive Explanations Method (CEM) applied to Iris dataset](../examples/cem_iris.nblink)
github_jupyter
``` import pandas as pd import numpy as np #?pd.read_csv ``` # Read Daily Shareprices and Quarterlรถy Income statements (Source SimFin) ``` # Import the main functionality from the SimFin Python API. import simfin as sf # Import names used for easy access to SimFin's data-columns. from simfin.names import * sf.set_data_dir('data/') #sf.load_api_key(path='../../keys/simfin.key', default_key='free') sf.set_api_key(api_key='free') df = sf.load(dataset='income', variant='quarterly', market='us') stock = sf.load(dataset='shareprices', variant='daily', market='us') ``` # Re-read Files, Parse dates and cleanup column names ``` df = pd.read_csv("data/us-income-quarterly.csv",sep=';',header=0, parse_dates=[5,6,7]) stock = pd.read_csv("data/us-shareprices-daily.csv",sep=';',header=0, parse_dates=[2]) for s in [' ','.',',','(',')']: df.columns = df.columns.str.replace(s, '') stock.columns = stock.columns.str.replace(s, '') symbol = 'MSFT' msft = stock[stock.Ticker == symbol].set_index('Date') df1 = df[df.Ticker == symbol].set_index('PublishDate') df1['PublishDate'] = df1.index df1.index.name = 'Date' ms = msft.join(df1,how='left',rsuffix='inc').fillna(method='ffill') ms.shape ms = ms[~ms.Tickerinc.isnull()] ms = ms.dropna(axis=1) ``` # Create Target to Predict Target: Adjusted Close of Tomorrow ``` data = ms tgt = 'AdjClose' data[f"{tgt}_s1"] = ms[tgt].shift(-1) data[f"spread"] = data[f"{tgt}_s1"] - data[tgt] data[f"target"] = data[f"spread"] > 0 # Create Date/Timebased Features data['weekday'] = data.index.weekday.astype(float) data['dayofyear'] = data.index.dayofyear.astype(float) data['month'] = data.index.month.astype(float) # Create Hist. 
targes/values for var in [f"spread",f"target"]: for lag in np.arange(1,10): data[f"{var}_lag{lag}"] = data[var].shift(lag) for var in [f"target"]: for lag in np.arange(1,10): data[f"{var}_lag{lag}"] = data[var].shift(lag).astype(bool) data["Days_Since_Report"] = (data.index-data["PublishDate"]).apply(lambda x: x.days).astype(float) data["Days_Since_Report"] data = data.dropna() data.to_parquet("data/msft.parq") import matplotlib.pyplot as plt # create figure and axis objects with subplots() fig,ax = plt.subplots() # twin object for two different y-axis on the sample plot ax2=ax.twinx() display(ms[['Revenue','AdjClose']].corr()) ms['Revenue'].plot(ax=ax,color='red') ms['AdjClose'].plot(ax=ax2) fig,ax = plt.subplots() # twin object for two different y-axis on the sample plot ax2=ax.twinx() display(ms[['NetIncome','AdjClose']].corr()) ms['NetIncome'].plot(ax=ax,color='red') ms['AdjClose'].plot(ax=ax2) ```
github_jupyter
def handle_displayed(wid, **event):
    """Run one-time setup that is only possible once the widget is on screen."""
    # Hide the browser's native video controls; the notebook supplies its own.
    wid.set_property('controls', False)

def handle_loaded_metadata(wid, **event):
    """Callback fired once sufficient video metadata is loaded at the frontend."""
    # Nothing to do yet; kept as a hook for future use.
    pass

def handle_duration_change(wid, **event):
    """Keep the slider's range in sync with the video's duration."""
    duration = wid.properties.duration
    wid_slider.max = duration
github_jupyter
# Exploring Seattle Weather **Learning Objective:** Apply data visualization practices to study the weather in Seattle. In this notebook, we will create visualizations to explore weather data for Seattle, taken from NOAA. The dataset is a CSV file with columns for the temperature (in Celsius), precipitation (in centimeters), wind speed (in meter/second), and weather type. We have one row for each day from January 1st, 2012 to December 31st, 2015. This notebook is based on the Seattle weather example in the [Altair](https://altair-viz.github.io/tutorials/exploring-weather.html) and [Vega-Lite](https://vega.github.io/vega-lite/tutorials/explore.html) documentation. ## Imports ``` import altair as alt alt.data_transformers.enable('json') from vega_datasets import data ``` ## Data Load the Seattle weather data from Altair: ``` df = data.seattle_weather() df.head() ``` ## Explore Letโ€™s start by looking at the precipitation, using tick marks to see the distribution of precipitation values: ``` alt.Chart(df).mark_tick().encode( x='precipitation' ) ``` It looks as though precipitation is skewed towards lower values; that is, when it rains in Seattle, it usually doesnโ€™t rain very much. It is difficult to see patterns across continuous variables, and so to better see this, we can create a histogram of the precipitation data. For this we first discretize the precipitation values by adding a binning to x. Additionally, we set our encoding channel y with the special field `*` that is aggregated with `count`. The result is a histogram of precipitation values: ``` alt.Chart(df, width=400, height=200).mark_bar().encode( alt.X('precipitation', bin=True), alt.Y('count(*):Q') ) ``` Next, letโ€™s look at how precipitation in Seattle changes throughout the year. Altair natively supports dates and discretization of dates when we set the type to temporal (shorthand `T`). For example, in the following plot, we compute the total precipitation for each month. 
To discretize the data into months, we set the keyword `timeUnit="month"`: ``` alt.Chart(df, width=400, height=200).mark_line().encode( alt.X('date:T', timeUnit='month'), alt.Y('average(precipitation)') ) ``` This chart shows that in Seattle the precipitation in the winter is, on average, much higher than summer (an unsurprising observation to those who live there!). By changing the mapping of encoding channels to data features, you can begin to explore the relationships within the data. When looking at precipitation and temperature, we might want to aggregate by year and month (`yearmonth`) rather than just month. This allows us to see seasonal trends, with daily variation smoothed out. We might also wish to see the maximum and minimum temperature in each month: ``` alt.Chart(df, width=400, height=200).mark_line().encode( alt.X('date:T', timeUnit='yearmonth'), alt.Y('max(temp_max)'), ) ``` In this chart, it looks as though the maximum temperature is increasing from year to year over the course of this relatively short baseline. To look closer into this, letโ€™s instead look at the mean of the maximum daily temperatures for each year: ``` alt.Chart(df, width=400, height=200).mark_line().encode( alt.X('date:T', timeUnit='year'), alt.Y('mean(temp_max)'), ) ``` And in fact, the chart indicates that yes, the annual average of the daily high temperatures increased over the course of these four years, a fact that you can confirm for minimum daily temperatures as well. You might also wonder how the variability of the temperatures changes throughout the year. For this, we have to add a computation to derive a new field. 
You can do this with Pandas
For example, here is a plot that uses the customizations we have developed above to explore the relationship between weather, precipitation, maximum temperature, and temperature range, configured to use a larger canvas: ``` alt.Chart(df).mark_point().encode( alt.X('temp_max', axis=alt.Axis(title='Maximum Daily Temperature (C)')), alt.Y('temp_range', axis=alt.Axis(title='Daily Temperature Range (C)')), alt.Color('weather', scale=scale), size='precipitation', ).interactive() ``` This gives us even more insight into the weather patterns in Seattle: rainy and foggy days tend to be cooler with a narrower range of temperatures, while warmer days tend to be dry and sunny, with a wider spread between low and high temperature.
github_jupyter
# Mentoria Evolution - Python para Data Science https://minerandodados.com.br * Para executar uma cรฉlula digite **Control + enter** ou clique em **Run**. * As celulas para rodar script Python devem ser do tipo code. * As celular aceitam comandos python e jรก executam um "print" automรกticamente. ## Mentoria Evolution - Aula 2 ### Estruturas de Dados - Listas ### ** Sintaxe: nome = [elementos] ** ``` idades = [25,38,18,47] type(idades) endereco = ['Rua dos Fulanos, Belo Horizonte.',2500] endereco ``` - Acessando elementos ``` rua = endereco[0] rua numero = endereco[1] print(rua,numero) ``` - Atualizando elementos ``` endereco[1] = 2750 endereco endereco[1] = 2750 numero = endereco[1] print(rua,numero) ``` - Operaรงรตes com Listas ``` nomes = ['Felipe','Joao','Maria'] nomes ``` - Contando elementos ``` len(nomes) ``` - Verificando elementos ``` 'Felipe' in nomes ``` - Valores mรกximo e mรญnimo ``` max(nomes) min(nomes) ``` - Concatenando listas ``` nomes nomes + ['Jose','Carla'] nomes = nomes + ['Jose','Carla'] print(nomes) ``` - Adiciona novos elementos ``` nomes.append('Marcelo') nomes ``` - รndice de um determinado elemento ``` nomes.index('Joao') ``` - Removendo um elemento ``` nomes.remove('Marcelo') nomes ``` - Contando elementos ``` nomes.count('Joao') ``` - Ordenando elementos ``` nomes.sort(reverse=True) print(nomes) ``` ### Estruturas de Dados - Dicionรกrios ### - Objeto do tipo Chave e Valor - Sintaxe: nome = {'chave':'valor'} ``` dic = {'nome':'Rodrigo'} pessoas = { 'Felipe':30, 'Fulana':18, 'Maria':55, 'Jose':80, 'valores':[1,3.5,400,5,6], 'pesos':{'Felipe':68,'Fulana':55} } pessoas ``` - Acessando o valor a partir de uma chave ``` pessoas['Jose'] pessoas['pesos'] ``` - Dicionario Aninhado ``` pessoas = { 'Felipe': {'Idade':30,'Cidade':'Belo Horizonte','Peso':65}, 'valores':[1,3.5,400,5,6], } pessoas pessoas['Felipe']['Cidade'] pessoas['Felipe']['Peso'] Cadastro_pessoas = { 'Clientes': {'Cliente_01': {'Nome':'Rodrigo', 'Idade':30, 'Cidade':'Belo 
Horizonte', 'Peso':65 }, 'Cliente_02': {'Nome': 'Felipe', 'e-mail': 'felipe10@gmail.com' }, }, 'valores':[1,3.5,400,5,6]} Cadastro_pessoas['Clientes']['Cliente_01']['Cidade'] Cadastro_pessoas['Clientes']['Cliente_02'] ``` **Mรฉtodos** - keys() โ€“ Retorna as chaves do dicionรกrio ``` pessoas pessoas.keys() ``` - values() - Retorna os valores do dicionรกrio ``` pessoas.values() ``` - get() โ€“ Retorna o valor de uma determinada chave senรฃo existir retorna o valor passado como parรขmetro ``` pessoas.get('Juca','Nรฃo existe') ``` - setdefault() - Retorna o valor da chave caso ela exista, senรฃo inseri a chave e o valor no dicionรกrio ``` pessoas.setdefault('Felipe',40) pessoas pessoas.setdefault('Marcos',30) ``` - items() โ€“ Retorna itens e valores ``` pessoas.items() ``` - clear() โ€“ Limpa o objeto dicionรกrio ``` pessoas.clear() pessoas ``` ### Estruturas Condicionais e Loops ### **Controles de fluxo** **Sintaxe**<br> **if <condiรงรฃo>:** >**instruรงรตes** ``` if 1 > 10: print('O numero 10 รฉ maior que 1') ``` **Sintaxe**<br> **if <condiรงรฃo>:** >**instruรงรตes** **else:** >**instruรงรตes** ``` x=1 y=10 if x > y: print("O numero em X รฉ maior que Y") else: print('O numero em Y รฉ maior que X') ``` **Trabalhando com loops FOR** ** Sintaxe: for <variรกvel> in <condiรงรฃo>: instruรงรตes ** - Loop em elementos de uma lista ``` for i in [1,2,3,4,5]: print ("Valor: %s" %i) print (i + 2) ``` - Loop em elementos de uma string ``` for c in "Python รฉ uma linguagem de programaรงรฃo": print (c) ``` **Loops FOR aninhados** **Sintaxe**<br> **for <variรกvel> in <condiรงรฃo>:** >**instruรงรตes**<br> >**for <variรกvel> in <condiรงรฃo>:** >>**instruรงรตes** ``` for i in ['1a Fase','2a Fase','3a Fase']: print (i) for y in ['manha','tarde','noite']: print (y) print(' ') ``` **Loops com While** **Sintaxe**<br> **while <condiรงรฃo>: ** >**instruรงรตes** ``` i = 0 while i < 10: print (i) i = i + 1 ``` **Loops while com instruรงรฃo else** **Sintaxe**<br> **while <condiรงรฃo>:** 
>**instruรงรตes** **else:** >**instruรงรตes** ``` i = 0 while i < 10: print (i) i = i + 1 else: print("Numero รฉ maior ou igual a 10") ``` - Ao concluir, salve seu notebook e envie suas respostas para **contato@minerandodados.com.br**
github_jupyter
## Exercise 3 - Quantum error correction ### Importing Packages ``` from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, Aer, transpile from qc_grader import grade_ex3 import qiskit.tools.jupyter from qiskit.test.mock import FakeTokyo ``` #### -------------------------------------------------------------------------------------------------------------------- ### 1. Circuit In this example we'll use 5 qubits that we'll call code qubits. To keep track of them, we'll define a special quantum register. ``` code = QuantumRegister(5,'code') ``` We'll also have an additional four qubits we'll call syndrome qubits. ``` syn = QuantumRegister(4,'syn') ``` Similarly we define a register for the four output bits, used when measuring the syndrome qubits. ``` out = ClassicalRegister(4,'output') ``` We consider the qubits to be laid out as follows, with the code qubits forming the corners of four triangles, and the syndrome qubits living inside each triangle. ``` c0----------c1 | \ s0 / | | \ / | | s1 c2 s2 | | / \ | | / s3 \ | c3----------c4 ``` For each triangle we associate a stabilizer operation on its three qubits. For the qubits on the sides, the stabilizers are $ZZZ$. For the top and bottom ones, they are $XXX$. The syndrome measurement circuit corresponds to a measurement of these observables. This is done in a similar way to surface code stabilizers (in fact, this code is a small version of a surface code). 
``` qc_syn = QuantumCircuit(code,syn,out) # Left ZZZ qc_syn.cx(code[0],syn[1]) qc_syn.cx(code[2],syn[1]) qc_syn.cx(code[3],syn[1]) #qc_syn.barrier() # Right ZZZ qc_syn.swap(code[1],code[2]) qc_syn.cx(code[2],syn[2]) qc_syn.swap(code[1],code[2]) qc_syn.cx(code[2],syn[2]) qc_syn.cx(code[4],syn[2]) #qc_syn.barrier() # Top XXX qc_syn.h(syn[0]) qc_syn.cx(syn[0],code[0]) qc_syn.cx(syn[0],code[1]) qc_syn.cx(syn[0],code[2]) qc_syn.h(syn[0]) #qc_syn.barrier() # Bottom XXX qc_syn.h(syn[3]) qc_syn.cx(syn[3],code[2]) qc_syn.cx(syn[3],code[3]) qc_syn.cx(syn[3],code[4]) qc_syn.h(syn[3]) #qc_syn.barrier() # Measure the auxilliary qubits qc_syn.measure(syn,out) qc_syn.draw('mpl') qc_init = QuantumCircuit(code,syn,out) qc_init.h(syn[0]) qc_init.cx(syn[0],code[0]) qc_init.cx(syn[0],code[1]) qc_init.cx(syn[0],code[2]) qc_init.cx(code[2],syn[0]) qc_init.h(syn[3]) qc_init.cx(syn[3],code[2]) qc_init.cx(syn[3],code[3]) qc_init.cx(syn[3],code[4]) qc_init.cx(code[4],syn[3]) #qc_init.barrier() qc_init.draw('mpl') ``` The initialization circuit prepares an eigenstate of these observables, such that the output of the syndrome measurement will be `0000` with certainty. ``` qc = qc_init.compose(qc_syn) display(qc.draw('mpl')) job = Aer.get_backend('qasm_simulator').run(qc) job.result().get_counts() ``` #### -------------------------------------------------------------------------------------------------------------------- ### 2. Error Qubits ``` error_qubits = [0,4] ``` Here 0 and 4 refer to the positions of the qubits in the following list, and hence are qubits `code[0]` and `code[4]`. ``` qc.qubits ``` To check that the code does as we require, we can use the following function to create circuits for inserting artificial errors. Here the errors we want to add are listed in `errors` as a simple text string, such as `x0` for an `x` on `error_qubits[0]`. 
``` def insert(errors,error_qubits,code,syn,out): qc_insert = QuantumCircuit(code,syn,out) if 'x0' in errors: qc_insert.x(error_qubits[0]) if 'x1' in errors: qc_insert.x(error_qubits[1]) if 'z0' in errors: qc_insert.z(error_qubits[0]) if 'z1' in errors: qc_insert.z(error_qubits[1]) return qc_insert ``` Rather than all 16 possibilities, let's just look at the four cases where a single error is inserted. ``` for error in ['x0','x1','z0','z1']: qc = qc_init.compose(insert([error],error_qubits,code,syn,out)).compose(qc_syn) job = Aer.get_backend('qasm_simulator').run(qc) print('\nFor error '+error+':') counts = job.result().get_counts() for output in counts: print('Output was',output,'for',counts[output],'shots.') ``` ### 2. Backend ``` backend = FakeTokyo() backend ``` As a simple idea of how our original circuit is laid out, let's see how many two-qubit gates it contains. ``` qc = qc_init.compose(qc_syn) qc = transpile(qc, basis_gates=['u','cx']) qc.num_nonlocal_gates() qc1 = transpile(qc,backend,basis_gates=['u','cx'], optimization_level=3) qc1.num_nonlocal_gates() ``` #### -------------------------------------------------------------------------------------------------------------------- ### 3. Initial Layout ``` initial_layout = [0,2,6,10,12,1,5,7,11] qc2 = transpile(qc,backend,initial_layout=initial_layout, basis_gates=['u','cx'], optimization_level=3) qc2.num_nonlocal_gates() ``` #### -------------------------------------------------------------------------------------------------------------------- ### 4. Grading ``` grade_ex3(qc_init,qc_syn,error_qubits,initial_layout) ``` #### --------------------------------------------------------------------------------------------------------------------
github_jupyter
# Simulating Clifford randomized benchmarking using a generic noise model This tutorial demonstrates shows how to simulate Clifford RB sequences using arbitrary $n$-qubit process matrices. In this example $n=2$. ``` import pygsti import numpy as np ``` ## Get some CRB circuits First, we follow the [Clifford RB](../CliffordRB.ipynb) tutorial to generate a set of sequences. If you want to perform Direct RB instead, just replace this cell with the contents of the [Direct RB](../DirectRB.ipynb) tutorial up until the point where it creates `circuitlist`: ``` #Specify the device to be benchmarked - in this case 2 qubits nQubits = 2 qubit_labels = [0,1] gate_names = ['Gxpi2', 'Gypi2','Gcphase'] availability = {'Gcphase':[(0,1)]} pspec = pygsti.obj.ProcessorSpec(nQubits, gate_names, availability=availability, qubit_labels=qubit_labels) #Specify RB parameters (k = number of repetitions at each length) lengths = [0,1,2,4,8,16] k = 10 subsetQs = [0,1] randomizeout = False # ==> all circuits have the *same* ideal outcome (the all-zeros bitstring) #Generate clifford RB circuits exp_design = pygsti.protocols.CliffordRBDesign(pspec, lengths, k, qubit_labels=subsetQs, randomizeout=randomizeout) #Collect all the circuits into one list: circuitlist = exp_design.all_circuits_needing_data ``` ## Create a model to simulate these circuits Now we need to create a model that can simulate circuits like this. Two things to note: 1. RB circuits use our "multi-qubit" gate naming, so you have gates like `Gxpi2:0` and `Gcphase:0:1`. 2. RB circuits do gates in parallel (this only matters for >1 qubits), so you have layers like `[Gypi2:0Gypi2:1]` In this example, we'll make a model with $n$-qubit process matrices, so this will be practically limited to small $n$. We construct a model based on our standard 2-qubit X, Y, and CPHASE model, since this has all the appropriate gates. 
To get a model with the multi-qubit labels, we'll use a standard multi-qubit "model-pack", which packages a `Model` object with relevant meta information needed by other protocols (like GST). If you can't start with a standard model, then you'll need to create an `ExplicitOpModel` object of the appropriate dimension (see the [explicit models tutorial](../../objects/ExplicitModel.ipynb)) and assign to it gates with are, for instance `('Gxpi2',0)` rather than just `'Gxpi2'`. Here we import the `smq2Q_XYCPHASE` model pack: ``` from pygsti.modelpacks import smq2Q_XYCPHASE ``` We'll depolarize the target model and set one of the process matrices to a custom value as a demonstration. Here is where you can set any 2-qubit process matrices you want to any of the gates: ``` myModel = smq2Q_XYCPHASE.target_model().depolarize(op_noise=0.01, spam_noise=0.01) myModel[('Gx',0)] = np.kron( np.array([[1, 0, 0, 0], [0, 0.85, 0, 0], [0, 0, 0, -0.85], [0, 0, 0.85, 0]], 'd'), np.array([[1, 0, 0, 0], [0, 0.95, 0, 0], [0, 0, 0.95, 0], [0, 0, 0, 0.95]], 'd')) #print(myModel[('Gx',0)]) myModel.operations.keys() #voila! you have gates like "Gx:0" rather than "Gxi" ``` Since, `ExplicitOpModel` objects (e.g., those in the model packs) don't know how to automatically simulate multiple gates in parallel (you'd need to add an operation for each layer explicitly), we'll just *serialize* the circuits so they don't contain any parallel gates. This addresses point 2) above. Then we can simulate our circuits using our `ExplicitOpModel`, creating a `DataSet`. ``` serial_circuits = [c.serialize() for c in circuitlist] ds = pygsti.construction.generate_fake_data(myModel, serial_circuits, 100, seed=1234) #See how the DataSet contains serialized circuits (just printing the first several layers for clarity) print(ds.keys()[10][0:7]) print(circuitlist[10][0:5]) ``` Next, we "un-serialize" the circuits in the resulting data-set (`ds`) using the `process_circuits` function. 
This is needed because the RB experiment design calls for the original (parallel-gate) circuits, not the serialized ones. The cell below updates the circuits for all the data we just simulated so the data counts are associated with the original circuits. ``` #map circuits in dataset back to non-serialized RB circuits that we expect to have data for: unserialize_map = { serial_circuit: orig_circuit for (serial_circuit, orig_circuit) in zip(serial_circuits, circuitlist)} ds = ds.copy_nonstatic() ds.process_circuits(lambda c: unserialize_map[c]) ds.done_adding_data() ``` ## Running RB on the simulated `DataSet` To run an RB analysis, we just package up the experiment design and data set into a `ProtocolData` object and give this to a `RB` protocol's `run` method. This returns a `RandomizedBenchmarkingResults` object that can be used to plot the RB decay curve. (See the [RB analysis tutorial](../RBAnalysis.ipynb) for more details.) ``` data = pygsti.protocols.ProtocolData(exp_design, ds) results = pygsti.protocols.RB().run(data) %matplotlib inline results.plot() ```
github_jupyter
# Introduction to programming 1 Joรฃo Pedro Malhado and Clyde Fare, Imperial College London (contact: [chemistry-git@imperial.ac.uk](mailto:chemistry-git@imperial.ac.uk)) This notebook is licensed under a [Creative Commons Attribution 4.0 (CC-by) license](http://creativecommons.org/licenses/by/4.0/) This is an interactive tutorial! As you go through it any time you see something that looks like this: a = "Hello" that's followed by an empty *code cell* (a light grey rectangle with a label like *"In[ ]"*), you should type the expression in the code cell, hit Shift+Return to *execute* it, and note the output. No copying and pasting! You'll learn the concepts better if you type them out yourself. Learning how to program is a skill that is developed by experimenting, trying ideas out, thinking about what works and what doesn't, and asking for help. You are strongly encouraged to open new cells in the notebook and trying things out. The computer will not complain! ## Overview In this workshop we will distinguish different data types that can be manipulated in computer programs, and what kind of operations can be performed on each type. These will be the building blocks we will be using when constructing our programs. As these workshops develop we will see how to bring things together and construct functionality. ## Data types At the centre of all computer programs is manipulating and performing logical operations on data and data structures. Data structures can be very complex and abstract, but we'll start with simple data types which are present in almost all programming languages. We will first identify what they are, and look at what simple operations can be performed on them. ### Different types of numbers In scientific computing, the most important data types are numbers. In a similar way to mathematics where we can distinguish different types of numbers (integers, rationals, reals, complex, etc...), in computing we also have different types of numbers. 
Is there a difference between 1 and 1.0? In order to enquire the data type of a given object we can use a function (more on what a function is later on) called *type* that tells you what kind of thing -- what data type -- something is. We can check for ourselves that Python considers 1 and 1.0 to be different data types: type(1) type(1.0) Python is identifying the two numbers above as an *integer* and a *[floating point number](https://en.wikipedia.org/wiki/Floating_point)*. This difference is related to the way computers represent numbers, and can be important in some operation on some programming languages. Fortunately in Python (version 3 or above) we will not have to worry too much about them. What do you think the types of -1, 0.2, 9753 and 6.626e-34 are? Check your guesses in the cell below. The last number of the sequence above is the Plank constant to 4 significant figures: 6.626&times;10<sup>-34</sup>. Therefore we see that we can use the *e* to specify scientific notation. What would be the result of 3e-3 If we change the sign 3e3 What is the type of *3e3*? Is it the same as the type of 300? Test it out! The last numerical type available in Python is that of complex numbers. Python uses the convention followed in engineering, where the imaginary constant is represented by the letter *j*. type(1+2j) What will the type of the difference of the complex number 1+2j and the pure imaginary number 2j be? #### Operations with numbers The simplest programming operations you can do with numbers are the same as the ones you would do in a simple calculator. In fact you can use the notebook as a sophisticated calculator. Let's try a few. 1+1 Try a couple of expressions in the cell below. Note that you can use brackets to specify the order of the operations. 
(5+7)/2 Calculate the energy of a photon of violet light with a wavelength of 440 nm (you can remind yourself of the value of the [constants you need](http://physics.nist.gov/cgi-bin/cuu/Category?view=html&Universal.x=70&Universal.y=18)). Two \* can be used for exponentiation. We can try it on a complex number. (3-1j)**2 Or to do a fourth root 16**(1/4) One operation that we learn in primary school, seldom used in our daily lives, but is somewhat surprisingly useful in programming is the modulo operation which calculates the remainder of the division of two numbers. This is done using the '%' symbol 3%2 The mathematical operations available in de bare Python language are relatively simple, and common functions like trigonometric functions, exponentials or logarithms are not available by default. These can be made available by loading special modules (to be discussed further later). By loading the [math module](https://docs.python.org/3/library/math.html), many more mathematical functions and constants are made available. import math math.sin(3*math.pi/2) The math module is loaded when you use the pylab environment you may already be familiar with. However, we shall delay the discussion of these extensions until later, as we want to focus on the main characteristics of the programming language. ### Strings of text Another very important data type we are interested in manipulating is text strings. This is evident when dealing with a text editor, a spell checker, or even how you are typing into this notebook, but even in simple computational tasks text strings will appear often. Text strings appear in quotations. 
If you type a simple Hello below, Python will not understand what you mean and output an error message If you type it in quotes 'Hello' it will tolerate it much better And if you ask what type this quoted thing is, Python will inform you that it is a *string* type('Hello') Note that the difference in type is related with the use of quotes and not the use of letters type(1.2) type('1.2') ``` type("We can even make 1 single string with spaces, punctuation and numbers such as 0.00729") ``` Note that in the string above we used double quotes instead of single quotes. Both are indeed equivalent in Python, but we must be consistent. In the English language, because of the use of the apostrophe, single quotes can be problematic 'I wouldn't want to contradict you' In the example above, Python would think the string finishes half way through the second word, and it will not understand what the rest of the command means. The use of double quotes helps in this case. "I wouldn't want to contradict you" Or to use triple quotes ('''), which have the advantage of being used for long strings that do not fit in one line. You could put a full novel in triple quotes. ``` '''This is a multiline string. With some horrible characters that would normally create complications: '{}"/ As you can see, it extends over more than one line''' ``` #### Operations with strings We know what to expect from the operation *1+1* and we tested this operation above. We have also seen that *1* and *"1"* are essentially different objects, for a computer in general, and Python in this case. So let us see what the following operation will result into "1"+"1" Any ideas on what happened, or is the numeracy of the computer is broken beyond repair? It is certainly helpful to query what is the type of the result of the operation we just did. type("1"+"1") Are things a bit more clear? What was achieved with the operation + acting on two strings ("1" and "1") was a string concatenation, i.e. 
we joined the two strings together. Let's try to do that again "Hello "+"mate!" We note here that there is no difference in the symbol we use for summation +, and the symbol we use for string concatenation +. Yet the operations are essentially different, because the objects we are operating on are of different types. Summing (in a mathematical sense) two strings is an ill defined operation, and concatenating two numbers together would not be a very useful thing to do. Similarly, it is not defined the operation minus "-" between two strings "Remove what?" - "what?" Let's now try to outsmart the computer and "add" a string and a number 1+"Hello" Let us look at this error message (a traceback) in some detail. A traceback gives details on what was happening when Python encountered an Exception or Error -- something it doesn't know how to handle. There are many kinds of Python errors, with descriptive names to help us understand what went wrong. In this case we are getting a TypeError: we tried to do some operation on a data type that isn't supported for that data type. Python gives us a helpful error message as part of the TypeError: unsupported operand type(s) for +: 'int' and 'str' In order to render the operation meaningful we would need to convert the number into a string. This can be done using the *str* function str(1) We can confirm that the type of the previous operation is a string. We can now meaningfully concatenate the two objects str(1)+"Hello" It is important to understand what is going on. We are transforming the number 1 into the string "1" using the function *str*, and concatenating the resulting string "1" with the second string "Hello" via the operation + between two strings. We can also be interested in the reverse operation, given a string we may want to use it as a number in order to perform some numerical operation. This can be done with the function *float*, that converts a string into a floating point number. 
37+float("2.998e8") The ability to convert between data types is very useful and often occurs in solving practical problems. A second operation \*, this time involving an integer number and a string. 20*"pancake" Before you try the command out, can you see that it would be strange if the result was a number? There you have, 20 pancakes for you. Note that the \* operation in this case is between an integer and a string. You can see that "20"*"pancake" is not as good Using a floating point (real) number will also not work 1.5*"pancake" Using the two operations with strings above, + and \*, we can start to be creative: produce the string "Hehehehehehehehehehehello" using strings with maximum 3 characters as building blocks There are many operations that can be done with strings, and we will be looking at them as we go along, but one that often comes in handy is the *len* which gives the total number of characters (including spaces) in a string. We can use it to determine the number of characters in the long string above len(insert your previous expression here) Write down the expression that would give you the difference in string length (character number) between the string above and a normal "Hello". ### Booleans If we want to introduce any logic in a program, or instruct the computer on how to perform instructions according to specific conditions, we need to make use of a less obvious but very important data type: the logical statements of *True* and *False*, called booleans type(True) Note that they are capitalized type(true) And are essentially different from a string with the word "True" type("True") Booleans do not often show up explicitly written in programs. However they are present all the time as a result of some operation. For example a comparison of two numbers. -2 <= 1 In the expression above, the computer is testing whether -2 is smaller or equal to 1, and we should be happy to see that it got the order of the numbers right. 
The examples we use here are obvious, and we use them to illustrate how booleans work. In practice we will be dealing with more realistic cases. We can also test if two numbers are equal 5 == 5 Note the two = signs above. This is not a typo, == is used when we want to test for equality. We will see further down that one = sign has a different meaning. Similarly, we can test if two numbers are different 5 != 5 Above we are testing if 5 is different from 5. Since they are equal, the result of the test is False. We can also compare two strings "introvert"=="shy" We could also compare a string to a number, and we can expect these two things to be always different. "1.0"==1.0 A common operation could be the comparison of the length of two strings len("introvert") > len("shy") Let us complicate things slightly. We can use the operation *not* to obtain the complementary of a boolean. not True If something is not True, then it is False! Note that what we wrote is not a string. It is the operation *not* on the boolean *True*. It should be easy to understand what the result of not False should be (create a new cell and test it, if it is not clear). Used on in these simple cases, the operation *not* does not seem very useful, but it can be used to negate increasingly more complex constructions not (3 > 6/2) Can you see why we obtained this result? We can check the result of multiple logical operations together, using the logical operators *and* and *or*. If two expressions are joined by an *and*, they both have to be True for the overall expression to be True. If two expressions are joined by an *or*, as long as at least one is True, the overall expression is True. Let us give it a try 1 > 0 and 1 < 2 Although the use of brackets is not necessary, we can think of these compound conditional expressions as written as (1 > 0) and (1 < 2). Since each of the two expressions is True, the overall result is True. 
We can construct more complex logical statements (1 > 0 or 1 < 2) and 1 > 10 Would the result be different if we change the position of the parenthesis? 1 > 0 or (1 < 2 and 1 > 10) ## Variables We have now covered the three basic data types present in almost all programming languages: numbers (of several kinds), strings and booleans; as well as some simple operations specific to each type. Another language element common to all programming languages are variables. Variables allow us to store the result of specific operations for later use, making it easier to write programs. If, for example, we were writing a program to perform quantum mechanical calculations, it would be very tedious and error prone to explicitly write the Plank constant each time it is used. Instead we can store the value of the Plank constant into a variable. h=6.626e-34 We have just defined a variable "called" h, with the value 6.626e-34. Note that we used a single = sign in this process. One = is an assignment of the variable on the left hand side, the value on the right hand side; two == is a operation testing whether the left hand side is equal to the right hand side, and yields a boolean. When using a notebook interface, assigning a variable does not yield an output (see above). The output of a code cell is in general the output of the operation in that cell, so any cell that ends with a variable assignment will yield no output. This does not mean the operations in the cell have not been performed. 
Indeed we can see that the value of the Plank constant has been stored in variable h, by just executing the cell with the variable name h We can now use h in doing operations instead of typing the value explicitly, and we can quickly see the advantage of this if we want to calculate the energy of a red photon with 400&times;10<sup>12</sup> Hz frequency the energy of a yellow photon of 515&times;10<sup>12</sup> Hz and the energy of a blue photon of 640&times;10<sup>12</sup> Hz Defining a variable in programming is similar to defining a variable in mathematics, with some slight differences. We can define variables as numbers, but also as strings, booleans, or more complex structures. w="fantastic" w type(w) type(h) In mathematics we define a variable and don't necessarily think of the specific values this variable takes. When we define a mathematical function $f(x)=x^2$, we often think of $x$ as a continuous "thing", taking all real values at the "same time". *In programming, variables always have one and only one well defined value at each point of the program.* The point just made about variable values at each point in the program illustrates well the notion that a program is a sequence of instructions. Let us define the following variables a=3 b=4 a=b b="done" Since in the above cell all instructions are variable assignments we don't see any output. But what is the value of each variable now? You can test them below. Any surprises? In the examples above we have always used single letter names for variables. We are not restricted to this however, and although variable names can not have spaces or other special characters, it makes life easier for yourself and whoever reads your program, to use more explicit variable names. magic_number = 1/137 amountOfFlour = 0.75 my_name = "Genghis" Again we see no output, but we are happy there are no error messages. 
In order to check the values of the variables all at once we can use the function *print*, which simply prints its content on the screen. print(magic_number) print(amountOfFlour) print(my_name) Note that unlike other cells, the code above does not have a "Out \[\_\]:" tag associated with it on the left of the cell. This is because the *print* function is not returning a result. The only thing that it does is printing to the screen, but otherwise has no other effect on the program. The *print* function is surprisingly useful for a command that does so little. It is used to check values of variables without logically changing the code and thus is useful when checking for errors and debugging code. Never fear to use it! ``` a = "| (_| -()- -()- -()- -()- | -()- -()- -()- -()- ||\n" b = "|_\_|_/___|__|__|__|___|__|___|__|___________________________||\n" c = "|________________________________|__|__()_|__()_|__()__|_____||\n" d = " ___|)_______________________________________________________\n" e = "|_/(|,\____/_|___/_|____/_|______|___________________________||\n" f = "|___/____________________________|___________________________||\n" g = "| | | () | () | () | | ||\n" h = "|__\___|.________________________|___\_|___\_|___\_|___|_____||\n" i = "|__/|_______/|____/|_____/|______|___________________________||\n" j = "|_____/__________________________|____\|____\|____\|_________||\n" k = "|____/___________________________|___________________________||\n" l = "|__/___\_._______________________|__|__|__|__|__|__|___|_____||\n" print(d + f + i + e + b + g + a + c + l + h + j + k) ``` ## Making decisions: if statements Life is made of decisions, and any minimally versatile code will need to execute different instructions depending on some criteria. An example from daily life would be: if it is hot, open the window; if it is not hot, don't! All programming languages provide mechanism to conditionally execute a piece of code. 
In python it takes the form: if 6 > 5: print("Six is greater than five!") That was our first multi-line piece of code, and we need to pay attention to the indentation on the second line (the notebook tries to take care of this for you). Make sure to hit space four times before typing the second line; Python needs them to understand what we want it to do. So what is going on here? When Python encounters the *if* keyword, it evaluates the expression following the keyword and before the colon. This expression must always evaluate to a boolean type, i.e. evaluate to True or evaluate to False. If that expression is True, Python executes the code in the indented code block under the if line. If that expression is False, Python skips over the code block. a="same" if 0 > 1: a="changed" a By using one *if* statement in the examples above, we have seen that how to execute code if the expression in the if statement is True, and if it is False no code is executed. A "true choice" will be executing one piece of code if the expression is True, and another piece of code if it is False. This is done using the *else* statement. sister_age = 15 brother_age = 12 if sister_age > brother_age: verdict="sister is older" else: verdict="brother is older" verdict An *else* is not stand alone and is always associated with a particular *if*. If we have more than two cases, we can use the *elif* keyword to check more cases. We can have as many *elif* cases as we want; Python will go down the code checking each *elif* until it finds a True condition or reaches the default *else* block. sister_age = 15 brother_age = 12 if sister_age > brother_age: verdict="sister is older" elif sister_age == brother_age: verdict="sister and brother are the same age" else: verdict="brother is older" verdict You don't have to have an else block, if you don't need it. 
That just means there isn't default code to execute when none of the if or elif conditions are True: colour = "orange" season = "" if colour == "green" or colour == "red": season = "Christmas colour!" elif colour == "black" or colour == "orange": season = "Halloween colour!" elif colour == "pink": season = "Valentine's Day colour!" season Now go back to the cell above and change the first line to read colour = 'purple' Execute the cell again. Notice that, this time, nothing is printed. Let us use an *if* statement to conditionally define the value of a variable. First define a variable *x* to have a real number value of your choice. Then build an *if* statement to define a variable *y* that should be the [absolute value](http://mathworld.wolfram.com/AbsoluteValue.html) of *x*. In this exercise we are aiming at a general solution, i.e. one that would work for any value of *x*. Change the value of *x*, take positive and negative values, to check your solution works. ## Summary We covered the simplest data types found in most programming languages: * Numbers, further divided into integers, floating point numbers (floats) and complex numbers. * Strings, which are sequences of characters. * Booleans, True and False logical variables. Each of these data types has specific operations associated with them, and some operations which involve quantities of different types. Variables provide a way to call objects by a name. A variable can be of any type. The *if* statement provides a way to branch code execution: "if *something* do this, otherwise do that". It is a fundamental programming construct, and is present virtually in every program. ## Exercises ### Menu specials Given a string *vegetable* and a string *entree*, create string "Today our specials are: &lt;vegetable&gt; and &lt;entree&gt;". 
Try it on the following pairs: vegetable = 'asparagus' ; entree = 'pasta primavera' vegetable = 'artichoke' ; entree = 'steak frites' vegetable = 'kale' ; entree = 'fondue' ### Too long for twitter? Twitter messages can not be more than 140 characters. Given a string *tweet*, print to screen the sentence "Not for twits" if the string is longer that 140 characters, and the word "Soundbite" if the string is less than or equal to 140 characters. Try it on: tweet='The Analytical Engine weaves algebraic patterns, just as the Jacquard loom weaves flowers and leaves. -- Ada Lovelace, the first programmer' tweet='Four score and seven years ago our fathers brought forth on this continent a new nation, conceived in liberty, and dedicated to the proposition that all men are created equal.' ### Greatest out of 3 Define 3 variables *x*, *y* and *z* as three numbers of your choice. Write a piece of code that outputs the greatest of the three values. Change the values of *x*, *y* and *z* to check if your solution works. # Part 2 ## Grouping data together In the first part of this session we have looked at three basic data types used in programming: numbers, strings and booleans. All programs can in principle be written using just these data types, but to solve many problems it is often useful to use more sophisticated data types, provided by the programming language (or some extension). A useful and versatile data structure provided by Python are *lists*, and we will be making use of these during this course. *Lists* are similar to *arrays*, which you have seen before, but they are not the same, and we will highlight some differences below. ### Lists Lists provide a way of keeping and treating data together. We can create a list by putting its elements in a comma separated list enclosed in square brackets. 
days_of_the_week = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"] We can see that we have created an object with a different type type(days_of_the_week) We can access the members of a list using the *index* of that item: days_of_the_week[2] The index is an integer which specifies the element we want. A little confusingly Python uses 0 as the index of the first element of a list. Thus, in this example, the 0 element is "Sunday", 1 is "Monday", and so on. If we want to count back from the last element of a list we use a negative index. An index of -1 corresponds to the last element of the list, whilst an index of -2 corresponds to the second to last element, -3 the third last, etc. etc.. days_of_the_week[-1] If we try and provide an index that goes beyond the last element of a list Python will throw an IndexError (this is a common error in programming) days_of_the_week[8] As an exercise, retrieve "Thursday" from the list days_of_the_week. Besides selecting individual elements, we can also select ranges within the list using two integer numbers separated by a colon ':'. The sublist starts with the element defined by the starting index and include all elements up to **but not including** the element defined by the ending index. This process is called *slicing* working_days=days_of_the_week[1:6] working_days This behaviour of indexes can seem strange at first. It is perhaps helpful to think of the indexes when slicing as pointing *between* list elements, to the comma. ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"] | | | | | | | | 0 1 2 3 4 5 6 7 When slicing between 1 and 6, we are including the elements between the commas so numbered. This is a picture that can help you visualize what is going on and it should not be taken too literally. 
If we omit the ending index, the sublist will include all elements until the end of the list catholic_working_days=days_of_the_week[1:] catholic_working_days Conversely, if we omit the starting index, the elements will be taken from the beginning of the list. Define a variable jewish_working_days, including all the elements of days_of_the_week, except "Saturday". Besides extracting elements of the list, indexes can also be used to modify parts of the list days_of_the_week[1]="Bank holiday" days_of_the_week We can also specify a range in doing this days_of_the_week[1:3]=["Hard work","Hard work"] days_of_the_week We may specify discontinuous ranges on the list, by indicating a third number after another colon. This is a step index, specifying the increment to the list index. sport_days=days_of_the_week[1:6:2] sport_days This slicing mechanism can also be used to insert elements in the list days_of_the_week[5:5]=["Extra day"] days_of_the_week Once more we can think of the slicing indexes as pointing between list elements. In the "space between" list elements, we are assigning a new element, an insertion into the list. Lists are very general and its elements do not have to have the same data type. For example, we can have both strings and number is the same list ["Today", 7, 99.3, "", 5**2] List elements can even be other lists. Tables of data usually take this form outer_list=[["dozens","units"],[0.5,6],[1,12],[2,24]] Just as we can access the members of a list with an index: outer_list[2] So too we can access the members of an inner list by an extra index (do not confuse this notation with the list range we used above) outer_list[2][1] The indices are operating sequentially. We can use round brackets to make this clearer. (outer_list[2])[1] The first index, is selecting from outer_list the element index 2, which is the list [1,12]. The second index is selecting from this list the elements index 1. 
If the above looks a bit confusing here's an alternative way of getting the same answer that shows what's happening when this double index is being used: inner_list = outer_list[2] inner_element = inner_list[1] inner_element #### Operations on lists There are many operations involving lists provided by the Python language. We shall not try to be exhaustive, we will start with a few simple operations that will allow us to do most of the work, and introduce useful features as we go along. First we will note a certain analogy between lists and strings: lists are an ordered collection of elements, while strings are ordered collections of characters. Indeed we can use indexing in strings in a similar way we used them with lists analogy="A string is an ordered collection of characters" analogy[0] analogy[15:33] We could thus expect that some of the operations on strings could have a similar behaviour on lists. We can thus use the function *len* to determine the length of a list len(days_of_the_week) Try to predict the result of the following command len(days_of_the_week[1:5]) We can use the opertor '+' to perform list concatenation, and can be used to build bigger lists [1,2,3]+[4,5,6] Build the list \[1,2,3,"something",10\] by combining 3 different lists. The operator \* with an integer yields the repetition of the list elements 5*[0,1] 3*[[1,2]] ### Lists vs arrays As mentioned earlier, there are many similarities between *lists* and *arrays*, but there are also very important differences, some of which may already be apparent from what we have covered so far. Arrays are not a primitive type in Python, but is a data type made available by the NumPy package which is automatically loaded with the Pylab environment. 
So in order to use arrays we need to load this package in one form or the other from numpy import array Arrays can be formed from suitable lists by using the function *array()* simple_list=[1,2,3] simple_array=array(simple_list) simple_array Any array can be converted back to a list using the function *list()* simple_list2=list(simple_array) simple_list2==simple_list While any array can be made a list, not all lists can be made arrays. While lists can be collections of any type of data, arrays must be **regular collections of numbers only**, i.e. if we wish to convert a list of lists of length 3, all elements must have length 3 complete_list=[[1,2,3],[4,5,6],[7,8,9],[10,11,12]] complete_array=array(complete_list) complete_array The fact that arrays are in general regular, allows for a more sophisticated indexing. <img src="numpy_indexing.png" /> For example we can select columns of the array (elements on the same position of every sub-array). Before the coma we specify row range, and after the comma the column complete_array[:,1] It is also possible to define some sort of mitigated arrays from incomplete lists, or lists of mixed types which are not numbers incomplete_list=[[1,2,3],[4,5],[7,8,9],[10,11,12]] incomplete_array=array(incomplete_list) incomplete_array mixed_list=[1/137,3,"word",False] mixed_array=array(mixed_list) mixed_array Note that is both cases we obtain an odd object type: "dtype=object" or dtype='&lr;U21'. These type of arrays are in general much less useful, and in particular they don't allow for convenient [row,column] indexing incomplete_array[:,1] Operations on lists and arrays also behave differently. We have seen that operations with list have some resemblance to operations on strings. For example the '+' operator in lists gives concatenation [1,2,3]+[2,2,2] Arrays on the other hand behave like vectors. Adding two arrays gives a vector sum, i.e. 
summing elements in the same position array([1,2,3])+array([2,2,2]) Also, the '\*' operation in lists gives element repetition 3*[1,2,3] in arrays however it corresponds to vector multiplication by a scalar, each element of the array is multiplied 3*array([1,2,3]) Arrays can be extremely useful to work with numerical data. We shall make use of them at later stages of this course, but for the most part we will be making use of the more versatile lists. ### Extra: Variations of lists Besides lists (and arrays), Python possesses other similar data structures with somewhat different properties that make them more suited for some applications. We shall not be making use of these during the course as lists are more general, but you may find them when looking at code written by others. Below we cover only the simplest properties of these other data structures, but you can [read more](http://docs.python.org/3/tutorial/datastructures.html#tuples-and-sequences) about them elsewhere. #### Tuples Tuples are indicated by round brackets tup=(1,2,3,4,"last") We can access its elements like lists tup[1:3] But we cannot change its content tup[0]=20 #### Sets Sets are indicated by a sequence inside curly brackets (or the *set()* function). While lists can have many repeated elements, sets eliminate the redundancy and only keep one element of each set([1,1,1,2,2,2]) If we make a set from a string, the string is "destroyed" as we obtain a set with all characters used in the string, but lose the order that potentially gave meaning to the string set("The quick brown fox jumps over the lazy dog!") #### Dictionaries While list are accessed by a position index, dictionaries are accessed by a label atomic_number={"He":2,"Ne":10,"Ar":18,"Kr":36,"Xe":54,"Rn":86} atomic_number["Ar"] ## Summary List provide a way to group together and manipulate data. Imagine we were dealing with a dataset with the maximum temperature of every day in the year. 
It would be impractical if we had to assign each value to a different variable.
github_jupyter
``` %pylab inline from __future__ import division from __future__ import print_function import pandas as pd import seaborn as sb from collections import Counter ``` ## Malware Classification Through Dynamically Mined Traces ### 1. The Dataset The dataset used in this notebook can be freely downloaded from the [csmining website](http://www.csmining.org/index.php/malicious-software-datasets-.html), where there's also an easy explanation on the nature of the dataset and its strenghts/weaknesses. For a quick recap: the dataset is a made of traces of API calls from 387 windows programs, some of which are malware. The malware programs are labelled as 1, whereas the 'goodware' programs have a 0 label. Here's a line: 1,LoadLibraryW HeapAlloc HeapAlloc HeapFree HeapAlloc HeapFree HeapFree NtOpenKey LoadLibraryW GetProcAddress GetProcAddress [...] Let's start exploring the dataset. ``` dataset = pd.read_csv('CSDMC_API_Train.csv') dataset.columns = ['target','trace'] print(dataset.keys()) print(dataset.columns) ``` Each trace is simply a string, representing a list of API calls separated with a space and residing all in the first column. So the first thing to do is to split it into an actual python list, creating a list of traces that will each be represented by a tuple containing the trace itself and its classification into 'malware' or 'goodware'. ``` traces = [] for i in dataset.index: traces.append((dataset.iloc[i]['trace'].strip().split(' '), dataset.iloc[i]['target'])) print ('A trace: ' , type(traces[0][0])) print ('A label: ', type(traces[8][1])) ``` To gain some additional knowledge on the dataset we could check its bias, or how well are the samples distributed between malware and goodware. Let's count how many ones and zeroes there are in the target column. ``` c_target = Counter([x[1] for x in traces]) print(c_target) ``` It seems like the dataset is pretty biased towards malware, as there are few samples of benign applications being run. 
It's almost the polar opposite of what would happen in a randomly sampled dataset from real world applications, as malware is usually a one digit percentage of the set of every application being released. But let's not despair, this will actually make learning easier. It might hurt in the generalization tho. Here's a graph showing the obvious bias: ``` plt.figure(figsize=(6,8)) plt.xticks(np.arange(2) + 1.4, ['Goodware', 'Malware']) plt.title('Dataset Bias') plt.ylabel('Number of Program Traces') plt.xlabel('Classification') plt.bar([1,2], c_target.values()) ``` ### 2. Initial Feature Mining Now it's time to mine for features, as the dataset itself doesn't really lend itself to an easy classification with a Machine Learning algorithm. Not out of the box at least. The raw traces present some peculiar challenges for a smooth classification: 1. They are composed of strings 2. They have various length (makes it hard to fit them in a matrix with fixed width) 3. They present a lot of repeated data points We need numerical features, and possibly a way to organize everything. The first idea is to count how many times a given API call is contained in each trace, this should yield positive results during learning if there's any correlation at all with the quantity of calls made to a specific API during a program run and a malicious behaviour. ``` counted_traces = [] for trace, target in traces: counted_traces.append((Counter(trace), target)) ``` Just to get an immediate feedback let's print a list of the first 20 traces, and look at the 3 most used API calls in each trace. The diagnosis is printed at the end to give some perspective. ``` diagnosis = '' for i in range(20): if counted_traces[i][1] == 0: diagnosis = 'OK' else: diagnosis = 'MALWARE' trace_sample = counted_traces[i][0].most_common(3) print(i, ')', trace_sample, diagnosis) ``` We can obtain some good information and maybe some ideas from this alone: 1. 
The only two good samples have the shortest program run and the longest one, this might not be relevant in general but it's worth investigating 2. The most popular API calls are roughly the same for each program run, so maybe they won't be incredibly useful for classification Also, this might be the shortest program run ever (my guess is it crashed soon after loading): ``` counted_traces[11][0] ``` Maybe then it's possible we'll need the length of each trace and the number of times an API has been called during a program run, and that's all information we can freely gather from the data we have assembled so far. But the *absolute* number of API calls in a program trace isn't a very useful feature, as it mostly depends on the length of each trace, so we'll normalize it by searching for the **frequency** of each API call in a program run. And since we will have the frequencies associated to each API call, maybe we can see if the frequency of the most used API call is useful for classification. Since it's now time to gather more than one feature and it's better to keep everything tidy, let's generate a list of dictionaries that will contain the following fields: **'ID'** : index of the trace, given by the enumerate() method **'Counter'** : Counter containing the API calls and how many times they have been called **'Freq'** : frequency at which a certain API call has been used in a program trace **'Length'** : Length of the trace **'MostFreqCall'** : The most common API call and its frequency **'Target'** : 1 or 0, depending on the maliciousness of the sample To be honest I'm just glad I could use the name 'dict_traces'. 
``` dict_traces = [] #a list of dicts for i, t in enumerate(counted_traces): trace, target = t max_freq = 0 most_common = () length = len(traces[i][0]) freq_dict = {} for key in trace: freq = trace[key] / length freq_dict[key] = freq if freq > max_freq: max_freq = freq most_common = (key, freq) d = {'ID' : i, 'Counter' : trace, 'Freq' : freq_dict, 'Length' : length, 'MostFreqCall' : most_common, 'Target' : target} dict_traces.append(d) print(dict_traces[0].keys()) print(dict_traces[0]['MostFreqCall']) ``` What is the most frequent "most frequent call"? Since the most popular API calls will inevitably be used by every program run, be it malicious or not, maybe we can avoid them. ``` most_freq_call_list = [] for d_t in dict_traces: call, freq = d_t['MostFreqCall'] most_freq_call_list.append(call) c = Counter(most_freq_call_list) print('Maybe we can avoid these: ', c.most_common(3)) ``` Here's a graph showing the N most frequent "most frequent call". As we can see the first 4 are pretty noticeable, then they drop fast: ``` N = 12 plt.figure(figsize=(12,8)) plt.title('Most frequent "most frequent call"') plt.ylabel('Frequency') y = [x[1]/len(dict_traces) for x in c.most_common(N)] plt.bar(np.arange(N) + 0.2, y) plt.xticks(np.arange(N) + 0.6, [x[0] for x in c.most_common(N)], rotation=60) plt.ylim((0, 0.5)) ``` A further trasformation in our data is needed before we start learning, let's separate the target from the data points. This will be useful to render the code more readable, and to have another quick glimpse into how biased the dataset is. 
``` target = [] for d in dict_traces: target.append(d['Target']) print(target) ``` As we can see from the density of the ones, our algorithm would do pretty well if it just guessed 'malware' all the time: ``` p_malware = c_target[1] / len(target) print('Accuracy if I always guess "malware" = ', p_malware) print('False positives: ', 1 - p_malware) ``` Of course false negatives will be exactly 0% in this particular instance so, generally speaking, this wouldn't be a bad result. But that wouldn't be a very realistic scenario in a learned classifier, and even then that would mean that it actually learnt something from the dataset (the target's distribution), although it shoudln't be useful at all for generalizing. Let's see how a really dumb classifier would fare, by just guessing 'malware' and 'goodware' with 50% chance (this time accounting for both false positives and false negatives): ``` p_chance_mal = (p_malware * 0.5) p_chance_good = (c_target[0] / len(target)) * 0.5 print ('''Probability of getting it right by guessing with 50%%: - False Positive Ratio: %f - False Negative Ratio: %f ''' % (1 - p_chance_mal, 1 - p_chance_good)) ``` Now these are horrible ratios, let's hope we can do better than this. ## 3. Learning It's time to finally try and learn something. Throughout the rest of the notebook we'll use various classifiers and functions from the [scikit-learn](http://scikit-learn.org/) library. First off we'll need a classifier, and since I'm a fan of ensemble learning we'll start with a Random Forest classifier initialized with 20 estimators and a random state set to 42. The random state is very important, as it will help with the reproducibility of this study. ``` from sklearn.ensemble import RandomForestClassifier model = RandomForestClassifier(n_estimators=20, random_state=42) ``` Now we can't start learning right away, as our dataset should be first divided into a 'train' set and a 'test' set, to ensure a bit of generalization. 
We could do this manually, by randomly selecting a chunk of the dataset (usually 75%) for the training part and leaving the rest for testing, but we could still generate a lucky division and receive optimistic results that won't generalize well in the real world. Thankfully scikit-learn has provided a neat implementation of the KFold algorithm that will allow us to generate how many folds we need. ``` from sklearn.cross_validation import KFold kf = KFold(len(target), n_folds=3, random_state=42) ``` Another little adjustment is needed before using scikit-learn's algorithms, as they expect the data to be indexed vertically, but thankfully again, numpy has the solution. We're going to create a numpy array with each trace length and reshape it accordingly. Let'stry and learn only from the length of the traces: ``` scores = [] for train, test in kf: train_data = np.array([dict_traces[i]['Length'] for i in train]).reshape(len(train), 1) test_data = np.array([dict_traces[i]['Length'] for i in test]).reshape(len(test), 1) model.fit(train_data, [target[i] for i in train]) scores.append(model.score(test_data, [target[i] for i in test])) print(scores) print(np.array(scores).mean()) ``` We have chosen to learn from 3 folds and already our classifier seems to produce good results even with one feature. This might be because it's a pretty small dataset and it's kinda biased. Since we're going to try and learn from different features and maybe different classifiers, it's best to keep track of the scores in a global way, just to visualize the improvements over time (or lack thereof). ``` global_scores = {} global_scores['Length'] = scores ``` Another feature we mined is the most frequent API call, let's see how well it does by itself. Since classifiers work mainly in dimensional data, we need a way to encode the API call into an integer, maybe by using a dictionary. 
This is a very rudimentary but effective way
``` model = RandomForestClassifier(n_estimators=20, random_state=42) scores = [] for train, test in kf: most_freq_train_data = np.array([most_freq_dict[dict_traces[i]['MostFreqCall'][0]] for i in train]).reshape(len(train), 1) length_train_data = np.array([dict_traces[i]['Length'] for i in train]).reshape(len(train), 1) train_data = np.append(most_freq_train_data, length_train_data,1) most_freq_test_data = np.array([most_freq_dict[dict_traces[i]['MostFreqCall'][0]] for i in test]).reshape(len(test), 1) length_test_data = np.array([dict_traces[i]['Length'] for i in test]).reshape(len(test), 1) test_data = np.append(most_freq_test_data, length_test_data,1) model.fit(train_data, [target[i] for i in train]) scores.append(model.score(test_data, [target[i] for i in test])) global_scores['Length + MostFreqCall'] = scores print(scores) print(np.array(scores).mean()) ``` This is a good improvement, even a 5% increase at this stage can be beneficial, we'll see if this is the right direction. ## 4. Reorganizing Features One of the best aspects of the scikit-learn library is their readily-available datasets, which are either already present in the library's path, or provide a simple function that will download them. Since we're mining features from our dataset, we could use the same structure as shown below: ``` from sklearn.datasets import load_iris iris = load_iris() print(iris.keys()) print(iris['feature_names']) ``` This is good practice, in case we later want to release this to the public or even in case someone wants to expand on this. 
Starting with the easy things, a description and the target: ``` m_descr = """ Malware Traces Dataset Notes: --------- Dataset characteristics: :Number of Instances: 387 (319 malware and 68 goodware) :Number of Attributes: 2 :Attribute Information: - trace length - most frequent API call (encoded with an integer) - class: - Malware - Goodware """ malware_dataset = { 'target_names' : ['Goodware', 'Malware'], 'DESCR' : m_descr, 'target' : np.array(target) } ``` Now for the hard part: data and feature_names. We'll need to unify the 2 features we have used until now: ``` m_most_freq_data = np.array([most_freq_dict[dict_traces[i]['MostFreqCall'][0]] for i in range(len(dict_traces))]).reshape(len(dict_traces), 1) m_length_data = np.array([dict_traces[i]['Length'] for i in range(len(dict_traces))]).reshape(len(dict_traces), 1) m_data = np.append(m_most_freq_data, m_length_data,1) malware_dataset['data'] = m_data malware_dataset['feature_names'] = ['trace length', 'most frequent call'] ``` ## 5. Reorganizing Learning Since now our dataset is clean and organized, we can streamline the learning process aswell. The function learn() will take in input a classifier, the features and the target points, while returning the scores as we have used until now (raw scores and their mean). I also added an option to plot the confusion matrix for the learn classifier and the possibility to save the scores into the global_scores variable we have initialized a while ago. 
``` from sklearn.metrics import confusion_matrix def learn(model, data, target, descr=None, n_folds=3, plot=False): ''' "descr" is an optional parameter that will save the results in global_scores for later visualization "n_folds" is there just in case I want to change it ''' kf = KFold(data.shape[0], n_folds=n_folds, random_state=42) scores = [] best_score = 0 best_split = () for train, test in kf: #this is easier to read model.fit(data[train], target[train]) m_score = model.score(data[test], target[test]) scores.append(m_score) if plot and m_score > best_score: best_score = m_score best_split = (train, test) #this plots a simple confusion matrix if plot: train, test = best_split model.fit(data[train], target[train]) cm = confusion_matrix(target[test], model.predict(data[test])) plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues) plt.title('Confusion Matrix') plt.xticks(np.arange(len(malware_dataset['target_names'])), malware_dataset['target_names']) plt.yticks(np.arange(len(malware_dataset['target_names'])), malware_dataset['target_names'], rotation=90) plt.ylabel('Actual Label') plt.xlabel('Predicted Label') plt.tight_layout() if descr != None: global_scores[descr] = scores return (scores, np.array(scores).mean()) ``` Let's try it out: ``` model = RandomForestClassifier(n_estimators=20, random_state = 42) data = malware_dataset['data'] target = malware_dataset['target'] print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target, plot=True)) ``` This is exactly the same result as before but this was expected, as that's why we initialized the same random state. Now it's time to mine for more features, since the most frequent call kinda improved our classification when paired with the length of the trace, maybe the 2nd and 3rd most frequent calls will add to it? 
``` print(dict_traces[0].keys()) print(dict_traces[0]['Counter'].most_common(4)[0]) m_second_most_freq = [] for trace in dict_traces: m_second_most_freq.append(trace['Counter'].most_common(2)[1]) m_s_counter = Counter([x[0] for x in m_second_most_freq]) ``` This is the same process we used to encode numerically the API calls in the first 'most frequent' feature, it's very rough but it gets the job done. It could also be methodologically wrong, as we're using different encodings for some of the same API calls. ``` m_s_dict = {} index = 0 for item in m_s_counter.keys(): m_s_dict[item] = index index += 1 print(m_s_dict) m_s_list = [m_s_dict[x[0]] for x in m_second_most_freq] m_s_data = np.array(m_s_list).reshape(len(m_s_list), 1) ``` Let's add it to the existing feature set. ``` malware_dataset['data'] = np.append(malware_dataset['data'], m_s_data, 1) malware_dataset['feature_names'].append('second most frequent call') print(malware_dataset['feature_names']) ``` Has it already impacted the classification? ``` model = RandomForestClassifier(n_estimators=20, random_state=42) data = malware_dataset['data'] target = malware_dataset['target'] descr = 'Length + MostFreqCall + SecondMostFreqCall' print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target, descr, plot=True)) ``` The overall improvement is negligible, but looking at the folds it seems like two of them were more susceptible to the new feature and the middle one didn't really take notice. So it could be a step in the right direction. 
Maybe it would help to visualize our improvements over time: ``` def plot_improvement(): plt.figure(figsize=(13,5)) t_counter = Counter(target) #assuming an algorithm that always guesses "Malware" as the baseline best_guess = t_counter[1] / len(target) plt.plot(np.arange(len(global_scores.keys()) + 1), [best_guess] + [np.array(x).mean() for x in global_scores.values()]) plt.xticks(np.arange(len(global_scores.keys())) + 1, global_scores.keys(), rotation=60) plt.xlim((0, len(global_scores.keys()) + 0.2)) plt.ylabel('Accuracy') print('If we just guess "malware" we get an accuracy of: ', best_guess) print('Our best classificator has an accuracy of: ', np.array([np.array(x).mean() for x in global_scores.values()]).max()) plot_improvement() ``` The improvement on the second most frequent call is clearly negligible, so we'll stop investigating in that direction. But it seems apparent that the frequency of the api calls has to be correlated in some way with the malicious behaviors of the samples so we might aswell try this new approach. 
The idea is simple, there are 10 most frequent apis throughout the dataset, and each trace presents them with a certain frequency: ``` m_10_most_common = [] for trace in dict_traces: freq_list = [] for t, f in most_freq_counter.most_common(10): freq_list.append(trace['Counter'][t] / trace['Length']) m_10_most_common.append(freq_list) print(m_10_most_common[2]) m_data_10 = np.array(m_10_most_common) ``` Let's update our feature set and the feature names: ``` malware_dataset['data'] = np.append(malware_dataset['data'], m_data_10, 1) print(malware_dataset['data'].shape) for i in range(10): malware_dataset['feature_names'].append(str(i + 1) + ' API call') ``` Now we can try to learn from all the features just mined at once: ``` model = RandomForestClassifier(n_estimators=20, random_state=42) data = malware_dataset['data'] target = malware_dataset['target'] descr = '10 MostFreqPerc' print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target, descr, plot=True)) plot_improvement() ``` Now, this is a very good improvement, and it seems like every fold is responding in the same way so it's not dependent on the random selection of the training set. But it's interesting to wonder if the newly mined features are just improving on the old ones or if they can be used on their own without any detraction from the classificiation. So, what is the accuracy of our classifier if we only learn from the 10 new features? ``` model = RandomForestClassifier(n_estimators=20, random_state=42) data = malware_dataset['data'][:, 2:] target = malware_dataset['target'] print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target)) ``` Not bad, as expected. As a side note, I stopped plotting the confusion matrix as only false negatives were present. ## 6. 
Trying Different Classifiers We could go on and mine for features for a while, but an algorithm wich can discern between malware and goodware with ~95% accuracy is already a pretty good result for such a short study. Also, there's another direction where we could improve, and that's by trying out new models. - ***AdaBoost*** Until now we used Random Forest, which is just an ensemble classifier that uses Decision Trees as base classifier, but the scikit-learn library also provides us with an implementation of AdaBoost, an ensemble classifier that seems to do just the same thing (its default base classifier is a Decision Tree). So it might be interesting to see if we can get the same results. *Note: on the surface Ada Boost and Random Forest seem to be fairly similar, as they both combine the results of many underlying 'weaker' classifiers and construct a stronger learner, but they differ a lot in their core. Random Forest is a bagging algorithm and Ada Boost is a boosting algorithm.* ``` from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier base_class = DecisionTreeClassifier(random_state=42) model = AdaBoostClassifier(n_estimators=120,learning_rate=1.2, random_state=42) print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target)) ``` The result is pretty similar, we can play a bit with the estimators and the learning rate but we won't get much better results than this. Also, if we put back the first 2 features it actually becomes worse. As a quick side note, we started with ensemble classifiers, but what about linear classifiers? 
Well, there's a reason to ignore them: ``` from sklearn.linear_model import Perceptron model = Perceptron(random_state=42) data = malware_dataset['data'][:, 2:] #if we try to learn from the first 2 features, the perceptron will take a dive print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target)) ``` It would be pretty weird if the feature set we just constructed had any linear way to separate it into classes. So any linear model is out of the question, but there are weaker models than Ada Boost to try against our feature set: - ***Decision Tree Classifier*** This is the base classifier for both the Random Forest algorithm and for Ada Boost (at least in scikit-learn's implementation). It's basically an algorithm that tries to learn a Decision Tree to classify the problem at hand, using several heuristics. The learned Decision Tree isn't guaranteed to be the optimal one, as that would entail solving an NP-complete problem and breaking everything. ``` model = DecisionTreeClassifier(random_state=42) print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target)) ``` - ***Naive Bayes*** Since ours is a simple classification job with only 2 classes, we might aswell try the most used classifier out there, Naive Bayes. Now, this might not be a really good idea, since Naive Bayes assumes each feature to be independent from the others (and it's not really our case), but it's worth a try since it usually works anyway. We'll try 3 different implementation of Naive Bayes, with different assumed probability distributions. 
``` from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB models = [MultinomialNB(), GaussianNB(), BernoulliNB()] #try different first and last_index (0, 2) first_index = 0 last_index = 4 data = malware_dataset['data'][:, first_index : last_index] for model in models: print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target)) ``` - ***Support Vector Machines*** SVMs technically are non-probabilistic binary linear classifiers, but with the kernel trick they can easily perform non-linear classifications. There are lots of parameters for SVMs (gamma, tolerance, penalty [...]) and of course the various kernels, so we'll see a handy way to automate the choice of these parameters with Grid Search. ``` from sklearn.svm import SVC kernels = ['linear', 'poly', 'rbf', 'sigmoid'] model = SVC(kernel='rbf') print('''Our algorithm has: - scores: %s - mean: %f ''' % learn(model, data, target)) ``` Instead of manually trying out new models and parameters, we can automate everything using the handy GridSearch: ``` from sklearn.grid_search import GridSearchCV param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1], 'kernel' : ['rbf', 'sigmoid'], #poly and linear hang up the whole notebook, beware 'degree' : [3, 4, 5]} KF = KFold(len(target), n_folds=2, random_state=42) grid = GridSearchCV(SVC(), param_grid=param_grid, cv=kf)#, verbose=3) #uncomment to see lots of prints first_index = 0 last_index = 10 data = malware_dataset['data'][:, first_index : last_index] grid.fit(data, target) print(grid.best_score_) print(grid.best_params_) ``` ## 8. Future Improvements This notebook might be updated or divided into more notebooks (it is pretty long), anyway there's lots of directions to take from here. a. Ulterior Feature Mining I doubt that we've found the best features for this classification job. b. 
Dimensionality Reduction Once we've mined for more features we can try to reduce the dimensionality of the problem using: - Isomap - TSNE (this works very well apparently) c. Biclustering
github_jupyter
# Aqua Circuit Interoperability Update _Donny Greenberg, Julien Gacon, Ali Javadi, Steve Wood, 24-Mar-19_ ## Basic Vision & End Goal * Make Aqua use circuits as a first-class currency, and feel more like an algorithms library _next to_ Terra, as users expect, rather than an independent library on top of it * No more `construct_circuit()` wrappers in Aqua * Promote Aquaโ€™s best circuity features into Terra to be broadly useful ## Proposal - Three steps 1. Circuit as a First-class Citizen in Aqua 1. Aqua algorithms accept circuits directly, no more circuit wrappers 1. Circuit Library with Enhanced QuantumCircuit Families and Convenient Prebuilts 1. Destination for most of Aqua's enhanced circuit wrappers 1. Critically, allows for lazily constructed circuit placeholders. 1. Usability Improvements to Promote up to QuantumCircuit 1. Make circuit construction in Terra more powerful with features in Aqua users like ## 1. Circuit as a First-class Citizen in Aqua * Anywhere previously calling `construct_circuit` now accepts circuits as-is, no questions asked * Typehints ask for a circuit, and are indifferent whether a circuit is from the circuit library (below) * Fully backwards compatible with Aqua's `construct_circuit`-based objects as long as we like * Maybe warnings where behavior is strange, e.g. no parameters in VQE ansatz ### Demo - VQC with newly built circuits Below, we demonstrate the execution of the Variational Quantum Classifier using no special circuit construction objects. 
``` from qiskit import QuantumCircuit from qiskit.circuit import ParameterVector from qiskit.aqua.algorithms import VQC from qiskit.aqua.components.optimizers import SLSQP import numpy as np import itertools # Learning the one-hot encoding train_feats = np.eye(3).tolist() train_labels = [1,2,3] train = dict(zip(train_labels, train_feats)) print(train) feat_params = ParameterVector('ษธ', length=len(train_feats[0])) feat_map = QuantumCircuit(3) depth = 3 for _ in range(depth): feat_map.h(qubit=range(3)) [feat_map.rz(phi=p, qubit=i) for i, p in enumerate(feat_params)] [feat_map.crz(theta=p1*p2, control_qubit=q1, target_qubit=q2) for ((q1, p1), (q2,p2)) in itertools.combinations(enumerate(feat_params), 2)] feat_map.barrier() feat_map.draw(output='mpl') # Note: I need to calculate this number classifier_params = ParameterVector('ฮธ', length=19) classifier = QuantumCircuit(3) depth = 3 cp_iter = iter(classifier_params) next(cp_iter) for _ in range(depth): [classifier.ry(theta=next(cp_iter), qubit=j) for j in classifier.qubits] [classifier.crx(theta=next(cp_iter), control_qubit=q1, target_qubit=q2) for (q1, q2) in itertools.combinations(classifier.qubits, 2)] classifier.barrier() classifier.draw(output='mpl') vqc = VQC(optimizer=SLSQP(), data_circuit=feat_map, classifier_circuit=classifier, training_dataset=train, test_dataset=train) vqc.run() ``` ## 2. Circuit Library with Enhanced Flexibility Circuits and Convenient Prebuilts _Proposal: Move Aqua's circuit-constructor objects - e.g. Ansatze, QFTs, Arithmetic - into a broadly useful circuit-library as flexible QuantumCircuit objects with enhanced features._ #### New Concepts in the Circuit Library * Circuit Blueprints: Enhanced QuantumCircuit objects which are lazily populated and constructed, but print and interact as bona-fide circuits. 
* Not a new class, simply subclasses of QuantumCircuit which match the QuantumCircuit interface * Users generally shouldn't notice the difference, unless digging into circuit guts in debugging * Properties such as `.data`, `.parameters`, etc. which require real circuits, trigger construction and caching of constructed circuit * Meta-parameters, such as ansatz depth or connectivity, are mutable and edited lightly due to lazy construction. Setters trigger cached circuit wipe * Circuit Families * Collections of circuit blueprints or prebuilt circuits with extensions or use-case specific features - e.g. `PermutationCircuit`s can include properties which `ArithmeticCircuit`s do not. * Allow for more aggressive convenience functionality for specific use cases e.g. Ansatz automatically allocating parameters during construction. #### Options for Circuit Library Placement 1. Inside Terra, with integration tests 1. Pros - Consistent with user expectations that Terra contains circuit building blocks 1. Cons - Unlike other areas, Terra has many directories in the base qiskit level, include `qiskit/circuit/`. The library could not clearly be a `circuits` directory alongside Terra, but would likely be hidden inside `qiskit/circuit/library/`. May complicate Aqua development, requiring frequent multi-repo PRs and exactly synced releases. 1. Inside Aqua, with Qiskit-wide utility, no Aqua concepts 1. Pros - Can be placed in a `qiskit/circuit_library` directory alongside `qiskit/aqua`, giving clear delineation as an important library of circuits. 1. Cons - Users may not expect to find this in Aqua, and distinction between "complicated Terra gate" (e.g. MCT) and "simple library circuit" would make keeping these so far apart strange. 1. In its own repo 1. Pros - Clear importance and delineation. 1. Cons - Another repo. #### Options for Circuit Family Organization The circuit library is still a work in progress. 
In each of the below options, we can make all of the circuits importable from the base init, allowing us to iterate on the directory organization without breaking changes to circuit usage. This way, we can ensure that the circuits are in the correct location in Qiskit for Aqua to begin using them, rather than wait for the library to be complete, and then execute a breaking change to merge in Aqua's circuits. 1. **Organize By Circuit Purposes** 1. Data Preparation Circuits 1. Data Feature Maps 1. Probability Distributions 1. NLocal Circuits (name pending - needs to reflect purpose and availability of Optimization-specific features and properties, such as optimization_bounds and auto-parameterization). 1. TwoLocalCircuit 1. NLocalCircuit 1. Ry, RyRz, SwapRz 1. Arithmetic Circuits 1. Adders 1. Reciprocals 1. MCTs 1. Hamming weight 1. Basis Change Circuits 1. QFTs 1. QFT Placeholder/Base 1. QFT circuits 1. QWT Circuits 1. DCT Circuits 1. Pauli Basis Change 1. Oracle Circuits 1. Truth table 1. Logical expression 1. Phase oracle 1. Permutation oracle 1. Function Testing Circuits 1. Fourier Checking 1. Hidden shift with Bent Boolean functions 1. Ideal Hidden Linear Function circuits 1. Benchmarking 1. Near-Clifford / graph states 1. Random uniform 1. Quantum Volume 1. CNOT Dihedral (from Ignis) 1. **Organize By Circuit Form Factors** - Organization followed by internal quantum-circuits repo. Methodology is organization by the subcircuits and organization of the circuits themselves. 1. Random uniform 1. NLocal Circuits 1. NLocal, TwoLocal, Ry, RyRz, SwapRz 1. Linear rotation 1. Near-Clifford / graph states 1. Pauli Basis Change 1. Quantum volume 1. Quantum Fourier transform 1. Ideal HLF circuits 1. Hamming weight 1. Hidden shift with bent Boolean functions 1. Multiply-controlled NOT gate 1. IQP circuits 1. Fourier checking 1. Unresolved - It's unclear into which families the following circuits fall in the above grouping: 1. Artimetic 1. QFT vs. CNOT based adders 1. 
Cutoff vs. long-division based reciprocals 1. Oracle circuits 1. Data Feature Maps 1. Broader than forrelation by accepting different paulis to evolve, rather than just Z^n, but also each only half of a forrelation circuit 1. Can be other classes of hard circuits, not only forrelation-based 1. **Some Purpose, Some Complexity Families** - Allow both circuit purpose families and circuit form-factor families, allowing for custom enhancements or functionality in either family type. Circuits can act as placeholders (e.g. permutation) to be filled in by a choice of several synthesis implementations later. Circuits can also import circuits from other families so both practical and theoretical expectations are met without code duplication. 1. Data Preparation Circuits 1. Data Feature Maps 1. Probability Distributions 1. Arithmetic Circuits 1. Basis Change Circuits 1. Quantum Fourier Transform Circuits 1. Oracle Circuits 1. N Local Circuits 1. NLocal, TwoLocal, Ry, RyRz, SwapRz 1. Near-Clifford / Graph State Circuits 1. Quantum Volume Circuits 1. Ideal Hidden Linear Function Circuits 1. Hamming Weight Circuits 1. Hidden Shift with Bent Boolean Function Circuits 1. Multiply-controlled NOT Gate Circuits 1. IQP Circuits 1. Fourier Checking Circuits 1. **Two Subdirectories** - One corresponding to circuit purpose families, one corresponding to circuit complexity families. All circuits can be imported from `qiskit.circuit.library` so organization is aesthetic. 1. Circuit Purpose Families 1. Data Preparation Circuits 1. Data feature maps 1. probability_distributions 1. NLocal Circuits 1. TwoLocalCircuit, NLocalCircuit, Ry, RyRz, SwapRz 1. Arithmetic Circuits 1. Basis Change Circuits 1. QFT, QWT, DCT 1. Pauli Basis Change 1. Oracle Circuits - Truth table, Logical expression, Phase oracle, Permutation oracle 1. Circuit Complexity Families 1. Random uniform 1. Hardware efficient 1. Near-Clifford / graph states 1. Quantum volume 1. Quantum Fourier transform 1. Ideal HLF circuits 1. 
Hamming weight 1. Hidden shift with bent Boolean functions 1. Multiply-controlled NOT gate 1. IQP circuits 1. Fourier checking _Additional Proposal: HardwareEfficient base class - abstract `target_backend` setter (QFTs, adders, NLocal, etc. can choose how to interpret what to do with backend information and provide specially tailored circuits for hardware which are not available by transpilation alone.)_ ##### Organization Recommendation: Option 1 or 3 Option 1 or 3 introduces the most broadly useful organization to both the algorithms and complexity theory users, attempting to align with both of their expectations for circuit placement. They provide the greatest chance of mutual exclusivity in families, allowing families to be grouped in whichever way most naturally delineates them from the other circuit families based on industry conventions (rather than attempt at some form of objective delineation, which is unlikely to be future-proof). Minimizing ambiguities is a worthwhile long-term investment when this library could reasonably grow to hundreds of classes. We recommend beginning with Option 1 _quietly_ to be able to migrate the circuits out of Aqua, and reorganize within the library as it is formally built out and new circuit families are added. This is functionally identical to Option 3, as the form-factor circuit families are not yet ready to be pushed to the library, and will need to be merged as a separate step. This will also allow for more time and deliberation about this complex organization question. Allowing all circuits to be imported from the base circuit library directory prevents the impending reorganizations from introducing breaking changes. 
### Demo 2 - Powerful New QuantumCircuit Objects ``` from chemistry.code.molecule import Molecule from qiskit.chemistry.components.initial_states import HartreeFock from qiskit.aqua.algorithms import VQE # from qiskit.circuit_.library import RyRz from qiskit import BasicAer qasm = BasicAer.get_backend('qasm_simulator') hh = Molecule(geometry=[['H', [0., 0., 1.]], ['H', [0., 0.45, 1.]], ]) hamiltonian = hh.get_qubitop_hamiltonian() molecular_wf = HartreeFock(hh) + RyRz(depth=3, entanglement='linear') gse = VQE(wf_ansatz=molecular_wf, optimizer=SLSQP, backend=qasm).compute_minimum_eigenvalue(hamiltonian) print(gse) molecular_qf.target_backend = IBMQ.get_backend('ibmq_valencia') ``` ## 3. QuantumCircuit Usability Improvements Aqua's circuit_constructors have accumulated many powerful features not present in QuantumCircuit. No changes are strictly needed to QuantumCircuit to support the above proposals, but we can promote some of these improvements up to QuantumCircuit base to make these features broadly available. * Suggested for immediate promotion: * Mutable qubit number (delete qubits, or extend circuit) for anonymous register circuits * `.parameters` returns a list instead of a set, as parameter randomization is inconvenient * Further opportunities for radical circuit control * Lazy parameterization - When no parameter is specified in a parameterized standard gate, create a new one for the user. We can do this lazily, and only create the full list when `.parameters` is called, in which case the list is "locked in." * Lazy broadcasting - Similar to the NLocal Circuits. Allow the user to specify groups of qubits to which to apply a gate in circuit construction, but only construct these duplicates when the circuit data is actually needed. Allow users to manipulate these gate applications. 
What we'd need to do to implement these two
github_jupyter
# TensorFlow Visual Recognition Sample Application Part 1 ## Define the model metadata ``` import tensorflow as tf import requests models = { "mobilenet": { "base_url":"https://github.com/DTAIEB/Thoughtful-Data-Science/raw/master/chapter%206/Visual%20Recognition/mobilenet_v1_0.50_224", "model_file_url": "frozen_graph.pb", "label_file": "labels.txt", "output_layer": "MobilenetV1/Predictions/Softmax" } } # helper method for reading attributes from the model metadata def get_model_attribute(model, key, default_value = None): if key not in model: if default_value is None: raise Exception("Require model attribute {} not found".format(key)) return default_value return model[key] ``` ## Helper methods for loading the graph and labels for a given model ``` # Helper method for resolving url relative to the selected model def get_url(model, path): return model["base_url"] + "/" + path # Download the serialized model and create a TensorFlow graph def load_graph(model): graph = tf.Graph() graph_def = tf.GraphDef() graph_def.ParseFromString( requests.get( get_url( model, model["model_file_url"] ) ).content ) with graph.as_default(): tf.import_graph_def(graph_def) return graph # Load the labels def load_labels(model, as_json = False): labels = [line.rstrip() \ for line in requests.get( get_url( model, model["label_file"] ) ).text.split("\n") \ if line != ""] if as_json: return [{"index": item.split(":")[0], "label" : item.split(":")[1]} for item in labels] return labels ``` ## Use BeautifulSoup to scrape the images from a given url ``` from bs4 import BeautifulSoup as BS import re # return an array of all the images scraped from an html page def get_image_urls(url): # Instantiate a BeautifulSoup parser soup = BS(requests.get(url).text, "html.parser") # Local helper method for extracting url def extract_url(val): m = re.match(r"url\((.*)\)", val) val = m.group(1) if m is not None else val return "http:" + val if val.startswith("//") else val # List comprehension that look for 
<img> elements and background-image styles
candidate answers ``` import numpy as np # classify an image given its url def score_image(graph, model, url): # Get the input and output layer from the model input_layer = get_model_attribute(model, "input_layer", "input") output_layer = get_model_attribute(model, "output_layer") # Download the image and build a tensor from its data t = read_tensor_from_image_file(model, download_image(url)) # Retrieve the tensors corresponding to the input and output layers input_tensor = graph.get_tensor_by_name("import/" + input_layer + ":0"); output_tensor = graph.get_tensor_by_name("import/" + output_layer + ":0"); with tf.Session(graph=graph) as sess: # Execute the output, overriding the input tensor with the one corresponding # to the image in the feed_dict argument results = sess.run(output_tensor, {input_tensor: t}) results = np.squeeze(results) # select the top 5 candidate and match them to the labels top_k = results.argsort()[-5:][::-1] labels = load_labels(model) return [(labels[i].split(":")[1], results[i]) for i in top_k] ``` ## Test the model using a Flickr page ``` model = models['mobilenet'] graph = load_graph(model) image_urls = get_image_urls("https://www.flickr.com/search/?text=cats") for url in image_urls: results = score_image(graph, model, url) print("Results for {}: \n\t{}".format(url, results)) ```
github_jupyter
# k-NN movie reccomendation | User\Film | Movie A | Movie B | Movie C | ... | Movie # | |---------------------------------------------------------| | **User A**| 3 | 4 | 0 | ... | 5 | | **User B**| 0 | 3 | 2 | ... | 0 | | **User C**| 4 | 1 | 3 | ... | 4 | | **User D**| 5 | 3 | 2 | ... | 3 | | ... | ... | ... | ... | ... | ... | | **User #**| 2 | 1 | 1 | ... | 4 | Task: For a new user find k similar users based on movie rating and recommend few new, previously unseen, movies to the new user. Use mean rating of k users to find which one to recommend. Use cosine similarity as distance function. User didnt't see a movie if he didn't rate the movie. ``` # Import necessary libraries import tensorflow as tf import numpy as np # Define paramaters set_size = 1000 # Number of users in dataset n_features = 300 # Number of movies in dataset K = 3 # Number of similary users n_movies = 6 # Number of movies to reccomend # Generate dummy data data = np.array(np.random.randint(0, 6, size=(set_size, n_features)), dtype=np.float32) new_user = np.array(np.random.randint(0, 6, size=(1, n_features)), dtype=np.float32) # Find the number of movies that user did not rate not_rated = np.count_nonzero(new_user == 0) # Case in which the new user rated all movies in our dataset if not_rated == 0: print('Regenerate new user') # Case in which we try to recommend more movies than user didn't see if not_rated < n_movies: print('Regenerate new user') # Print few examples # print(data[:3]) # print(new_user) # Input train vector X1 = tf.placeholder(dtype=tf.float32, shape=[None, n_features], name="X1") # Input test vector X2 = tf.placeholder(dtype=tf.float32, shape=[1, n_features], name="X2") # Implement finding the k nearest users ``` # Locally weighted regression (LOWESS) ``` # Import necessary libraries import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # Load data as numpy array x, y = 
np.loadtxt('../../data/02_LinearRegression/polynomial.csv', delimiter=',', unpack=True) m = x.shape[0] x = (x - np.mean(x, axis=0)) / np.std(x, axis=0) y = (y - np.mean(y)) / np.std(y) # Graphical preview %matplotlib inline fig, ax = plt.subplots() ax.set_xlabel('X Labe') ax.set_ylabel('Y Label') ax.scatter(x, y, edgecolors='k', label='Data') ax.grid(True, color='gray', linestyle='dashed') X = tf.placeholder(tf.float32, name='X') Y = tf.placeholder(tf.float32, name='Y') w = tf.Variable(0.0, name='weights') b = tf.Variable(0.0, name='bias') # TODO: create model, cost function and optimization with tf.Session() as sess: # Initialize the necessary variables, in this case, w and b sess.run(tf.global_variables_initializer()) # TODO: Implement optimization # Output the values of w and b w1, b1 = sess.run([w, b]) print(sess.run(t_w, feed_dict={X: 1.4})) print('W: %f, b: %f' % (w1, b1)) print('Cost: %f' % sess.run(cost, feed_dict={X: x, Y: y})) # Append hypothesis that we found on the plot x1 = np.linspace(-1.0, 0.0, 50) ax.plot(x1, x1 * w1 + b1, color='r', label='Predicted') ax.plot(x1, np.exp(-(x1 - point_x) ** 2 / (2 * 0.15 ** 2)), color='g', label='Weight function') ax.legend() fig ```
github_jupyter
## Quantum Fourier Transform ``` import numpy as np from numpy import pi from qiskit import QuantumCircuit, transpile, assemble, Aer, IBMQ from qiskit.providers.ibmq import least_busy from qiskit.tools.monitor import job_monitor from qiskit.visualization import plot_histogram, plot_bloch_multivector # doing it for a 3 qubit case qc = QuantumCircuit(3) qc.h(2) qc.draw('mpl') # we want to turn this to extra quarter if qubit 1 is in |1> # apply the CROT from qubit 1 to to qubit 2 qc.cp(pi/2,1,2) qc.draw('mpl') # we want an another eighsths turn if the least significant bit # 0 has the value |1> # apply CROT from qubit 2 to qubit 1 qc.cp(pi/4,0,2) qc.draw('mpl') # doing the same for the rest two qubits qc.h(1) qc.cp(pi/2,0,1) qc.h(0) qc.draw('mpl') # and then swap the 0 and 2 qubit to complete the QFT qc.swap(0,2) qc.draw('mpl') ``` This is one way to create the QFT circuit, but we can also make a function to make that. ``` def qft_rotations(circuit,n): if n == 0: return circuit n -= 1 circuit.h(0) for qubit in range(n): circuit.cp(pi/2**(n-qubit), qubit,n) # so qc = QuantumCircuit(4) qft_rotations(qc,4) qc.draw('mpl') # how scaling works from qiskit_textbook.widgets import scalable_circuit scalable_circuit(qft_rotations) # we can modify the prev function def qft_rotations(circuit,n): if n == 0: return circuit n -= 1 circuit.h(n) for qubit in range(n): circuit.cp(pi/2**(n-qubit), qubit,n) qft_rotations(circuit,n) qc = QuantumCircuit(4) qft_rotations(qc,4) qc.draw('mpl') scalable_circuit(qft_rotations) # now adding the swap gates def swap_registeres(circuit, n): for qubit in range(n//2): circuit.swap(qubit, n-qubit-1) return circuit def qft(circuit,n): qft_rotations(circuit,n) swap_registeres(circuit,n) return circuit qc = QuantumCircuit(8) qft(qc,8) qc.draw('mpl') scalable_circuit(qft) ``` ## How the Circuit Works? 
``` bin(7) # encode this qc = QuantumCircuit(3) for i in range(3): qc.x(i) qc.draw('mpl') # display in the aer simulator sim = Aer.get_backend("aer_simulator") qc_init = qc.copy() qc_init.save_statevector() statevector = sim.run(qc_init).result().get_statevector() plot_bloch_multivector(statevector) # now call the qft function qft(qc,3) qc.draw('mpl') qc.save_statevector() statevector = sim.run(qc).result().get_statevector() plot_bloch_multivector(statevector) ``` ### Running it on Real Quantum Device ``` def inverse_qft(circuit,n): qft_circ = qft(QuantumCircuit(n), n) invqft_circuit = qft_circ.inverse() # add it to first n qubits circuit.append(invqft_circuit, circuit.qubits[:n]) return circuit.decompose() # now do it fo the 7 nqubits = 3 number = 7 qc = QuantumCircuit(nqubits) for qubit in range(nqubits): qc.h(qubit) qc.p(number*pi/4,0) qc.p(number*pi/2,1) qc.p(number*pi,2) qc.draw('mpl') qc_init = qc.copy() qc_init.save_statevector() sim = Aer.get_backend("aer_simulator") statevector = sim.run(qc_init).result().get_statevector() plot_bloch_multivector(statevector) # now the inverse QFT qc = inverse_qft(qc, nqubits) qc.measure_all() qc.draw('mpl') # Load our saved IBMQ accounts and get the least busy backend device with less than or equal to nqubits IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= nqubits and not x.configuration().simulator and x.status().operational==True)) print("least busy backend: ", backend) shots = 2048 transpiled_qc = transpile(qc, backend, optimization_level=3) job = backend.run(transpiled_qc, shots=shots) job_monitor(job) counts = job.result().get_counts() plot_histogram(counts) ```
github_jupyter
# Expression trees in PyBaMM The basic data structure that PyBaMM uses to express models is an expression tree. This data structure encodes a tree representation of a given equation. The expression tree is used to encode the equations of both the original symbolic model, and the discretised equations of that model. Once discretised, the model equations are then passed to the solver, which must then evaluate the discretised expression trees in order to perform the time-stepping. The expression tree must therefore satisfy three requirements: 1. To encode the model equations, it must be able to encode an arbitrary equation, including unary and binary operators such as `*`, `-`, spatial gradients or divergence, symbolic parameters, scalar, matrices and vectors. 2. To perform the time-stepping, it must be able to be evaluated, given the current state vector $\mathbf{y}$ and the current time $t$ 3. For solvers that require it, its gradient with respect to a given variable must be able to be evaluated (once again given $\mathbf{y}$ and $t$) As an initial example, the code below shows how to construct an expression tree of the equation $2y(1 - y) + t$. We use the `pybamm.StateVector` to represent $\mathbf{y}$, which in this case will be a vector of size 1. The time variable $t$ is already provided by PyBaMM and is of class `pybamm.Time`. ``` %pip install pybamm -q # install PyBaMM if it is not installed import pybamm import numpy as np y = pybamm.StateVector(slice(0,1)) t = pybamm.t equation = 2*y * (1 - y) + t equation.visualise('expression_tree1.png') ``` ![](expression_tree1.png) Once the equation is constructed, we can evaluate it at a given $t=1$ and $\mathbf{y}=\begin{pmatrix} 2 \end{pmatrix}$. 
``` equation.evaluate(1, np.array([2])) ``` We can also calculate the expression tree representing the gradient of the equation with respect to $t$, ``` diff_wrt_equation = equation.diff(t) diff_wrt_equation.visualise('expression_tree2.png') ``` ![](expression_tree2.png) ...and evaluate this expression, ``` diff_wrt_equation.evaluate(t=1, y=np.array([2]), y_dot=np.array([2])) ``` ## The PyBaMM Pipeline Proposing, parameter setting and discretising a model in PyBaMM is a pipeline process, consisting of the following steps: 1. The model is proposed, consisting of equations representing the right-hand-side of an ordinary differential equation (ODE), and/or algebraic equations for a differential algebraic equation (DAE), and also associated boundary condition equations 2. The parameters present in the model are replaced by actual scalar values from a parameter file, using the [`pybamm.ParamterValues`](https://pybamm.readthedocs.io/en/latest/source/parameters/parameter_values.html) class 3. The equations in the model are discretised onto a mesh, any spatial gradients are replaced with linear algebra expressions and the variables of the model are replaced with state vector slices. This is done using the [`pybamm.Discretisation`](https://pybamm.readthedocs.io/en/latest/source/spatial_methods/discretisation.html) class. ## Stage 1 - Symbolic Expression Trees At each stage, the expression tree consists of certain types of nodes. 
In the first stage, the model is first proposed using [`pybamm.Parameter`](https://pybamm.readthedocs.io/en/latest/source/expression_tree/parameter.html), [`pybamm.Variable`](https://pybamm.readthedocs.io/en/latest/source/expression_tree/variable.html), and other [unary](https://pybamm.readthedocs.io/en/latest/source/expression_tree/unary_operator.html) and [binary](https://pybamm.readthedocs.io/en/latest/source/expression_tree/binary_operator.html) operators (which also includes spatial operators such as [`pybamm.Gradient`](https://pybamm.readthedocs.io/en/latest/source/expression_tree/unary_operator.html#pybamm.Gradient) and [`pybamm.Divergence`](https://pybamm.readthedocs.io/en/latest/source/expression_tree/unary_operator.html#pybamm.Divergence)). For example, the right hand side of the equation $$\frac{d c}{dt} = D \nabla \cdot \nabla c$$ can be constructed as an expression tree like so: ``` D = pybamm.Parameter('D') c = pybamm.Variable('c', domain=['negative electrode']) dcdt = D * pybamm.div(pybamm.grad(c)) dcdt.visualise('expression_tree3.png') ``` ![](expression_tree3.png) ## Stage 2 - Setting parameters In the second stage, the `pybamm.ParameterValues` class is used to replace all the parameter nodes with scalar values, according to an input parameter file. For example, we'll use a this class to set $D = 2$ ``` parameter_values = pybamm.ParameterValues({'D': 2}) dcdt = parameter_values.process_symbol(dcdt) dcdt.visualise('expression_tree4.png') ``` ![](expression_tree4.png) ## Stage 3 - Linear Algebra Expression Trees The third and final stage uses the `pybamm.Discretisation` class to discretise the spatial gradients and variables over a given mesh. After this stage the expression tree will encode a linear algebra expression that can be evaluated given the state vector $\mathbf{y}$ and $t$. **Note:** for demonstration purposes, we use a dummy discretisation below. 
For a more complete description of the `pybamm.Discretisation` class, see the example notebook [here](https://github.com/pybamm-team/PyBaMM/blob/develop/examples/notebooks/spatial_methods/finite-volumes.ipynb). ``` from tests import get_discretisation_for_testing disc = get_discretisation_for_testing() disc.y_slices = {c.id: [slice(0, 40)]} dcdt = disc.process_symbol(dcdt) dcdt.visualise('expression_tree5.png') ``` ![](expression_tree5.png) After the third stage, our expression tree is now able to be evaluated by one of the solver classes. Note that we have used a single equation above to illustrate the different types of expression trees in PyBaMM, but any given models will consist of many RHS or algebraic equations, along with boundary conditions. See [here](https://github.com/pybamm-team/PyBaMM/tree/develop/examples/notebooks/Creating%20Models) for more details of PyBaMM models. ## References The relevant papers for this notebook are: ``` pybamm.print_citations() ```
github_jupyter
# ์‘์šฉํ†ต๊ณ„ํ•™ (11์ฃผ์ฐจ) 5์›” 12์ผ > GLM, ์ผ๋ฐ˜ํ™”์„ ํ˜•๋ชจํ˜• - toc:true - branch: master - badges: true - comments: false - author: ์ตœ์„œ์—ฐ - categories: [Applied Statistics, GLM, ์ผ๋ฐ˜ํ™” ์„ ํ˜• ๋ชจํ˜•] ``` #hide options(jupyter.plot_scale=4) options(repr.plot.width=8,repr.plot.height=6,repr.plot.res=300) #options(jupyter.rich_display=FALSE) #options(max.print=1000) ``` ๋‹ค์ค‘๊ณต์„ ์„ฑ์ด ์กด์žฌํ•˜๋Š” ์ƒํ™ฉ์„ ๊ฐ€์ •ํ•˜๊ณ  ๋‹ค์ค‘๊ณต์„ ์„ฑ์„ ์–ด๋А ์ •๋„ ์ œ๊ฑฐํ•œ ๋ชจํ˜• (M1)๊ณผ ๋‹ค์ค‘๊ณต์„ ์„ฑ์ด ๋‚ด์žฌ๋˜์–ด ์žˆ๋Š” ๋ชจํ˜• (M2) ์„ ๊ณ ๋ คํ•˜์—ฌ ๋‘ ๋ชจํ˜•์˜ ์˜ˆ์ธก๋ ฅ์„ ๋ชจ์˜์‹คํ—˜์„ ํ†ตํ•ด ๋น„๊ตํ•˜์—ฌ๋ผ, ๋‹จ, ์‹คํ—˜์€ ์—ฌ๋Ÿฌ ๋ฒˆ ๋ฐ˜๋ณตํ•˜์—ฌ ํ‰๊ท ์ ์ธ ๊ฒฐ๊ณผ๋ฅผ reportํ•˜๋˜ ์„ค๋ช…๋ณ€์ˆ˜์˜ ๊ฐœ์ˆ˜๋Š” 3๊ฐœ ์ด์ƒ์œผ๋กœ ์„ค์ •ํ•˜์—ฌ๋ผ. ์ด๋ฏธ ์กด์žฌํ•˜๋Š” ๋ฌธ์„œ๋“ค์„ ์ฐธ๊ณ ํ•˜๊ฑฐ๋‚˜ ์žฌํ˜„ํ•ด๋„ ๋ฌด๋ฐฉํ•จ. (์ฒจ๋ถ€๋œ ๋ฌธ์„œ ์ฐธ๊ณ ) # ์ผ๋ฐ˜ํ™”์„ ํ˜•๋ชจํ˜• : Generalized linear model I ์ด ๊ฐ•์˜๋…ธํŠธ๋Š” `Extending the linear model with R` (2016), 2nd edition, Julian J. Faraway, Chapman and Hall. ์„ ๋ฐ”ํƒ•์œผ๋กœ ์ž‘์„ฑ๋˜์—ˆ์Šต๋‹ˆ๋‹ค. ## 1. Binary response ### 1.1 Heart Disease Example ์ƒŒํ”„๋ž€์‹œ์Šค์ฝ”์— ์‚ฌ๋Š” 39์„ธ์—์„œ 59์„ธ ์‚ฌ์ด์˜ ๊ฑด๊ฐ•ํ•œ ์„ฑ์ธ๋‚จ์„ฑ 3154๋ช…์— ๋Œ€ํ•˜์—ฌ 8๋…„ ๋ฐ˜ ๋™์•ˆ ๊ด€์ฐฐํ•˜์—ฌ ์‹ฌ์žฅ๊ด€์ƒ๋™๋งฅ์งˆํ™˜์ด ๋‚˜ํƒ€๋‚ฌ๋Š”์ง€ ์—ฌ๋ถ€๋ฅผ ๊ด€์ฐฐํ•˜์˜€๋‹ค. ์—ฌ๊ธฐ์„œ target ๋ณ€์ˆ˜ `chd` ; coronary heat disease developed ๋Š” factor๋ณ€์ˆ˜๋กœ ์ทจ๊ธ‰ํ•  ์ˆ˜ ์žˆ๊ณ  `no` `yes` ๋‘ ์ˆ˜์ค€์„ ๊ฐ€์ง„๋‹ค. ๋‹ค๋ฅธ ์š”์ธ๋“ค์ด ์งˆํ™˜์—ฌ๋ถ€์™€ ์–ด๋–ป๊ฒŒ ์—ฐ๊ด€๋˜๋Š”์ง€๋ฅผ ์•Œ๊ณ ์ž ํ•œ๋‹ค๋ฉด target ๋ณ€์ˆ˜๊ฐ€ ์—ฐ์†์ด ์•„๋‹Œ ์ด์ง„(binary)ํ˜•์ด๋ฏ€๋กœ ์ง€๊ธˆ๊นŒ์ง€ ๋‹ค๋ฃจ์—ˆ๋˜ ์ „ํ˜•์ ์ธ ์„ ํ˜•๋ชจํ˜•์€ ์ ํ•ฉํ•˜์ง€ ์•Š๋‹ค. - no: 0, yes: 1 $$chdโˆผheight+cigarette$$ - ๋‹ด๋ฐฐ๋Š” ํ•˜๋ฃจ์— ๋ช‡ ๊ฐœํ”ผ ํ”ผ์› ๋Š”์ง€ - $y = \beta_0 + \beta_1 h + \beta_2 c + \epsilon$ - $\beta_0, \beta_1, \beta_2 \in R$ ๋ช‡ ๊ฐ€์ง€ ๊ทธ๋ฆผ์„ ํ†ตํ•ด ์ž๋ฃŒ๋ฅผ ์‚ดํŽด๋ณด์ž. - ์—ฐ์†ํ˜•์—์„œ๋Š” ์‚ฐ์ ๋„, ์ด์ง„ํ˜•์—์„œ๋Š” ๋ฐ•์Šคํ”Œ๋ž?! 
``` data(wcgs, package="faraway") summary(wcgs[,c("chd","height","cigs")]) plot(height ~ chd, wcgs) ``` - ํ‚ค๊ฐ€ ์ž‘์œผ๋ฉด ์งˆ๋ณ‘์ด ๊ฑธ๋ฆฌ๋Š” ๊ฑด๊ฐ€? ๋ช…ํ™•ํ•˜์ง„ ์•Š๋‹ค. ``` wcgs$y <- ifelse(wcgs$chd == "no",0,1) plot(jitter(y,0.1) ~ jitter(height), wcgs, xlab="Height", ylab="Heart Disease", pch=".") library(ggplot2) ggplot(wcgs, aes(x=height, color=chd)) + geom_histogram(position="dodge", binwidth=1) ``` - no๊ฐ€ ๋šœ๋ ทํ•˜๊ฒŒ ๋งŽ์ด ๋ถ„ํฌํ•ด์žˆ๋Š” ๋ชจ์Šต ``` ggplot(wcgs, aes(x=cigs, color=chd)) + geom_histogram(position="dodge", binwidth=5, aes(y=..density..)) ``` - yes๊ฐ€ ๋†’์€ ๊ฒฝํ–ฅ ``` ggplot(wcgs, aes(x=height,y=cigs))+geom_point(alpha=0.2, position=position_jitter())+facet_grid(~ chd) ``` ํ‚ค ํ˜น์€ ํก์—ฐ๋Ÿ‰์ด ์งˆํ™˜์—ฌ๋ถ€์™€ ์—ฐ๊ด€์ด ์žˆ์–ด ๋ณด์ด๋Š”๊ฐ€? ๋งŒ์•ฝ ๊ทธ๋ ‡๋‹ค๋ฉด ์–ด๋–ป๊ฒŒ ๋ชจํ˜•ํ™” ํ•  ์ˆ˜ ์žˆ๊ฒ ๋Š”๊ฐ€? ### 1.2 Conditional mean regression ์„ ํ˜•๋ชจํ˜•์€ ๋ฐ˜์‘๋ณ€์ˆ˜ $Y$๋ฅผ ์„ค๋ช…๋ณ€์ˆ˜ $x_1,โ€ฆ,x_p$์˜ ์„ ํ˜•๊ฒนํ•ฉ $+ noise$ ๋กœ ํ‘œํ˜„ํ•œ ๊ฒƒ์ด๋‹ค. ์‚ฌ์‹ค `์„ ํ˜•`์ด๋ผ๋Š” ์˜๋ฏธ๋Š” ์˜ค์ฐจํ•ญ์„ ์ œ์™ธํ•œ ๋ถ€๋ถ„์„ ์„ ํ˜•์œผ๋กœ ๋ชจํ˜•ํ™”ํ•œ๋‹ค๋Š” ์˜๋ฏธ๋กœ ๋ณผ ์ˆ˜ ์žˆ๋‹ค. - ์„ค๋ช…๋ณ€์ˆ˜๋ฅผ linear๋กœ ๋ชจ๋ธ๋ง $$E(Y|x_1,โ€ฆ,x_p)=ฮฒ_0+ฮฒ_1x_1+โ€ฆ+ฮฒ_px_p$$ - $Y = f(X_1, \dots, X_p) + \epsilon$ - $E(\epsilon|x_1,\dots,x_p) = 0$ - $Y = 0 or 1$ - $E(Y) = p \times 1 + (1-p) \times 0 = p = P_r(Y=1)$ ์ฆ‰, ์„ ํ˜•๋ชจํ˜•์€ ์„ค๋ช…๋ณ€์ˆ˜๊ฐ€ ์ฃผ์–ด์กŒ์„ ๋•Œ ๋ฐ˜์‘๋ณ€์ˆ˜์˜ ์กฐ๊ฑด๋ถ€ ๊ธฐ๋Œ€๊ฐ’(ํ‰๊ท )์„ ์„ค๋ณ‘๋ณ€์ˆ˜๋“ค์˜ ์„ ํ˜•๊ฒฐํ•ฉ์œผ๋กœ ๋ชจํ˜•ํ™”ํ•˜๊ณ  ์žˆ๋Š” ๊ฒƒ์ด๋‹ค. ๋ฐ˜์‘๋ณ€์ˆ˜ $Y$๊ฐ€ $Binary$์ธ ๊ฒฝ์šฐ ์กฐ๊ฑด๋ถ€ ๊ธฐ๋Œ€๊ฐ’์€ ์กฐ๊ฑด๋ถ€ ํ™•๋ฅ ๊ณผ ๊ฐ™์œผ๋ฏ€๋กœ, $$E(Y|x_1,โ€ฆ,x_p)=P(Y=1|x_1,โ€ฆ,x_p)=ฮฒ_0+ฮฒ_1x_1+โ€ฆ+ฮฒ_px_p$$ - R(์‹ค์ˆ˜ ์ „์ฒด ๊ฐ€๋Šฅ) ๊ณผ ๊ฐ™์€ ๋ชจํ˜•ํ™”๊ฐ€ ๊ฐ€๋Šฅํ•˜๋‹ค. ํ•˜์ง€๋งŒ ์œ„์™€ ๊ฐ™์€ ๋ชจํ˜•์€ ๋ชจ์ˆ˜์ถ”์ •์— ๋”ฐ๋ผ ์ฃผ์–ด์ง€๋Š” ์กฐ๊ฑด๋ถ€ ํ™•๋ฅ ์˜ ์ถ”์ •์น˜๊ฐ€ $proper range( [0,1] )$๋ฅผ ๋ณด์žฅํ•ด์ฃผ์ง€ ๋ชปํ•œ๋‹ค. ์ด๋Š” ์ž๋ฃŒ์˜ ๋ฒ”์œ„๋ฅผ ๋ฒ—์–ด๋‚˜๋Š” ๋ถ€๋ถ„์—์„œ ํŠนํžˆ ๋ฐœ์ƒํ•  ์ˆ˜ ์žˆ๋Š” ๋ฌธ์ œ์ด๋‹ค. 
- ์Œ์ˆ˜์˜ˆ์ธก์ด๋‚˜ ๋“œ๋ฌธ๋“œ๋ฌธ ์žˆ๋Š” ๋ฐ์ดํ„ฐ๋กœ ๋ถˆ์•ˆ์ •ํ•œ ์˜ˆ์ธก ์ด๋ฅผ ํ•ด๊ฒฐํ•  ์ˆ˜ ์žˆ๋Š” ๊ฐ„๋‹จํ•œ ํ•ด๊ฒฐ์ฑ… ์ค‘ ํ•˜๋‚˜๋Š” $proper range$๋ฅผ ๋ณด์žฅ ํ•  ์ˆ˜ ์žˆ๋Š” ๋ณ€ํ™˜ํ•จ์ˆ˜ $g$๋ฅผ ๊ณ ๋ คํ•˜๋Š” ๊ฒƒ์ด๋‹ค. $$g(P(Y=1|x_1,โ€ฆ,x_p))=ฮฒ_0+ฮฒ_1x_1+โ€ฆ+ฮฒ_px_p$$ - GLM - $X^{\top} \beta$ ๋ฐ˜์‘๋ณ€์ˆ˜๊ฐ€ $Binary$์ธ ๊ฒฝ์šฐ ์ด๋Ÿฐ ๋ณ€ํ™˜ ํ•จ์ˆ˜๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์€ ์ •์˜์—ญ๊ณผ ๊ณต์—ญ์„ ๊ฐ€์ง€๋Š” ๊ฒƒ์œผ๋กœ ์„ ํƒํ•œ๋‹ค. $$g:(0,1)โ†’R$$ ๋ณ€ํ™˜ํ•จ์ˆ˜๋Š” ๋‹ค์–‘ํ•˜๊ฒŒ ์„ ํƒ๋  ์ˆ˜ ์žˆ์œผ๋ฉฐ, ๋ฐ˜์‘๋ณ€์ˆ˜์˜ ํŠน์„ฑ์— ๋”ฐ๋ผ ๋‹ค๋ฅธ ๋ณ€ํ™˜ํ•จ์ˆ˜๋ฅผ ๊ณ ๋ คํ•˜๋Š” ๊ฒƒ์ด ์ž์—ฐ์Šค๋Ÿฌ์šธ ๊ฒƒ์ด๋‹ค. ### 1.3 ๋กœ์ง€์Šคํ‹ฑ ํšŒ๊ท€ : Logistic regression ๋กœ์ง€์Šคํ‹ฑ ํšŒ๊ท€๋ชจํ˜•์€ $Binary response$์ธ ๊ฒฝ์šฐ ๊ฐ€์žฅ ํ”ํ•˜๊ฒŒ ์‚ฌ์šฉ๋˜๋Š” ๋ชจํ˜•์ด๋‹ค. $Y_iโˆˆ \{ 0 ,1 \} , i=1,โ€ฆ,n$์ด๊ณ  $P(Y_i=1)=p_iโˆˆ(0,1)$์ด๋ผ ํ•˜์ž. ๊ทธ๋ฆฌ๊ณ  ๋‹ค์Œ๊ณผ ๊ฐ™์€ ๋ชจํ˜•์„ ์ƒ๊ฐํ•˜์ž. $$ฮท_i=g(p_i)=ฮฒ_0+ฮฒ_1x_{i1}+โ‹ฏ+ฮฒ_qx_{qi}$$ = $X_i^{\top} \beta$ ์—ฌ๊ธฐ์„œ $ฮท_i$๋ฅผ $linear predictor$๋ผ ํ•œ๋‹ค. ์ด ๋ชจํ˜•์—์„œ๋Š” ์„ค๋ช…๋ณ€์ˆ˜์˜ ์„ ํ˜•๊ฒฐํ•ฉ์ด $g$๋ฅผ ํ†ตํ•ด์„œ $p_i$์™€ ์—ฐ๊ฒฐ๋œ๋‹ค. ์ด ๋•Œ ๋ณ€ํ™˜ํ•จ์ˆ˜ $g$๋ฅผ ์—ฐ๊ฒฐํ•จ์ˆ˜($link function$)์ด๋ผ ํ•˜๊ณ  ์ด๋Ÿฐ ํ˜•ํƒœ์˜ ๋ชจํ˜•ํ™”๋ฅผ ํ†ต์นญํ•˜์—ฌ ์ผ๋ฐ˜ํ™”์„ ํ˜•๋ชจํ˜• `Generalized linear model (GLM)`์ด๋ผ ๋ถ€๋ฅธ๋‹ค. ์‚ฌ์‹ค ์•ž์„œ ๋ฐฐ์šด ์ „ํ†ต์ ์ธ ์„ ํ˜•๋ชจํ˜•์€ $g(x)=x$์ธ GLM์˜ ํŠน์ˆ˜ํ•œ ํ˜•ํƒœ๋กœ ๋ณผ ์ˆ˜ ์žˆ๋‹ค. ์•ž์„œ ์–ธ๊ธ‰ํ–ˆ๋“ฏ์ด $g$์— ๋Œ€ํ•œ ์„ ํƒ์€ ์ •๋‹ต์ด ์žˆ๋Š” ๊ฒƒ์€ ์•„๋‹ˆ๋‚˜ ๊ณ„์‚ฐ์ƒ์˜ ์ด์œ , ์ถ”์ •๋Ÿ‰์˜ ์„ฑ์งˆ๊ณผ ๊ด€๋ จ๋œ ์ด์œ , ๋ชจํ˜•์˜ ํ•ด์„๊ณผ ๊ด€๋ จ๋œ ์ด์œ , ๋ฐ์ดํ„ฐ์™€์˜ ์ ํ•ฉ์„ฑ๊ณผ ๊ด€๋ จ๋œ ์ด์œ  ๋“ฑ์œผ๋กœ ์ฃผ๋กœ ์“ฐ์—ฌ์ง€๋Š” ํ•จ์ˆ˜๋“ค์ด ์กด์žฌํ•œ๋‹ค. $Binary response$์˜ ๊ฒฝ์šฐ ๊ฐ€์žฅ ์ผ๋ฐ˜์ ์œผ๋กœ ์“ฐ์ด๋Š” ์—ฐ๊ฒฐํ•จ์ˆ˜๋Š” $logit$ ์—ฐ๊ฒฐํ•จ์ˆ˜์ด๋ฉฐ ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์ •์˜๋œ๋‹ค. - link function ์‚ฌ์šฉํ•˜๊ธฐ ๋•Œ๋ฌธ์— ์ถ”์ •๋Ÿ‰ ์„ฑ์งˆ๊ณผ ๊ด€๋ จ๋  ์ˆ˜ ์žˆ์ง€ - ์–ด๋–ค ์—ฐ๊ฒฐํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜๋Š”์ง€์— ๋”ฐ๋ผ ๋ชจํ˜•์ด ๋‹ฌ๋ผ์ง€๋‚Ÿ. 
- ๋” ์ข‹์€ ์ ํ•ฉ๋„๋ฅผ ๊ฐ€์ง„ link function์„ ์ฐพ์•„์•ผ ํ•จ. $$g(t)=logit(t)=log\frac{t}{1โˆ’t}$$ - $h: R \to (0,1)$ - $lim_{t \to 0} g(t) = log 0 = - \infty$ - $lim_{t \to 1} g(t) = log \infty = \infty$ ์ด ํ•จ์ˆ˜๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์ •์˜๋˜๋Š” $logistic$ ํ•จ์ˆ˜์˜ ์—ญํ•จ์ˆ˜๋กœ ์•Œ๋ ค์ ธ ์žˆ๋‹ค. $$g^{โˆ’1}(t)=h(t)=\frac{1}{1+exp(โˆ’t)}=\frac{exp(t)}{1+exp(t)}$$ $logit$ ์—ฐ๊ฒฐํ•จ์ˆ˜๋ฅผ ๊ณ ๋ คํ•˜๋Š” $GLM$์„ ๋กœ์ง€์Šคํ‹ฑ ๋ชจํ˜•์ด๋ผ ๋ถ€๋ฅด๊ณ  ๋‹ค์Œ๊ณผ ๊ฐ™์ด ํ‘œํ˜„๋œ๋‹ค. $$log\frac{p_i}{1โˆ’p_i}=ฮฒ_0+ฮฒ_1x_{i1}+โ‹ฏ+ฮฒ_qx_{qi}$$ - $p_i$ ์ž์ฒด์˜ ์›€์ง์ž„์ด ์•„๋‹ˆ๋ผ ์—ฐ๊ฒฐํ•จ์ˆ˜๋„ ํ•จ๊ป˜ ๊ณ ๋ คํ•ด์•ผํ•œ๋‹ค. ๋‹ฌ๋ผ์งˆ ์ˆ˜ ์žˆ์œผ๋‹ˆ๊นŒ - $x$๊ธฐ ์ฆ๊ฐ€ํ•˜๋ฉด $p$๋„ ์ฆ๊ฐ€ํ•˜๊ณ (positive), $x$๊ฐ€ ๊ฐ์†Œํ•˜๋ฉด $p$๋„ ๊ฐ์†Œํ• ๊ฑธ(negative)? ์œ„์—์„œ ์–‘๋ณ€์ด ๋ชจ๋‘ $R$์˜ $range$๋ฅผ ๊ฐ€์ง์„ ์•Œ ์ˆ˜ ์žˆ๋‹ค. - $\hat{p}_i = P(Y_i=1|x_{i1}, \dots, x_{qi} ) = \frac{exp(\hat{ฮท_i})}{1+exp(\hat{ฮท_i})}$ - $1-\hat{p}_i = \frac{1}{1+exp(\hat{ฮท_i})}$ ``` library(faraway) curve(ilogit(x),-6,6, xlab=expression(eta), ylab="p") ``` - linear๋กœ ํ•˜๋ฉด (0,1) ๋ฒ”์œ„ ๋ฒ—์–ด๋‚  ์ˆ˜ ์žˆ๋‹ค. $logit$ ํ•จ์ˆ˜์˜ ์—ญํ•จ์ˆ˜ ์ฆ‰, $logistic$ ํ•จ์ˆ˜๋Š” $S$์ž ํ˜•ํƒœ์˜ ๊ณก์„ ๋ชจ์–‘์„ ๋ ๋ฉฐ ์ค‘์‹ฌ๋ถ€๋ถ„์—์„œ๋Š” ์ง์„ ๊ณผ ์œ ์‚ฌํ•œ ํ˜•ํƒœ์ž„์„ ์•Œ ์ˆ˜ ์žˆ๋‹ค. ๋ชจํ˜•์ด ์ •์˜๋˜์—ˆ์œผ๋ฉด ๋‹ค์Œ์œผ๋กœ ๋ชจ์ˆ˜์˜ ์ถ”์ •๋Ÿ‰์„ ์ œ์‹œํ•˜์—ฌ์•ผ ํ•œ๋‹ค. ์„ ํ˜•๋ชจํ˜•์—์„œ ์‚ฌ์šฉํ•˜์˜€๋˜ ์ตœ์†Œ์ œ๊ณฑ๋ฒ•์˜ ์›๋ฆฌ๋ฅผ ์ฐจ์šฉํ•  ์ˆ˜ ์žˆ์„๊นŒ? ์ผ๋ฐ˜์ ์œผ๋กœ $GLM$์—์„œ๋Š” ์ตœ์†Œ์ œ๊ณฑ์˜ ์›๋ฆฌ๋ณด๋‹ค๋Š” ๋ฐ˜์‘๋ณ€์ˆ˜์˜ ๋ถ„ํฌํŠน์„ฑ์„ ์ด์šฉํ•  ์ˆ˜ ์žˆ๋Š” ์ตœ๋Œ€๊ฐ€๋Šฅ๋„์ถ”์ •๋ฒ•์„ ์‚ฌ์šฉํ•œ๋‹ค. 
$$l(ฮฒ)=โˆ‘_{i=1}^{n}[y_iฮท_iโˆ’log(1+exp^{ฮท_i})]$$ - $P(y_i = 1| x_{i1},\dots,x_{iq}) = p_i)$ - $y_{i|x_{i1},\dots,x_{iq}} \sim Bernoulli(p_i)$ - ํ™•๋ฅ ์งˆ๋Ÿ‰ํ•จ์ˆ˜$f(y_i) = p_i^{y_i}(1-p_i)^{1-y_i}, y_i = 0,1$ - $L = \Pi^{n}_{i=1} f(y_i) = \Pi^{n}_{i=1} p_{i}^{y_i}(1-p_i)^{1-y_i}$ - $l = \log L = \sum^{n}_{i=1} (y_i \log p_i + (1-y_i) \log(1-p_i))$ - $= \sum^{n}_{i=1}(y_i \log p_i - y_i \log (1-p_i) + \log (1-p_i))$ - $= \sum^{n}_{i=1} (y_i(log\frac{p_i}{1-p_i}) + \log (1-p_i))$ - $\log \frac{p_i}{1-p_i} = \beta_0 + \beta_1 x_{i1} + \dots + \beta_q x_{qi} = ฮท_i = X_{i}^{\top}\beta$ - $\_i = \frac{exp(\hat{ฮท_i})}{a+exp(\hat{ฮท_i})}$ - $\sum^{n}_{i=1} [y_i ฮท_i - \log (1+exp(\hat{ฮท_i}))]$ - $argmax_{\beta = \beta_0,\dots, \beta_q} l(\beta) = MLE$ - $y_i = ฮฒ_0+ฮฒ_1x_{i1}+โ‹ฏ+ฮฒ_qx_{qi} + \epsilon$ - $argmin_{\beta_0,\dots,\beta_q}\sum(y_i - \hat{y})^2$ - ์ตœ์†Œ์ œ๊ณฑ๋ฒ• ์‚ฌ์šฉ? - $y_i$๋Š” 0 ๋˜๋Š” 1์˜ ๊ฐ’์ด ๋‚˜์˜ค๊ณ  - $\hat{y}$๋Š” 0์—์„œ 1์‚ฌ์ด์˜ ๊ฐ’์ด ๋‚˜์˜จ๋‹ค. - ์–ด๋–ป๊ฒŒ ๊ณ„์‚ฐํ•ด? $y_i$๋ฅผ ์‹ค์ˆ˜ํ™”์‹œํ‚ค์ž ์œ„ ๋กœ๊ทธ๊ฐ€๋Šฅ๋„ํ•จ์ˆ˜๋ฅผ ฮฒ์— ๋Œ€ํ•ด์„œ ์ตœ๋Œ€ํ™”์‹œํ‚ค๋ฉด ๊ทธ ๊ฐ’์ด ์ถ”์ •๋Ÿ‰์ด ๋œ๋‹ค. - ๋ฐ‘์—์„œ family์— ๋ฐ˜์‘๋ณ€์ˆ˜์˜ ํŠน์„ฑ์„ ์ž…๋ ฅํ•ด์ฃผ์ž - continuous? binomial? ``` lmod <- glm(chd ~ height + cigs, family = binomial, wcgs) summary(lmod) ``` - $g(\hat{p}_i) = log \frac{\hat{p}_i}{1-\hat{p}_i} = -4.50 + 0.025 h + 0.023 c$ - ํ‚ค๊ฐ€ ์ปค์ง€๋ฉด, ๋‹ด๋ฐฐ๋ฅผ ๋งŽ์ด ํ”ผ๋ฉด ์งˆํ™˜ ๊ฒฐ๋ฆด ํ™•๋ฅ ์ด ์ฆ๊ฐ€ํ•˜๋Š”๊ฐ€? - ๋ช…ํ™•ํžˆ ๋งํ•˜๋ฉด $log \frac{\hat{p}_i}{1-\hat{p}_i}$์ด ์ฆ๊ฐ€ํ•˜์ง€ - monotone increasing function ![](https://th.bing.com/th/id/OIP.3X_qmy8T6dbFWi8s1s6_aQAAAA?w=191&h=180&c=7&r=0&o=5&dpr=1.12&pid=1.7) - ์ฆ‰, h,c๊ฐ€ ์ฆ๊ฐ€ํ•˜๋ฉด $\hat{p}_i$๊ฐ€ ์ฆ๊ฐ€ํ•œ๋‹ค๊ณ  ํ•ด์„๊ฐ€๋Šฅ, ์ฆ‰ ๋ฐฉํ–ฅ์œผ๋กœ ํ•ด์„๊ฐ€๋Šฅ null deviance์—์„œ residual deviance๋กœ 32.3 ์ •๋„ ๊ฐ์†Œํ–ˆ๋‹ค. 
- null deviance๋Š” parameter ๋‹ค ๋บ€ ๊ฐ€์žฅ ์ž‘์€ deviance๋ผ๊ณ  ์ƒ๊ฐ - residual deviance = $D_M = -2(\log L_M - \log L_s)$ ```r Null deviance: 1781.2 on 3153 degrees of freedom Residual deviance: 1749.0 on 3151 degrees of freedom ``` ``` (beta <- coef(lmod)) plot(jitter(y,0.1) ~ jitter(height), wcgs, xlab="Height", ylab="Heart Disease",pch=".") curve(ilogit(beta[1] + beta[2]*x + beta[3]*0),add=TRUE) # predicted curve with non-smokers curve(ilogit(beta[1] + beta[2]*x + beta[3]*20),add=TRUE,lty=2) # predicted curve with smokers ``` - ํก์—ฐ์„ ์•ˆํ•˜๋Š” ์ง‘๋‹จ๊ณผ - $\hat{\beta}_0 + \hat{\beta}_1 \times h + \hat{\beta}_2 \times 0$ - ํก์—ฐ์„ ํ•œ ๊ฐ‘ํ•˜๋Š” ์ง‘๋‹จ - $\hat{\beta}_0 + \hat{\beta}_1 \times h + \hat{\beta}_2 \times 20$ - ํก์—ฐ๋Ÿ‰์— ๋”ฐ๋ผ ์–ด๋–ป๊ฒŒ ๋ณ€ํ• ๊นŒ? - $\hat{\beta})_1 >0$ ``` plot(jitter(y,0.1) ~ jitter(cigs), wcgs, xlab="Cigarette Use", ylab="Heart Disease",pch=".") curve(ilogit(beta[1] + beta[2]*60 + beta[3]*x),add=TRUE) # predicted curve with short men curve(ilogit(beta[1] + beta[2]*78 + beta[3]*x),add=TRUE,lty=2) # predicted curve with tall men ``` - ํ‚ค ์ž‘์€ ์ง‘๋‹จ๊ณผ - $\hat{\beta}_0 + \hat{\beta}_1 \times 60 + \hat{\beta}_2 \times c$ - ํ‚ค ํฐ ์ง‘๋‹จ - $\hat{\beta}_0 + \hat{\beta}_1 \times 78 + \hat{\beta}_2 \times c$ - ํ‚ค์— ๋”ฐ๋ผ ์–ด๋–ป๊ฒŒ ๋ณ€ํ• ๊นŒ? ๊ณ„์ˆ˜์— ๋Œ€ํ•œ ํ•ด์„์€ ์ผ๋ฐ˜์ ์ธ ์„ ํ˜•๋ชจํ˜•๊ณผ ์œ ์‚ฌํ•˜๋‹ค. ํ•˜์ง€๋งŒ, Y์™€ ์ง์ ‘ ์—ฐ๊ฒฐํ•˜์—ฌ ํ•ด์„ํ•  ์ˆ˜ ์—†๊ณ  ์—ฐ๊ฒฐํ•จ์ˆ˜์™€ ํ•จ๊ป˜ ํ•ด์„ํ•˜์—ฌํ•จ ํ•จ์„ ์œ ์˜ํ•˜์—ฌ๋ผ. - ์ž์„ธํ•œ ํ•ด์„์„ ์œ„ํ•ด ์—ฐ๊ด€์„ฑ์˜ ๋ฐฉํ–ฅ์€ ์—ฐ๊ฒฐํ•จ์ˆ˜์™€ ์ƒ๊ด€์—†์ด ํ•  ์ˆ˜ ์žˆ๋‹ค.(why?) ### 1.4 ์˜ค์ฆˆ : Interpreting Odds ๋กœ์ง€์Šคํ‹ฑ ๋ชจํ˜•์€ ์˜ํ•™๋ถ„์•ผ์—์„œ ํŠนํžˆ ๋„๋ฆฌ ํ™œ์šฉ๋œ๋‹ค. ๊ทธ ์ด์œ  ์ค‘ ํ•˜๋‚˜๋Š” ์˜ค์ฆˆ๊ฐ’ ํ˜น์€ ์˜ค์ฆˆ๋น„์— ๋Œ€ํ•œ ํ•ด์„์ด ๋ชจํ˜•์œผ๋กœ๋ถ€ํ„ฐ directํ•˜๊ฒŒ ๊ฐ€๋Šฅํ•˜๊ธฐ ๋•Œ๋ฌธ์ด๋‹ค. ์˜ค์ฆˆ๊ฐ’์€ chance์— ๋Œ€ํ•œ ํ™•๋ฅ ์˜ ๋Œ€์ฒด์žฌ๋กœ ๋ณผ ์ˆ˜ ์žˆ๋‹ค. 
$$Odd=\frac{p}{1โˆ’p} โˆˆ(0,โˆž)$$ - ex) $p=0.8, 1-p=0.2$ - $\frac{p}{1-p} = \frac{0.8}{0.2} = 4$ - ์ด๊ธธ ํ™•๋ฅ ์ด ์งˆ ํ™•๋ฅ ์˜ 4๋ฐฐ - ex) $p=0.75, 1-p=0.25$ - $\frac{p}{1-p} = \frac{0.75}{0.25} = 3$ - ์ด๊ธธ ํ™•๋ฅ ์ด ์งˆ ํ™•๋ฅ ์˜ 3๋ฐฐ ๋งŒ์•ฝ ์„ค๋ช…๋ณ€์ˆ˜๊ฐ€ ๋‘ ๊ฐœ๋ผ๋ฉด log-odds๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์ด ํ‘œํ˜„๋œ๋‹ค. $$log(\frac{p}{1โˆ’p})=ฮฒ_0+ฮฒ_1x_1+ฮฒ_2x_2$$ ๋•Œ๋กœ๋Š” ์„œ๋กœ ๋‹ค๋ฅธ ๊ฐœ์ฒด๋“ค์˜ ์˜ค์ฆˆ๊ฐ’์„ ๋น„๊ตํ•˜๊ณ ์ž ํ•œ๋‹ค. ๋งŒ์•ฝ, $x_1$์ด 1๋‹จ์œ„๋งŒํผ ๋ณ€ํ•œ๋‹ค๋ฉด ํ˜น์€ $x_1$์ด 0๊ณผ 1์˜ ๊ฐ’์„ ๊ฐ€์ง€๋Š” factor๋ผ๋ฉด ๋‘ ๊ฒฝ์šฐ์˜ ์˜ค์ฆˆ๊ฐ’์˜ ๋ณ€ํ™”๋Š” $exp(ฮฒ_1)$์ด ๋œ๋‹ค. ||$x_1$|$x_2$| |---|---|---| |A|65|10| |B|65|11| $\log \frac{p_A}{1-p_A} = \beta_0 + \beta_1 10 + \beta_2 65$ $\log \frac{p_B}{1-p_B} = \beta_0 + \beta_1 11 + \beta_2 65$ $\to \log \frac{p_B}{1-p_B} - \log \frac{p_A}{1-p_A} = \beta_1$ $\frac{\frac{p_B}{1-p_B}}{\frac{p_A}{1-p_A}} = exp(\beta_1)$ - $x$๊ฐ€ 1 ์ฆ๊ฐ€ํ–ˆ์„๋•Œ ์˜ค์ฆˆ๊ฐ’์˜ ๋ณ€ํ™” ``` exp(beta) exp(beta[3]*20) ``` ์ด๋ฅผ ์ผ๋ฐ˜ํ™”ํ•˜์—ฌ ์˜ค์ฆˆ๋น„(odds ratio)๋กœ ์“ฐ๊ธฐ๋„ ํ•œ๋‹ค. $$Odds ratio (OR)=\frac{p_1}{1โˆ’p_1} \big/ \frac{p_2}{1โˆ’p_2}, \begin{cases} 1 & 1\approx2\\ >1 &1>2\\ <1 &1<2\end{cases}$$ ์˜ค์ฆˆ๋น„๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์ •์˜๋˜๋Š” ์ƒ๋Œ€์œ„ํ—˜๋„(relative risk)์™€๋Š” ์•ฝ๊ฐ„ ๋‹ค๋ฅด๋‹ค. $$Relative risk (RR)=\frac{p_1}{p_2}, \begin{cases} 1 & 1\approx2\\ >1 &1>2\\ <1 &1<2\end{cases}$$ ์ง๊ด€์ ์œผ๋กœ๋Š” ์ƒ๋Œ€์œ„ํ—˜๋„๊ฐ€ ์ดํ•ดํ•˜๊ธฐ ๋” ์‰ฝ๋‹ค. ํ•˜์ง€๋งŒ, ์ด ๊ฐ’์€ ์–ด๋–ค ์‹คํ—˜ํ™˜๊ฒฝ์—์„œ๋Š” ๊ณ„์‚ฐ์ด ๋ถˆ๊ฐ€๋Šฅํ•˜๋‹ค. - ์ฝ”ํ˜ธํŠธ ์—ฐ๊ตฌ - ๋ˆ, ์‹œ๊ฐ„์ด ๋งŽ์ด ๋“ ๋‹ค. - ํŠนํžˆ ํฌ๊ท€์งˆ๋ณ‘์€ ์ƒ˜ํ”Œ์ด ๋งŽ์ด ํ•„์š”ํ•˜๋‹ค. - ๊ทธ๋ฃน์„ ๋‚˜๋ˆ„์–ด ์œ„ํ—˜๋„ ์ธก์ • - Odds ratio ๊ณ„์‚ฐ ๊ฐ€๋Šฅํ•˜๋‹ค. - Rerative Risk ๊ณ„์‚ฐ ๊ฐ€๋Šฅํ•˜๋‹ค. - ํ›„ํ–ฅ์  ์—ฐ๊ตฌ - ๋‚˜์ค‘์— ๋ฌผ์–ด๋ณด๋Š” ์—ฐ๊ตฌ - Rerative Ratio ๊ตฌํ•˜๊ธฐ ํž˜๋“ค๋‹ค. 
์ œ์–ด๊ฐ€ ํž˜๋“ค๊ธฐ ๋•Œ๋ฌธ - Odds ratio ๊ณ„์‚ฐ์€ ๊ฐ€๋Šฅ ||$x_1$|$x_2$| |---|---|---| |A|68|20| |B|68|0| ``` c(ilogit(sum(beta*c(1,68,20))),ilogit(sum(beta*c(1,68,0)))) ``` - 1๋ณด๋‹ค ์ž‘๋„ค? ``` ilogit(sum(beta*c(1,68,20)))/ilogit(sum(beta*c(1,68,0))) ``` - ์งˆ๋ณ‘์— ๊ฑธ๋ฆด ํ™•๋ฅ ์ด ๋‹ด๋ฐฐ๋ฅผ20๊ฐœํ”ผ ํˆ์„๋•Œ๊ฐ€ ์•ˆ ํˆ์„๋•Œ๋ณด๋‹ค 1.5๋ฐฐ ์ •๋„ ๋†’๋‹ค. - ilogit ์€ logit์˜ ์—ญํ•จ์ˆ˜ ์—ฌ๊ธฐ์„œ๋Š” ์ƒ๋Œ€์œ„ํ—˜๋„์™€ ์˜ค์ฆˆ๋น„์˜ ๊ฐ’์ด ๊ต‰์žฅํžˆ ๋น„์Šทํ•˜๋‹ค. ํฌ๋ฐ•ํ•˜๊ฒŒ ๋ฐœ์ƒํ•˜๋Š” ์‚ฌ๊ฑด์˜ ๊ฒฝ์šฐ ์œ„ ๋‘ ๊ฐ’์€ ๋ณดํ†ต ํฌ๊ฒŒ ์ฐจ์ด๋‚˜์ง€ ์•Š์œผ๋‚˜ ๊ทธ๋ ‡์ง€ ์•Š์œผ๋ฉด ๋งค์šฐ ๋‹ฌ๋ผ์ง„๋‹ค. - ๋งŒ์•ฝ, $p_1. p_2$๊ฐ€ ์ž‘๋‹ค๋ฉด? $p_1,p_2 \approx 0$? $\frac{\frac{p_1}{1-p_1}}{\frac{p_2}{1-p_2}} \approx \frac{p_1}{p_2} \neq \frac{p_1}{p_2}$ - ๋„ˆ๋ฌด ์ž‘์œผ๋ฉด ๊ฐ’์ด ๋น„์Šทํ•˜๊ฒŒ ๋‚˜์˜จ๋‹ค. - ์ปค์ง€๋ฉด ๋น„์Šทํ•˜์ง€ ์•Š๊ฒ ์ง€๋งŒ, ์ ์–ด๋„ ์–‘/์Œ์ˆ˜๋Š” ๊ฐ™๋‹ค. ``` p1 = 0.05;p2 = 0.03 p2/p1; (p2/(1-p2))/(p1/(1-p1)) ``` - ๋น„์Šทํ•˜๋„ค ``` p1 = 0.5;p2 = 0.3 p2/p1; (p2/(1-p2))/(p1/(1-p1)) ``` - ์ ์  ์ฐจ์ด๋‚˜๊ธฐ ์‹œ์ž‘ ``` p1 = 0.95;p2 = 0.97 p2/p1; (p2/(1-p2))/(p1/(1-p1)) ``` - ๊ฑฐ์˜ ์•ˆ ๋น„์Šทํ•˜๋‹ค๊ณ  ๋ด์•ผ์ง€. if $exp(\beta_1)>1$? - Odds ratio vs Rerative risk - $\frac{p_B}{1-p_B} > \frac{p_A}{1-p_A} \therefore p_B > p_A , \beta > 0$ if $exp(\beta_1)<1$? - Odds ratio vs Rerative risk - $\frac{p_B}{1-p_B} < \frac{p_A}{1-p_A} \therefore p_B < p_A , \beta < 0$ Odds ratio ๋Š” Rerative Risk ๋ณด๋‹ค ์ฆํญ๋˜๋Š” ๊ฒฝํ–ฅ์ด ์žˆ๋‹ค. $\frac{p_A}{1-p_A}, p_A = \frac{1}{3}, p_B=\frac{1}{4}, p_A > p_B$ - A๊ฐ€ ๋” ์œ„ํ—˜ํ•˜๋‹ค. ์ฆ‰, ์งˆ๋ณ‘๋ฐœ์ƒ ํ™•๋ฅ ์ด ๋” ํฌ๋‹ค. $\frac{\frac{p_A}{1-p_A}}{\frac{p_B}{1-p_B}} = \frac{\frac{\frac{1}{3}}{\frac{2}{3}}}{\frac{\frac{1}{4}}{\frac{3}{4}}} = \frac{\frac{1}{2}}{\frac{1}{3}} = \frac{3}{2} = 1.5$ - ์ฆ‰, A์˜ Odds์™€ B์˜ Odds ์‚ฌ์ด์˜ ratio - A์˜ Odds๊ฐ€ b์˜ Odds ๋ณด๋‹ค 1.5๋งŒํผ ํฌ๋‹ค, $\frac{p_A}{p_B} = \frac{\frac{1}{3}}{\frac{1}{4}} = \frac{4}{3} = 1.333$ - ์›๋ž˜ 1.3๋ฐฐ์ธ๋ฐ Odds ratio๋Š” 1.5? - ์ฆํญ๋˜์—ˆ๋„ค? if $p_A = p_B$? 
- Odds ratio = 1 - Reratve Risk = 1 ### 1.5 Inference ์ถ”์ • ์ดํ›„์—๋Š” ์ ์ ˆํ•œ ํ†ต๊ณ„์  ์ถ”๋ก  ๊ณผ์ •์ด ํ•„์š”ํ•  ์ˆ˜ ์žˆ๋‹ค. ์ด๋ฅผ ์œ„ํ•ด์„œ๋Š” ์ž”์ฐจ์ œ๊ณฑํ•ฉ์„ ์ผ๋ฐ˜ํ™”ํ•œ ๊ฐœ๋…์ด ํ•„์š”ํ•˜๋‹ค. ์„ ํ˜•๋ชจํ˜•์—์„œ ์ž”์ฐจ์ œ๊ณฑํ•ฉ์€ ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์ •์˜๋˜์—ˆ๋‹ค. $$โˆ‘^{n}_{i=1}(y_iโˆ’\hat{y_i})^2$$ - $s(\beta) \sum^{n}_{i=1}(y_i - X_{i}^{\top} \beta )^2$ - $s(\hat{\beta} = \sum^{n}_{i=1}(y - \hat{y})^2 = \sum^{n}_{i=1} (y-X\beta)^2 = SSE$ - $-2[l_{\mathcal{M}} - l_s] \ge 0$, ๋‹จ $l_{\mathcal{M}}\le l_s$ ์ด๋Š” ์„ ํ˜•๋ชจํ˜•์ด ์ž๋ฃŒ๋ฅผ ์–ผ๋งˆ๋‚˜ ์ž˜ ์ ํ•ฉํ•˜๋Š”์ง€๋ฅผ ํ‰๊ฐ€ํ•˜๋Š” ํ•˜๋‚˜์˜ ์ธก๋„์ด๋‹ค. ์ด๋ฅผ ์ผ๋ฐ˜ํ™”ํ•˜์—ฌ Deviance๋ผ๋Š” ๊ฐœ๋…์„ ์†Œ๊ฐœํ•œ๋‹ค. ํŠน์ • ๋ชจํ˜•$(M)$์— ๋Œ€ํ•œ deviance๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์ •์˜๋œ๋‹ค. - deviance๋ฅผ ๊ฐ€์žฅ ๋งŽ์ด ์‚ฌ์šฉํ•˜๊ณ , deviance๊ฐ€ ์ž‘์„์ˆ˜๋ก ํฌํ™”๋ชจํ˜•์ด๋‹ค. - $SSE_s = 0, SSE_\mathcal{M} = \sum^{n}_{i=1} (y_i - \hat{y})^2$ $$D_M=โˆ’2 \log \big( \frac{L_{\mathcal{M}}}{L_S} \big)$$ - $-2(\log l_{\mathcal{M}} - \log l_s) \ge 0$ - $L(\hat{\beta}) = l_{\mathcal{M}}$ - $exp(\beta) = l_{\mathcal{M}}$ ์—ฌ๊ธฐ์„œ $L_{\mathcal{M}}$์€ **์ตœ๋Œ€ํ™”๋œ ๊ฐ€๋Šฅ๋„ ๊ฐ’**์„ ์˜๋ฏธํ•œ๋‹ค. $L_S$๋ž€ **ํฌํ™”๋ชจํ˜•(Saturated model)์—์„œ์˜ ์ตœ๋Œ€ํ™”๋œ ๊ฐ€๋Šฅ๋„ ๊ฐ’**์„ ์˜๋ฏธํ•œ๋‹ค. ํฌํ™”๋ชจํ˜•์ด๋ž€ ์ž๋ฃŒ ํ•˜๋‚˜๋‹น ๋ชจ์ˆ˜๋ฅผ ํ•˜๋‚˜์”ฉ ๋Œ€์‘์‹œ์ผœ perfect fit์„ ๊ฐ€์ ธ์˜ค๋Š” ๋ชจํ˜•์„ ์˜๋ฏธํ•œ๋‹ค. ์œ„ deviance๊ฐ’์ด ์ž‘์„ ์ˆ˜๋ก ๋ชจํ˜•์˜ ์ ํ•ฉ๋„๊ฐ€ ๋†’์€ ๊ฒƒ์œผ๋กœ ํ‰๊ฐ€ํ•œ๋‹ค. ์ ์ ˆํ•œ ๊ฐ€์ • ํ•˜์—์„œ deviance๊ฐ’์€ ์ ์ ˆํ•œ ์ž์œ ๋„๋ฅผ ๊ฐ€์ง€๋Š” **์นด์ด์ œ๊ณฑ ๋ถ„ํฌ**๋กœ ๊ทผ์‚ฌํ•  ์ˆ˜ ์žˆ๋Š”๋ฐ ์ด๋ฅผ ์ด์šฉํ•˜์—ฌ ๋ชจํ˜•์— ๋Œ€ํ•œ ์œ ์˜์„ฑ ๊ฒ€์ •์„ ์ˆ˜ํ–‰ํ•  ์ˆ˜ ์žˆ๊ฒŒ ๋œ๋‹ค. - $D_{\mathcal{M}} \sim \chi^2_{(K)}$ ์ฐธ๊ณ ๋กœ deviance๋Š” ์˜ค์ฐจํ•ญ์ด ์ •๊ทœ๋ถ„ํฌ์ธ ์„ ํ˜•๋ชจํ˜•์—์„œ๋Š” ์ž”์ฐจ์ œ๊ณฑํ•ฉ์˜ ์ •์˜์™€ ์ผ์น˜ํ•˜๊ฒŒ ๋จ์ด ์•Œ๋ ค์ ธ ์žˆ๋‹ค. 
R ๊ฒฐ๊ณผ์ฐฝ์—์„œ `Null deviance` ๋Š” ์„ค๋ช…๋ณ€์ˆ˜๋ฅผ ํ•˜๋‚˜๋„ ํฌํ•จ์‹œํ‚ค์ง€ ์•Š์€ ๋ชจํ˜•์—์„œ์˜ deviance๋ฅผ ์˜๋ฏธํ•˜๊ณ  `Residual deviance`๊ฐ€ ํ˜„์žฌ ์ ํ•ฉ๋œ ๋ชจํ˜•์˜ deviance๋ฅผ ์˜๋ฏธํ•œ๋‹ค. ๋งŒ์•ฝ ์ด ๋‘˜ ์‚ฌ์ด์— ํฐ ์ฐจ์ด๊ฐ€ ์žˆ๋‹ค๋ฉด ํ˜„์žฌ ๋ชจํ˜•์ด ์œ ์˜์„ฑ์ด ์žˆ๋‹ค๋Š” ์˜๋ฏธ์ด๋‹ค (why?). ``` 1-pchisq(32.2,2) # differnece between deviances, difference between models ``` - nulldeviance 17812 - deviance 1749 = 32.2 - ์œ ์˜๋ฏธํ•œ ๋ณ€ํ™”์ผ๊นŒ? - $H_0 : \beta_1 \in \beta_2 = 0$ vs $H_1 : not H_0$ ๋ชจํ˜•์€ ์œ ์˜์ˆ˜์ค€ 0.05์—์„œ ์œ ์˜ํ•˜๋‹ค. ๊ฐœ๋ณ„๋ณ€์ˆ˜์˜ ์œ ์˜์„ฑ์€ anovaํ•จ์ˆ˜๋ฅผ ์ด์šฉํ•˜์—ฌ ๊ฒ€์ •ํ•  ์ˆ˜ ์žˆ๋‹ค. ``` lmodc <- glm(chd ~ cigs, family = binomial, wcgs) anova(lmodc,lmod, test="Chi") ``` - p๊ฐ’์€ 0.3374๋กœ ์œ ์˜๋ฏธํ•œ ๋ณ€ํ™”(D๊ฐ์†Œ)๋ฅผ ๊ฐ€์ ธ์˜ค์ง€ ์•Š์•˜๋‹ค. - ์ž์œ ๋„๋„ 1๋กœ, ํŒŒ๋ผ๋ฉ”ํ„ฐ ์ฐจ์ด๋„ 1์ด๋‹ค. - $H_0: \beta_\eta$ vs $H_1 : \beta_\eta \ne 0$ ``` confint(lmod) # confidence intervals ``` ### 1.6 Goodness of fit - logistic์˜ ํ•„์ˆ˜ step์€ ์•„๋‹ˆ๋‹ค. - ๊ฐ€์žฅ ์ž˜ ์ ํ•ฉํ•˜๋Š” $\hat{\beta}$ ์ž˜ ์ฐพ์•˜๋‚˜? ``` library(glmtoolbox) hltest(lmod) ``` - ์œ„ ๊ฒฐ๊ณผ ๋ณด๋Š”๋ฐ obserded์™€ expected ๊ฐ’์˜ ์ฐจ์ด๊ฐ€ ๋„ˆ๋ฌด ์ปค์„œ ํ™•์ธ ํ›„ ์•Œ๋ ค์ฃผ์‹ ๋Œ€~ - p๊ฐ’๋„ ์ข‹์ง€ ์•Š์•˜๋‹ค.(๋‚ฎ์•˜๋‹ค.) The logistic model fits data well? : Hosmer-Lemeshow test https://en.wikipedia.org/wiki/Hosmer%E2%80%93Lemeshow_test ### 1.7 Other link functions ๋กœ์ง€์Šคํ‹ฑ ๋ชจํ˜•์ด ๊ฐ€์žฅ ํญ๋„“๊ฒŒ ์“ฐ์ด์ง€๋งŒ ๋‹ค๋ฅธ ์—ฐ๊ฒฐํ•จ์ˆ˜๋“ค์„ ๊ณ ๋ คํ•  ์ˆ˜๋„ ์žˆ๋‹ค. ๋‹จ, ์—ฐ๊ฒฐํ•จ์ˆ˜๋Š” ๋‹ค์Œ์˜ ์„ฑ์งˆ์„ ๋งŒ์กฑํ•ด์•ผ ํ•  ๊ฒƒ์ด๋‹ค. - ์ฆ๊ฐ€ํ•จ์ˆ˜ - ์ ์ ˆํ•œ range๋ฅผ ๋ณด์žฅ - $g(0,1) \to \cal{R}$ ์‹ค์ˆ˜ ์ „์ฒด๋ฅผ ์•„์šฐ๋ฅด๋ฉด ์ข‹๊ฒ ๋‹ค. ๋Œ€์ฒด์—ฐ๊ฒฐํ•จ์ˆ˜๋กœ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋Š” ๊ฒƒ๋“ค์—๋Š” ๋‹ค์Œ์ด ์žˆ๋‹ค. - Probit : $g=ฮฆ^{โˆ’1}(p) , ฮฆ : N(0,1)$์˜ CDF. - CDF $ฮฆ: \cal{R} \to (0,1)$ - $ฮฆ^{-1} : (0,1) \to \cal{R}$ - Complementary log-log : $g=log(โˆ’log(1โˆ’p))$. 
- $g(0,1) \to \cal{R}$ - Cauchit : $g=\tan(ฯ€(pโˆ’0.5))$ ``` g = seq(-5,5,0.01) plot(g,ilogit(g),type="l") lines(g,pnorm(g),col=2) lines(g,1-exp(-exp(g)),col=3) lines(g,atan(g)/pi + 0.5,col=4) abline(v=0,h=0.5,col=2,lty="dashed") ``` - ์–ด๋–ค link function์„ ์‚ฌ์šฉํ•ด์•ผ ํ•˜๋Š”๊ฐ€? ๋‹ต์€ ์—†๋‹ค. - ๊ฒ€์€์ƒ‰ ์„ : logit link - ์ (0,0.5)์— ๋Œ€ํ•ด ๋Œ€์นญ - ๋นจ๊ฐ„์ƒ‰ ์„ : Probit : $ฮฆ^{-1}(p_i) = ฮฆ(X_i \beta)$ - ์ (0,0.5)์— ๋Œ€ํ•ด ๋Œ€์นญ - ํŒŒ๋ž€์ƒ‰ ์„ : Cauchit - ์ (0,0.5)์— ๋Œ€ํ•ด ๋Œ€์นญ - ์—ฐ๋‘์ƒ‰ ์„ : Complementary log-log : $g=log(โˆ’log(1โˆ’p))$ - ๋นจ๋ฆฌ ์ฆ๊ฐ€ -> ์ฒœ์ฒœํžˆ ์ฆ๊ฐ€ - ์—ฐ๋‘์ƒ‰ ์„ ๋งŒ ๋Œ€์นญ์ด ์•„๋‹Œ ๊ฒƒ์„ ํ™•์ธ ํ•  ์ˆ˜ ์žˆ๋‹ค. - ์•ฝ๋ฌผ ๋ฐ˜์‘ ๋“ฑ ์–ด๋–ค ์—ฐ๊ฒฐํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ• ์ง€๋Š” ๋ช‡ ๊ฐ€์ง€ ์š”์†Œ์— ๋”ฐ๋ผ ๊ฒฐ์ •ํ•  ์ˆ˜ ์žˆ์ง€๋งŒ ์ •ํ•ด์ง„ ๋‹ต์€ ์—†๋‹ค. ๋กœ์ง€์Šคํ‹ฑ ๋ชจํ˜•์ด ๊ฐ€์žฅ ๋„๋ฆฌ ์“ฐ์ด๊ธฐ๋Š” ํ•˜์ง€๋งŒ ์ƒํ™ฉ์— ๋”ฐ๋ผ ๋‹ค๋ฅธ ๋ชจํ˜•์„ ๋” ์ž์—ฐ์Šค๋Ÿฝ๊ฒŒ ๋ฐ›์•„๋“ค์ด๋Š” ๋ถ„์•ผ๋„ ์กด์žฌํ•˜๋ฉฐ ์กฐ๊ธˆ ๋” ํ™•์žฅ๋œ ๋ชจํ˜•์„ ์‚ฌ์šฉํ•˜๋Š” ๊ฒฝ์šฐ์—๋Š” ํŠน์ • ์—ฐ๊ฒฐํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜๋Š” ๊ฒƒ์ด ํ•„์š”ํ•  ์ˆ˜ ์žˆ๋‹ค. ํ•œ ๊ฐ€์ง€ ๊ธฐ์–ตํ•  ๊ฒƒ์€ cloglog ์—ฐ๊ฒฐํ•จ์ˆ˜๋Š” ๋Œ€์นญ์„ฑ์งˆ์„ ๊ฐ€์ง€์ง€ ์•Š๋Š”๋‹ค๋Š” ์ ์ด๋‹ค. ``` lmod_l <- glm(chd ~ cigs + height, family = binomial, wcgs) lmod_c <- glm(chd ~ cigs + height, family = binomial(link = cloglog), wcgs) lmod_p <- glm(chd ~ cigs + height, family = binomial(link = probit), wcgs) summary(lmod_l) ``` - Complementary log-log ``` summary(lmod_c) ``` - prob ``` summary(lmod_p) ``` - estimate๊ฐ’์ด ๋‹ฌ๋ผ์กŒ๋‹ค? - ํ•˜์ง€๋งŒ ์‹ค์ œ๋กœ probability๋ฅผ ๋‹ค๋ฅด๊ฒŒ ์ฃผ์ง„ ์•Š์•˜๋‹ค. - ์ ํ•ฉ๋„๊ฐ€ `1748.7`๋กœ ๊ฐ€์žฅ ์ข‹๋‹ค๊ณ  ๋ณผ ์ˆ˜ ์žˆ์ง€๋งŒ. ์†Œ์ˆ˜์  ์ฐจ์ด ๋ฐ–์— ๋‚˜์ง€ ์•Š์•˜์ž. - ์ ํ•ฉ๋„ ์ข‹์€ ๊ฒƒ์„ ์ฐพ๋Š”๊ฒŒ ์ข‹์œผ๋‚˜, ํฐ ์ฐจ์ด๊ฒŒ ์—†๋‹ค๋ฉด logistic์„ ์“ฐ์ž!
github_jupyter
# Introduction Do higher film budgets lead to more box office revenue? Let's find out if there's a relationship using the movie budgets and financial performance data that I've scraped from [the-numbers.com](https://www.the-numbers.com/movie/budgets) on **May 1st, 2018**. <img src=https://i.imgur.com/kq7hrEh.png> # Import Statements ``` import pandas as pd import matplotlib.pyplot as plt ``` # Notebook Presentation ``` pd.options.display.float_format = '{:,.2f}'.format from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() ``` # Read the Data ``` data = pd.read_csv('cost_revenue_dirty.csv') ``` # Explore and Clean the Data **Challenge**: Answer these questions about the dataset: 1. How many rows and columns does the dataset contain? 2. Are there any NaN values present? 3. Are there any duplicate rows? 4. What are the data types of the columns? ``` ``` ### Data Type Conversions **Challenge**: Convert the `USD_Production_Budget`, `USD_Worldwide_Gross`, and `USD_Domestic_Gross` columns to a numeric format by removing `$` signs and `,`. <br> <br> Note that *domestic* in this context refers to the United States. ``` ``` **Challenge**: Convert the `Release_Date` column to a Pandas Datetime type. ``` ``` ### Descriptive Statistics **Challenge**: 1. What is the average production budget of the films in the data set? 2. What is the average worldwide gross revenue of films? 3. What were the minimums for worldwide and domestic revenue? 4. Are the bottom 25% of films actually profitable or do they lose money? 5. What are the highest production budget and highest worldwide gross revenue of any film? 6. How much revenue did the lowest and highest budget films make? ``` ``` # Investigating the Zero Revenue Films **Challenge** How many films grossed $0 domestically (i.e., in the United States)? What were the highest budget films that grossed nothing? ``` ``` **Challenge**: How many films grossed $0 worldwide? 
What are the highest budget films that had no revenue internationally? ``` ``` ### Filtering on Multiple Conditions ``` ``` **Challenge**: Use the [`.query()` function](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html) to accomplish the same thing. Create a subset for international releases that had some worldwide gross revenue, but made zero revenue in the United States. Hint: This time you'll have to use the `and` keyword. ``` ``` ### Unreleased Films **Challenge**: * Identify which films were not released yet as of the time of data collection (May 1st, 2018). * How many films are included in the dataset that have not yet had a chance to be screened in the box office?ย  * Create another DataFrame called data_clean that does not include these films. ``` # Date of Data Collection scrape_date = pd.Timestamp('2018-5-1') ``` ### Films that Lost Money **Challenge**: What is the percentage of films where the production costs exceeded the worldwide gross revenue? ``` ``` # Seaborn for Data Viz: Bubble Charts ``` ``` ### Plotting Movie Releases over Time **Challenge**: Try to create the following Bubble Chart: <img src=https://i.imgur.com/8fUn9T6.png> ``` ``` # Converting Years to Decades Trick **Challenge**: Create a column in `data_clean` that has the decade of the release. <img src=https://i.imgur.com/0VEfagw.png width=650> Here's how: 1. Create a [`DatetimeIndex` object](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DatetimeIndex.html) from the Release_Date column. 2. Grab all the years from the `DatetimeIndex` object using the `.year` property. <img src=https://i.imgur.com/5m06Ach.png width=650> 3. Use floor division `//` to convert the year data to the decades of the films. 4. Add the decades as a `Decade` column to the `data_clean` DataFrame. 
``` ``` ### Separate the "old" (before 1969) and "New" (1970s onwards) Films **Challenge**: Create two new DataFrames: `old_films` and `new_films` * `old_films` should include all the films before 1969 (up to and including 1969) * `new_films` should include all the films from 1970 onwards * How many films were released prior to 1970? * What was the most expensive film made prior to 1970? ``` ``` # Seaborn Regression Plots ``` ``` **Challenge**: Use Seaborn's `.regplot()` to show the scatter plot and linear regression line against the `new_films`. <br> <br> Style the chart * Put the chart on a `'darkgrid'`. * Set limits on the axes so that they don't show negative values. * Label the axes on the plot "Revenue in \$ billions" and "Budget in \$ millions". * Provide HEX colour codes for the plot and the regression line. Make the dots dark blue (#2f4b7c) and the line orange (#ff7c43). Interpret the chart * Do our data points for the new films align better or worse with the linear regression than for our older films? * Roughly how much would a film with a budget of $150 million make according to the regression line? ``` ``` # Run Your Own Regression with scikit-learn $$ REV \hat ENUE = \theta _0 + \theta _1 BUDGET$$ ``` ``` **Challenge**: Run a linear regression for the `old_films`. Calculate the intercept, slope and r-squared. How much of the variance in movie revenue does the linear model explain in this case? ``` ``` # Use Your Model to Make a Prediction We just estimated the slope and intercept! Remember that our Linear Model has the following form: $$ REV \hat ENUE = \theta _0 + \theta _1 BUDGET$$ **Challenge**: How much global revenue does our model estimate for a film with a budget of $350 million? ``` ```
github_jupyter
``` import os import matplotlib.pyplot as plt import glob from PIL import Image import numpy as np from sklearn.utils import shuffle from tensorflow.python import keras from tensorflow.python.keras import Sequential from tensorflow.python.keras.layers import Dense, InputLayer, Conv2D, MaxPool2D, Flatten, BatchNormalization from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split import keras.backend as K from keras.callbacks import History loss_ges = np.array([]) val_loss_ges = np.array([]) %matplotlib inline np.set_printoptions(precision=4) np.set_printoptions(suppress=True) Input_dir='data_resize_all' files = glob.glob(Input_dir + '/*.*') x_data = [] y_data = [] for aktfile in files: test_image = Image.open(aktfile) test_image = np.array(test_image, dtype="float32") test_image = np.reshape(test_image, (32,32,3)) base = os.path.basename(aktfile) zahl = (int(base[8:10])) / 100 x_data.append(test_image) zw = np.array([zahl]) y_data.append(zw) x_data = np.array(x_data) y_data = np.array(y_data) print(x_data.shape) print(y_data.shape) x_data, y_data = shuffle(x_data, y_data) X_train, X_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2) model = Sequential() model.add(BatchNormalization(input_shape=(32,32,3))) model.add(Conv2D(8, (5, 5), input_shape=(32,32,3), padding='same', activation="relu")) model.add(MaxPool2D(pool_size=(4,4))) model.add(Conv2D(4, (5, 5), padding='same')) model.add(MaxPool2D(pool_size=(4,4))) model.add(Conv2D(4, (3, 3), padding='same')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Flatten()) model.add(Dense(16,activation="relu")) model.add(Dense(2,activation="linear")) model.add(Dense(1)) model.summary() def Periodic_Loss(y_true, y_pred): dif1 = K.abs(y_pred - y_true) dif2 = K.abs(dif1 + K.constant(1)) # dif3 = K.abs(dif1 + K.constant(-1)) dif = K.minimum(dif1, dif2) # dif = K.minimum(dif, dif3) ret = K.mean(K.square(dif), axis=-1) return ret 
model.compile(loss=Periodic_Loss, optimizer=keras.optimizers.Adadelta(), metrics = ["accuracy"]) #model.compile(loss=keras.losses.mean_squared_error, optimizer=keras.optimizers.Adadelta(), metrics = ["accuracy"]) Batch_Size = 8 Epoch_Anz = 20 Shift_Range = 0 Brightness_Range = 0.3 datagen = ImageDataGenerator(width_shift_range=[-Shift_Range,Shift_Range], height_shift_range=[-Shift_Range,Shift_Range],brightness_range=[1-Brightness_Range,1+Brightness_Range]) train_iterator = datagen.flow(X_train, y_train, batch_size=Batch_Size) validation_iterator = datagen.flow(X_test, y_test, batch_size=Batch_Size) history = model.fit_generator(train_iterator, validation_data = validation_iterator, epochs = Epoch_Anz) loss_ges = np.append(loss_ges, history.history['loss']) val_loss_ges = np.append(val_loss_ges, history.history['val_loss']) plt.semilogy(history.history['loss']) plt.semilogy(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train','eval'], loc='upper left') plt.show() Input_dir='data_resize_all' files = glob.glob(Input_dir + '/*.*') res = [] for aktfile in files: base = os.path.basename(aktfile) zahl = (int(base[8:10])) / 100 test_image = Image.open(aktfile) test_image = np.array(test_image, dtype="float32") img = np.reshape(test_image,[1,32,32,3]) classes = model.predict(img) zw1 = zahl zw2 = round(classes[0][0], 2) zw3 = round(classes[0][0] - zahl, 2) zw4a = abs(zw3) zw4b = abs(zw3+1) zw4c = abs(zw3-1) zw4 = zw3 if zw4b < zw4a: zw4 = zw3+1 zw4a = zw4b if zw4c < zw4a: zw4 = zw3-1 res.append(np.array([zw1, zw2, zw3, zw4])) # print(base, ', ', zw1, ', ', round(zw2, 2), ', ', round(zw3, 2), ', ', round(zw4, 2)) res = np.asarray(res) statistic = np.array([np.mean(res[:,3]), np.std(res[:,3]), np.min(res[:,3]), np.max(res[:,3])]) print(statistic) res_step_1 = res plt.plot(res[:,0]) plt.plot(res[:,1]) plt.title('Result') plt.ylabel('Analog Value') plt.xlabel('#Picture') plt.legend(['real','model'], loc='upper left') 
plt.show()

# Wrapped deviation per picture for run 1.
plt.plot(res[:,3])
plt.title('Deviation')
plt.ylabel('Deviation from expected value')
plt.xlabel('#Picture')
plt.legend(['model'], loc='upper left')
plt.show()

# --- Training run 2: continue training WITH shift augmentation ---------------
# The model is NOT re-initialized; this run fine-tunes the weights from run 1.
Batch_Size = 8
Epoch_Anz = 40          # longer run than step 1
Shift_Range = 1         # now also shift images by +-1 pixel
Brightness_Range = 0.3

datagen = ImageDataGenerator(width_shift_range=[-Shift_Range,Shift_Range], height_shift_range=[-Shift_Range,Shift_Range],brightness_range=[1-Brightness_Range,1+Brightness_Range])
train_iterator = datagen.flow(X_train, y_train, batch_size=Batch_Size)
validation_iterator = datagen.flow(X_test, y_test, batch_size=Batch_Size)
history = model.fit_generator(train_iterator, validation_data = validation_iterator, epochs = Epoch_Anz)

# Append run-2 losses to the notebook-wide history.
loss_ges = np.append(loss_ges, history.history['loss'])
val_loss_ges = np.append(val_loss_ges, history.history['val_loss'])

plt.semilogy(history.history['loss'])
plt.semilogy(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','eval'], loc='upper left')
plt.show()

# --- Re-evaluate on the full dataset (same procedure as after run 1) ---------
Input_dir='data_resize_all'
files = glob.glob(Input_dir + '/*.*')
res = []
for aktfile in files:
    base = os.path.basename(aktfile)
    zahl = (int(base[8:10])) / 100
    test_image = Image.open(aktfile)
    test_image = np.array(test_image, dtype="float32")
    img = np.reshape(test_image,[1,32,32,3])
    classes = model.predict(img)
    zw1 = zahl                             # expected value
    zw2 = round(classes[0][0], 2)          # predicted value
    zw3 = round(classes[0][0] - zahl, 2)   # raw deviation
    # Wrap the deviation onto the circular scale (smallest |value| among
    # zw3, zw3+1, zw3-1).
    zw4a = abs(zw3)
    zw4b = abs(zw3+1)
    zw4c = abs(zw3-1)
    zw4 = zw3
    if zw4b < zw4a:
        zw4 = zw3+1
        zw4a = zw4b
    if zw4c < zw4a:
        zw4 = zw3-1
    res.append(np.array([zw1, zw2, zw3, zw4]))
#    print(base, ', ', zw1, ', ', round(zw2, 2), ', ', round(zw3, 2), ', ', round(zw4, 2))

res = np.asarray(res)
# [mean, std, min, max] of the wrapped deviation after run 2.
statistic = np.array([np.mean(res[:,3]), np.std(res[:,3]), np.min(res[:,3]), np.max(res[:,3])])
print(statistic)
# NOTE(review): this overwrites the run-1 snapshot with run-2 results;
# presumably it was meant to be res_step_2 -- kept as in the original.
res_step_1 = res

plt.plot(res[:,0])
plt.plot(res[:,1])
plt.title('Result')
plt.ylabel('Analog Value')
plt.xlabel('#Picture')
plt.legend(['real','model'], loc='upper left')
plt.show()

plt.plot(res[:,3])
# Wrapped deviation per picture for run 2, zoomed to +-0.3.
plt.title('Deviation')
plt.ylabel('Deviation from expected value')
plt.xlabel('#Picture')
plt.legend(['model'], loc='upper left')
plt.ylim(-0.3, 0.3)
plt.show()

# Persist the trained model (HDF5 format).
model.save("test.h5")

# Combined loss history over both training runs (60 epochs total).
plt.semilogy(loss_ges)
plt.semilogy(val_loss_ges)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','eval'], loc='upper left')
# NOTE(review): limits are given high-to-low, which inverts the y-axis --
# possibly intended (loss decreasing "upwards"), possibly a typo for
# plt.ylim(5E-5, 1E-1); kept as in the original.
plt.ylim(1E-1, 5E-5)
plt.show()
```
github_jupyter